From 82309445ad5c2c113f42e27f032b4c9adbf355ed Mon Sep 17 00:00:00 2001 From: "whesse@chromium.org" Date: Wed, 25 Aug 2010 09:44:44 +0000 Subject: [PATCH] Move code stub implementations from codegen-[platform].cc files to new code-stub-[platform].cc files, and declarations to new code-stub-[platform].h files. Remaining work is to do the same for platform-independent code stub declarations, and to remove all dependencies on codegen header files from code stub files. Review URL: http://codereview.chromium.org/3195022 git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5338 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- src/SConscript | 3 + src/arm/code-stubs-arm.cc | 4778 ++++++++++++++++++++++++++++++++ src/arm/code-stubs-arm.h | 475 ++++ src/arm/codegen-arm.cc | 4769 +------------------------------ src/arm/codegen-arm.h | 504 ---- src/arm/full-codegen-arm.cc | 1 + src/arm/ic-arm.cc | 2 +- src/ia32/code-stubs-ia32.cc | 4539 ++++++++++++++++++++++++++++++ src/ia32/code-stubs-ia32.h | 362 +++ src/ia32/codegen-ia32.cc | 4496 +----------------------------- src/ia32/codegen-ia32.h | 321 --- src/ia32/full-codegen-ia32.cc | 1 + src/x64/code-stubs-x64.cc | 4016 +++++++++++++++++++++++++++ src/x64/code-stubs-x64.h | 392 +++ src/x64/codegen-x64.cc | 3973 +------------------------- src/x64/codegen-x64.h | 351 --- src/x64/full-codegen-x64.cc | 1 + src/x64/stub-cache-x64.cc | 1 + tools/gyp/v8.gyp | 6 + tools/v8.xcodeproj/project.pbxproj | 20 +- tools/visual_studio/v8_base.vcproj | 8 + tools/visual_studio/v8_base_arm.vcproj | 8 + tools/visual_studio/v8_base_x64.vcproj | 8 + 23 files changed, 14664 insertions(+), 14371 deletions(-) create mode 100644 src/arm/code-stubs-arm.cc create mode 100644 src/arm/code-stubs-arm.h create mode 100644 src/ia32/code-stubs-ia32.cc create mode 100644 src/ia32/code-stubs-ia32.h create mode 100644 src/x64/code-stubs-x64.cc create mode 100644 src/x64/code-stubs-x64.h diff --git a/src/SConscript b/src/SConscript index 20489b5..7fae8d4 100755 --- a/src/SConscript +++ b/src/SConscript @@ -120,6 +120,7 @@ SOURCES = { jump-target-light.cc virtual-frame-light.cc arm/builtins-arm.cc + arm/code-stubs-arm.cc arm/codegen-arm.cc arm/constants-arm.cc arm/cpu-arm.cc @@ -158,6 +159,7 @@ SOURCES = { virtual-frame-heavy.cc ia32/assembler-ia32.cc ia32/builtins-ia32.cc + ia32/code-stubs-ia32.cc ia32/codegen-ia32.cc ia32/cpu-ia32.cc ia32/debug-ia32.cc @@ -177,6 +179,7 @@ SOURCES = { virtual-frame-heavy.cc x64/assembler-x64.cc x64/builtins-x64.cc + x64/code-stubs-x64.cc x64/codegen-x64.cc x64/cpu-x64.cc x64/debug-x64.cc diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc new file mode 100644 index 0000000..0826e0a --- /dev/null +++ b/src/arm/code-stubs-arm.cc @@ -0,0 +1,4778 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#if defined(V8_TARGET_ARCH_ARM) + +#include "bootstrapper.h" +#include "code-stubs-arm.h" +#include "codegen-inl.h" +#include "regexp-macro-assembler.h" + +namespace v8 { +namespace internal { + + +#define __ ACCESS_MASM(masm) + +static void EmitIdenticalObjectComparison(MacroAssembler* masm, + Label* slow, + Condition cc, + bool never_nan_nan); +static void EmitSmiNonsmiComparison(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* lhs_not_nan, + Label* slow, + bool strict); +static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); +static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, + Register lhs, + Register rhs); + + +void FastNewClosureStub::Generate(MacroAssembler* masm) { + // Create a new closure from the given function info in new + // space. Set the context to the current context in cp. + Label gc; + + // Pop the function info from the stack. + __ pop(r3); + + // Attempt to allocate new JSFunction in new space. + __ AllocateInNewSpace(JSFunction::kSize, + r0, + r1, + r2, + &gc, + TAG_OBJECT); + + // Compute the function map in the current global context and set that + // as the map of the allocated object. + __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); + __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); + __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); + + // Initialize the rest of the function. We don't have to update the + // write barrier because the allocated object is in new space. + __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex); + __ LoadRoot(r2, Heap::kTheHoleValueRootIndex); + __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset)); + __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset)); + __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset)); + __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); + __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); + __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); + + // Initialize the code pointer in the function to be the one + // found in the shared function info object. + __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); + __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); + + // Return result. The argument function info has been popped already. + __ Ret(); + + // Create a new closure through the slower runtime call. 
+ __ bind(&gc); + __ Push(cp, r3); + __ TailCallRuntime(Runtime::kNewClosure, 2, 1); +} + + +void FastNewContextStub::Generate(MacroAssembler* masm) { + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + + // Attempt to allocate the context in new space. + __ AllocateInNewSpace(FixedArray::SizeFor(length), + r0, + r1, + r2, + &gc, + TAG_OBJECT); + + // Load the function from the stack. + __ ldr(r3, MemOperand(sp, 0)); + + // Setup the object header. + __ LoadRoot(r2, Heap::kContextMapRootIndex); + __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ mov(r2, Operand(Smi::FromInt(length))); + __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); + + // Setup the fixed slots. + __ mov(r1, Operand(Smi::FromInt(0))); + __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX))); + __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX))); + __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX))); + __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX))); + + // Copy the global object from the surrounding context. + __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX))); + + // Initialize the rest of the slots to undefined. + __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); + for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { + __ str(r1, MemOperand(r0, Context::SlotOffset(i))); + } + + // Remove the on-stack argument and return. + __ mov(cp, r0); + __ pop(); + __ Ret(); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kNewContext, 1, 1); +} + + +void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [sp]: constant elements. + // [sp + kPointerSize]: literal index. + // [sp + (2 * kPointerSize)]: literals array. + + // All sizes here are multiples of kPointerSize. + int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0; + int size = JSArray::kSize + elements_size; + + // Load boilerplate object into r3 and check if we need to create a + // boilerplate. + Label slow_case; + __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); + __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); + __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(r3, ip); + __ b(eq, &slow_case); + + if (FLAG_debug_code) { + const char* message; + Heap::RootListIndex expected_map_index; + if (mode_ == CLONE_ELEMENTS) { + message = "Expected (writable) fixed array"; + expected_map_index = Heap::kFixedArrayMapRootIndex; + } else { + ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS); + message = "Expected copy-on-write fixed array"; + expected_map_index = Heap::kFixedCOWArrayMapRootIndex; + } + __ push(r3); + __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); + __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ LoadRoot(ip, expected_map_index); + __ cmp(r3, ip); + __ Assert(eq, message); + __ pop(r3); + } + + // Allocate both the JS array and the elements array in one big + // allocation. This avoids multiple limit checks. + __ AllocateInNewSpace(size, + r0, + r1, + r2, + &slow_case, + TAG_OBJECT); + + // Copy the JS array part. 
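+ // When length_ > 0 the elements field is skipped here on purpose; it is
+ // set below to point at the fresh copy of the elements array.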
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) { + if ((i != JSArray::kElementsOffset) || (length_ == 0)) { + __ ldr(r1, FieldMemOperand(r3, i)); + __ str(r1, FieldMemOperand(r0, i)); + } + } + + if (length_ > 0) { + // Get hold of the elements array of the boilerplate and setup the + // elements pointer in the resulting object. + __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); + __ add(r2, r0, Operand(JSArray::kSize)); + __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset)); + + // Copy the elements array. + __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize); + } + + // Return and remove the on-stack parameters. + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + __ bind(&slow_case); + __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); +} + + +// Takes a Smi and converts to an IEEE 64 bit floating point value in two +// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and +// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a +// scratch register. Destroys the source register. No GC occurs during this +// stub so you don't have to set up the frame. +class ConvertToDoubleStub : public CodeStub { + public: + ConvertToDoubleStub(Register result_reg_1, + Register result_reg_2, + Register source_reg, + Register scratch_reg) + : result1_(result_reg_1), + result2_(result_reg_2), + source_(source_reg), + zeros_(scratch_reg) { } + + private: + Register result1_; + Register result2_; + Register source_; + Register zeros_; + + // Minor key encoding in 16 bits. + class ModeBits: public BitField {}; + class OpBits: public BitField {}; + + Major MajorKey() { return ConvertToDouble; } + int MinorKey() { + // Encode the parameters in a unique 16 bit value. + return result1_.code() + + (result2_.code() << 4) + + (source_.code() << 8) + + (zeros_.code() << 12); + } + + void Generate(MacroAssembler* masm); + + const char* GetName() { return "ConvertToDoubleStub"; } + +#ifdef DEBUG + void Print() { PrintF("ConvertToDoubleStub\n"); } +#endif +}; + + +void ConvertToDoubleStub::Generate(MacroAssembler* masm) { +#ifndef BIG_ENDIAN_FLOATING_POINT + Register exponent = result1_; + Register mantissa = result2_; +#else + Register exponent = result2_; + Register mantissa = result1_; +#endif + Label not_special; + // Convert from Smi to integer. + __ mov(source_, Operand(source_, ASR, kSmiTagSize)); + // Move sign bit from source to destination. This works because the sign bit + // in the exponent word of the double has the same position and polarity as + // the 2's complement sign bit in a Smi. + STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); + __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); + // Subtract from 0 if source was negative. + __ rsb(source_, source_, Operand(0), LeaveCC, ne); + + // We have -1, 0 or 1, which we treat specially. Register source_ contains + // absolute value: it is either equal to 1 (special case of -1 and 1), + // greater than 1 (not a special case) or less than 1 (special case of 0). + __ cmp(source_, Operand(1)); + __ b(gt, ¬_special); + + // For 1 or -1 we need to or in the 0 exponent (biased to 1023). + static const uint32_t exponent_word_for_1 = + HeapNumber::kExponentBias << HeapNumber::kExponentShift; + __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); + // 1, 0 and -1 all have 0 for the second word. + __ mov(mantissa, Operand(0)); + __ Ret(); + + __ bind(¬_special); + // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5. 
+ // Gets the wrong answer for 0, but we already checked for that case above. + __ CountLeadingZeros(zeros_, source_, mantissa); + // Compute exponent and or it into the exponent register. + // We use mantissa as a scratch register here. Use a fudge factor to + // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts + // that fit in the ARM's constant field. + int fudge = 0x400; + __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge)); + __ add(mantissa, mantissa, Operand(fudge)); + __ orr(exponent, + exponent, + Operand(mantissa, LSL, HeapNumber::kExponentShift)); + // Shift up the source chopping the top bit off. + __ add(zeros_, zeros_, Operand(1)); + // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. + __ mov(source_, Operand(source_, LSL, zeros_)); + // Compute lower part of fraction (last 12 bits). + __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); + // And the top (top 20 bits). + __ orr(exponent, + exponent, + Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); + __ Ret(); +} + + +// See comment for class. +void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { + Label max_negative_int; + // the_int_ has the answer which is a signed int32 but not a Smi. + // We test for the special value that has a different exponent. This test + // has the neat side effect of setting the flags according to the sign. + STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); + __ cmp(the_int_, Operand(0x80000000u)); + __ b(eq, &max_negative_int); + // Set up the correct exponent in scratch_. All non-Smi int32s have the same. + // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). + uint32_t non_smi_exponent = + (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; + __ mov(scratch_, Operand(non_smi_exponent)); + // Set the sign bit in scratch_ if the value was negative. + __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); + // Subtract from 0 if the value was negative. + __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs); + // We should be masking the implict first digit of the mantissa away here, + // but it just ends up combining harmlessly with the last digit of the + // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get + // the most significant 1 to hit the last bit of the 12 bit sign and exponent. + ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); + const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; + __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance)); + __ str(scratch_, FieldMemOperand(the_heap_number_, + HeapNumber::kExponentOffset)); + __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance)); + __ str(scratch_, FieldMemOperand(the_heap_number_, + HeapNumber::kMantissaOffset)); + __ Ret(); + + __ bind(&max_negative_int); + // The max negative int32 is stored as a positive number in the mantissa of + // a double because it uses a sign bit instead of using two's complement. + // The actual mantissa bits stored are all 0 because the implicit most + // significant 1 bit is not stored. 
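+ // Concretely, -0x80000000 is -1.0 * 2^31, so the required bit pattern is
+ // the sign bit, a biased exponent of 1023 + 31 (0x41e) and an all-zero
+ // stored mantissa; the exponent set up above is therefore bumped by one
+ // below before being combined with the sign bit.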
+ non_smi_exponent += 1 << HeapNumber::kExponentShift; + __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent)); + __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); + __ mov(ip, Operand(0)); + __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); + __ Ret(); +} + + +// Handle the case where the lhs and rhs are the same object. +// Equality is almost reflexive (everything but NaN), so this is a test +// for "identity and not NaN". +static void EmitIdenticalObjectComparison(MacroAssembler* masm, + Label* slow, + Condition cc, + bool never_nan_nan) { + Label not_identical; + Label heap_number, return_equal; + __ cmp(r0, r1); + __ b(ne, ¬_identical); + + // The two objects are identical. If we know that one of them isn't NaN then + // we now know they test equal. + if (cc != eq || !never_nan_nan) { + // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), + // so we do the second best thing - test it ourselves. + // They are both equal and they are not both Smis so both of them are not + // Smis. If it's not a heap number, then return equal. + if (cc == lt || cc == gt) { + __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); + __ b(ge, slow); + } else { + __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); + __ b(eq, &heap_number); + // Comparing JS objects with <=, >= is complicated. + if (cc != eq) { + __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); + __ b(ge, slow); + // Normally here we fall through to return_equal, but undefined is + // special: (undefined == undefined) == true, but + // (undefined <= undefined) == false! See ECMAScript 11.8.5. + if (cc == le || cc == ge) { + __ cmp(r4, Operand(ODDBALL_TYPE)); + __ b(ne, &return_equal); + __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); + __ cmp(r0, r2); + __ b(ne, &return_equal); + if (cc == le) { + // undefined <= undefined should fail. + __ mov(r0, Operand(GREATER)); + } else { + // undefined >= undefined should fail. + __ mov(r0, Operand(LESS)); + } + __ Ret(); + } + } + } + } + + __ bind(&return_equal); + if (cc == lt) { + __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. + } else if (cc == gt) { + __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. + } else { + __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. + } + __ Ret(); + + if (cc != eq || !never_nan_nan) { + // For less and greater we don't have to check for NaN since the result of + // x < x is false regardless. For the others here is some code to check + // for NaN. + if (cc != lt && cc != gt) { + __ bind(&heap_number); + // It is a heap number, so return non-equal if it's NaN and equal if it's + // not NaN. + + // The representation of NaN values has all exponent bits (52..62) set, + // and not all mantissa bits (0..51) clear. + // Read top bits of double representation (second word of value). + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + // Test that exponent bits are all set. + __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); + // NaNs have all-one exponents so they sign extend to -1. + __ cmp(r3, Operand(-1)); + __ b(ne, &return_equal); + + // Shift out flag and all exponent bits, retaining only mantissa. + __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); + // Or with all low-bits of mantissa. 
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + __ orr(r0, r3, Operand(r2), SetCC); + // For equal we already have the right value in r0: Return zero (equal) + // if all bits in mantissa are zero (it's an Infinity) and non-zero if + // not (it's a NaN). For <= and >= we need to load r0 with the failing + // value if it's a NaN. + if (cc != eq) { + // All-zero means Infinity means equal. + __ Ret(eq); + if (cc == le) { + __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. + } else { + __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. + } + } + __ Ret(); + } + // No fall through here. + } + + __ bind(¬_identical); +} + + +// See comment at call site. +static void EmitSmiNonsmiComparison(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* lhs_not_nan, + Label* slow, + bool strict) { + ASSERT((lhs.is(r0) && rhs.is(r1)) || + (lhs.is(r1) && rhs.is(r0))); + + Label rhs_is_smi; + __ tst(rhs, Operand(kSmiTagMask)); + __ b(eq, &rhs_is_smi); + + // Lhs is a Smi. Check whether the rhs is a heap number. + __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE); + if (strict) { + // If rhs is not a number and lhs is a Smi then strict equality cannot + // succeed. Return non-equal + // If rhs is r0 then there is already a non zero value in it. + if (!rhs.is(r0)) { + __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); + } + __ Ret(ne); + } else { + // Smi compared non-strictly with a non-Smi non-heap-number. Call + // the runtime. + __ b(ne, slow); + } + + // Lhs is a smi, rhs is a number. + if (CpuFeatures::IsSupported(VFP3)) { + // Convert lhs to a double in d7. + CpuFeatures::Scope scope(VFP3); + __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); + // Load the double from rhs, tagged HeapNumber r0, to d6. + __ sub(r7, rhs, Operand(kHeapObjectTag)); + __ vldr(d6, r7, HeapNumber::kValueOffset); + } else { + __ push(lr); + // Convert lhs to a double in r2, r3. + __ mov(r7, Operand(lhs)); + ConvertToDoubleStub stub1(r3, r2, r7, r6); + __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); + // Load rhs to a double in r0, r1. + __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); + __ pop(lr); + } + + // We now have both loaded as doubles but we can skip the lhs nan check + // since it's a smi. + __ jmp(lhs_not_nan); + + __ bind(&rhs_is_smi); + // Rhs is a smi. Check whether the non-smi lhs is a heap number. + __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); + if (strict) { + // If lhs is not a number and rhs is a smi then strict equality cannot + // succeed. Return non-equal. + // If lhs is r0 then there is already a non zero value in it. + if (!lhs.is(r0)) { + __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); + } + __ Ret(ne); + } else { + // Smi compared non-strictly with a non-smi non-heap-number. Call + // the runtime. + __ b(ne, slow); + } + + // Rhs is a smi, lhs is a heap number. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Load the double from lhs, tagged HeapNumber r1, to d7. + __ sub(r7, lhs, Operand(kHeapObjectTag)); + __ vldr(d7, r7, HeapNumber::kValueOffset); + // Convert rhs to a double in d6 . + __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); + } else { + __ push(lr); + // Load lhs to a double in r2, r3. + __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); + // Convert rhs to a double in r0, r1. + __ mov(r7, Operand(rhs)); + ConvertToDoubleStub stub2(r1, r0, r7, r6); + __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + // Fall through to both_loaded_as_doubles. 
+} + + +void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) { + bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); + Register rhs_exponent = exp_first ? r0 : r1; + Register lhs_exponent = exp_first ? r2 : r3; + Register rhs_mantissa = exp_first ? r1 : r0; + Register lhs_mantissa = exp_first ? r3 : r2; + Label one_is_nan, neither_is_nan; + + __ Sbfx(r4, + lhs_exponent, + HeapNumber::kExponentShift, + HeapNumber::kExponentBits); + // NaNs have all-one exponents so they sign extend to -1. + __ cmp(r4, Operand(-1)); + __ b(ne, lhs_not_nan); + __ mov(r4, + Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), + SetCC); + __ b(ne, &one_is_nan); + __ cmp(lhs_mantissa, Operand(0)); + __ b(ne, &one_is_nan); + + __ bind(lhs_not_nan); + __ Sbfx(r4, + rhs_exponent, + HeapNumber::kExponentShift, + HeapNumber::kExponentBits); + // NaNs have all-one exponents so they sign extend to -1. + __ cmp(r4, Operand(-1)); + __ b(ne, &neither_is_nan); + __ mov(r4, + Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), + SetCC); + __ b(ne, &one_is_nan); + __ cmp(rhs_mantissa, Operand(0)); + __ b(eq, &neither_is_nan); + + __ bind(&one_is_nan); + // NaN comparisons always fail. + // Load whatever we need in r0 to make the comparison fail. + if (cc == lt || cc == le) { + __ mov(r0, Operand(GREATER)); + } else { + __ mov(r0, Operand(LESS)); + } + __ Ret(); + + __ bind(&neither_is_nan); +} + + +// See comment at call site. +static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { + bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); + Register rhs_exponent = exp_first ? r0 : r1; + Register lhs_exponent = exp_first ? r2 : r3; + Register rhs_mantissa = exp_first ? r1 : r0; + Register lhs_mantissa = exp_first ? r3 : r2; + + // r0, r1, r2, r3 have the two doubles. Neither is a NaN. + if (cc == eq) { + // Doubles are not equal unless they have the same bit pattern. + // Exception: 0 and -0. + __ cmp(rhs_mantissa, Operand(lhs_mantissa)); + __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); + // Return non-zero if the numbers are unequal. + __ Ret(ne); + + __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); + // If exponents are equal then return 0. + __ Ret(eq); + + // Exponents are unequal. The only way we can return that the numbers + // are equal is if one is -0 and the other is 0. We already dealt + // with the case where both are -0 or both are 0. + // We start by seeing if the mantissas (that are equal) or the bottom + // 31 bits of the rhs exponent are non-zero. If so we return not + // equal. + __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC); + __ mov(r0, Operand(r4), LeaveCC, ne); + __ Ret(ne); + // Now they are equal if and only if the lhs exponent is zero in its + // low 31 bits. + __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize)); + __ Ret(); + } else { + // Call a native function to do a comparison between two non-NaNs. + // Call C routine that may not cause GC or other trouble. + __ push(lr); + __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments. + __ CallCFunction(ExternalReference::compare_doubles(), 4); + __ pop(pc); // Return. + } +} + + +// See comment at call site. 
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, + Register lhs, + Register rhs) { + ASSERT((lhs.is(r0) && rhs.is(r1)) || + (lhs.is(r1) && rhs.is(r0))); + + // If either operand is a JSObject or an oddball value, then they are + // not equal since their pointers are different. + // There is no test for undetectability in strict equality. + STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); + Label first_non_object; + // Get the type of the first operand into r2 and compare it with + // FIRST_JS_OBJECT_TYPE. + __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE); + __ b(lt, &first_non_object); + + // Return non-zero (r0 is not zero) + Label return_not_equal; + __ bind(&return_not_equal); + __ Ret(); + + __ bind(&first_non_object); + // Check for oddballs: true, false, null, undefined. + __ cmp(r2, Operand(ODDBALL_TYPE)); + __ b(eq, &return_not_equal); + + __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE); + __ b(ge, &return_not_equal); + + // Check for oddballs: true, false, null, undefined. + __ cmp(r3, Operand(ODDBALL_TYPE)); + __ b(eq, &return_not_equal); + + // Now that we have the types we might as well check for symbol-symbol. + // Ensure that no non-strings have the symbol bit set. + STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); + STATIC_ASSERT(kSymbolTag != 0); + __ and_(r2, r2, Operand(r3)); + __ tst(r2, Operand(kIsSymbolMask)); + __ b(ne, &return_not_equal); +} + + +// See comment at call site. +static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* both_loaded_as_doubles, + Label* not_heap_numbers, + Label* slow) { + ASSERT((lhs.is(r0) && rhs.is(r1)) || + (lhs.is(r1) && rhs.is(r0))); + + __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); + __ b(ne, not_heap_numbers); + __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); + __ cmp(r2, r3); + __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. + + // Both are heap numbers. Load them up then jump to the code we have + // for that. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ sub(r7, rhs, Operand(kHeapObjectTag)); + __ vldr(d6, r7, HeapNumber::kValueOffset); + __ sub(r7, lhs, Operand(kHeapObjectTag)); + __ vldr(d7, r7, HeapNumber::kValueOffset); + } else { + __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); + __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); + } + __ jmp(both_loaded_as_doubles); +} + + +// Fast negative check for symbol-to-symbol equality. +static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* possible_strings, + Label* not_both_strings) { + ASSERT((lhs.is(r0) && rhs.is(r1)) || + (lhs.is(r1) && rhs.is(r0))); + + // r2 is object type of rhs. + // Ensure that no non-strings have the symbol bit set. + Label object_test; + STATIC_ASSERT(kSymbolTag != 0); + __ tst(r2, Operand(kIsNotStringMask)); + __ b(ne, &object_test); + __ tst(r2, Operand(kIsSymbolMask)); + __ b(eq, possible_strings); + __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE); + __ b(ge, not_both_strings); + __ tst(r3, Operand(kIsSymbolMask)); + __ b(eq, possible_strings); + + // Both are symbols. We already checked they weren't the same pointer + // so they are not equal. 
+ __ mov(r0, Operand(NOT_EQUAL)); + __ Ret(); + + __ bind(&object_test); + __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); + __ b(lt, not_both_strings); + __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE); + __ b(lt, not_both_strings); + // If both objects are undetectable, they are equal. Otherwise, they + // are not equal, since they are different objects and an object is not + // equal to undefined. + __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset)); + __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset)); + __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset)); + __ and_(r0, r2, Operand(r3)); + __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); + __ eor(r0, r0, Operand(1 << Map::kIsUndetectable)); + __ Ret(); +} + + +void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, + Register object, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + bool object_is_smi, + Label* not_found) { + // Use of registers. Register result is used as a temporary. + Register number_string_cache = result; + Register mask = scratch3; + + // Load the number string cache. + __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); + + // Make the hash mask from the length of the number string cache. It + // contains two elements (number and string) for each cache entry. + __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); + // Divide length by two (length is a smi). + __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); + __ sub(mask, mask, Operand(1)); // Make mask. + + // Calculate the entry in the number string cache. The hash value in the + // number string cache for smis is just the smi value, and the hash for + // doubles is the xor of the upper and lower words. See + // Heap::GetNumberStringCache. + Label is_smi; + Label load_result_from_cache; + if (!object_is_smi) { + __ BranchOnSmi(object, &is_smi); + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ CheckMap(object, + scratch1, + Heap::kHeapNumberMapRootIndex, + not_found, + true); + + STATIC_ASSERT(8 == kDoubleSize); + __ add(scratch1, + object, + Operand(HeapNumber::kValueOffset - kHeapObjectTag)); + __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); + __ eor(scratch1, scratch1, Operand(scratch2)); + __ and_(scratch1, scratch1, Operand(mask)); + + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + __ add(scratch1, + number_string_cache, + Operand(scratch1, LSL, kPointerSizeLog2 + 1)); + + Register probe = mask; + __ ldr(probe, + FieldMemOperand(scratch1, FixedArray::kHeaderSize)); + __ BranchOnSmi(probe, not_found); + __ sub(scratch2, object, Operand(kHeapObjectTag)); + __ vldr(d0, scratch2, HeapNumber::kValueOffset); + __ sub(probe, probe, Operand(kHeapObjectTag)); + __ vldr(d1, probe, HeapNumber::kValueOffset); + __ vcmp(d0, d1); + __ vmrs(pc); + __ b(ne, not_found); // The cache did not contain this value. + __ b(&load_result_from_cache); + } else { + __ b(not_found); + } + } + + __ bind(&is_smi); + Register scratch = scratch1; + __ and_(scratch, mask, Operand(object, ASR, 1)); + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + __ add(scratch, + number_string_cache, + Operand(scratch, LSL, kPointerSizeLog2 + 1)); + + // Check if the entry is the smi we are looking for. 
+ Register probe = mask; + __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); + __ cmp(object, probe); + __ b(ne, not_found); + + // Get the result from the cache. + __ bind(&load_result_from_cache); + __ ldr(result, + FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); + __ IncrementCounter(&Counters::number_to_string_native, + 1, + scratch1, + scratch2); +} + + +void NumberToStringStub::Generate(MacroAssembler* masm) { + Label runtime; + + __ ldr(r1, MemOperand(sp, 0)); + + // Generate code to lookup number in the number string cache. + GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime); + __ add(sp, sp, Operand(1 * kPointerSize)); + __ Ret(); + + __ bind(&runtime); + // Handle number to string in the runtime system if not found in the cache. + __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); +} + + +void RecordWriteStub::Generate(MacroAssembler* masm) { + __ add(offset_, object_, Operand(offset_)); + __ RecordWriteHelper(object_, offset_, scratch_); + __ Ret(); +} + + +// On entry lhs_ and rhs_ are the values to be compared. +// On exit r0 is 0, positive or negative to indicate the result of +// the comparison. +void CompareStub::Generate(MacroAssembler* masm) { + ASSERT((lhs_.is(r0) && rhs_.is(r1)) || + (lhs_.is(r1) && rhs_.is(r0))); + + Label slow; // Call builtin. + Label not_smis, both_loaded_as_doubles, lhs_not_nan; + + // NOTICE! This code is only reached after a smi-fast-case check, so + // it is certain that at least one operand isn't a smi. + + // Handle the case where the objects are identical. Either returns the answer + // or goes to slow. Only falls through if the objects were not identical. + EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); + + // If either is a Smi (we know that not both are), then they can only + // be strictly equal if the other is a HeapNumber. + STATIC_ASSERT(kSmiTag == 0); + ASSERT_EQ(0, Smi::FromInt(0)); + __ and_(r2, lhs_, Operand(rhs_)); + __ tst(r2, Operand(kSmiTagMask)); + __ b(ne, ¬_smis); + // One operand is a smi. EmitSmiNonsmiComparison generates code that can: + // 1) Return the answer. + // 2) Go to slow. + // 3) Fall through to both_loaded_as_doubles. + // 4) Jump to lhs_not_nan. + // In cases 3 and 4 we have found out we were dealing with a number-number + // comparison. If VFP3 is supported the double values of the numbers have + // been loaded into d7 and d6. Otherwise, the double values have been loaded + // into r0, r1, r2, and r3. + EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); + + __ bind(&both_loaded_as_doubles); + // The arguments have been converted to doubles and stored in d6 and d7, if + // VFP3 is supported, or in r0, r1, r2, and r3. + if (CpuFeatures::IsSupported(VFP3)) { + __ bind(&lhs_not_nan); + CpuFeatures::Scope scope(VFP3); + Label no_nan; + // ARMv7 VFP3 instructions to implement double precision comparison. + __ vcmp(d7, d6); + __ vmrs(pc); // Move vector status bits to normal status bits. + Label nan; + __ b(vs, &nan); + __ mov(r0, Operand(EQUAL), LeaveCC, eq); + __ mov(r0, Operand(LESS), LeaveCC, lt); + __ mov(r0, Operand(GREATER), LeaveCC, gt); + __ Ret(); + + __ bind(&nan); + // If one of the sides was a NaN then the v flag is set. Load r0 with + // whatever it takes to make the comparison fail, since comparisons with NaN + // always fail. 
+ if (cc_ == lt || cc_ == le) { + __ mov(r0, Operand(GREATER)); + } else { + __ mov(r0, Operand(LESS)); + } + __ Ret(); + } else { + // Checks for NaN in the doubles we have loaded. Can return the answer or + // fall through if neither is a NaN. Also binds lhs_not_nan. + EmitNanCheck(masm, &lhs_not_nan, cc_); + // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the + // answer. Never falls through. + EmitTwoNonNanDoubleComparison(masm, cc_); + } + + __ bind(¬_smis); + // At this point we know we are dealing with two different objects, + // and neither of them is a Smi. The objects are in rhs_ and lhs_. + if (strict_) { + // This returns non-equal for some object types, or falls through if it + // was not lucky. + EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); + } + + Label check_for_symbols; + Label flat_string_check; + // Check for heap-number-heap-number comparison. Can jump to slow case, + // or load both doubles into r0, r1, r2, r3 and jump to the code that handles + // that case. If the inputs are not doubles then jumps to check_for_symbols. + // In this case r2 will contain the type of rhs_. Never falls through. + EmitCheckForTwoHeapNumbers(masm, + lhs_, + rhs_, + &both_loaded_as_doubles, + &check_for_symbols, + &flat_string_check); + + __ bind(&check_for_symbols); + // In the strict case the EmitStrictTwoHeapObjectCompare already took care of + // symbols. + if (cc_ == eq && !strict_) { + // Returns an answer for two symbols or two detectable objects. + // Otherwise jumps to string case or not both strings case. + // Assumes that r2 is the type of rhs_ on entry. + EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); + } + + // Check for both being sequential ASCII strings, and inline if that is the + // case. + __ bind(&flat_string_check); + + __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); + + __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3); + StringCompareStub::GenerateCompareFlatAsciiStrings(masm, + lhs_, + rhs_, + r2, + r3, + r4, + r5); + // Never falls through to here. + + __ bind(&slow); + + __ Push(lhs_, rhs_); + // Figure out which native to call and setup the arguments. + Builtins::JavaScript native; + if (cc_ == eq) { + native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; + } else { + native = Builtins::COMPARE; + int ncr; // NaN compare result + if (cc_ == lt || cc_ == le) { + ncr = GREATER; + } else { + ASSERT(cc_ == gt || cc_ == ge); // remaining cases + ncr = LESS; + } + __ mov(r0, Operand(Smi::FromInt(ncr))); + __ push(r0); + } + + // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) + // tagged as a small integer. + __ InvokeBuiltin(native, JUMP_JS); +} + + +// This stub does not handle the inlined cases (Smis, Booleans, undefined). +// The stub returns zero for false, and a non-zero value for true. +void ToBooleanStub::Generate(MacroAssembler* masm) { + Label false_result; + Label not_heap_number; + Register scratch = r7; + + // HeapNumber => false iff +0, -0, or NaN. + __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(scratch, ip); + __ b(¬_heap_number, ne); + + __ sub(ip, tos_, Operand(kHeapObjectTag)); + __ vldr(d1, ip, HeapNumber::kValueOffset); + __ vcmp(d1, 0.0); + __ vmrs(pc); + // "tos_" is a register, and contains a non zero value by default. + // Hence we only need to overwrite "tos_" with zero to return false for + // FP_ZERO or FP_NAN cases. 
Otherwise, by default it returns true. + __ mov(tos_, Operand(0), LeaveCC, eq); // for FP_ZERO + __ mov(tos_, Operand(0), LeaveCC, vs); // for FP_NAN + __ Ret(); + + __ bind(¬_heap_number); + + // Check if the value is 'null'. + // 'null' => false. + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(tos_, ip); + __ b(&false_result, eq); + + // It can be an undetectable object. + // Undetectable => false. + __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset)); + __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable)); + __ cmp(scratch, Operand(1 << Map::kIsUndetectable)); + __ b(&false_result, eq); + + // JavaScript object => true. + __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE)); + // "tos_" is a register and contains a non-zero value. + // Hence we implicitly return true if the greater than + // condition is satisfied. + __ Ret(gt); + + // Check for string + __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE)); + // "tos_" is a register and contains a non-zero value. + // Hence we implicitly return true if the greater than + // condition is satisfied. + __ Ret(gt); + + // String value => false iff empty, i.e., length is zero + __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset)); + // If length is zero, "tos_" contains zero ==> false. + // If length is not zero, "tos_" contains a non-zero value ==> true. + __ Ret(); + + // Return 0 in "tos_" for false . + __ bind(&false_result); + __ mov(tos_, Operand(0)); + __ Ret(); +} + + +// We fall into this code if the operands were Smis, but the result was +// not (eg. overflow). We branch into this code (to the not_smi label) if +// the operands were not both Smi. The operands are in r0 and r1. In order +// to call the C-implemented binary fp operation routines we need to end up +// with the double precision floating point operands in r0 and r1 (for the +// value in r1) and r2 and r3 (for the value in r0). +void GenericBinaryOpStub::HandleBinaryOpSlowCases( + MacroAssembler* masm, + Label* not_smi, + Register lhs, + Register rhs, + const Builtins::JavaScript& builtin) { + Label slow, slow_reverse, do_the_call; + bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_; + + ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); + Register heap_number_map = r6; + + if (ShouldGenerateSmiCode()) { + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + // Smi-smi case (overflow). + // Since both are Smis there is no heap number to overwrite, so allocate. + // The new heap number is in r5. r3 and r7 are scratch. + __ AllocateHeapNumber( + r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow); + + // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, + // using registers d7 and d6 for the double values. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ mov(r7, Operand(rhs, ASR, kSmiTagSize)); + __ vmov(s15, r7); + __ vcvt_f64_s32(d7, s15); + __ mov(r7, Operand(lhs, ASR, kSmiTagSize)); + __ vmov(s13, r7); + __ vcvt_f64_s32(d6, s13); + if (!use_fp_registers) { + __ vmov(r2, r3, d7); + __ vmov(r0, r1, d6); + } + } else { + // Write Smi from rhs to r3 and r2 in double format. r9 is scratch. 
+ __ mov(r7, Operand(rhs)); + ConvertToDoubleStub stub1(r3, r2, r7, r9); + __ push(lr); + __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); + // Write Smi from lhs to r1 and r0 in double format. r9 is scratch. + __ mov(r7, Operand(lhs)); + ConvertToDoubleStub stub2(r1, r0, r7, r9); + __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + __ jmp(&do_the_call); // Tail call. No return. + } + + // We branch here if at least one of r0 and r1 is not a Smi. + __ bind(not_smi); + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + // After this point we have the left hand side in r1 and the right hand side + // in r0. + if (lhs.is(r0)) { + __ Swap(r0, r1, ip); + } + + // The type transition also calculates the answer. + bool generate_code_to_calculate_answer = true; + + if (ShouldGenerateFPCode()) { + if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: + GenerateTypeTransition(masm); // Tail call. + generate_code_to_calculate_answer = false; + break; + + default: + break; + } + } + + if (generate_code_to_calculate_answer) { + Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; + if (mode_ == NO_OVERWRITE) { + // In the case where there is no chance of an overwritable float we may + // as well do the allocation immediately while r0 and r1 are untouched. + __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow); + } + + // Move r0 to a double in r2-r3. + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. + __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r4, heap_number_map); + __ b(ne, &slow); + if (mode_ == OVERWRITE_RIGHT) { + __ mov(r5, Operand(r0)); // Overwrite this heap number. + } + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // Load the double from tagged HeapNumber r0 to d7. + __ sub(r7, r0, Operand(kHeapObjectTag)); + __ vldr(d7, r7, HeapNumber::kValueOffset); + } else { + // Calling convention says that second double is in r2 and r3. + __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); + } + __ jmp(&finished_loading_r0); + __ bind(&r0_is_smi); + if (mode_ == OVERWRITE_RIGHT) { + // We can't overwrite a Smi so get address of new heap number into r5. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); + } + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Convert smi in r0 to double in d7. + __ mov(r7, Operand(r0, ASR, kSmiTagSize)); + __ vmov(s15, r7); + __ vcvt_f64_s32(d7, s15); + if (!use_fp_registers) { + __ vmov(r2, r3, d7); + } + } else { + // Write Smi from r0 to r3 and r2 in double format. + __ mov(r7, Operand(r0)); + ConvertToDoubleStub stub3(r3, r2, r7, r4); + __ push(lr); + __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + + // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis. + // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC. + Label r1_is_not_smi; + if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) { + __ tst(r1, Operand(kSmiTagMask)); + __ b(ne, &r1_is_not_smi); + GenerateTypeTransition(masm); // Tail call. + } + + __ bind(&finished_loading_r0); + + // Move r1 to a double in r0-r1. + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. 
+ __ bind(&r1_is_not_smi); + __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r4, heap_number_map); + __ b(ne, &slow); + if (mode_ == OVERWRITE_LEFT) { + __ mov(r5, Operand(r1)); // Overwrite this heap number. + } + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // Load the double from tagged HeapNumber r1 to d6. + __ sub(r7, r1, Operand(kHeapObjectTag)); + __ vldr(d6, r7, HeapNumber::kValueOffset); + } else { + // Calling convention says that first double is in r0 and r1. + __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); + } + __ jmp(&finished_loading_r1); + __ bind(&r1_is_smi); + if (mode_ == OVERWRITE_LEFT) { + // We can't overwrite a Smi so get address of new heap number into r5. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); + } + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Convert smi in r1 to double in d6. + __ mov(r7, Operand(r1, ASR, kSmiTagSize)); + __ vmov(s13, r7); + __ vcvt_f64_s32(d6, s13); + if (!use_fp_registers) { + __ vmov(r0, r1, d6); + } + } else { + // Write Smi from r1 to r1 and r0 in double format. + __ mov(r7, Operand(r1)); + ConvertToDoubleStub stub4(r1, r0, r7, r9); + __ push(lr); + __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + + __ bind(&finished_loading_r1); + } + + if (generate_code_to_calculate_answer || do_the_call.is_linked()) { + __ bind(&do_the_call); + // If we are inlining the operation using VFP3 instructions for + // add, subtract, multiply, or divide, the arguments are in d6 and d7. + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // ARMv7 VFP3 instructions to implement + // double precision, add, subtract, multiply, divide. + + if (Token::MUL == op_) { + __ vmul(d5, d6, d7); + } else if (Token::DIV == op_) { + __ vdiv(d5, d6, d7); + } else if (Token::ADD == op_) { + __ vadd(d5, d6, d7); + } else if (Token::SUB == op_) { + __ vsub(d5, d6, d7); + } else { + UNREACHABLE(); + } + __ sub(r0, r5, Operand(kHeapObjectTag)); + __ vstr(d5, r0, HeapNumber::kValueOffset); + __ add(r0, r0, Operand(kHeapObjectTag)); + __ mov(pc, lr); + } else { + // If we did not inline the operation, then the arguments are in: + // r0: Left value (least significant part of mantissa). + // r1: Left value (sign, exponent, top of mantissa). + // r2: Right value (least significant part of mantissa). + // r3: Right value (sign, exponent, top of mantissa). + // r5: Address of heap number for result. + + __ push(lr); // For later. + __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. + // Call C routine that may not cause GC or other trouble. r5 is callee + // save. + __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); + // Store answer in the overwritable heap number. + #if !defined(USE_ARM_EABI) + // Double returned in fp coprocessor register 0 and 1, encoded as + // register cr8. Offsets must be divisible by 4 for coprocessor so we + // need to substract the tag from r5. + __ sub(r4, r5, Operand(kHeapObjectTag)); + __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); + #else + // Double returned in registers 0 and 1. + __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset)); + #endif + __ mov(r0, Operand(r5)); + // And we are done. 
+ __ pop(pc); + } + } + } + + if (!generate_code_to_calculate_answer && + !slow_reverse.is_linked() && + !slow.is_linked()) { + return; + } + + if (lhs.is(r0)) { + __ b(&slow); + __ bind(&slow_reverse); + __ Swap(r0, r1, ip); + } + + heap_number_map = no_reg; // Don't use this any more from here on. + + // We jump to here if something goes wrong (one param is not a number of any + // sort or new-space allocation fails). + __ bind(&slow); + + // Push arguments to the stack + __ Push(r1, r0); + + if (Token::ADD == op_) { + // Test for string arguments before calling runtime. + // r1 : first argument + // r0 : second argument + // sp[0] : second argument + // sp[4] : first argument + + Label not_strings, not_string1, string1, string1_smi2; + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, ¬_string1); + __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE); + __ b(ge, ¬_string1); + + // First argument is a a string, test second. + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &string1_smi2); + __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); + __ b(ge, &string1); + + // First and second argument are strings. + StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); + __ TailCallStub(&string_add_stub); + + __ bind(&string1_smi2); + // First argument is a string, second is a smi. Try to lookup the number + // string for the smi in the number string cache. + NumberToStringStub::GenerateLookupNumberStringCache( + masm, r0, r2, r4, r5, r6, true, &string1); + + // Replace second argument on stack and tailcall string add stub to make + // the result. + __ str(r2, MemOperand(sp, 0)); + __ TailCallStub(&string_add_stub); + + // Only first argument is a string. + __ bind(&string1); + __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS); + + // First argument was not a string, test second. + __ bind(¬_string1); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, ¬_strings); + __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); + __ b(ge, ¬_strings); + + // Only second argument is a string. + __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS); + + __ bind(¬_strings); + } + + __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. +} + + +// Tries to get a signed int32 out of a double precision floating point heap +// number. Rounds towards 0. Fastest for doubles that are in the ranges +// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds +// almost to the range of signed int32 values that are not Smis. Jumps to the +// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0 +// (excluding the endpoints). +static void GetInt32(MacroAssembler* masm, + Register source, + Register dest, + Register scratch, + Register scratch2, + Label* slow) { + Label right_exponent, done; + // Get exponent word. + __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); + // Get exponent alone in scratch2. + __ Ubfx(scratch2, + scratch, + HeapNumber::kExponentShift, + HeapNumber::kExponentBits); + // Load dest with zero. We use this either for the final shift or + // for the answer. + __ mov(dest, Operand(0)); + // Check whether the exponent matches a 32 bit signed int that is not a Smi. + // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is + // the exponent that we are fastest at and also the highest exponent we can + // handle here. + const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30; + // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we + // split it up to avoid a constant pool entry. 
You can't do that in general + // for cmp because of the overflow flag, but we know the exponent is in the + // range 0-2047 so there is no overflow. + int fudge_factor = 0x400; + __ sub(scratch2, scratch2, Operand(fudge_factor)); + __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor)); + // If we have a match of the int32-but-not-Smi exponent then skip some logic. + __ b(eq, &right_exponent); + // If the exponent is higher than that then go to slow case. This catches + // numbers that don't fit in a signed int32, infinities and NaNs. + __ b(gt, slow); + + // We know the exponent is smaller than 30 (biased). If it is less than + // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie + // it rounds to zero. + const uint32_t zero_exponent = HeapNumber::kExponentBias + 0; + __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC); + // Dest already has a Smi zero. + __ b(lt, &done); + if (!CpuFeatures::IsSupported(VFP3)) { + // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to + // get how much to shift down. + __ rsb(dest, scratch2, Operand(30)); + } + __ bind(&right_exponent); + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // ARMv7 VFP3 instructions implementing double precision to integer + // conversion using round to zero. + __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); + __ vmov(d7, scratch2, scratch); + __ vcvt_s32_f64(s15, d7); + __ vmov(dest, s15); + } else { + // Get the top bits of the mantissa. + __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); + // Put back the implicit 1. + __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); + // Shift up the mantissa bits to take up the space the exponent used to + // take. We just orred in the implicit bit so that took care of one and + // we want to leave the sign bit 0 so we subtract 2 bits from the shift + // distance. + const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; + __ mov(scratch2, Operand(scratch2, LSL, shift_distance)); + // Put sign in zero flag. + __ tst(scratch, Operand(HeapNumber::kSignMask)); + // Get the second half of the double. For some exponents we don't + // actually need this because the bits get shifted out again, but + // it's probably slower to test than just to do it. + __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); + // Shift down 22 bits to get the last 10 bits. + __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); + // Move down according to the exponent. + __ mov(dest, Operand(scratch, LSR, dest)); + // Fix sign if sign bit was set. + __ rsb(dest, dest, Operand(0), LeaveCC, ne); + } + __ bind(&done); +} + +// For bitwise ops where the inputs are not both Smis we here try to determine +// whether both inputs are either Smis or at least heap numbers that can be +// represented by a 32 bit signed value. We truncate towards zero as required +// by the ES spec. If this is the case we do the bitwise op and see if the +// result is a Smi. If so, great, otherwise we try to find a heap number to +// write the answer into (either by allocating or by overwriting). +// On entry the operands are in lhs and rhs. On exit the answer is in r0. 
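+// Truncation toward zero means that, for example, ToInt32(3.7) is 3 and
+// ToInt32(-3.7) is -3; doubles that do not fit in a signed 32 bit value are
+// handled in the slow case below.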
+void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, + Register lhs, + Register rhs) { + Label slow, result_not_a_smi; + Label rhs_is_smi, lhs_is_smi; + Label done_checking_rhs, done_checking_lhs; + + Register heap_number_map = r6; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + __ tst(lhs, Operand(kSmiTagMask)); + __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number. + __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset)); + __ cmp(r4, heap_number_map); + __ b(ne, &slow); + GetInt32(masm, lhs, r3, r5, r4, &slow); + __ jmp(&done_checking_lhs); + __ bind(&lhs_is_smi); + __ mov(r3, Operand(lhs, ASR, 1)); + __ bind(&done_checking_lhs); + + __ tst(rhs, Operand(kSmiTagMask)); + __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number. + __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset)); + __ cmp(r4, heap_number_map); + __ b(ne, &slow); + GetInt32(masm, rhs, r2, r5, r4, &slow); + __ jmp(&done_checking_rhs); + __ bind(&rhs_is_smi); + __ mov(r2, Operand(rhs, ASR, 1)); + __ bind(&done_checking_rhs); + + ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)))); + + // r0 and r1: Original operands (Smi or heap numbers). + // r2 and r3: Signed int32 operands. + switch (op_) { + case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break; + case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; + case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break; + case Token::SAR: + // Use only the 5 least significant bits of the shift count. + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, ASR, r2)); + break; + case Token::SHR: + // Use only the 5 least significant bits of the shift count. + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, LSR, r2), SetCC); + // SHR is special because it is required to produce a positive answer. + // The code below for writing into heap numbers isn't capable of writing + // the register as an unsigned int so we go to slow case if we hit this + // case. + if (CpuFeatures::IsSupported(VFP3)) { + __ b(mi, &result_not_a_smi); + } else { + __ b(mi, &slow); + } + break; + case Token::SHL: + // Use only the 5 least significant bits of the shift count. + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, LSL, r2)); + break; + default: UNREACHABLE(); + } + // check that the *signed* result fits in a smi + __ add(r3, r2, Operand(0x40000000), SetCC); + __ b(mi, &result_not_a_smi); + __ mov(r0, Operand(r2, LSL, kSmiTagSize)); + __ Ret(); + + Label have_to_allocate, got_a_heap_number; + __ bind(&result_not_a_smi); + switch (mode_) { + case OVERWRITE_RIGHT: { + __ tst(rhs, Operand(kSmiTagMask)); + __ b(eq, &have_to_allocate); + __ mov(r5, Operand(rhs)); + break; + } + case OVERWRITE_LEFT: { + __ tst(lhs, Operand(kSmiTagMask)); + __ b(eq, &have_to_allocate); + __ mov(r5, Operand(lhs)); + break; + } + case NO_OVERWRITE: { + // Get a new heap number in r5. r4 and r7 are scratch. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); + } + default: break; + } + __ bind(&got_a_heap_number); + // r2: Answer as signed int32. + // r5: Heap number to write answer into. + + // Nothing can go wrong now, so move the heap number to r0, which is the + // result. + __ mov(r0, Operand(r5)); + + if (CpuFeatures::IsSupported(VFP3)) { + // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. 
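+ // For SHR the value in r2 must be read as an unsigned integer (e.g. the
+ // bit pattern 0x80000000 means 2147483648.0), which is why the unsigned
+ // variant of the conversion is used below for that operator.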
+ CpuFeatures::Scope scope(VFP3); + __ vmov(s0, r2); + if (op_ == Token::SHR) { + __ vcvt_f64_u32(d0, s0); + } else { + __ vcvt_f64_s32(d0, s0); + } + __ sub(r3, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r3, HeapNumber::kValueOffset); + __ Ret(); + } else { + // Tail call that writes the int32 in r2 to the heap number in r0, using + // r3 as scratch. r0 is preserved and returned. + WriteInt32ToHeapNumberStub stub(r2, r0, r3); + __ TailCallStub(&stub); + } + + if (mode_ != NO_OVERWRITE) { + __ bind(&have_to_allocate); + // Get a new heap number in r5. r4 and r7 are scratch. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); + __ jmp(&got_a_heap_number); + } + + // If all else failed then we go to the runtime system. + __ bind(&slow); + __ Push(lhs, rhs); // Restore stack. + switch (op_) { + case Token::BIT_OR: + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); + break; + case Token::BIT_AND: + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); + break; + case Token::BIT_XOR: + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); + break; + case Token::SAR: + __ InvokeBuiltin(Builtins::SAR, JUMP_JS); + break; + case Token::SHR: + __ InvokeBuiltin(Builtins::SHR, JUMP_JS); + break; + case Token::SHL: + __ InvokeBuiltin(Builtins::SHL, JUMP_JS); + break; + default: + UNREACHABLE(); + } +} + + + + +// This function takes the known int in a register for the cases +// where it doesn't know a good trick, and may deliver +// a result that needs shifting. +static void MultiplyByKnownIntInStub( + MacroAssembler* masm, + Register result, + Register source, + Register known_int_register, // Smi tagged. + int known_int, + int* required_shift) { // Including Smi tag shift + switch (known_int) { + case 3: + __ add(result, source, Operand(source, LSL, 1)); + *required_shift = 1; + break; + case 5: + __ add(result, source, Operand(source, LSL, 2)); + *required_shift = 1; + break; + case 6: + __ add(result, source, Operand(source, LSL, 1)); + *required_shift = 2; + break; + case 7: + __ rsb(result, source, Operand(source, LSL, 3)); + *required_shift = 1; + break; + case 9: + __ add(result, source, Operand(source, LSL, 3)); + *required_shift = 1; + break; + case 10: + __ add(result, source, Operand(source, LSL, 2)); + *required_shift = 2; + break; + default: + ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient. + __ mul(result, source, known_int_register); + *required_shift = 0; + } +} + + +// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3 +// trick. See http://en.wikipedia.org/wiki/Divisibility_rule +// Takes the sum of the digits base (mask + 1) repeatedly until we have a +// number from 0 to mask. On exit the 'eq' condition flags are set if the +// answer is exactly the mask. +void IntegerModStub::DigitSum(MacroAssembler* masm, + Register lhs, + int mask, + int shift, + Label* entry) { + ASSERT(mask > 0); + ASSERT(mask <= 0xff); // This ensures we don't need ip to use it. + Label loop; + __ bind(&loop); + __ and_(ip, lhs, Operand(mask)); + __ add(lhs, ip, Operand(lhs, LSR, shift)); + __ bind(entry); + __ cmp(lhs, Operand(mask)); + __ b(gt, &loop); +} + + +void IntegerModStub::DigitSum(MacroAssembler* masm, + Register lhs, + Register scratch, + int mask, + int shift1, + int shift2, + Label* entry) { + ASSERT(mask > 0); + ASSERT(mask <= 0xff); // This ensures we don't need ip to use it. 
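+ // With mask == (1 << shift1) - 1, q = lhs >> shift1 and r = lhs & mask,
+ // each pass rewrites (q << shift1) + r as r + q + (q << (shift1 - shift2)),
+ // i.e. it subtracts q * ((1 << shift1) - (1 << (shift1 - shift2)) - 1).
+ // The callers choose shift1 and shift2 so that this quantity is the
+ // multiple of the divisor mentioned in their comments (e.g. 55 = 5 * 11
+ // for shift1 6, shift2 3), so the value modulo the divisor is unchanged.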
+ Label loop; + __ bind(&loop); + __ bic(scratch, lhs, Operand(mask)); + __ and_(ip, lhs, Operand(mask)); + __ add(lhs, ip, Operand(lhs, LSR, shift1)); + __ add(lhs, lhs, Operand(scratch, LSR, shift2)); + __ bind(entry); + __ cmp(lhs, Operand(mask)); + __ b(gt, &loop); +} + + +// Splits the number into two halves (bottom half has shift bits). The top +// half is subtracted from the bottom half. If the result is negative then +// rhs is added. +void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm, + Register lhs, + int shift, + int rhs) { + int mask = (1 << shift) - 1; + __ and_(ip, lhs, Operand(mask)); + __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC); + __ add(lhs, lhs, Operand(rhs), LeaveCC, mi); +} + + +void IntegerModStub::ModReduce(MacroAssembler* masm, + Register lhs, + int max, + int denominator) { + int limit = denominator; + while (limit * 2 <= max) limit *= 2; + while (limit >= denominator) { + __ cmp(lhs, Operand(limit)); + __ sub(lhs, lhs, Operand(limit), LeaveCC, ge); + limit >>= 1; + } +} + + +void IntegerModStub::ModAnswer(MacroAssembler* masm, + Register result, + Register shift_distance, + Register mask_bits, + Register sum_of_digits) { + __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance)); + __ Ret(); +} + + +// See comment for class. +void IntegerModStub::Generate(MacroAssembler* masm) { + __ mov(lhs_, Operand(lhs_, LSR, shift_distance_)); + __ bic(odd_number_, odd_number_, Operand(1)); + __ mov(odd_number_, Operand(odd_number_, LSL, 1)); + // We now have (odd_number_ - 1) * 2 in the register. + // Build a switch out of branches instead of data because it avoids + // having to teach the assembler about intra-code-object pointers + // that are not in relative branch instructions. + Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19; + Label mod21, mod23, mod25; + { Assembler::BlockConstPoolScope block_const_pool(masm); + __ add(pc, pc, Operand(odd_number_)); + // When you read pc it is always 8 ahead, but when you write it you always + // write the actual value. So we put in two nops to take up the slack. + __ nop(); + __ nop(); + __ b(&mod3); + __ b(&mod5); + __ b(&mod7); + __ b(&mod9); + __ b(&mod11); + __ b(&mod13); + __ b(&mod15); + __ b(&mod17); + __ b(&mod19); + __ b(&mod21); + __ b(&mod23); + __ b(&mod25); + } + + // For each denominator we find a multiple that is almost only ones + // when expressed in binary. Then we do the sum-of-digits trick for + // that number. If the multiple is not 1 then we have to do a little + // more work afterwards to get the answer into the 0-denominator-1 + // range. + DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11. + __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111. + ModGetInRangeBySubtraction(masm, lhs_, 2, 5); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111. + __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111. + ModGetInRangeBySubtraction(masm, lhs_, 3, 9); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111. + ModReduce(masm, lhs_, 0x3f, 11); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111. 
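+ // The digit sum above leaves a value in the range 0..0xff; ModReduce
+ // brings it into the range 0..12 by conditionally subtracting 208, 104,
+ // 52, 26 and 13.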
+ ModReduce(masm, lhs_, 0xff, 13); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111. + __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111. + ModGetInRangeBySubtraction(masm, lhs_, 4, 17); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111. + ModReduce(masm, lhs_, 0xff, 19); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111. + ModReduce(masm, lhs_, 0x3f, 21); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101. + ModReduce(masm, lhs_, 0xff, 23); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101. + ModReduce(masm, lhs_, 0x7f, 25); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); +} + + +void GenericBinaryOpStub::Generate(MacroAssembler* masm) { + // lhs_ : x + // rhs_ : y + // r0 : result + + Register result = r0; + Register lhs = lhs_; + Register rhs = rhs_; + + // This code can't cope with other register allocations yet. + ASSERT(result.is(r0) && + ((lhs.is(r0) && rhs.is(r1)) || + (lhs.is(r1) && rhs.is(r0)))); + + Register smi_test_reg = r7; + Register scratch = r9; + + // All ops need to know whether we are dealing with two Smis. Set up + // smi_test_reg to tell us that. + if (ShouldGenerateSmiCode()) { + __ orr(smi_test_reg, lhs, Operand(rhs)); + } + + switch (op_) { + case Token::ADD: { + Label not_smi; + // Fast path. + if (ShouldGenerateSmiCode()) { + STATIC_ASSERT(kSmiTag == 0); // Adjust code below. + __ tst(smi_test_reg, Operand(kSmiTagMask)); + __ b(ne, ¬_smi); + __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. + // Return if no overflow. + __ Ret(vc); + __ sub(r0, r0, Operand(r1)); // Revert optimistic add. + } + HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::ADD); + break; + } + + case Token::SUB: { + Label not_smi; + // Fast path. + if (ShouldGenerateSmiCode()) { + STATIC_ASSERT(kSmiTag == 0); // Adjust code below. + __ tst(smi_test_reg, Operand(kSmiTagMask)); + __ b(ne, ¬_smi); + if (lhs.is(r1)) { + __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. + // Return if no overflow. + __ Ret(vc); + __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. + } else { + __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically. + // Return if no overflow. + __ Ret(vc); + __ add(r0, r0, Operand(r1)); // Revert optimistic subtract. + } + } + HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::SUB); + break; + } + + case Token::MUL: { + Label not_smi, slow; + if (ShouldGenerateSmiCode()) { + STATIC_ASSERT(kSmiTag == 0); // adjust code below + __ tst(smi_test_reg, Operand(kSmiTagMask)); + Register scratch2 = smi_test_reg; + smi_test_reg = no_reg; + __ b(ne, ¬_smi); + // Remove tag from one operand (but keep sign), so that result is Smi. + __ mov(ip, Operand(rhs, ASR, kSmiTagSize)); + // Do multiplication + // scratch = lower 32 bits of ip * lhs. + __ smull(scratch, scratch2, lhs, ip); + // Go slow on overflows (overflow bit is not set). + __ mov(ip, Operand(scratch, ASR, 31)); + // No overflow if higher 33 bits are identical. 
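+ // (ip is now 0 or -1 depending on the sign of the low word; the 64-bit
+ // product fits in 32 bits exactly when the high word in scratch2 equals
+ // that sign extension.)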
+ __ cmp(ip, Operand(scratch2)); + __ b(ne, &slow); + // Go slow on zero result to handle -0. + __ tst(scratch, Operand(scratch)); + __ mov(result, Operand(scratch), LeaveCC, ne); + __ Ret(ne); + // We need -0 if we were multiplying a negative number with 0 to get 0. + // We know one of them was zero. + __ add(scratch2, rhs, Operand(lhs), SetCC); + __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl); + __ Ret(pl); // Return Smi 0 if the non-zero one was positive. + // Slow case. We fall through here if we multiplied a negative number + // with 0, because that would mean we should produce -0. + __ bind(&slow); + } + HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::MUL); + break; + } + + case Token::DIV: + case Token::MOD: { + Label not_smi; + if (ShouldGenerateSmiCode() && specialized_on_rhs_) { + Label lhs_is_unsuitable; + __ BranchOnNotSmi(lhs, ¬_smi); + if (IsPowerOf2(constant_rhs_)) { + if (op_ == Token::MOD) { + __ and_(rhs, + lhs, + Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), + SetCC); + // We now have the answer, but if the input was negative we also + // have the sign bit. Our work is done if the result is + // positive or zero: + if (!rhs.is(r0)) { + __ mov(r0, rhs, LeaveCC, pl); + } + __ Ret(pl); + // A mod of a negative left hand side must return a negative number. + // Unfortunately if the answer is 0 then we must return -0. And we + // already optimistically trashed rhs so we may need to restore it. + __ eor(rhs, rhs, Operand(0x80000000u), SetCC); + // Next two instructions are conditional on the answer being -0. + __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); + __ b(eq, &lhs_is_unsuitable); + // We need to subtract the dividend. Eg. -3 % 4 == -3. + __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_))); + } else { + ASSERT(op_ == Token::DIV); + __ tst(lhs, + Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1))); + __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder. + int shift = 0; + int d = constant_rhs_; + while ((d & 1) == 0) { + d >>= 1; + shift++; + } + __ mov(r0, Operand(lhs, LSR, shift)); + __ bic(r0, r0, Operand(kSmiTagMask)); + } + } else { + // Not a power of 2. + __ tst(lhs, Operand(0x80000000u)); + __ b(ne, &lhs_is_unsuitable); + // Find a fixed point reciprocal of the divisor so we can divide by + // multiplying. + double divisor = 1.0 / constant_rhs_; + int shift = 32; + double scale = 4294967296.0; // 1 << 32. + uint32_t mul; + // Maximise the precision of the fixed point reciprocal. + while (true) { + mul = static_cast(scale * divisor); + if (mul >= 0x7fffffff) break; + scale *= 2.0; + shift++; + } + mul++; + Register scratch2 = smi_test_reg; + smi_test_reg = no_reg; + __ mov(scratch2, Operand(mul)); + __ umull(scratch, scratch2, scratch2, lhs); + __ mov(scratch2, Operand(scratch2, LSR, shift - 31)); + // scratch2 is lhs / rhs. scratch2 is not Smi tagged. + // rhs is still the known rhs. rhs is Smi tagged. + // lhs is still the unkown lhs. lhs is Smi tagged. + int required_scratch_shift = 0; // Including the Smi tag shift of 1. + // scratch = scratch2 * rhs. + MultiplyByKnownIntInStub(masm, + scratch, + scratch2, + rhs, + constant_rhs_, + &required_scratch_shift); + // scratch << required_scratch_shift is now the Smi tagged rhs * + // (lhs / rhs) where / indicates integer division. + if (op_ == Token::DIV) { + __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift)); + __ b(ne, &lhs_is_unsuitable); // There was a remainder. 
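+ // No remainder, so scratch2 holds the exact untagged quotient; tagging
+ // it produces the Smi result.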
+ __ mov(result, Operand(scratch2, LSL, kSmiTagSize)); + } else { + ASSERT(op_ == Token::MOD); + __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift)); + } + } + __ Ret(); + __ bind(&lhs_is_unsuitable); + } else if (op_ == Token::MOD && + runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && + runtime_operands_type_ != BinaryOpIC::STRINGS) { + // Do generate a bit of smi code for modulus even though the default for + // modulus is not to do it, but as the ARM processor has no coprocessor + // support for modulus checking for smis makes sense. We can handle + // 1 to 25 times any power of 2. This covers over half the numbers from + // 1 to 100 including all of the first 25. (Actually the constants < 10 + // are handled above by reciprocal multiplication. We only get here for + // those cases if the right hand side is not a constant or for cases + // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod + // stub.) + Label slow; + Label not_power_of_2; + ASSERT(!ShouldGenerateSmiCode()); + STATIC_ASSERT(kSmiTag == 0); // Adjust code below. + // Check for two positive smis. + __ orr(smi_test_reg, lhs, Operand(rhs)); + __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask)); + __ b(ne, &slow); + // Check that rhs is a power of two and not zero. + Register mask_bits = r3; + __ sub(scratch, rhs, Operand(1), SetCC); + __ b(mi, &slow); + __ and_(mask_bits, rhs, Operand(scratch), SetCC); + __ b(ne, ¬_power_of_2); + // Calculate power of two modulus. + __ and_(result, lhs, Operand(scratch)); + __ Ret(); + + __ bind(¬_power_of_2); + __ eor(scratch, scratch, Operand(mask_bits)); + // At least two bits are set in the modulus. The high one(s) are in + // mask_bits and the low one is scratch + 1. + __ and_(mask_bits, scratch, Operand(lhs)); + Register shift_distance = scratch; + scratch = no_reg; + + // The rhs consists of a power of 2 multiplied by some odd number. + // The power-of-2 part we handle by putting the corresponding bits + // from the lhs in the mask_bits register, and the power in the + // shift_distance register. Shift distance is never 0 due to Smi + // tagging. + __ CountLeadingZeros(r4, shift_distance, shift_distance); + __ rsb(shift_distance, r4, Operand(32)); + + // Now we need to find out what the odd number is. The last bit is + // always 1. + Register odd_number = r4; + __ mov(odd_number, Operand(rhs, LSR, shift_distance)); + __ cmp(odd_number, Operand(25)); + __ b(gt, &slow); + + IntegerModStub stub( + result, shift_distance, odd_number, mask_bits, lhs, r5); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call. + + __ bind(&slow); + } + HandleBinaryOpSlowCases( + masm, + ¬_smi, + lhs, + rhs, + op_ == Token::MOD ? Builtins::MOD : Builtins::DIV); + break; + } + + case Token::BIT_OR: + case Token::BIT_AND: + case Token::BIT_XOR: + case Token::SAR: + case Token::SHR: + case Token::SHL: { + Label slow; + STATIC_ASSERT(kSmiTag == 0); // adjust code below + __ tst(smi_test_reg, Operand(kSmiTagMask)); + __ b(ne, &slow); + Register scratch2 = smi_test_reg; + smi_test_reg = no_reg; + switch (op_) { + case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break; + case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break; + case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break; + case Token::SAR: + // Remove tags from right operand. + __ GetLeastBitsFromSmi(scratch2, rhs, 5); + __ mov(result, Operand(lhs, ASR, scratch2)); + // Smi tag result. 
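+ // The arithmetic shift was applied to the still-tagged lhs, so only the
+ // tag bit can be dirty; clearing it below leaves a correctly tagged Smi.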
+ __ bic(result, result, Operand(kSmiTagMask)); + break; + case Token::SHR: + // Remove tags from operands. We can't do this on a 31 bit number + // because then the 0s get shifted into bit 30 instead of bit 31. + __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x + __ GetLeastBitsFromSmi(scratch2, rhs, 5); + __ mov(scratch, Operand(scratch, LSR, scratch2)); + // Unsigned shift is not allowed to produce a negative number, so + // check the sign bit and the sign bit after Smi tagging. + __ tst(scratch, Operand(0xc0000000)); + __ b(ne, &slow); + // Smi tag result. + __ mov(result, Operand(scratch, LSL, kSmiTagSize)); + break; + case Token::SHL: + // Remove tags from operands. + __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x + __ GetLeastBitsFromSmi(scratch2, rhs, 5); + __ mov(scratch, Operand(scratch, LSL, scratch2)); + // Check that the signed result fits in a Smi. + __ add(scratch2, scratch, Operand(0x40000000), SetCC); + __ b(mi, &slow); + __ mov(result, Operand(scratch, LSL, kSmiTagSize)); + break; + default: UNREACHABLE(); + } + __ Ret(); + __ bind(&slow); + HandleNonSmiBitwiseOp(masm, lhs, rhs); + break; + } + + default: UNREACHABLE(); + } + // This code should be unreachable. + __ stop("Unreachable"); + + // Generate an unreachable reference to the DEFAULT stub so that it can be + // found at the end of this stub when clearing ICs at GC. + // TODO(kaznacheev): Check performance impact and get rid of this. + if (runtime_operands_type_ != BinaryOpIC::DEFAULT) { + GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT); + __ CallStub(&uninit); + } +} + + +void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { + Label get_result; + + __ Push(r1, r0); + + __ mov(r2, Operand(Smi::FromInt(MinorKey()))); + __ mov(r1, Operand(Smi::FromInt(op_))); + __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_))); + __ Push(r2, r1, r0); + + __ TailCallExternalReference( + ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), + 5, + 1); +} + + +Handle GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { + GenericBinaryOpStub stub(key, type_info); + return stub.GetCode(); +} + + +void TranscendentalCacheStub::Generate(MacroAssembler* masm) { + // Argument is a number and is on stack and in r0. + Label runtime_call; + Label input_not_smi; + Label loaded; + + if (CpuFeatures::IsSupported(VFP3)) { + // Load argument and check if it is a smi. + __ BranchOnNotSmi(r0, &input_not_smi); + + CpuFeatures::Scope scope(VFP3); + // Input is a smi. Convert to double and load the low and high words + // of the double into r2, r3. + __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); + __ b(&loaded); + + __ bind(&input_not_smi); + // Check if input is a HeapNumber. + __ CheckMap(r0, + r1, + Heap::kHeapNumberMapRootIndex, + &runtime_call, + true); + // Input is a HeapNumber. Load it to a double register and store the + // low and high words into r2, r3. + __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); + + __ bind(&loaded); + // r2 = low 32 bits of double value + // r3 = high 32 bits of double value + // Compute hash (the shifts are arithmetic): + // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); + __ eor(r1, r2, Operand(r3)); + __ eor(r1, r1, Operand(r1, ASR, 16)); + __ eor(r1, r1, Operand(r1, ASR, 8)); + ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); + __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1)); + + // r2 = low 32 bits of double value. + // r3 = high 32 bits of double value. 
+ // r1 = TranscendentalCache::hash(double value). + __ mov(r0, + Operand(ExternalReference::transcendental_cache_array_address())); + // r0 points to cache array. + __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0]))); + // r0 points to the cache for the type type_. + // If NULL, the cache hasn't been initialized yet, so go through runtime. + __ cmp(r0, Operand(0)); + __ b(eq, &runtime_call); + +#ifdef DEBUG + // Check that the layout of cache elements match expectations. + { TranscendentalCache::Element test_elem[2]; + char* elem_start = reinterpret_cast(&test_elem[0]); + char* elem2_start = reinterpret_cast(&test_elem[1]); + char* elem_in0 = reinterpret_cast(&(test_elem[0].in[0])); + char* elem_in1 = reinterpret_cast(&(test_elem[0].in[1])); + char* elem_out = reinterpret_cast(&(test_elem[0].output)); + CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. + CHECK_EQ(0, elem_in0 - elem_start); + CHECK_EQ(kIntSize, elem_in1 - elem_start); + CHECK_EQ(2 * kIntSize, elem_out - elem_start); + } +#endif + + // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. + __ add(r1, r1, Operand(r1, LSL, 1)); + __ add(r0, r0, Operand(r1, LSL, 2)); + // Check if cache matches: Double value is stored in uint32_t[2] array. + __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit()); + __ cmp(r2, r4); + __ b(ne, &runtime_call); + __ cmp(r3, r5); + __ b(ne, &runtime_call); + // Cache hit. Load result, pop argument and return. + __ mov(r0, Operand(r6)); + __ pop(); + __ Ret(); + } + + __ bind(&runtime_call); + __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); +} + + +Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { + switch (type_) { + // Add more cases when necessary. + case TranscendentalCache::SIN: return Runtime::kMath_sin; + case TranscendentalCache::COS: return Runtime::kMath_cos; + default: + UNIMPLEMENTED(); + return Runtime::kAbort; + } +} + + +void StackCheckStub::Generate(MacroAssembler* masm) { + // Do tail-call to runtime routine. Runtime routines expect at least one + // argument, so give it a Smi. + __ mov(r0, Operand(Smi::FromInt(0))); + __ push(r0); + __ TailCallRuntime(Runtime::kStackGuard, 1, 1); + + __ StubReturn(1); +} + + +void GenericUnaryOpStub::Generate(MacroAssembler* masm) { + Label slow, done; + + Register heap_number_map = r6; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + if (op_ == Token::SUB) { + // Check whether the value is a smi. + Label try_float; + __ tst(r0, Operand(kSmiTagMask)); + __ b(ne, &try_float); + + // Go slow case if the value of the expression is zero + // to make sure that we switch between 0 and -0. + if (negative_zero_ == kStrictNegativeZero) { + // If we have to check for zero, then we can check for the max negative + // smi while we are at it. + __ bic(ip, r0, Operand(0x80000000), SetCC); + __ b(eq, &slow); + __ rsb(r0, r0, Operand(0)); + __ StubReturn(1); + } else { + // The value of the expression is a smi and 0 is OK for -0. Try + // optimistic subtraction '0 - value'. + __ rsb(r0, r0, Operand(0), SetCC); + __ StubReturn(1, vc); + // We don't have to reverse the optimistic neg since the only case + // where we fall through is the minimum negative Smi, which is the case + // where the neg leaves the register unchanged. + __ jmp(&slow); // Go slow on max negative Smi. 
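+ // (rsb of the most negative Smi, 0x80000000, from zero overflows and
+ // leaves 0x80000000 in r0, so the slow path still sees the original
+ // operand.)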
+ } + + __ bind(&try_float); + __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r1, heap_number_map); + __ b(ne, &slow); + // r0 is a heap number. Get a new heap number in r1. + if (overwrite_ == UNARY_OVERWRITE) { + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. + __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + } else { + __ AllocateHeapNumber(r1, r2, r3, r6, &slow); + __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); + __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. + __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); + __ mov(r0, Operand(r1)); + } + } else if (op_ == Token::BIT_NOT) { + // Check if the operand is a heap number. + __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r1, heap_number_map); + __ b(ne, &slow); + + // Convert the heap number is r0 to an untagged integer in r1. + GetInt32(masm, r0, r1, r2, r3, &slow); + + // Do the bitwise operation (move negated) and check if the result + // fits in a smi. + Label try_float; + __ mvn(r1, Operand(r1)); + __ add(r2, r1, Operand(0x40000000), SetCC); + __ b(mi, &try_float); + __ mov(r0, Operand(r1, LSL, kSmiTagSize)); + __ b(&done); + + __ bind(&try_float); + if (!overwrite_ == UNARY_OVERWRITE) { + // Allocate a fresh heap number, but don't overwrite r0 until + // we're sure we can do it without going through the slow case + // that needs the value in r0. + __ AllocateHeapNumber(r2, r3, r4, r6, &slow); + __ mov(r0, Operand(r2)); + } + + if (CpuFeatures::IsSupported(VFP3)) { + // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. + CpuFeatures::Scope scope(VFP3); + __ vmov(s0, r1); + __ vcvt_f64_s32(d0, s0); + __ sub(r2, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r2, HeapNumber::kValueOffset); + } else { + // WriteInt32ToHeapNumberStub does not trigger GC, so we do not + // have to set up a frame. + WriteInt32ToHeapNumberStub stub(r1, r0, r2); + __ push(lr); + __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + } else { + UNIMPLEMENTED(); + } + + __ bind(&done); + __ StubReturn(1); + + // Handle the slow case by jumping to the JavaScript builtin. + __ bind(&slow); + __ push(r0); + switch (op_) { + case Token::SUB: + __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS); + break; + case Token::BIT_NOT: + __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS); + break; + default: + UNREACHABLE(); + } +} + + +void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { + // r0 holds the exception. + + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // Drop the sp to the top of the handler. + __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); + __ ldr(sp, MemOperand(r3)); + + // Restore the next handler and frame pointer, discard handler state. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + __ pop(r2); + __ str(r2, MemOperand(r3)); + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); + __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. + + // Before returning we restore the context from the frame pointer if + // not NULL. 
The frame pointer is NULL in the exception handler of a + // JS entry frame. + __ cmp(fp, Operand(0)); + // Set cp to NULL if fp is NULL. + __ mov(cp, Operand(0), LeaveCC, eq); + // Restore cp otherwise. + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); +#ifdef DEBUG + if (FLAG_debug_code) { + __ mov(lr, Operand(pc)); + } +#endif + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + __ pop(pc); +} + + +void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, + UncatchableExceptionType type) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // Drop sp to the top stack handler. + __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); + __ ldr(sp, MemOperand(r3)); + + // Unwind the handlers until the ENTRY handler is found. + Label loop, done; + __ bind(&loop); + // Load the type of the current stack handler. + const int kStateOffset = StackHandlerConstants::kStateOffset; + __ ldr(r2, MemOperand(sp, kStateOffset)); + __ cmp(r2, Operand(StackHandler::ENTRY)); + __ b(eq, &done); + // Fetch the next handler in the list. + const int kNextOffset = StackHandlerConstants::kNextOffset; + __ ldr(sp, MemOperand(sp, kNextOffset)); + __ jmp(&loop); + __ bind(&done); + + // Set the top handler address to next handler past the current ENTRY handler. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + __ pop(r2); + __ str(r2, MemOperand(r3)); + + if (type == OUT_OF_MEMORY) { + // Set external caught exception to false. + ExternalReference external_caught(Top::k_external_caught_exception_address); + __ mov(r0, Operand(false)); + __ mov(r2, Operand(external_caught)); + __ str(r0, MemOperand(r2)); + + // Set pending exception and r0 to out of memory exception. + Failure* out_of_memory = Failure::OutOfMemoryException(); + __ mov(r0, Operand(reinterpret_cast(out_of_memory))); + __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); + __ str(r0, MemOperand(r2)); + } + + // Stack layout at this point. See also StackHandlerConstants. + // sp -> state (ENTRY) + // fp + // lr + + // Discard handler state (r2 is not used) and restore frame pointer. + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); + __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. + // Before returning we restore the context from the frame pointer if + // not NULL. The frame pointer is NULL in the exception handler of a + // JS entry frame. + __ cmp(fp, Operand(0)); + // Set cp to NULL if fp is NULL. + __ mov(cp, Operand(0), LeaveCC, eq); + // Restore cp otherwise. + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); +#ifdef DEBUG + if (FLAG_debug_code) { + __ mov(lr, Operand(pc)); + } +#endif + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + __ pop(pc); +} + + +void CEntryStub::GenerateCore(MacroAssembler* masm, + Label* throw_normal_exception, + Label* throw_termination_exception, + Label* throw_out_of_memory_exception, + bool do_gc, + bool always_allocate, + int frame_alignment_skew) { + // r0: result parameter for PerformGC, if any + // r4: number of arguments including receiver (C callee-saved) + // r5: pointer to builtin function (C callee-saved) + // r6: pointer to the first argument (C callee-saved) + + if (do_gc) { + // Passing r0. 
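+ // r0 already holds the failure object returned by the previous attempt
+ // (or Failure::InternalError() before the final retry); it is the single
+ // argument passed to the GC function reached through
+ // perform_gc_function().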
+ __ PrepareCallCFunction(1, r1); + __ CallCFunction(ExternalReference::perform_gc_function(), 1); + } + + ExternalReference scope_depth = + ExternalReference::heap_always_allocate_scope_depth(); + if (always_allocate) { + __ mov(r0, Operand(scope_depth)); + __ ldr(r1, MemOperand(r0)); + __ add(r1, r1, Operand(1)); + __ str(r1, MemOperand(r0)); + } + + // Call C built-in. + // r0 = argc, r1 = argv + __ mov(r0, Operand(r4)); + __ mov(r1, Operand(r6)); + + int frame_alignment = MacroAssembler::ActivationFrameAlignment(); + int frame_alignment_mask = frame_alignment - 1; +#if defined(V8_HOST_ARCH_ARM) + if (FLAG_debug_code) { + if (frame_alignment > kPointerSize) { + Label alignment_as_expected; + ASSERT(IsPowerOf2(frame_alignment)); + __ sub(r2, sp, Operand(frame_alignment_skew)); + __ tst(r2, Operand(frame_alignment_mask)); + __ b(eq, &alignment_as_expected); + // Don't use Check here, as it will call Runtime_Abort re-entering here. + __ stop("Unexpected alignment"); + __ bind(&alignment_as_expected); + } + } +#endif + + // Just before the call (jump) below lr is pushed, so the actual alignment is + // adding one to the current skew. + int alignment_before_call = + (frame_alignment_skew + kPointerSize) & frame_alignment_mask; + if (alignment_before_call > 0) { + // Push until the alignment before the call is met. + __ mov(r2, Operand(0)); + for (int i = alignment_before_call; + (i & frame_alignment_mask) != 0; + i += kPointerSize) { + __ push(r2); + } + } + + // TODO(1242173): To let the GC traverse the return address of the exit + // frames, we need to know where the return address is. Right now, + // we push it on the stack to be able to find it again, but we never + // restore from it in case of changes, which makes it impossible to + // support moving the C entry code stub. This should be fixed, but currently + // this is OK because the CEntryStub gets generated so early in the V8 boot + // sequence that it is not moving ever. + masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4 + masm->push(lr); + masm->Jump(r5); + + // Restore sp back to before aligning the stack. + if (alignment_before_call > 0) { + __ add(sp, sp, Operand(alignment_before_call)); + } + + if (always_allocate) { + // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 + // though (contain the result). + __ mov(r2, Operand(scope_depth)); + __ ldr(r3, MemOperand(r2)); + __ sub(r3, r3, Operand(1)); + __ str(r3, MemOperand(r2)); + } + + // check for failure result + Label failure_returned; + STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); + // Lower 2 bits of r2 are 0 iff r0 has failure tag. + __ add(r2, r0, Operand(1)); + __ tst(r2, Operand(kFailureTagMask)); + __ b(eq, &failure_returned); + + // Exit C frame and return. + // r0:r1: result + // sp: stack pointer + // fp: frame pointer + __ LeaveExitFrame(mode_); + + // check if we should retry or throw exception + Label retry; + __ bind(&failure_returned); + STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); + __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); + __ b(eq, &retry); + + // Special handling of out of memory exceptions. + Failure* out_of_memory = Failure::OutOfMemoryException(); + __ cmp(r0, Operand(reinterpret_cast(out_of_memory))); + __ b(eq, throw_out_of_memory_exception); + + // Retrieve the pending exception and clear the variable. 
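+ // "Clear" means storing the hole value back into the pending-exception
+ // slot; the exception itself ends up in r0 for the dispatch below.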
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location())); + __ ldr(r3, MemOperand(ip)); + __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); + __ ldr(r0, MemOperand(ip)); + __ str(r3, MemOperand(ip)); + + // Special handling of termination exceptions which are uncatchable + // by javascript code. + __ cmp(r0, Operand(Factory::termination_exception())); + __ b(eq, throw_termination_exception); + + // Handle normal exception. + __ jmp(throw_normal_exception); + + __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying +} + + +void CEntryStub::Generate(MacroAssembler* masm) { + // Called from JavaScript; parameters are on stack as if calling JS function + // r0: number of arguments including receiver + // r1: pointer to builtin function + // fp: frame pointer (restored after C call) + // sp: stack pointer (restored as callee's sp after C call) + // cp: current context (C callee-saved) + + // Result returned in r0 or r0+r1 by default. + + // NOTE: Invocations of builtins may return failure objects + // instead of a proper result. The builtin entry handles + // this by performing a garbage collection and retrying the + // builtin once. + + // Enter the exit frame that transitions from JavaScript to C++. + __ EnterExitFrame(mode_); + + // r4: number of arguments (C callee-saved) + // r5: pointer to builtin function (C callee-saved) + // r6: pointer to first argument (C callee-saved) + + Label throw_normal_exception; + Label throw_termination_exception; + Label throw_out_of_memory_exception; + + // Call into the runtime system. + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + false, + false, + -kPointerSize); + + // Do space-specific GC and retry runtime call. + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + true, + false, + 0); + + // Do full GC and retry runtime call one final time. + Failure* failure = Failure::InternalError(); + __ mov(r0, Operand(reinterpret_cast(failure))); + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + true, + true, + kPointerSize); + + __ bind(&throw_out_of_memory_exception); + GenerateThrowUncatchable(masm, OUT_OF_MEMORY); + + __ bind(&throw_termination_exception); + GenerateThrowUncatchable(masm, TERMINATION); + + __ bind(&throw_normal_exception); + GenerateThrowTOS(masm); +} + + +void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { + // r0: code entry + // r1: function + // r2: receiver + // r3: argc + // [sp+0]: argv + + Label invoke, exit; + + // Called from C, so do not pop argc and args on exit (preserve sp) + // No need to save register-passed args + // Save callee-saved registers (incl. cp and fp), sp, and lr + __ stm(db_w, sp, kCalleeSaved | lr.bit()); + + // Get address of argv, see stm above. + // r0: code entry + // r1: function + // r2: receiver + // r3: argc + __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv + + // Push a frame with special values setup to mark it as an entry frame. + // r0: code entry + // r1: function + // r2: receiver + // r3: argc + // r4: argv + __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. + int marker = is_construct ? 
StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; + __ mov(r7, Operand(Smi::FromInt(marker))); + __ mov(r6, Operand(Smi::FromInt(marker))); + __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address))); + __ ldr(r5, MemOperand(r5)); + __ Push(r8, r7, r6, r5); + + // Setup frame pointer for the frame to be pushed. + __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); + + // Call a faked try-block that does the invoke. + __ bl(&invoke); + + // Caught exception: Store result (exception) in the pending + // exception field in the JSEnv and return a failure sentinel. + // Coming in here the fp will be invalid because the PushTryHandler below + // sets it to 0 to signal the existence of the JSEntry frame. + __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); + __ str(r0, MemOperand(ip)); + __ mov(r0, Operand(reinterpret_cast(Failure::Exception()))); + __ b(&exit); + + // Invoke: Link this frame into the handler chain. + __ bind(&invoke); + // Must preserve r0-r4, r5-r7 are available. + __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); + // If an exception not caught by another handler occurs, this handler + // returns control to the code after the bl(&invoke) above, which + // restores all kCalleeSaved registers (including cp and fp) to their + // saved values before returning a failure to C. + + // Clear any pending exceptions. + __ mov(ip, Operand(ExternalReference::the_hole_value_location())); + __ ldr(r5, MemOperand(ip)); + __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); + __ str(r5, MemOperand(ip)); + + // Invoke the function by calling through JS entry trampoline builtin. + // Notice that we cannot store a reference to the trampoline code directly in + // this stub, because runtime stubs are not traversed when doing GC. + + // Expected registers by Builtins::JSEntryTrampoline + // r0: code entry + // r1: function + // r2: receiver + // r3: argc + // r4: argv + if (is_construct) { + ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline); + __ mov(ip, Operand(construct_entry)); + } else { + ExternalReference entry(Builtins::JSEntryTrampoline); + __ mov(ip, Operand(entry)); + } + __ ldr(ip, MemOperand(ip)); // deref address + + // Branch and link to JSEntryTrampoline. We don't use the double underscore + // macro for the add instruction because we don't want the coverage tool + // inserting instructions here after we read the pc. + __ mov(lr, Operand(pc)); + masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); + + // Unlink this frame from the handler chain. When reading the + // address of the next handler, there is no need to use the address + // displacement since the current stack pointer (sp) points directly + // to the stack handler. + __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset)); + __ mov(ip, Operand(ExternalReference(Top::k_handler_address))); + __ str(r3, MemOperand(ip)); + // No need to restore registers + __ add(sp, sp, Operand(StackHandlerConstants::kSize)); + + + __ bind(&exit); // r0 holds result + // Restore the top frame descriptors from the stack. + __ pop(r3); + __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); + __ str(r3, MemOperand(ip)); + + // Reset the stack to the callee saved registers. + __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); + + // Restore callee-saved registers and return. 
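+ // Including pc in the register list of the final ldm makes the load
+ // itself perform the return.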
+#ifdef DEBUG + if (FLAG_debug_code) { + __ mov(lr, Operand(pc)); + } +#endif + __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); +} + + +// This stub performs an instanceof, calling the builtin function if +// necessary. Uses r1 for the object, r0 for the function that it may +// be an instance of (these are fetched from the stack). +void InstanceofStub::Generate(MacroAssembler* masm) { + // Get the object - slow case for smis (we may need to throw an exception + // depending on the rhs). + Label slow, loop, is_instance, is_not_instance; + __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); + __ BranchOnSmi(r0, &slow); + + // Check that the left hand is a JS object and put map in r3. + __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE); + __ b(lt, &slow); + __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE)); + __ b(gt, &slow); + + // Get the prototype of the function (r4 is result, r2 is scratch). + __ ldr(r1, MemOperand(sp, 0)); + // r1 is function, r3 is map. + + // Look up the function and the map in the instanceof cache. + Label miss; + __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); + __ cmp(r1, ip); + __ b(ne, &miss); + __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); + __ cmp(r3, ip); + __ b(ne, &miss); + __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + __ pop(); + __ pop(); + __ mov(pc, Operand(lr)); + + __ bind(&miss); + __ TryGetFunctionPrototype(r1, r4, r2, &slow); + + // Check that the function prototype is a JS object. + __ BranchOnSmi(r4, &slow); + __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE); + __ b(lt, &slow); + __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE)); + __ b(gt, &slow); + + __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex); + __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex); + + // Register mapping: r3 is object map and r4 is function prototype. + // Get prototype of object into r2. + __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset)); + + // Loop through the prototype chain looking for the function prototype. + __ bind(&loop); + __ cmp(r2, Operand(r4)); + __ b(eq, &is_instance); + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(r2, ip); + __ b(eq, &is_not_instance); + __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); + __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset)); + __ jmp(&loop); + + __ bind(&is_instance); + __ mov(r0, Operand(Smi::FromInt(0))); + __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + __ pop(); + __ pop(); + __ mov(pc, Operand(lr)); // Return. + + __ bind(&is_not_instance); + __ mov(r0, Operand(Smi::FromInt(1))); + __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + __ pop(); + __ pop(); + __ mov(pc, Operand(lr)); // Return. + + // Slow-case. Tail call builtin. + __ bind(&slow); + __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS); +} + + +void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { + // The displacement is the offset of the last parameter (if any) + // relative to the frame pointer. + static const int kDisplacement = + StandardFrameConstants::kCallerSPOffset - kPointerSize; + + // Check that the key is a smi. + Label slow; + __ BranchOnNotSmi(r1, &slow); + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor; + __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); + __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ b(eq, &adaptor); + + // Check index against formal parameters count limit passed in + // through register r0. 
Use unsigned comparison to get negative + // check for free. + __ cmp(r1, r0); + __ b(cs, &slow); + + // Read the argument from the stack and return it. + __ sub(r3, r0, r1); + __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ ldr(r0, MemOperand(r3, kDisplacement)); + __ Jump(lr); + + // Arguments adaptor case: Check index against actual arguments + // limit found in the arguments adaptor frame. Use unsigned + // comparison to get negative check for free. + __ bind(&adaptor); + __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ cmp(r1, r0); + __ b(cs, &slow); + + // Read the argument from the adaptor frame and return it. + __ sub(r3, r0, r1); + __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ ldr(r0, MemOperand(r3, kDisplacement)); + __ Jump(lr); + + // Slow-case: Handle non-smi or out-of-bounds access to arguments + // by calling the runtime system. + __ bind(&slow); + __ push(r1); + __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); +} + + +void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { + // sp[0] : number of parameters + // sp[4] : receiver displacement + // sp[8] : function + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor_frame, try_allocate, runtime; + __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); + __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ b(eq, &adaptor_frame); + + // Get the length from the frame. + __ ldr(r1, MemOperand(sp, 0)); + __ b(&try_allocate); + + // Patch the arguments.length and the parameters pointer. + __ bind(&adaptor_frame); + __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ str(r1, MemOperand(sp, 0)); + __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); + __ str(r3, MemOperand(sp, 1 * kPointerSize)); + + // Try the new space allocation. Start out with computing the size + // of the arguments object and the elements array in words. + Label add_arguments_object; + __ bind(&try_allocate); + __ cmp(r1, Operand(0)); + __ b(eq, &add_arguments_object); + __ mov(r1, Operand(r1, LSR, kSmiTagSize)); + __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); + __ bind(&add_arguments_object); + __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize)); + + // Do the allocation of both objects in one go. + __ AllocateInNewSpace( + r1, + r0, + r2, + r3, + &runtime, + static_cast(TAG_OBJECT | SIZE_IN_WORDS)); + + // Get the arguments boilerplate from the current (global) context. + int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); + __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset)); + __ ldr(r4, MemOperand(r4, offset)); + + // Copy the JS object part. + __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize); + + // Setup the callee in-object property. + STATIC_ASSERT(Heap::arguments_callee_index == 0); + __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); + __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize)); + + // Get the length (smi tagged) and set that as an in-object property too. 
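+ // The length is reloaded from sp[0], which the adaptor frame path above
+ // may have patched.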
+ STATIC_ASSERT(Heap::arguments_length_index == 1); + __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); + __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize)); + + // If there are no actual arguments, we're done. + Label done; + __ cmp(r1, Operand(0)); + __ b(eq, &done); + + // Get the parameters pointer from the stack. + __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); + + // Setup the elements pointer in the allocated arguments object and + // initialize the header in the elements fixed array. + __ add(r4, r0, Operand(Heap::kArgumentsObjectSize)); + __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); + __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); + __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); + __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); + __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop. + + // Copy the fixed array slots. + Label loop; + // Setup r4 to point to the first array slot. + __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ bind(&loop); + // Pre-decrement r2 with kPointerSize on each iteration. + // Pre-decrement in order to skip receiver. + __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); + // Post-increment r4 with kPointerSize on each iteration. + __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); + __ sub(r1, r1, Operand(1)); + __ cmp(r1, Operand(0)); + __ b(ne, &loop); + + // Return and remove the on-stack parameters. + __ bind(&done); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + // Do the runtime call to allocate the arguments object. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); +} + + +void RegExpExecStub::Generate(MacroAssembler* masm) { + // Just jump directly to runtime if native RegExp is not selected at compile + // time or if regexp entry in generated code is turned off runtime switch or + // at compilation. +#ifdef V8_INTERPRETED_REGEXP + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); +#else // V8_INTERPRETED_REGEXP + if (!FLAG_regexp_entry_native) { + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + return; + } + + // Stack frame on entry. + // sp[0]: last_match_info (expected JSArray) + // sp[4]: previous index + // sp[8]: subject string + // sp[12]: JSRegExp object + + static const int kLastMatchInfoOffset = 0 * kPointerSize; + static const int kPreviousIndexOffset = 1 * kPointerSize; + static const int kSubjectOffset = 2 * kPointerSize; + static const int kJSRegExpOffset = 3 * kPointerSize; + + Label runtime, invoke_regexp; + + // Allocation of registers for this function. These are in callee save + // registers and will be preserved by the call to the native RegExp code, as + // this code is called using the normal C calling convention. When calling + // directly from generated code the native RegExp code will not do a GC and + // therefore the content of these registers are safe to use after the call. + Register subject = r4; + Register regexp_data = r5; + Register last_match_info_elements = r6; + + // Ensure that a RegExp stack is allocated. + ExternalReference address_of_regexp_stack_memory_address = + ExternalReference::address_of_regexp_stack_memory_address(); + ExternalReference address_of_regexp_stack_memory_size = + ExternalReference::address_of_regexp_stack_memory_size(); + __ mov(r0, Operand(address_of_regexp_stack_memory_size)); + __ ldr(r0, MemOperand(r0, 0)); + __ tst(r0, Operand(r0)); + __ b(eq, &runtime); + + // Check that the first argument is a JSRegExp object. 
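+ // A Smi must be filtered out before CompareObjectType, which loads the
+ // map from its argument and is therefore only valid for heap objects.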
+ __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); + STATIC_ASSERT(kSmiTag == 0); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &runtime); + __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); + __ b(ne, &runtime); + + // Check that the RegExp has been compiled (data contains a fixed array). + __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); + if (FLAG_debug_code) { + __ tst(regexp_data, Operand(kSmiTagMask)); + __ Check(nz, "Unexpected type for RegExp data, FixedArray expected"); + __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); + __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); + } + + // regexp_data: RegExp data (FixedArray) + // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. + __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); + __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); + __ b(ne, &runtime); + + // regexp_data: RegExp data (FixedArray) + // Check that the number of captures fit in the static offsets vector buffer. + __ ldr(r2, + FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); + // Calculate number of capture registers (number_of_captures + 1) * 2. This + // uses the asumption that smis are 2 * their untagged value. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + __ add(r2, r2, Operand(2)); // r2 was a smi. + // Check that the static offsets vector buffer is large enough. + __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize)); + __ b(hi, &runtime); + + // r2: Number of capture registers + // regexp_data: RegExp data (FixedArray) + // Check that the second argument is a string. + __ ldr(subject, MemOperand(sp, kSubjectOffset)); + __ tst(subject, Operand(kSmiTagMask)); + __ b(eq, &runtime); + Condition is_string = masm->IsObjectStringType(subject, r0); + __ b(NegateCondition(is_string), &runtime); + // Get the length of the string to r3. + __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset)); + + // r2: Number of capture registers + // r3: Length of subject string as a smi + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // Check that the third argument is a positive smi less than the subject + // string length. A negative value will be greater (unsigned comparison). + __ ldr(r0, MemOperand(sp, kPreviousIndexOffset)); + __ tst(r0, Operand(kSmiTagMask)); + __ b(ne, &runtime); + __ cmp(r3, Operand(r0)); + __ b(ls, &runtime); + + // r2: Number of capture registers + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // Check that the fourth object is a JSArray object. + __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &runtime); + __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); + __ b(ne, &runtime); + // Check that the JSArray is in fast case. + __ ldr(last_match_info_elements, + FieldMemOperand(r0, JSArray::kElementsOffset)); + __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); + __ cmp(r0, ip); + __ b(ne, &runtime); + // Check that the last match info has space for the capture registers and the + // additional information. 
+ __ ldr(r0, + FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); + __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead)); + __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); + __ b(gt, &runtime); + + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // Check the representation and encoding of the subject string. + Label seq_string; + __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); + __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); + // First check for flat string. + __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask)); + STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); + __ b(eq, &seq_string); + + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // Check for flat cons string. + // A flat cons string is a cons string where the second part is the empty + // string. In that case the subject string is just the first part of the cons + // string. Also in this case the first part of the cons string is known to be + // a sequential string or an external string. + STATIC_ASSERT(kExternalStringTag !=0); + STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); + __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag)); + __ b(ne, &runtime); + __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); + __ LoadRoot(r1, Heap::kEmptyStringRootIndex); + __ cmp(r0, r1); + __ b(ne, &runtime); + __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); + __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); + __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); + // Is first part a flat string? + STATIC_ASSERT(kSeqStringTag == 0); + __ tst(r0, Operand(kStringRepresentationMask)); + __ b(nz, &runtime); + + __ bind(&seq_string); + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // r0: Instance type of subject string + STATIC_ASSERT(4 == kAsciiStringTag); + STATIC_ASSERT(kTwoByteStringTag == 0); + // Find the code object based on the assumptions above. + __ and_(r0, r0, Operand(kStringEncodingMask)); + __ mov(r3, Operand(r0, ASR, 2), SetCC); + __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); + __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); + + // Check that the irregexp code has been generated for the actual string + // encoding. If it has, the field contains a code object otherwise it contains + // the hole. + __ CompareObjectType(r7, r0, r0, CODE_TYPE); + __ b(ne, &runtime); + + // r3: encoding of subject string (1 if ascii, 0 if two_byte); + // r7: code + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // Load used arguments before starting to push arguments for call to native + // RegExp code to avoid handling changing stack height. + __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); + __ mov(r1, Operand(r1, ASR, kSmiTagSize)); + + // r1: previous index + // r3: encoding of subject string (1 if ascii, 0 if two_byte); + // r7: code + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // All checks done. Now push arguments for native regexp code. + __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2); + + static const int kRegExpExecuteArguments = 7; + __ push(lr); + __ PrepareCallCFunction(kRegExpExecuteArguments, r0); + + // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript. 
+ __ mov(r0, Operand(1)); + __ str(r0, MemOperand(sp, 2 * kPointerSize)); + + // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area. + __ mov(r0, Operand(address_of_regexp_stack_memory_address)); + __ ldr(r0, MemOperand(r0, 0)); + __ mov(r2, Operand(address_of_regexp_stack_memory_size)); + __ ldr(r2, MemOperand(r2, 0)); + __ add(r0, r0, Operand(r2)); + __ str(r0, MemOperand(sp, 1 * kPointerSize)); + + // Argument 5 (sp[0]): static offsets vector buffer. + __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector())); + __ str(r0, MemOperand(sp, 0 * kPointerSize)); + + // For arguments 4 and 3 get string length, calculate start of string data and + // calculate the shift of the index (0 for ASCII and 1 for two byte). + __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset)); + __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ eor(r3, r3, Operand(1)); + // Argument 4 (r3): End of string data + // Argument 3 (r2): Start of string data + __ add(r2, r9, Operand(r1, LSL, r3)); + __ add(r3, r9, Operand(r0, LSL, r3)); + + // Argument 2 (r1): Previous index. + // Already there + + // Argument 1 (r0): Subject string. + __ mov(r0, subject); + + // Locate the code entry and call it. + __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ CallCFunction(r7, kRegExpExecuteArguments); + __ pop(lr); + + // r0: result + // subject: subject string (callee saved) + // regexp_data: RegExp data (callee saved) + // last_match_info_elements: Last match info elements (callee saved) + + // Check the result. + Label success; + __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); + __ b(eq, &success); + Label failure; + __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); + __ b(eq, &failure); + __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); + // If not exception it can only be retry. Handle that in the runtime system. + __ b(ne, &runtime); + // Result must now be exception. If there is no pending exception already a + // stack overflow (on the backtrack stack) was detected in RegExp code but + // haven't created the exception yet. Handle that in the runtime system. + // TODO(592): Rerunning the RegExp to get the stack overflow exception. + __ mov(r0, Operand(ExternalReference::the_hole_value_location())); + __ ldr(r0, MemOperand(r0, 0)); + __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address))); + __ ldr(r1, MemOperand(r1, 0)); + __ cmp(r0, r1); + __ b(eq, &runtime); + __ bind(&failure); + // For failure and exception return null. + __ mov(r0, Operand(Factory::null_value())); + __ add(sp, sp, Operand(4 * kPointerSize)); + __ Ret(); + + // Process the result from the native regexp code. + __ bind(&success); + __ ldr(r1, + FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); + // Calculate number of capture registers (number_of_captures + 1) * 2. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + __ add(r1, r1, Operand(2)); // r1 was a smi. + + // r1: number of capture registers + // r4: subject string + // Store the capture count. + __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi. + __ str(r2, FieldMemOperand(last_match_info_elements, + RegExpImpl::kLastCaptureCountOffset)); + // Store last subject and last input. + __ mov(r3, last_match_info_elements); // Moved up to reduce latency. 
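+  // The stores below need write barriers because the elements array may be in
+  // old space while the subject string may be a new space object.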
+ __ str(subject, + FieldMemOperand(last_match_info_elements, + RegExpImpl::kLastSubjectOffset)); + __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7); + __ str(subject, + FieldMemOperand(last_match_info_elements, + RegExpImpl::kLastInputOffset)); + __ mov(r3, last_match_info_elements); + __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7); + + // Get the static offsets vector filled by the native regexp code. + ExternalReference address_of_static_offsets_vector = + ExternalReference::address_of_static_offsets_vector(); + __ mov(r2, Operand(address_of_static_offsets_vector)); + + // r1: number of capture registers + // r2: offsets vector + Label next_capture, done; + // Capture register counter starts from number of capture registers and + // counts down until wraping after zero. + __ add(r0, + last_match_info_elements, + Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); + __ bind(&next_capture); + __ sub(r1, r1, Operand(1), SetCC); + __ b(mi, &done); + // Read the value from the static offsets vector buffer. + __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex)); + // Store the smi value in the last match info. + __ mov(r3, Operand(r3, LSL, kSmiTagSize)); + __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); + __ jmp(&next_capture); + __ bind(&done); + + // Return last match info. + __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); + __ add(sp, sp, Operand(4 * kPointerSize)); + __ Ret(); + + // Do the runtime call to execute the regexp. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); +#endif // V8_INTERPRETED_REGEXP +} + + +void CallFunctionStub::Generate(MacroAssembler* masm) { + Label slow; + + // If the receiver might be a value (string, number or boolean) check for this + // and box it if it is. + if (ReceiverMightBeValue()) { + // Get the receiver from the stack. + // function, receiver [, arguments] + Label receiver_is_value, receiver_is_js_object; + __ ldr(r1, MemOperand(sp, argc_ * kPointerSize)); + + // Check if receiver is a smi (which is a number value). + __ BranchOnSmi(r1, &receiver_is_value); + + // Check if the receiver is a valid JS object. + __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE); + __ b(ge, &receiver_is_js_object); + + // Call the runtime to box the value. + __ bind(&receiver_is_value); + __ EnterInternalFrame(); + __ push(r1); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS); + __ LeaveInternalFrame(); + __ str(r0, MemOperand(sp, argc_ * kPointerSize)); + + __ bind(&receiver_is_js_object); + } + + // Get the function to call from the stack. + // function, receiver [, arguments] + __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize)); + + // Check that the function is really a JavaScript function. + // r1: pushed function (to be verified) + __ BranchOnSmi(r1, &slow); + // Get the map of the function object. + __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); + __ b(ne, &slow); + + // Fast-case: Invoke the function now. + // r1: pushed function + ParameterCount actual(argc_); + __ InvokeFunction(r1, actual, JUMP_FUNCTION); + + // Slow-case: Non-function called. + __ bind(&slow); + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead + // of the original receiver from the call site). + __ str(r1, MemOperand(sp, argc_ * kPointerSize)); + __ mov(r0, Operand(argc_)); // Setup the number of arguments. 
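+  // Zero in r2 is the expected argument count for the arguments adaptor
+  // trampoline called below.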
+  __ mov(r2, Operand(0));
+  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+  __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
+          RelocInfo::CODE_TARGET);
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+         (lhs_.is(r1) && rhs_.is(r0)));
+
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+
+  const char* cc_name;
+  switch (cc_) {
+    case lt: cc_name = "LT"; break;
+    case gt: cc_name = "GT"; break;
+    case le: cc_name = "LE"; break;
+    case ge: cc_name = "GE"; break;
+    case eq: cc_name = "EQ"; break;
+    case ne: cc_name = "NE"; break;
+    default: cc_name = "UnknownCondition"; break;
+  }
+
+  const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
+  const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
+
+  const char* strict_name = "";
+  if (strict_ && (cc_ == eq || cc_ == ne)) {
+    strict_name = "_STRICT";
+  }
+
+  const char* never_nan_nan_name = "";
+  if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
+    never_nan_nan_name = "_NO_NAN";
+  }
+
+  const char* include_number_compare_name = "";
+  if (!include_number_compare_) {
+    include_number_compare_name = "_NO_NUMBER";
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "CompareStub_%s%s%s%s%s%s",
+               cc_name,
+               lhs_name,
+               rhs_name,
+               strict_name,
+               never_nan_nan_name,
+               include_number_compare_name);
+  return name_;
+}
+
+
+int CompareStub::MinorKey() {
+  // Encode the parameters in a unique 16 bit value. To avoid duplicate
+  // stubs the never NaN NaN condition is only taken into account if the
+  // condition is equals.
+  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
+  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+         (lhs_.is(r1) && rhs_.is(r0)));
+  return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
+         | RegisterField::encode(lhs_.is(r0))
+         | StrictField::encode(strict_)
+         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+         | IncludeNumberCompareField::encode(include_number_compare_);
+}
+
+
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+  Label flat_string;
+  Label ascii_string;
+  Label got_char_code;
+
+  // If the receiver is a smi trigger the non-string case.
+  __ BranchOnSmi(object_, receiver_not_string_);
+
+  // Fetch the instance type of the receiver into result register.
+  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  // If the receiver is not a string trigger the non-string case.
+  __ tst(result_, Operand(kIsNotStringMask));
+  __ b(ne, receiver_not_string_);
+
+  // If the index is non-smi trigger the non-smi case.
+  __ BranchOnNotSmi(index_, &index_not_smi_);
+
+  // Put smi-tagged index into scratch register.
+  __ mov(scratch_, index_);
+  __ bind(&got_smi_index_);
+
+  // Check for index out of range.
+  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
+  __ cmp(ip, Operand(scratch_));
+  __ b(ls, index_out_of_range_);
+
+  // We need special handling for non-flat strings.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ tst(result_, Operand(kStringRepresentationMask));
+  __ b(eq, &flat_string);
+
+  // Handle non-flat strings.
+  __ tst(result_, Operand(kIsConsStringMask));
+  __ b(eq, &call_runtime_);
+
+  // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if + // this is really a flat string in a cons string). If that is not + // the case we would rather go to the runtime system now to flatten + // the string. + __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset)); + __ LoadRoot(ip, Heap::kEmptyStringRootIndex); + __ cmp(result_, Operand(ip)); + __ b(ne, &call_runtime_); + // Get the first of the two strings and load its instance type. + __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); + __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); + __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); + // If the first cons component is also non-flat, then go to runtime. + STATIC_ASSERT(kSeqStringTag == 0); + __ tst(result_, Operand(kStringRepresentationMask)); + __ b(nz, &call_runtime_); + + // Check for 1-byte or 2-byte string. + __ bind(&flat_string); + STATIC_ASSERT(kAsciiStringTag != 0); + __ tst(result_, Operand(kStringEncodingMask)); + __ b(nz, &ascii_string); + + // 2-byte string. + // Load the 2-byte character code into the result register. We can + // add without shifting since the smi tag size is the log2 of the + // number of bytes in a two-byte character. + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); + __ add(scratch_, object_, Operand(scratch_)); + __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); + __ jmp(&got_char_code); + + // ASCII string. + // Load the byte into the result register. + __ bind(&ascii_string); + __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize)); + __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize)); + + __ bind(&got_char_code); + __ mov(result_, Operand(result_, LSL, kSmiTagSize)); + __ bind(&exit_); +} + + +void StringCharCodeAtGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + __ Abort("Unexpected fallthrough to CharCodeAt slow case"); + + // Index is not a smi. + __ bind(&index_not_smi_); + // If index is a heap number, try converting it to an integer. + __ CheckMap(index_, + scratch_, + Heap::kHeapNumberMapRootIndex, + index_not_number_, + true); + call_helper.BeforeCall(masm); + __ Push(object_, index_); + __ push(index_); // Consumed by runtime conversion function. + if (index_flags_ == STRING_INDEX_IS_NUMBER) { + __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); + } else { + ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); + // NumberToSmi discards numbers that are not exact integers. + __ CallRuntime(Runtime::kNumberToSmi, 1); + } + // Save the conversion result before the pop instructions below + // have a chance to overwrite it. + __ Move(scratch_, r0); + __ pop(index_); + __ pop(object_); + // Reload the instance type. + __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); + __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); + call_helper.AfterCall(masm); + // If index is still not a smi, it must be out of range. + __ BranchOnNotSmi(scratch_, index_out_of_range_); + // Otherwise, return to the fast path. + __ jmp(&got_smi_index_); + + // Call runtime. We get here when the receiver is a string and the + // index is a number, but the code of getting the actual character + // is too complex (e.g., when the string needs to be flattened). 
+ __ bind(&call_runtime_); + call_helper.BeforeCall(masm); + __ Push(object_, index_); + __ CallRuntime(Runtime::kStringCharCodeAt, 2); + __ Move(result_, r0); + call_helper.AfterCall(masm); + __ jmp(&exit_); + + __ Abort("Unexpected fallthrough from CharCodeAt slow case"); +} + + +// ------------------------------------------------------------------------- +// StringCharFromCodeGenerator + +void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { + // Fast case of Heap::LookupSingleCharacterStringFromCode. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiShiftSize == 0); + ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); + __ tst(code_, + Operand(kSmiTagMask | + ((~String::kMaxAsciiCharCode) << kSmiTagSize))); + __ b(nz, &slow_case_); + + __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); + // At this point code register contains smi tagged ascii char code. + STATIC_ASSERT(kSmiTag == 0); + __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(result_, Operand(ip)); + __ b(eq, &slow_case_); + __ bind(&exit_); +} + + +void StringCharFromCodeGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + __ Abort("Unexpected fallthrough to CharFromCode slow case"); + + __ bind(&slow_case_); + call_helper.BeforeCall(masm); + __ push(code_); + __ CallRuntime(Runtime::kCharFromCode, 1); + __ Move(result_, r0); + call_helper.AfterCall(masm); + __ jmp(&exit_); + + __ Abort("Unexpected fallthrough from CharFromCode slow case"); +} + + +// ------------------------------------------------------------------------- +// StringCharAtGenerator + +void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { + char_code_at_generator_.GenerateFast(masm); + char_from_code_generator_.GenerateFast(masm); +} + + +void StringCharAtGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + char_code_at_generator_.GenerateSlow(masm, call_helper); + char_from_code_generator_.GenerateSlow(masm, call_helper); +} + + +class StringHelper : public AllStatic { + public: + // Generate code for copying characters using a simple loop. This should only + // be used in places where the number of characters is small and the + // additional setup and checking in GenerateCopyCharactersLong adds too much + // overhead. Copying of overlapping regions is not supported. + // Dest register ends at the position after the last character written. + static void GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + bool ascii); + + // Generate code for copying a large number of characters. This function + // is allowed to spend extra time setting up conditions to make copying + // faster. Copying of overlapping regions is not supported. + // Dest register ends at the position after the last character written. + static void GenerateCopyCharactersLong(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + int flags); + + + // Probe the symbol table for a two character string. If the string is + // not found by probing a jump to the label not_found is performed. This jump + // does not guarantee that the string is not in the symbol table. 
If the + // string is found the code falls through with the string in register r0. + // Contents of both c1 and c2 registers are modified. At the exit c1 is + // guaranteed to contain halfword with low and high bytes equal to + // initial contents of c1 and c2 respectively. + static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + Register c1, + Register c2, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + Label* not_found); + + // Generate string hash. + static void GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character); + + static void GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character); + + static void GenerateHashGetHash(MacroAssembler* masm, + Register hash); + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); +}; + + +void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + bool ascii) { + Label loop; + Label done; + // This loop just copies one character at a time, as it is only used for very + // short strings. + if (!ascii) { + __ add(count, count, Operand(count), SetCC); + } else { + __ cmp(count, Operand(0)); + } + __ b(eq, &done); + + __ bind(&loop); + __ ldrb(scratch, MemOperand(src, 1, PostIndex)); + // Perform sub between load and dependent store to get the load time to + // complete. + __ sub(count, count, Operand(1), SetCC); + __ strb(scratch, MemOperand(dest, 1, PostIndex)); + // last iteration. + __ b(gt, &loop); + + __ bind(&done); +} + + +enum CopyCharactersFlags { + COPY_ASCII = 1, + DEST_ALWAYS_ALIGNED = 2 +}; + + +void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + int flags) { + bool ascii = (flags & COPY_ASCII) != 0; + bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; + + if (dest_always_aligned && FLAG_debug_code) { + // Check that destination is actually word aligned if the flag says + // that it is. + __ tst(dest, Operand(kPointerAlignmentMask)); + __ Check(eq, "Destination of copy not aligned."); + } + + const int kReadAlignment = 4; + const int kReadAlignmentMask = kReadAlignment - 1; + // Ensure that reading an entire aligned word containing the last character + // of a string will not read outside the allocated area (because we pad up + // to kObjectAlignment). + STATIC_ASSERT(kObjectAlignment >= kReadAlignment); + // Assumes word reads and writes are little endian. + // Nothing to do for zero characters. + Label done; + if (!ascii) { + __ add(count, count, Operand(count), SetCC); + } else { + __ cmp(count, Operand(0)); + } + __ b(eq, &done); + + // Assume that you cannot read (or write) unaligned. + Label byte_loop; + // Must copy at least eight bytes, otherwise just do it one byte at a time. + __ cmp(count, Operand(8)); + __ add(count, dest, Operand(count)); + Register limit = count; // Read until src equals this. + __ b(lt, &byte_loop); + + if (!dest_always_aligned) { + // Align dest by byte copying. Copies between zero and three bytes. 
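+  // scratch4 = dest & kReadAlignmentMask is the misalignment; 4 - scratch4
+  // bytes are copied. The comparison with 2 makes the second and third byte
+  // copies conditional (le and lt), so only the needed bytes are moved.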
+ __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC); + Label dest_aligned; + __ b(eq, &dest_aligned); + __ cmp(scratch4, Operand(2)); + __ ldrb(scratch1, MemOperand(src, 1, PostIndex)); + __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le); + __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt); + __ strb(scratch1, MemOperand(dest, 1, PostIndex)); + __ strb(scratch2, MemOperand(dest, 1, PostIndex), le); + __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt); + __ bind(&dest_aligned); + } + + Label simple_loop; + + __ sub(scratch4, dest, Operand(src)); + __ and_(scratch4, scratch4, Operand(0x03), SetCC); + __ b(eq, &simple_loop); + // Shift register is number of bits in a source word that + // must be combined with bits in the next source word in order + // to create a destination word. + + // Complex loop for src/dst that are not aligned the same way. + { + Label loop; + __ mov(scratch4, Operand(scratch4, LSL, 3)); + Register left_shift = scratch4; + __ and_(src, src, Operand(~3)); // Round down to load previous word. + __ ldr(scratch1, MemOperand(src, 4, PostIndex)); + // Store the "shift" most significant bits of scratch in the least + // signficant bits (i.e., shift down by (32-shift)). + __ rsb(scratch2, left_shift, Operand(32)); + Register right_shift = scratch2; + __ mov(scratch1, Operand(scratch1, LSR, right_shift)); + + __ bind(&loop); + __ ldr(scratch3, MemOperand(src, 4, PostIndex)); + __ sub(scratch5, limit, Operand(dest)); + __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); + __ str(scratch1, MemOperand(dest, 4, PostIndex)); + __ mov(scratch1, Operand(scratch3, LSR, right_shift)); + // Loop if four or more bytes left to copy. + // Compare to eight, because we did the subtract before increasing dst. + __ sub(scratch5, scratch5, Operand(8), SetCC); + __ b(ge, &loop); + } + // There is now between zero and three bytes left to copy (negative that + // number is in scratch5), and between one and three bytes already read into + // scratch1 (eight times that number in scratch4). We may have read past + // the end of the string, but because objects are aligned, we have not read + // past the end of the object. + // Find the minimum of remaining characters to move and preloaded characters + // and write those as bytes. + __ add(scratch5, scratch5, Operand(4), SetCC); + __ b(eq, &done); + __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); + // Move minimum of bytes read and bytes left to copy to scratch4. + __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); + // Between one and three (value in scratch5) characters already read into + // scratch ready to write. + __ cmp(scratch5, Operand(2)); + __ strb(scratch1, MemOperand(dest, 1, PostIndex)); + __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); + __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); + __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); + __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); + // Copy any remaining bytes. + __ b(&byte_loop); + + // Simple loop. + // Copy words from src to dst, until less than four bytes left. + // Both src and dest are word aligned. + __ bind(&simple_loop); + { + Label loop; + __ bind(&loop); + __ ldr(scratch1, MemOperand(src, 4, PostIndex)); + __ sub(scratch3, limit, Operand(dest)); + __ str(scratch1, MemOperand(dest, 4, PostIndex)); + // Compare to 8, not 4, because we do the substraction before increasing + // dest. + __ cmp(scratch3, Operand(8)); + __ b(ge, &loop); + } + + // Copy bytes from src to dst until dst hits limit. 
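+  // The byte load below is predicated on lt so that nothing is read once dest
+  // has reached limit; the ge branch then exits the loop.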
+  __ bind(&byte_loop);
+  __ cmp(dest, Operand(limit));
+  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
+  __ b(ge, &done);
+  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+  __ b(&byte_loop);
+
+  __ bind(&done);
+}
+
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                        Register c1,
+                                                        Register c2,
+                                                        Register scratch1,
+                                                        Register scratch2,
+                                                        Register scratch3,
+                                                        Register scratch4,
+                                                        Register scratch5,
+                                                        Label* not_found) {
+  // Register scratch3 is the general scratch register in this function.
+  Register scratch = scratch3;
+
+  // Make sure that both characters are not digits as such strings have a
+  // different hash algorithm. Don't try to look for these in the symbol table.
+  Label not_array_index;
+  __ sub(scratch, c1, Operand(static_cast<int>('0')));
+  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
+  __ b(hi, &not_array_index);
+  __ sub(scratch, c2, Operand(static_cast<int>('0')));
+  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
+
+  // If check failed combine both characters into single halfword.
+  // This is required by the contract of the method: code at the
+  // not_found branch expects this combination in c1 register.
+  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
+  __ b(ls, not_found);
+
+  __ bind(&not_array_index);
+  // Calculate the two character string hash.
+  Register hash = scratch1;
+  StringHelper::GenerateHashInit(masm, hash, c1);
+  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
+  StringHelper::GenerateHashGetHash(masm, hash);
+
+  // Collect the two characters in a register.
+  Register chars = c1;
+  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
+
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash: hash of two character string.
+
+  // Load symbol table
+  // Load address of first element of the symbol table.
+  Register symbol_table = c2;
+  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+  // Load undefined value
+  Register undefined = scratch4;
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  // Calculate capacity mask from the symbol table capacity.
+  Register mask = scratch2;
+  __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+  __ mov(mask, Operand(mask, ASR, 1));
+  __ sub(mask, mask, Operand(1));
+
+  // Calculate untagged address of the first element of the symbol table.
+  Register first_symbol_table_element = symbol_table;
+  __ add(first_symbol_table_element, symbol_table,
+         Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+
+  // Registers
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash: hash of two character string
+  // mask: capacity mask
+  // first_symbol_table_element: address of the first element of
+  //                             the symbol table
+  // scratch: -
+
+  // Perform a number of probes in the symbol table.
+  static const int kProbes = 4;
+  Label found_in_symbol_table;
+  Label next_probe[kProbes];
+  for (int i = 0; i < kProbes; i++) {
+    Register candidate = scratch5;  // Scratch register contains candidate.
+
+    // Calculate entry in symbol table.
+    if (i > 0) {
+      __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+    } else {
+      __ mov(candidate, hash);
+    }
+
+    __ and_(candidate, candidate, Operand(mask));
+
+    // Load the entry from the symbol table.
+    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+    __ ldr(candidate,
+           MemOperand(first_symbol_table_element,
+                      candidate,
+                      LSL,
+                      kPointerSizeLog2));
+
+    // If entry is undefined no string with this hash can be found.
+ __ cmp(candidate, undefined); + __ b(eq, not_found); + + // If length is not 2 the string is not a candidate. + __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset)); + __ cmp(scratch, Operand(Smi::FromInt(2))); + __ b(ne, &next_probe[i]); + + // Check that the candidate is a non-external ascii string. + __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, + &next_probe[i]); + + // Check if the two characters match. + // Assumes that word load is little endian. + __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); + __ cmp(chars, scratch); + __ b(eq, &found_in_symbol_table); + __ bind(&next_probe[i]); + } + + // No matching 2 character string found by probing. + __ jmp(not_found); + + // Scratch register contains result when we fall through to here. + Register result = scratch; + __ bind(&found_in_symbol_table); + __ Move(r0, result); +} + + +void StringHelper::GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character) { + // hash = character + (character << 10); + __ add(hash, character, Operand(character, LSL, 10)); + // hash ^= hash >> 6; + __ eor(hash, hash, Operand(hash, ASR, 6)); +} + + +void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character) { + // hash += character; + __ add(hash, hash, Operand(character)); + // hash += hash << 10; + __ add(hash, hash, Operand(hash, LSL, 10)); + // hash ^= hash >> 6; + __ eor(hash, hash, Operand(hash, ASR, 6)); +} + + +void StringHelper::GenerateHashGetHash(MacroAssembler* masm, + Register hash) { + // hash += hash << 3; + __ add(hash, hash, Operand(hash, LSL, 3)); + // hash ^= hash >> 11; + __ eor(hash, hash, Operand(hash, ASR, 11)); + // hash += hash << 15; + __ add(hash, hash, Operand(hash, LSL, 15), SetCC); + + // if (hash == 0) hash = 27; + __ mov(hash, Operand(27), LeaveCC, nz); +} + + +void SubStringStub::Generate(MacroAssembler* masm) { + Label runtime; + + // Stack frame on entry. + // lr: return address + // sp[0]: to + // sp[4]: from + // sp[8]: string + + // This stub is called from the native-call %_SubString(...), so + // nothing can be assumed about the arguments. It is tested that: + // "string" is a sequential string, + // both "from" and "to" are smis, and + // 0 <= from <= to <= string.length. + // If any of these assumptions fail, we call the runtime system. + + static const int kToOffset = 0 * kPointerSize; + static const int kFromOffset = 1 * kPointerSize; + static const int kStringOffset = 2 * kPointerSize; + + + // Check bounds and smi-ness. + __ ldr(r7, MemOperand(sp, kToOffset)); + __ ldr(r6, MemOperand(sp, kFromOffset)); + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + // I.e., arithmetic shift right by one un-smi-tags. + __ mov(r2, Operand(r7, ASR, 1), SetCC); + __ mov(r3, Operand(r6, ASR, 1), SetCC, cc); + // If either r2 or r6 had the smi tag bit set, then carry is set now. + __ b(cs, &runtime); // Either "from" or "to" is not a smi. + __ b(mi, &runtime); // From is negative. + + __ sub(r2, r2, Operand(r3), SetCC); + __ b(mi, &runtime); // Fail if from > to. + // Special handling of sub-strings of length 1 and 2. One character strings + // are handled in the runtime system (looked up in the single character + // cache). Two character strings are looked for in the symbol cache. 
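+  // Fall through only for lengths of two or more; lengths 0 and 1 are left to
+  // the runtime system.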
+  __ cmp(r2, Operand(2));
+  __ b(lt, &runtime);
+
+  // r2: length
+  // r3: from index (untagged smi)
+  // r6: from (smi)
+  // r7: to (smi)
+
+  // Make sure first argument is a sequential (or flat) string.
+  __ ldr(r5, MemOperand(sp, kStringOffset));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ tst(r5, Operand(kSmiTagMask));
+  __ b(eq, &runtime);
+  Condition is_string = masm->IsObjectStringType(r5, r1);
+  __ b(NegateCondition(is_string), &runtime);
+
+  // r1: instance type
+  // r2: length
+  // r3: from index (untagged smi)
+  // r5: string
+  // r6: from (smi)
+  // r7: to (smi)
+  Label seq_string;
+  __ and_(r4, r1, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  __ cmp(r4, Operand(kConsStringTag));
+  __ b(gt, &runtime);  // External strings go to runtime.
+  __ b(lt, &seq_string);  // Sequential strings are handled directly.
+
+  // Cons string. Try to recurse (once) on the first substring.
+  // (This adds a little more generality than necessary to handle flattened
+  // cons strings, but not much).
+  __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
+  __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
+  __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+  __ tst(r1, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ b(ne, &runtime);  // Cons and External strings go to runtime.
+
+  // Definitely a sequential string.
+  __ bind(&seq_string);
+
+  // r1: instance type.
+  // r2: length
+  // r3: from index (untagged smi)
+  // r5: string
+  // r6: from (smi)
+  // r7: to (smi)
+  __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
+  __ cmp(r4, Operand(r7));
+  __ b(lt, &runtime);  // Fail if to > length.
+
+  // r1: instance type.
+  // r2: result string length.
+  // r3: from index (untagged smi)
+  // r5: string.
+  // r6: from offset (smi)
+  // Check for flat ascii string.
+  Label non_ascii_flat;
+  __ tst(r1, Operand(kStringEncodingMask));
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ b(eq, &non_ascii_flat);
+
+  Label result_longer_than_two;
+  __ cmp(r2, Operand(2));
+  __ b(gt, &result_longer_than_two);
+
+  // Sub string of length 2 requested.
+  // Get the two characters forming the sub string.
+  __ add(r5, r5, Operand(r3));
+  __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
+  __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
+
+  // Try to lookup two character string in symbol table.
+  Label make_two_character_string;
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
+  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // r2: result string length.
+  // r3: two characters combined into halfword in little endian byte order.
+  __ bind(&make_two_character_string);
+  __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
+  __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&result_longer_than_two);
+
+  // Allocate the result.
+  __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
+
+  // r0: result string.
+  // r2: result string length.
+  // r5: string.
+  // r6: from offset (smi)
+  // Locate first character of result.
+  __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate 'from' character of string.
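+  // r6 is still a smi, so shifting it right by one (ASR 1) untags it before it
+  // is used as a byte offset into the ASCII string.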
+ __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(r5, r5, Operand(r6, ASR, 1)); + + // r0: result string. + // r1: first character of result string. + // r2: result string length. + // r5: first character of sub string to copy. + STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); + StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, + COPY_ASCII | DEST_ALWAYS_ALIGNED); + __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + __ bind(&non_ascii_flat); + // r2: result string length. + // r5: string. + // r6: from offset (smi) + // Check for flat two byte string. + + // Allocate the result. + __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime); + + // r0: result string. + // r2: result string length. + // r5: string. + // Locate first character of result. + __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // Locate 'from' character of string. + __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // As "from" is a smi it is 2 times the value which matches the size of a two + // byte character. + __ add(r5, r5, Operand(r6)); + + // r0: result string. + // r1: first character of result. + // r2: result length. + // r5: first character of string to copy. + STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); + StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, + DEST_ALWAYS_ALIGNED); + __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + // Just jump to runtime to create the sub string. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kSubString, 3, 1); +} + + +void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4) { + Label compare_lengths; + // Find minimum length and length difference. + __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); + __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); + __ sub(scratch3, scratch1, Operand(scratch2), SetCC); + Register length_delta = scratch3; + __ mov(scratch1, scratch2, LeaveCC, gt); + Register min_length = scratch1; + STATIC_ASSERT(kSmiTag == 0); + __ tst(min_length, Operand(min_length)); + __ b(eq, &compare_lengths); + + // Untag smi. + __ mov(min_length, Operand(min_length, ASR, kSmiTagSize)); + + // Setup registers so that we only need to increment one register + // in the loop. + __ add(scratch2, min_length, + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(left, left, Operand(scratch2)); + __ add(right, right, Operand(scratch2)); + // Registers left and right points to the min_length character of strings. + __ rsb(min_length, min_length, Operand(-1)); + Register index = min_length; + // Index starts at -min_length. + + { + // Compare loop. + Label loop; + __ bind(&loop); + // Compare characters. + __ add(index, index, Operand(1), SetCC); + __ ldrb(scratch2, MemOperand(left, index), ne); + __ ldrb(scratch4, MemOperand(right, index), ne); + // Skip to compare lengths with eq condition true. + __ b(eq, &compare_lengths); + __ cmp(scratch2, scratch4); + __ b(eq, &loop); + // Fallthrough with eq condition false. + } + // Compare lengths - strings up to min-length are equal. 
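+  // The conditional moves below leave a smi in r0: EQUAL (0) if the strings
+  // match, otherwise LESS or GREATER, chosen from the length difference when
+  // the common prefix matched or from the last character comparison otherwise.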
+  __ bind(&compare_lengths);
+  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+  // Use zero length_delta as result.
+  __ mov(r0, Operand(length_delta), SetCC, eq);
+  // Fall through to here if characters compare not-equal.
+  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
+  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
+  __ Ret();
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  // Stack frame on entry.
+  // sp[0]: right string
+  // sp[4]: left string
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // left
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // right
+
+  Label not_same;
+  __ cmp(r0, r1);
+  __ b(ne, &not_same);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+  __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&not_same);
+
+  // Check that both objects are sequential ascii strings.
+  __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
+
+  // Compare flat ascii strings natively. Remove arguments from stack first.
+  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
+
+  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+  Label string_add_runtime;
+  // Stack on entry:
+  // sp[0]: second argument.
+  // sp[4]: first argument.
+
+  // Load the two arguments.
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
+
+  // Make sure that both arguments are strings if not known in advance.
+  if (string_check_) {
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
+    // Load instance types.
+    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+    STATIC_ASSERT(kStringTag == 0);
+    // If either is not a string, go to runtime.
+    __ tst(r4, Operand(kIsNotStringMask));
+    __ tst(r5, Operand(kIsNotStringMask), eq);
+    __ b(ne, &string_add_runtime);
+  }
+
+  // Both arguments are strings.
+  // r0: first string
+  // r1: second string
+  // r4: first string instance type (if string_check_)
+  // r5: second string instance type (if string_check_)
+  {
+    Label strings_not_empty;
+    // Check if either of the strings is empty. In that case return the other.
+    __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
+    __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
+    STATIC_ASSERT(kSmiTag == 0);
+    __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
+    __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
+    STATIC_ASSERT(kSmiTag == 0);
+    // Else test if second string is empty.
+    __ cmp(r3, Operand(Smi::FromInt(0)), ne);
+    __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
+
+    __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+    __ add(sp, sp, Operand(2 * kPointerSize));
+    __ Ret();
+
+    __ bind(&strings_not_empty);
+  }
+
+  __ mov(r2, Operand(r2, ASR, kSmiTagSize));
+  __ mov(r3, Operand(r3, ASR, kSmiTagSize));
+  // Both strings are non-empty.
+ // r0: first string + // r1: second string + // r2: length of first string + // r3: length of second string + // r4: first string instance type (if string_check_) + // r5: second string instance type (if string_check_) + // Look at the length of the result of adding the two strings. + Label string_add_flat_result, longer_than_two; + // Adding two lengths can't overflow. + STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); + __ add(r6, r2, Operand(r3)); + // Use the runtime system when adding two one character strings, as it + // contains optimizations for this specific case using the symbol table. + __ cmp(r6, Operand(2)); + __ b(ne, &longer_than_two); + + // Check that both strings are non-external ascii strings. + if (!string_check_) { + __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); + __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); + } + __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, + &string_add_runtime); + + // Get the two characters forming the sub string. + __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); + __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); + + // Try to lookup two character string in symbol table. If it is not found + // just allocate a new one. + Label make_two_character_string; + StringHelper::GenerateTwoCharacterSymbolTableProbe( + masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&make_two_character_string); + // Resulting string has length 2 and first chars of two strings + // are combined into single halfword in r2 register. + // So we can fill resulting string without two loops by a single + // halfword store instruction (which assumes that processor is + // in a little endian mode) + __ mov(r6, Operand(2)); + __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime); + __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&longer_than_two); + // Check if resulting string will be flat. + __ cmp(r6, Operand(String::kMinNonFlatLength)); + __ b(lt, &string_add_flat_result); + // Handle exceptionally long strings in the runtime system. + STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); + ASSERT(IsPowerOf2(String::kMaxLength + 1)); + // kMaxLength + 1 is representable as shifted literal, kMaxLength is not. + __ cmp(r6, Operand(String::kMaxLength + 1)); + __ b(hs, &string_add_runtime); + + // If result is not supposed to be flat, allocate a cons string object. + // If both strings are ascii the result is an ascii cons string. + if (!string_check_) { + __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); + __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); + } + Label non_ascii, allocated, ascii_data; + STATIC_ASSERT(kTwoByteStringTag == 0); + __ tst(r4, Operand(kStringEncodingMask)); + __ tst(r5, Operand(kStringEncodingMask), ne); + __ b(eq, &non_ascii); + + // Allocate an ASCII cons string. + __ bind(&ascii_data); + __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime); + __ bind(&allocated); + // Fill the fields of the cons string. 
+ __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); + __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); + __ mov(r0, Operand(r7)); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&non_ascii); + // At least one of the strings is two-byte. Check whether it happens + // to contain only ascii characters. + // r4: first instance type. + // r5: second instance type. + __ tst(r4, Operand(kAsciiDataHintMask)); + __ tst(r5, Operand(kAsciiDataHintMask), ne); + __ b(ne, &ascii_data); + __ eor(r4, r4, Operand(r5)); + STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); + __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); + __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); + __ b(eq, &ascii_data); + + // Allocate a two byte cons string. + __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime); + __ jmp(&allocated); + + // Handle creating a flat result. First check that both strings are + // sequential and that they have the same encoding. + // r0: first string + // r1: second string + // r2: length of first string + // r3: length of second string + // r4: first string instance type (if string_check_) + // r5: second string instance type (if string_check_) + // r6: sum of lengths. + __ bind(&string_add_flat_result); + if (!string_check_) { + __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); + __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); + } + // Check that both strings are sequential. + STATIC_ASSERT(kSeqStringTag == 0); + __ tst(r4, Operand(kStringRepresentationMask)); + __ tst(r5, Operand(kStringRepresentationMask), eq); + __ b(ne, &string_add_runtime); + // Now check if both strings have the same encoding (ASCII/Two-byte). + // r0: first string. + // r1: second string. + // r2: length of first string. + // r3: length of second string. + // r6: sum of lengths.. + Label non_ascii_string_add_flat_result; + ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test. + __ eor(r7, r4, Operand(r5)); + __ tst(r7, Operand(kStringEncodingMask)); + __ b(ne, &string_add_runtime); + // And see if it's ASCII or two-byte. + __ tst(r4, Operand(kStringEncodingMask)); + __ b(eq, &non_ascii_string_add_flat_result); + + // Both strings are sequential ASCII strings. We also know that they are + // short (since the sum of the lengths is less than kMinNonFlatLength). + // r6: length of resulting flat string + __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime); + // Locate first character of result. + __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // Locate first character of first argument. + __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // r0: first character of first string. + // r1: second string. + // r2: length of first string. + // r3: length of second string. + // r6: first character of result. + // r7: result string. + StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true); + + // Load second argument and locate first character. + __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // r1: first character of second string. + // r3: length of second string. + // r6: next character of result. + // r7: result string. 
+ StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); + __ mov(r0, Operand(r7)); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&non_ascii_string_add_flat_result); + // Both strings are sequential two byte strings. + // r0: first string. + // r1: second string. + // r2: length of first string. + // r3: length of second string. + // r6: sum of length of strings. + __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime); + // r0: first string. + // r1: second string. + // r2: length of first string. + // r3: length of second string. + // r7: result string. + + // Locate first character of result. + __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // Locate first character of first argument. + __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + + // r0: first character of first string. + // r1: second string. + // r2: length of first string. + // r3: length of second string. + // r6: first character of result. + // r7: result string. + StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false); + + // Locate first character of second argument. + __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + + // r1: first character of second string. + // r3: length of second string. + // r6: next character of result (after copy of first string). + // r7: result string. + StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); + + __ mov(r0, Operand(r7)); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + // Just jump to runtime to add the two strings. + __ bind(&string_add_runtime); + __ TailCallRuntime(Runtime::kStringAdd, 2, 1); +} + + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h new file mode 100644 index 0000000..ec30a4d --- /dev/null +++ b/src/arm/code-stubs-arm.h @@ -0,0 +1,475 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_ARM_CODE_STUBS_ARM_H_ +#define V8_ARM_CODE_STUBS_ARM_H_ + +#include "codegen-inl.h" +#include "ic-inl.h" +#include "ast.h" + +namespace v8 { +namespace internal { + + +// Compute a transcendental math function natively, or call the +// TranscendentalCache runtime function. +class TranscendentalCacheStub: public CodeStub { + public: + explicit TranscendentalCacheStub(TranscendentalCache::Type type) + : type_(type) {} + void Generate(MacroAssembler* masm); + private: + TranscendentalCache::Type type_; + Major MajorKey() { return TranscendentalCache; } + int MinorKey() { return type_; } + Runtime::FunctionId RuntimeFunction(); +}; + + +class ToBooleanStub: public CodeStub { + public: + explicit ToBooleanStub(Register tos) : tos_(tos) { } + + void Generate(MacroAssembler* masm); + + private: + Register tos_; + Major MajorKey() { return ToBoolean; } + int MinorKey() { return tos_.code(); } +}; + + +class GenericBinaryOpStub : public CodeStub { + public: + GenericBinaryOpStub(Token::Value op, + OverwriteMode mode, + Register lhs, + Register rhs, + int constant_rhs = CodeGenerator::kUnknownIntValue) + : op_(op), + mode_(mode), + lhs_(lhs), + rhs_(rhs), + constant_rhs_(constant_rhs), + specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)), + runtime_operands_type_(BinaryOpIC::DEFAULT), + name_(NULL) { } + + GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) + : op_(OpBits::decode(key)), + mode_(ModeBits::decode(key)), + lhs_(LhsRegister(RegisterBits::decode(key))), + rhs_(RhsRegister(RegisterBits::decode(key))), + constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))), + specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)), + runtime_operands_type_(type_info), + name_(NULL) { } + + private: + Token::Value op_; + OverwriteMode mode_; + Register lhs_; + Register rhs_; + int constant_rhs_; + bool specialized_on_rhs_; + BinaryOpIC::TypeInfo runtime_operands_type_; + char* name_; + + static const int kMaxKnownRhs = 0x40000000; + static const int kKnownRhsKeyBits = 6; + + // Minor key encoding in 17 bits. + class ModeBits: public BitField {}; + class OpBits: public BitField {}; + class TypeInfoBits: public BitField {}; + class RegisterBits: public BitField {}; + class KnownIntBits: public BitField {}; + + Major MajorKey() { return GenericBinaryOp; } + int MinorKey() { + ASSERT((lhs_.is(r0) && rhs_.is(r1)) || + (lhs_.is(r1) && rhs_.is(r0))); + // Encode the parameters in a unique 18 bit value. 
+ return OpBits::encode(op_) + | ModeBits::encode(mode_) + | KnownIntBits::encode(MinorKeyForKnownInt()) + | TypeInfoBits::encode(runtime_operands_type_) + | RegisterBits::encode(lhs_.is(r0)); + } + + void Generate(MacroAssembler* masm); + void HandleNonSmiBitwiseOp(MacroAssembler* masm, + Register lhs, + Register rhs); + void HandleBinaryOpSlowCases(MacroAssembler* masm, + Label* not_smi, + Register lhs, + Register rhs, + const Builtins::JavaScript& builtin); + void GenerateTypeTransition(MacroAssembler* masm); + + static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) { + if (constant_rhs == CodeGenerator::kUnknownIntValue) return false; + if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3; + if (op == Token::MOD) { + if (constant_rhs <= 1) return false; + if (constant_rhs <= 10) return true; + if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true; + return false; + } + return false; + } + + int MinorKeyForKnownInt() { + if (!specialized_on_rhs_) return 0; + if (constant_rhs_ <= 10) return constant_rhs_ + 1; + ASSERT(IsPowerOf2(constant_rhs_)); + int key = 12; + int d = constant_rhs_; + while ((d & 1) == 0) { + key++; + d >>= 1; + } + ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits)); + return key; + } + + int KnownBitsForMinorKey(int key) { + if (!key) return 0; + if (key <= 11) return key - 1; + int d = 1; + while (key != 12) { + key--; + d <<= 1; + } + return d; + } + + Register LhsRegister(bool lhs_is_r0) { + return lhs_is_r0 ? r0 : r1; + } + + Register RhsRegister(bool lhs_is_r0) { + return lhs_is_r0 ? r1 : r0; + } + + bool ShouldGenerateSmiCode() { + return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) && + runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && + runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + bool ShouldGenerateFPCode() { + return runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + virtual int GetCodeKind() { return Code::BINARY_OP_IC; } + + virtual InlineCacheState GetICState() { + return BinaryOpIC::ToState(runtime_operands_type_); + } + + const char* GetName(); + +#ifdef DEBUG + void Print() { + if (!specialized_on_rhs_) { + PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); + } else { + PrintF("GenericBinaryOpStub (%s by %d)\n", + Token::String(op_), + constant_rhs_); + } + } +#endif +}; + + +// Flag that indicates how to generate code for the stub StringAddStub. +enum StringAddFlags { + NO_STRING_ADD_FLAGS = 0, + NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. +}; + + +class StringAddStub: public CodeStub { + public: + explicit StringAddStub(StringAddFlags flags) { + string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); + } + + private: + Major MajorKey() { return StringAdd; } + int MinorKey() { return string_check_ ? 0 : 1; } + + void Generate(MacroAssembler* masm); + + // Should the stub check whether arguments are strings? + bool string_check_; +}; + + +class SubStringStub: public CodeStub { + public: + SubStringStub() {} + + private: + Major MajorKey() { return SubString; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); +}; + + + +class StringCompareStub: public CodeStub { + public: + StringCompareStub() { } + + // Compare two flat ASCII strings and returns result in r0. + // Does not use the stack. 
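// Worked example, illustrative only: the known-int key scheme in
// GenericBinaryOpStub above (MinorKeyForKnownInt / KnownBitsForMinorKey)
// stores small divisors 2..10 as key = rhs + 1 and larger powers of two as
// key = 12 + log2(rhs), so the mapping round-trips:
//   rhs =  2  ->  key =  3  ->  decodes back to  2
//   rhs = 10  ->  key = 11  ->  decodes back to 10
//   rhs = 16  ->  key = 16  (12 + 4, since 16 == 1 << 4)  ->  decodes to 16
// Even the largest admissible divisor, kMaxKnownRhs = 2^30, yields key 42,
// which still fits in the kKnownRhsKeyBits (6) bits of the minor key.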
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4); + + private: + Major MajorKey() { return StringCompare; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); +}; + + +// This stub can do a fast mod operation without using fp. +// It is tail called from the GenericBinaryOpStub and it always +// returns an answer. It never causes GC so it doesn't need a real frame. +// +// The inputs are always positive Smis. This is never called +// where the denominator is a power of 2. We handle that separately. +// +// If we consider the denominator as an odd number multiplied by a power of 2, +// then: +// * The exponent (power of 2) is in the shift_distance register. +// * The odd number is in the odd_number register. It is always in the range +// of 3 to 25. +// * The bits from the numerator that are to be copied to the answer (there are +// shift_distance of them) are in the mask_bits register. +// * The other bits of the numerator have been shifted down and are in the lhs +// register. +class IntegerModStub : public CodeStub { + public: + IntegerModStub(Register result, + Register shift_distance, + Register odd_number, + Register mask_bits, + Register lhs, + Register scratch) + : result_(result), + shift_distance_(shift_distance), + odd_number_(odd_number), + mask_bits_(mask_bits), + lhs_(lhs), + scratch_(scratch) { + // We don't code these in the minor key, so they should always be the same. + // We don't really want to fix that since this stub is rather large and we + // don't want many copies of it. + ASSERT(shift_distance_.is(r9)); + ASSERT(odd_number_.is(r4)); + ASSERT(mask_bits_.is(r3)); + ASSERT(scratch_.is(r5)); + } + + private: + Register result_; + Register shift_distance_; + Register odd_number_; + Register mask_bits_; + Register lhs_; + Register scratch_; + + // Minor key encoding in 16 bits. + class ResultRegisterBits: public BitField {}; + class LhsRegisterBits: public BitField {}; + + Major MajorKey() { return IntegerMod; } + int MinorKey() { + // Encode the parameters in a unique 16 bit value. + return ResultRegisterBits::encode(result_.code()) + | LhsRegisterBits::encode(lhs_.code()); + } + + void Generate(MacroAssembler* masm); + + const char* GetName() { return "IntegerModStub"; } + + // Utility functions. + void DigitSum(MacroAssembler* masm, + Register lhs, + int mask, + int shift, + Label* entry); + void DigitSum(MacroAssembler* masm, + Register lhs, + Register scratch, + int mask, + int shift1, + int shift2, + Label* entry); + void ModGetInRangeBySubtraction(MacroAssembler* masm, + Register lhs, + int shift, + int rhs); + void ModReduce(MacroAssembler* masm, + Register lhs, + int max, + int denominator); + void ModAnswer(MacroAssembler* masm, + Register result, + Register shift_distance, + Register mask_bits, + Register sum_of_digits); + + +#ifdef DEBUG + void Print() { PrintF("IntegerModStub\n"); } +#endif +}; + + +// This stub can convert a signed int32 to a heap number (double). It does +// not work for int32s that are in Smi range! No GC occurs during this stub +// so you don't have to set up the frame. 
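// Illustrative sketch, not part of the patch: IntegerModStub above relies on
// the identity, for the positive inputs it handles,
//   n mod (odd << s)  ==  (((n >> s) mod odd) << s) | (n & ((1 << s) - 1)),
// i.e. the low s bits of the numerator pass straight into the answer and only
// the shifted-down bits need a mod by the small odd factor. In plain C++
// (hypothetical helper, for exposition only):
//   int ModByOddTimesPowerOfTwo(int n, int odd, int s) {
//     int mask_bits = n & ((1 << s) - 1);  // copied verbatim (mask_bits reg).
//     int lhs = n >> s;                    // remaining bits (lhs register).
//     return ((lhs % odd) << s) | mask_bits;
//   }
// Example: 1000 mod 24, with 24 == 3 << 3: mask_bits = 0, lhs = 125,
// 125 mod 3 = 2, answer = (2 << 3) | 0 = 16 = 1000 mod 24. The helper names
// (DigitSum, ModReduce, ModAnswer) suggest the stub does the "mod odd" part
// with digit sums rather than division, which is feasible because the odd
// factor is known to lie in the range 3 to 25.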
+class WriteInt32ToHeapNumberStub : public CodeStub { + public: + WriteInt32ToHeapNumberStub(Register the_int, + Register the_heap_number, + Register scratch) + : the_int_(the_int), + the_heap_number_(the_heap_number), + scratch_(scratch) { } + + private: + Register the_int_; + Register the_heap_number_; + Register scratch_; + + // Minor key encoding in 16 bits. + class IntRegisterBits: public BitField {}; + class HeapNumberRegisterBits: public BitField {}; + class ScratchRegisterBits: public BitField {}; + + Major MajorKey() { return WriteInt32ToHeapNumber; } + int MinorKey() { + // Encode the parameters in a unique 16 bit value. + return IntRegisterBits::encode(the_int_.code()) + | HeapNumberRegisterBits::encode(the_heap_number_.code()) + | ScratchRegisterBits::encode(scratch_.code()); + } + + void Generate(MacroAssembler* masm); + + const char* GetName() { return "WriteInt32ToHeapNumberStub"; } + +#ifdef DEBUG + void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); } +#endif +}; + + +class NumberToStringStub: public CodeStub { + public: + NumberToStringStub() { } + + // Generate code to do a lookup in the number string cache. If the number in + // the register object is found in the cache the generated code falls through + // with the result in the result register. The object and the result register + // can be the same. If the number is not found in the cache the code jumps to + // the label not_found with only the content of register object unchanged. + static void GenerateLookupNumberStringCache(MacroAssembler* masm, + Register object, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + bool object_is_smi, + Label* not_found); + + private: + Major MajorKey() { return NumberToString; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); + + const char* GetName() { return "NumberToStringStub"; } +}; + + +class RecordWriteStub : public CodeStub { + public: + RecordWriteStub(Register object, Register offset, Register scratch) + : object_(object), offset_(offset), scratch_(scratch) { } + + void Generate(MacroAssembler* masm); + + private: + Register object_; + Register offset_; + Register scratch_; + + // Minor key encoding in 12 bits. 4 bits for each of the three + // registers (object, offset and scratch) OOOOAAAASSSS. + class ScratchBits: public BitField {}; + class OffsetBits: public BitField {}; + class ObjectBits: public BitField {}; + + Major MajorKey() { return RecordWrite; } + + int MinorKey() { + // Encode the registers. 
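// Illustrative note, not part of the patch: per the "OOOOAAAASSSS" comment
// above, the three ARM register codes are packed four bits each into a 12-bit
// key (the BitField template arguments are missing from the patch text, but
// the implied layout is object in the top nibble, offset in the middle,
// scratch in the low nibble). For example, object = r1, offset = r2,
// scratch = r3 gives
//   key = (1 << 8) | (2 << 4) | 3 = 0x123.
// Four bits per field suffice because ARM core registers have codes 0..15.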
+ return ObjectBits::encode(object_.code()) | + OffsetBits::encode(offset_.code()) | + ScratchBits::encode(scratch_.code()); + } + +#ifdef DEBUG + void Print() { + PrintF("RecordWriteStub (object reg %d), (offset reg %d)," + " (scratch reg %d)\n", + object_.code(), offset_.code(), scratch_.code()); + } +#endif +}; + + +} } // namespace v8::internal + +#endif // V8_ARM_CODE_STUBS_ARM_H_ diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc index 47f5cbb..89c51a2 100644 --- a/src/arm/codegen-arm.cc +++ b/src/arm/codegen-arm.cc @@ -30,6 +30,7 @@ #if defined(V8_TARGET_ARCH_ARM) #include "bootstrapper.h" +#include "code-stubs-arm.h" #include "codegen-inl.h" #include "compiler.h" #include "debug.h" @@ -49,27 +50,6 @@ namespace v8 { namespace internal { -static void EmitIdenticalObjectComparison(MacroAssembler* masm, - Label* slow, - Condition cc, - bool never_nan_nan); -static void EmitSmiNonsmiComparison(MacroAssembler* masm, - Register lhs, - Register rhs, - Label* lhs_not_nan, - Label* slow, - bool strict); -static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); -static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, - Register lhs, - Register rhs); -static void MultiplyByKnownInt(MacroAssembler* masm, - Register source, - Register destination, - int known_int); -static bool IsEasyToMultiplyBy(int x); - - #define __ ACCESS_MASM(masm_) // ------------------------------------------------------------------------- @@ -1049,6 +1029,43 @@ static int BitPosition(unsigned x) { } +// Can we multiply by x with max two shifts and an add. +// This answers yes to all integers from 2 to 10. +static bool IsEasyToMultiplyBy(int x) { + if (x < 2) return false; // Avoid special cases. + if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows. + if (IsPowerOf2(x)) return true; // Simple shift. + if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift. + if (IsPowerOf2(x + 1)) return true; // Patterns like 11111. + return false; +} + + +// Can multiply by anything that IsEasyToMultiplyBy returns true for. +// Source and destination may be the same register. This routine does +// not set carry and overflow the way a mul instruction would. +static void InlineMultiplyByKnownInt(MacroAssembler* masm, + Register source, + Register destination, + int known_int) { + if (IsPowerOf2(known_int)) { + masm->mov(destination, Operand(source, LSL, BitPosition(known_int))); + } else if (PopCountLessThanEqual2(known_int)) { + int first_bit = BitPosition(known_int); + int second_bit = BitPosition(known_int ^ (1 << first_bit)); + masm->add(destination, source, + Operand(source, LSL, second_bit - first_bit)); + if (first_bit != 0) { + masm->mov(destination, Operand(destination, LSL, first_bit)); + } + } else { + ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111. + int the_bit = BitPosition(known_int + 1); + masm->rsb(destination, source, Operand(source, LSL, the_bit)); + } +} + + void CodeGenerator::SmiOperation(Token::Value op, Handle value, bool reversed, @@ -1359,7 +1376,7 @@ void CodeGenerator::SmiOperation(Token::Value op, // brevity to comprehensiveness. 
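// Worked examples, illustrative only, of the shift-and-add decomposition used
// by InlineMultiplyByKnownInt above:
//   x * 8   (power of two)           ->  x << 3
//   x * 10  (two bits set, 0b1010)   ->  (x + (x << 2)) << 1
//   x * 7   (0b111, i.e. 2^3 - 1)    ->  (x << 3) - x          (rsb pattern)
// IsEasyToMultiplyBy() only admits constants covered by one of these three
// patterns, which is why it answers yes for every integer from 2 to 10.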
__ tst(tos, Operand(mask)); deferred->Branch(ne); - MultiplyByKnownInt(masm_, tos, tos, int_value); + InlineMultiplyByKnownInt(masm_, tos, tos, int_value); deferred->BindExit(); frame_->EmitPush(tos); break; @@ -7056,1923 +7073,18 @@ void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) { } -void FastNewClosureStub::Generate(MacroAssembler* masm) { - // Create a new closure from the given function info in new - // space. Set the context to the current context in cp. - Label gc; - - // Pop the function info from the stack. - __ pop(r3); - - // Attempt to allocate new JSFunction in new space. - __ AllocateInNewSpace(JSFunction::kSize, - r0, - r1, - r2, - &gc, - TAG_OBJECT); - - // Compute the function map in the current global context and set that - // as the map of the allocated object. - __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); - __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); - __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); - - // Initialize the rest of the function. We don't have to update the - // write barrier because the allocated object is in new space. - __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex); - __ LoadRoot(r2, Heap::kTheHoleValueRootIndex); - __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset)); - __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset)); - __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset)); - __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); - __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); - __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); - - // Initialize the code pointer in the function to be the one - // found in the shared function info object. - __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); - __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); - - // Return result. The argument function info has been popped already. - __ Ret(); - - // Create a new closure through the slower runtime call. - __ bind(&gc); - __ Push(cp, r3); - __ TailCallRuntime(Runtime::kNewClosure, 2, 1); -} - - -void FastNewContextStub::Generate(MacroAssembler* masm) { - // Try to allocate the context in new space. - Label gc; - int length = slots_ + Context::MIN_CONTEXT_SLOTS; - - // Attempt to allocate the context in new space. - __ AllocateInNewSpace(FixedArray::SizeFor(length), - r0, - r1, - r2, - &gc, - TAG_OBJECT); - - // Load the function from the stack. - __ ldr(r3, MemOperand(sp, 0)); - - // Setup the object header. - __ LoadRoot(r2, Heap::kContextMapRootIndex); - __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ mov(r2, Operand(Smi::FromInt(length))); - __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); - - // Setup the fixed slots. - __ mov(r1, Operand(Smi::FromInt(0))); - __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX))); - __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX))); - __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX))); - __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX))); - - // Copy the global object from the surrounding context. 
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX))); - - // Initialize the rest of the slots to undefined. - __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); - for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { - __ str(r1, MemOperand(r0, Context::SlotOffset(i))); - } - - // Remove the on-stack argument and return. - __ mov(cp, r0); - __ pop(); - __ Ret(); - - // Need to collect. Call into runtime system. - __ bind(&gc); - __ TailCallRuntime(Runtime::kNewContext, 1, 1); -} - - -void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { - // Stack layout on entry: - // - // [sp]: constant elements. - // [sp + kPointerSize]: literal index. - // [sp + (2 * kPointerSize)]: literals array. - - // All sizes here are multiples of kPointerSize. - int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0; - int size = JSArray::kSize + elements_size; - - // Load boilerplate object into r3 and check if we need to create a - // boilerplate. - Label slow_case; - __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); - __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); - __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(r3, ip); - __ b(eq, &slow_case); - - if (FLAG_debug_code) { - const char* message; - Heap::RootListIndex expected_map_index; - if (mode_ == CLONE_ELEMENTS) { - message = "Expected (writable) fixed array"; - expected_map_index = Heap::kFixedArrayMapRootIndex; - } else { - ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS); - message = "Expected copy-on-write fixed array"; - expected_map_index = Heap::kFixedCOWArrayMapRootIndex; - } - __ push(r3); - __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); - __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); - __ LoadRoot(ip, expected_map_index); - __ cmp(r3, ip); - __ Assert(eq, message); - __ pop(r3); - } - - // Allocate both the JS array and the elements array in one big - // allocation. This avoids multiple limit checks. - __ AllocateInNewSpace(size, - r0, - r1, - r2, - &slow_case, - TAG_OBJECT); - - // Copy the JS array part. - for (int i = 0; i < JSArray::kSize; i += kPointerSize) { - if ((i != JSArray::kElementsOffset) || (length_ == 0)) { - __ ldr(r1, FieldMemOperand(r3, i)); - __ str(r1, FieldMemOperand(r0, i)); - } - } - - if (length_ > 0) { - // Get hold of the elements array of the boilerplate and setup the - // elements pointer in the resulting object. - __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); - __ add(r2, r0, Operand(JSArray::kSize)); - __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset)); - - // Copy the elements array. - __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize); - } - - // Return and remove the on-stack parameters. - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Ret(); - - __ bind(&slow_case); - __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); -} - - -// Takes a Smi and converts to an IEEE 64 bit floating point value in two -// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and -// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a -// scratch register. Destroys the source register. No GC occurs during this -// stub so you don't have to set up the frame. 
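// Worked example, illustrative only: in the IEEE 754 layout described in the
// comment above, the Smi 5 must come out as 5.0 = 1.25 * 2^2, i.e.
//   sign     = 0
//   exponent = 1023 + 2 = 1025 (0x401)
//   fraction = .01000...  (only the 2^-2 mantissa bit set)
// giving the 64-bit pattern 0x4014000000000000: the stub's exponent result
// register ends up holding 0x40140000 and the mantissa register 0.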
-class ConvertToDoubleStub : public CodeStub { - public: - ConvertToDoubleStub(Register result_reg_1, - Register result_reg_2, - Register source_reg, - Register scratch_reg) - : result1_(result_reg_1), - result2_(result_reg_2), - source_(source_reg), - zeros_(scratch_reg) { } - - private: - Register result1_; - Register result2_; - Register source_; - Register zeros_; - - // Minor key encoding in 16 bits. - class ModeBits: public BitField {}; - class OpBits: public BitField {}; - - Major MajorKey() { return ConvertToDouble; } - int MinorKey() { - // Encode the parameters in a unique 16 bit value. - return result1_.code() + - (result2_.code() << 4) + - (source_.code() << 8) + - (zeros_.code() << 12); - } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "ConvertToDoubleStub"; } - -#ifdef DEBUG - void Print() { PrintF("ConvertToDoubleStub\n"); } -#endif -}; - - -void ConvertToDoubleStub::Generate(MacroAssembler* masm) { -#ifndef BIG_ENDIAN_FLOATING_POINT - Register exponent = result1_; - Register mantissa = result2_; -#else - Register exponent = result2_; - Register mantissa = result1_; -#endif - Label not_special; - // Convert from Smi to integer. - __ mov(source_, Operand(source_, ASR, kSmiTagSize)); - // Move sign bit from source to destination. This works because the sign bit - // in the exponent word of the double has the same position and polarity as - // the 2's complement sign bit in a Smi. - STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); - __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); - // Subtract from 0 if source was negative. - __ rsb(source_, source_, Operand(0), LeaveCC, ne); - - // We have -1, 0 or 1, which we treat specially. Register source_ contains - // absolute value: it is either equal to 1 (special case of -1 and 1), - // greater than 1 (not a special case) or less than 1 (special case of 0). - __ cmp(source_, Operand(1)); - __ b(gt, ¬_special); - - // For 1 or -1 we need to or in the 0 exponent (biased to 1023). - static const uint32_t exponent_word_for_1 = - HeapNumber::kExponentBias << HeapNumber::kExponentShift; - __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); - // 1, 0 and -1 all have 0 for the second word. - __ mov(mantissa, Operand(0)); - __ Ret(); - - __ bind(¬_special); - // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5. - // Gets the wrong answer for 0, but we already checked for that case above. - __ CountLeadingZeros(zeros_, source_, mantissa); - // Compute exponent and or it into the exponent register. - // We use mantissa as a scratch register here. Use a fudge factor to - // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts - // that fit in the ARM's constant field. - int fudge = 0x400; - __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge)); - __ add(mantissa, mantissa, Operand(fudge)); - __ orr(exponent, - exponent, - Operand(mantissa, LSL, HeapNumber::kExponentShift)); - // Shift up the source chopping the top bit off. - __ add(zeros_, zeros_, Operand(1)); - // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. - __ mov(source_, Operand(source_, LSL, zeros_)); - // Compute lower part of fraction (last 12 bits). - __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); - // And the top (top 20 bits). - __ orr(exponent, - exponent, - Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); - __ Ret(); -} - - -// See comment for class. 
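// Worked example, illustrative only: in the non-special path above the biased
// exponent is computed from the leading-zero count as
//   exponent = 31 + kExponentBias - zeros.
// For the (already untagged) value 5 there are 29 leading zeros, so
//   exponent = 31 + 1023 - 29 = 1025,
// matching 5 = 1.25 * 2^2 (the power 2 being 31 - 29). The constant is applied
// in two steps (rsb with the fudged value, then add 0x400) only because the
// full constant does not fit in an ARM immediate operand.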
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { - Label max_negative_int; - // the_int_ has the answer which is a signed int32 but not a Smi. - // We test for the special value that has a different exponent. This test - // has the neat side effect of setting the flags according to the sign. - STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); - __ cmp(the_int_, Operand(0x80000000u)); - __ b(eq, &max_negative_int); - // Set up the correct exponent in scratch_. All non-Smi int32s have the same. - // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). - uint32_t non_smi_exponent = - (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; - __ mov(scratch_, Operand(non_smi_exponent)); - // Set the sign bit in scratch_ if the value was negative. - __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); - // Subtract from 0 if the value was negative. - __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs); - // We should be masking the implict first digit of the mantissa away here, - // but it just ends up combining harmlessly with the last digit of the - // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get - // the most significant 1 to hit the last bit of the 12 bit sign and exponent. - ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); - const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; - __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance)); - __ str(scratch_, FieldMemOperand(the_heap_number_, - HeapNumber::kExponentOffset)); - __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance)); - __ str(scratch_, FieldMemOperand(the_heap_number_, - HeapNumber::kMantissaOffset)); - __ Ret(); - - __ bind(&max_negative_int); - // The max negative int32 is stored as a positive number in the mantissa of - // a double because it uses a sign bit instead of using two's complement. - // The actual mantissa bits stored are all 0 because the implicit most - // significant 1 bit is not stored. - non_smi_exponent += 1 << HeapNumber::kExponentShift; - __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent)); - __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); - __ mov(ip, Operand(0)); - __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); - __ Ret(); -} - - -// Handle the case where the lhs and rhs are the same object. -// Equality is almost reflexive (everything but NaN), so this is a test -// for "identity and not NaN". -static void EmitIdenticalObjectComparison(MacroAssembler* masm, - Label* slow, - Condition cc, - bool never_nan_nan) { - Label not_identical; - Label heap_number, return_equal; - __ cmp(r0, r1); - __ b(ne, ¬_identical); - - // The two objects are identical. If we know that one of them isn't NaN then - // we now know they test equal. - if (cc != eq || !never_nan_nan) { - // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), - // so we do the second best thing - test it ourselves. - // They are both equal and they are not both Smis so both of them are not - // Smis. If it's not a heap number, then return equal. - if (cc == lt || cc == gt) { - __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); - __ b(ge, slow); - } else { - __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); - __ b(eq, &heap_number); - // Comparing JS objects with <=, >= is complicated. 
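// Worked example, illustrative only: WriteInt32ToHeapNumberStub above handles
// two shapes of input. A typical non-Smi int32 has magnitude in [2^30, 2^31),
// so its double uses the fixed biased exponent 1023 + 30; the one exception is
// kMinInt = -2^31, which is exactly -1.0 * 2^31, so the exponent is bumped by
// one and the stored mantissa stays zero (the leading 1 bit is implicit):
//    2^30  ->  0x41D0000000000000  (sign 0, exponent 0x41D, mantissa 0)
//   -2^31  ->  0xC1E0000000000000  (sign 1, exponent 0x41E, mantissa 0)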
- if (cc != eq) { - __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); - __ b(ge, slow); - // Normally here we fall through to return_equal, but undefined is - // special: (undefined == undefined) == true, but - // (undefined <= undefined) == false! See ECMAScript 11.8.5. - if (cc == le || cc == ge) { - __ cmp(r4, Operand(ODDBALL_TYPE)); - __ b(ne, &return_equal); - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - __ cmp(r0, r2); - __ b(ne, &return_equal); - if (cc == le) { - // undefined <= undefined should fail. - __ mov(r0, Operand(GREATER)); - } else { - // undefined >= undefined should fail. - __ mov(r0, Operand(LESS)); - } - __ Ret(); - } - } - } - } - - __ bind(&return_equal); - if (cc == lt) { - __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. - } else if (cc == gt) { - __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. - } else { - __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. - } - __ Ret(); - - if (cc != eq || !never_nan_nan) { - // For less and greater we don't have to check for NaN since the result of - // x < x is false regardless. For the others here is some code to check - // for NaN. - if (cc != lt && cc != gt) { - __ bind(&heap_number); - // It is a heap number, so return non-equal if it's NaN and equal if it's - // not NaN. - - // The representation of NaN values has all exponent bits (52..62) set, - // and not all mantissa bits (0..51) clear. - // Read top bits of double representation (second word of value). - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - // Test that exponent bits are all set. - __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); - // NaNs have all-one exponents so they sign extend to -1. - __ cmp(r3, Operand(-1)); - __ b(ne, &return_equal); - - // Shift out flag and all exponent bits, retaining only mantissa. - __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); - // Or with all low-bits of mantissa. - __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ orr(r0, r3, Operand(r2), SetCC); - // For equal we already have the right value in r0: Return zero (equal) - // if all bits in mantissa are zero (it's an Infinity) and non-zero if - // not (it's a NaN). For <= and >= we need to load r0 with the failing - // value if it's a NaN. - if (cc != eq) { - // All-zero means Infinity means equal. - __ Ret(eq); - if (cc == le) { - __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. - } else { - __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. - } - } - __ Ret(); - } - // No fall through here. - } - - __ bind(¬_identical); -} - - -// See comment at call site. -static void EmitSmiNonsmiComparison(MacroAssembler* masm, - Register lhs, - Register rhs, - Label* lhs_not_nan, - Label* slow, - bool strict) { - ASSERT((lhs.is(r0) && rhs.is(r1)) || - (lhs.is(r1) && rhs.is(r0))); - - Label rhs_is_smi; - __ tst(rhs, Operand(kSmiTagMask)); - __ b(eq, &rhs_is_smi); - - // Lhs is a Smi. Check whether the rhs is a heap number. - __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE); - if (strict) { - // If rhs is not a number and lhs is a Smi then strict equality cannot - // succeed. Return non-equal - // If rhs is r0 then there is already a non zero value in it. - if (!rhs.is(r0)) { - __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); - } - __ Ret(ne); - } else { - // Smi compared non-strictly with a non-Smi non-heap-number. Call - // the runtime. - __ b(ne, slow); - } - - // Lhs is a smi, rhs is a number. 
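// Illustrative note, not part of the patch: the heap-number NaN test in
// EmitIdenticalObjectComparison above uses the IEEE 754 rule that a double is
// NaN exactly when all 11 exponent bits are set and the 52 mantissa bits are
// not all zero. For a raw 64-bit pattern `bits`:
//   bool is_nan = ((bits >> 52) & 0x7FF) == 0x7FF &&
//                 (bits & 0xFFFFFFFFFFFFFULL) != 0;
// Infinity has the same all-ones exponent but a zero mantissa, which is why
// the code can answer "equal" when the OR of the mantissa bits comes out zero.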
- if (CpuFeatures::IsSupported(VFP3)) { - // Convert lhs to a double in d7. - CpuFeatures::Scope scope(VFP3); - __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); - // Load the double from rhs, tagged HeapNumber r0, to d6. - __ sub(r7, rhs, Operand(kHeapObjectTag)); - __ vldr(d6, r7, HeapNumber::kValueOffset); - } else { - __ push(lr); - // Convert lhs to a double in r2, r3. - __ mov(r7, Operand(lhs)); - ConvertToDoubleStub stub1(r3, r2, r7, r6); - __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); - // Load rhs to a double in r0, r1. - __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); - __ pop(lr); - } - - // We now have both loaded as doubles but we can skip the lhs nan check - // since it's a smi. - __ jmp(lhs_not_nan); - - __ bind(&rhs_is_smi); - // Rhs is a smi. Check whether the non-smi lhs is a heap number. - __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); - if (strict) { - // If lhs is not a number and rhs is a smi then strict equality cannot - // succeed. Return non-equal. - // If lhs is r0 then there is already a non zero value in it. - if (!lhs.is(r0)) { - __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); - } - __ Ret(ne); - } else { - // Smi compared non-strictly with a non-smi non-heap-number. Call - // the runtime. - __ b(ne, slow); - } - - // Rhs is a smi, lhs is a heap number. - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - // Load the double from lhs, tagged HeapNumber r1, to d7. - __ sub(r7, lhs, Operand(kHeapObjectTag)); - __ vldr(d7, r7, HeapNumber::kValueOffset); - // Convert rhs to a double in d6 . - __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); - } else { - __ push(lr); - // Load lhs to a double in r2, r3. - __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); - // Convert rhs to a double in r0, r1. - __ mov(r7, Operand(rhs)); - ConvertToDoubleStub stub2(r1, r0, r7, r6); - __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); - } - // Fall through to both_loaded_as_doubles. -} - - -void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) { - bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); - Register rhs_exponent = exp_first ? r0 : r1; - Register lhs_exponent = exp_first ? r2 : r3; - Register rhs_mantissa = exp_first ? r1 : r0; - Register lhs_mantissa = exp_first ? r3 : r2; - Label one_is_nan, neither_is_nan; - - __ Sbfx(r4, - lhs_exponent, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - // NaNs have all-one exponents so they sign extend to -1. - __ cmp(r4, Operand(-1)); - __ b(ne, lhs_not_nan); - __ mov(r4, - Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), - SetCC); - __ b(ne, &one_is_nan); - __ cmp(lhs_mantissa, Operand(0)); - __ b(ne, &one_is_nan); - - __ bind(lhs_not_nan); - __ Sbfx(r4, - rhs_exponent, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - // NaNs have all-one exponents so they sign extend to -1. - __ cmp(r4, Operand(-1)); - __ b(ne, &neither_is_nan); - __ mov(r4, - Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), - SetCC); - __ b(ne, &one_is_nan); - __ cmp(rhs_mantissa, Operand(0)); - __ b(eq, &neither_is_nan); - - __ bind(&one_is_nan); - // NaN comparisons always fail. - // Load whatever we need in r0 to make the comparison fail. - if (cc == lt || cc == le) { - __ mov(r0, Operand(GREATER)); - } else { - __ mov(r0, Operand(LESS)); - } - __ Ret(); - - __ bind(&neither_is_nan); -} - - -// See comment at call site. 
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { - bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); - Register rhs_exponent = exp_first ? r0 : r1; - Register lhs_exponent = exp_first ? r2 : r3; - Register rhs_mantissa = exp_first ? r1 : r0; - Register lhs_mantissa = exp_first ? r3 : r2; - - // r0, r1, r2, r3 have the two doubles. Neither is a NaN. - if (cc == eq) { - // Doubles are not equal unless they have the same bit pattern. - // Exception: 0 and -0. - __ cmp(rhs_mantissa, Operand(lhs_mantissa)); - __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); - // Return non-zero if the numbers are unequal. - __ Ret(ne); - - __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); - // If exponents are equal then return 0. - __ Ret(eq); - - // Exponents are unequal. The only way we can return that the numbers - // are equal is if one is -0 and the other is 0. We already dealt - // with the case where both are -0 or both are 0. - // We start by seeing if the mantissas (that are equal) or the bottom - // 31 bits of the rhs exponent are non-zero. If so we return not - // equal. - __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC); - __ mov(r0, Operand(r4), LeaveCC, ne); - __ Ret(ne); - // Now they are equal if and only if the lhs exponent is zero in its - // low 31 bits. - __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize)); - __ Ret(); - } else { - // Call a native function to do a comparison between two non-NaNs. - // Call C routine that may not cause GC or other trouble. - __ push(lr); - __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments. - __ CallCFunction(ExternalReference::compare_doubles(), 4); - __ pop(pc); // Return. - } -} - - -// See comment at call site. -static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, - Register lhs, - Register rhs) { - ASSERT((lhs.is(r0) && rhs.is(r1)) || - (lhs.is(r1) && rhs.is(r0))); - - // If either operand is a JSObject or an oddball value, then they are - // not equal since their pointers are different. - // There is no test for undetectability in strict equality. - STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); - Label first_non_object; - // Get the type of the first operand into r2 and compare it with - // FIRST_JS_OBJECT_TYPE. - __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE); - __ b(lt, &first_non_object); - - // Return non-zero (r0 is not zero) - Label return_not_equal; - __ bind(&return_not_equal); - __ Ret(); - - __ bind(&first_non_object); - // Check for oddballs: true, false, null, undefined. - __ cmp(r2, Operand(ODDBALL_TYPE)); - __ b(eq, &return_not_equal); - - __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE); - __ b(ge, &return_not_equal); - - // Check for oddballs: true, false, null, undefined. - __ cmp(r3, Operand(ODDBALL_TYPE)); - __ b(eq, &return_not_equal); - - // Now that we have the types we might as well check for symbol-symbol. - // Ensure that no non-strings have the symbol bit set. - STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); - STATIC_ASSERT(kSymbolTag != 0); - __ and_(r2, r2, Operand(r3)); - __ tst(r2, Operand(kIsSymbolMask)); - __ b(ne, &return_not_equal); -} - - -// See comment at call site. 
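// Worked example, illustrative only: the only pair of doubles that compare
// equal without sharing a bit pattern is +0 and -0:
//   +0.0  ->  0x00000000 00000000
//   -0.0  ->  0x80000000 00000000
// Their mantissa words match and their exponent words differ only in the sign
// bit, which is exactly what the equality path above checks: the mantissa and
// the low 31 bits of each exponent word must all be zero before the values
// are declared equal.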
-static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, - Register lhs, - Register rhs, - Label* both_loaded_as_doubles, - Label* not_heap_numbers, - Label* slow) { - ASSERT((lhs.is(r0) && rhs.is(r1)) || - (lhs.is(r1) && rhs.is(r0))); - - __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); - __ b(ne, not_heap_numbers); - __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); - __ cmp(r2, r3); - __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. - - // Both are heap numbers. Load them up then jump to the code we have - // for that. - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - __ sub(r7, rhs, Operand(kHeapObjectTag)); - __ vldr(d6, r7, HeapNumber::kValueOffset); - __ sub(r7, lhs, Operand(kHeapObjectTag)); - __ vldr(d7, r7, HeapNumber::kValueOffset); - } else { - __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); - __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); - } - __ jmp(both_loaded_as_doubles); -} - - -// Fast negative check for symbol-to-symbol equality. -static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm, - Register lhs, - Register rhs, - Label* possible_strings, - Label* not_both_strings) { - ASSERT((lhs.is(r0) && rhs.is(r1)) || - (lhs.is(r1) && rhs.is(r0))); - - // r2 is object type of rhs. - // Ensure that no non-strings have the symbol bit set. - Label object_test; - STATIC_ASSERT(kSymbolTag != 0); - __ tst(r2, Operand(kIsNotStringMask)); - __ b(ne, &object_test); - __ tst(r2, Operand(kIsSymbolMask)); - __ b(eq, possible_strings); - __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE); - __ b(ge, not_both_strings); - __ tst(r3, Operand(kIsSymbolMask)); - __ b(eq, possible_strings); - - // Both are symbols. We already checked they weren't the same pointer - // so they are not equal. - __ mov(r0, Operand(NOT_EQUAL)); - __ Ret(); - - __ bind(&object_test); - __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); - __ b(lt, not_both_strings); - __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE); - __ b(lt, not_both_strings); - // If both objects are undetectable, they are equal. Otherwise, they - // are not equal, since they are different objects and an object is not - // equal to undefined. - __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset)); - __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset)); - __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset)); - __ and_(r0, r2, Operand(r3)); - __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); - __ eor(r0, r0, Operand(1 << Map::kIsUndetectable)); - __ Ret(); -} - - -void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - Register scratch3, - bool object_is_smi, - Label* not_found) { - // Use of registers. Register result is used as a temporary. - Register number_string_cache = result; - Register mask = scratch3; - - // Load the number string cache. - __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); - - // Make the hash mask from the length of the number string cache. It - // contains two elements (number and string) for each cache entry. - __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); - // Divide length by two (length is a smi). - __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); - __ sub(mask, mask, Operand(1)); // Make mask. - - // Calculate the entry in the number string cache. 
The hash value in the - // number string cache for smis is just the smi value, and the hash for - // doubles is the xor of the upper and lower words. See - // Heap::GetNumberStringCache. - Label is_smi; - Label load_result_from_cache; - if (!object_is_smi) { - __ BranchOnSmi(object, &is_smi); - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - __ CheckMap(object, - scratch1, - Heap::kHeapNumberMapRootIndex, - not_found, - true); - - STATIC_ASSERT(8 == kDoubleSize); - __ add(scratch1, - object, - Operand(HeapNumber::kValueOffset - kHeapObjectTag)); - __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); - __ eor(scratch1, scratch1, Operand(scratch2)); - __ and_(scratch1, scratch1, Operand(mask)); - - // Calculate address of entry in string cache: each entry consists - // of two pointer sized fields. - __ add(scratch1, - number_string_cache, - Operand(scratch1, LSL, kPointerSizeLog2 + 1)); - - Register probe = mask; - __ ldr(probe, - FieldMemOperand(scratch1, FixedArray::kHeaderSize)); - __ BranchOnSmi(probe, not_found); - __ sub(scratch2, object, Operand(kHeapObjectTag)); - __ vldr(d0, scratch2, HeapNumber::kValueOffset); - __ sub(probe, probe, Operand(kHeapObjectTag)); - __ vldr(d1, probe, HeapNumber::kValueOffset); - __ vcmp(d0, d1); - __ vmrs(pc); - __ b(ne, not_found); // The cache did not contain this value. - __ b(&load_result_from_cache); - } else { - __ b(not_found); - } - } - - __ bind(&is_smi); - Register scratch = scratch1; - __ and_(scratch, mask, Operand(object, ASR, 1)); - // Calculate address of entry in string cache: each entry consists - // of two pointer sized fields. - __ add(scratch, - number_string_cache, - Operand(scratch, LSL, kPointerSizeLog2 + 1)); - - // Check if the entry is the smi we are looking for. - Register probe = mask; - __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); - __ cmp(object, probe); - __ b(ne, not_found); - - // Get the result from the cache. - __ bind(&load_result_from_cache); - __ ldr(result, - FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); - __ IncrementCounter(&Counters::number_to_string_native, - 1, - scratch1, - scratch2); -} - - -void NumberToStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - __ ldr(r1, MemOperand(sp, 0)); - - // Generate code to lookup number in the number string cache. - GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime); - __ add(sp, sp, Operand(1 * kPointerSize)); - __ Ret(); - - __ bind(&runtime); - // Handle number to string in the runtime system if not found in the cache. - __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); -} - - -void RecordWriteStub::Generate(MacroAssembler* masm) { - __ add(offset_, object_, Operand(offset_)); - __ RecordWriteHelper(object_, offset_, scratch_); - __ Ret(); -} - - -// On entry lhs_ and rhs_ are the values to be compared. -// On exit r0 is 0, positive or negative to indicate the result of -// the comparison. -void CompareStub::Generate(MacroAssembler* masm) { - ASSERT((lhs_.is(r0) && rhs_.is(r1)) || - (lhs_.is(r1) && rhs_.is(r0))); - - Label slow; // Call builtin. - Label not_smis, both_loaded_as_doubles, lhs_not_nan; - - // NOTICE! This code is only reached after a smi-fast-case check, so - // it is certain that at least one operand isn't a smi. - - // Handle the case where the objects are identical. Either returns the answer - // or goes to slow. Only falls through if the objects were not identical. 
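// Illustrative sketch, not part of the patch: the probe logic in
// GenerateLookupNumberStringCache above hashes a number to an index into a
// FixedArray of (number, string) pairs. Expressed as hypothetical C++ helpers:
//   // mask = (cache length / 2) - 1; entries are pairs, hence "index * 2".
//   uint32_t SmiHash(int32_t value, uint32_t mask) { return value & mask; }
//   uint32_t DoubleHash(uint64_t bits, uint32_t mask) {
//     return (static_cast<uint32_t>(bits) ^
//             static_cast<uint32_t>(bits >> 32)) & mask;
//   }
// Slot index * 2 holds the number and slot index * 2 + 1 the cached string,
// mirroring the comment above: the smi hash is the smi value itself and the
// double hash is the xor of the two 32-bit halves of the double.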
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); - - // If either is a Smi (we know that not both are), then they can only - // be strictly equal if the other is a HeapNumber. - STATIC_ASSERT(kSmiTag == 0); - ASSERT_EQ(0, Smi::FromInt(0)); - __ and_(r2, lhs_, Operand(rhs_)); - __ tst(r2, Operand(kSmiTagMask)); - __ b(ne, ¬_smis); - // One operand is a smi. EmitSmiNonsmiComparison generates code that can: - // 1) Return the answer. - // 2) Go to slow. - // 3) Fall through to both_loaded_as_doubles. - // 4) Jump to lhs_not_nan. - // In cases 3 and 4 we have found out we were dealing with a number-number - // comparison. If VFP3 is supported the double values of the numbers have - // been loaded into d7 and d6. Otherwise, the double values have been loaded - // into r0, r1, r2, and r3. - EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); - - __ bind(&both_loaded_as_doubles); - // The arguments have been converted to doubles and stored in d6 and d7, if - // VFP3 is supported, or in r0, r1, r2, and r3. - if (CpuFeatures::IsSupported(VFP3)) { - __ bind(&lhs_not_nan); - CpuFeatures::Scope scope(VFP3); - Label no_nan; - // ARMv7 VFP3 instructions to implement double precision comparison. - __ vcmp(d7, d6); - __ vmrs(pc); // Move vector status bits to normal status bits. - Label nan; - __ b(vs, &nan); - __ mov(r0, Operand(EQUAL), LeaveCC, eq); - __ mov(r0, Operand(LESS), LeaveCC, lt); - __ mov(r0, Operand(GREATER), LeaveCC, gt); - __ Ret(); - - __ bind(&nan); - // If one of the sides was a NaN then the v flag is set. Load r0 with - // whatever it takes to make the comparison fail, since comparisons with NaN - // always fail. - if (cc_ == lt || cc_ == le) { - __ mov(r0, Operand(GREATER)); - } else { - __ mov(r0, Operand(LESS)); - } - __ Ret(); - } else { - // Checks for NaN in the doubles we have loaded. Can return the answer or - // fall through if neither is a NaN. Also binds lhs_not_nan. - EmitNanCheck(masm, &lhs_not_nan, cc_); - // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the - // answer. Never falls through. - EmitTwoNonNanDoubleComparison(masm, cc_); - } - - __ bind(¬_smis); - // At this point we know we are dealing with two different objects, - // and neither of them is a Smi. The objects are in rhs_ and lhs_. - if (strict_) { - // This returns non-equal for some object types, or falls through if it - // was not lucky. - EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); - } - - Label check_for_symbols; - Label flat_string_check; - // Check for heap-number-heap-number comparison. Can jump to slow case, - // or load both doubles into r0, r1, r2, r3 and jump to the code that handles - // that case. If the inputs are not doubles then jumps to check_for_symbols. - // In this case r2 will contain the type of rhs_. Never falls through. - EmitCheckForTwoHeapNumbers(masm, - lhs_, - rhs_, - &both_loaded_as_doubles, - &check_for_symbols, - &flat_string_check); - - __ bind(&check_for_symbols); - // In the strict case the EmitStrictTwoHeapObjectCompare already took care of - // symbols. - if (cc_ == eq && !strict_) { - // Returns an answer for two symbols or two detectable objects. - // Otherwise jumps to string case or not both strings case. - // Assumes that r2 is the type of rhs_ on entry. - EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); - } - - // Check for both being sequential ASCII strings, and inline if that is the - // case. 
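// Illustrative note, not part of the patch: throughout CompareStub the result
// convention is that r0 ends up negative (LESS), zero (EQUAL) or positive
// (GREATER), the same values the comparison builtins return. Since every
// comparison involving NaN must evaluate to false, the NaN paths above load
// whichever value makes the pending test fail:
//   cc_ == lt or le  ->  r0 = GREATER   (so "a < b" / "a <= b" is false)
//   cc_ == gt or ge  ->  r0 = LESS      (so "a > b" / "a >= b" is false)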
- __ bind(&flat_string_check); - - __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); - - __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3); - StringCompareStub::GenerateCompareFlatAsciiStrings(masm, - lhs_, - rhs_, - r2, - r3, - r4, - r5); - // Never falls through to here. - - __ bind(&slow); - - __ Push(lhs_, rhs_); - // Figure out which native to call and setup the arguments. - Builtins::JavaScript native; - if (cc_ == eq) { - native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; - } else { - native = Builtins::COMPARE; - int ncr; // NaN compare result - if (cc_ == lt || cc_ == le) { - ncr = GREATER; - } else { - ASSERT(cc_ == gt || cc_ == ge); // remaining cases - ncr = LESS; - } - __ mov(r0, Operand(Smi::FromInt(ncr))); - __ push(r0); - } - - // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) - // tagged as a small integer. - __ InvokeBuiltin(native, JUMP_JS); -} - - -// This stub does not handle the inlined cases (Smis, Booleans, undefined). -// The stub returns zero for false, and a non-zero value for true. -void ToBooleanStub::Generate(MacroAssembler* masm) { - Label false_result; - Label not_heap_number; - Register scratch0 = VirtualFrame::scratch0(); - - // HeapNumber => false iff +0, -0, or NaN. - __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(scratch0, ip); - __ b(¬_heap_number, ne); - - __ sub(ip, tos_, Operand(kHeapObjectTag)); - __ vldr(d1, ip, HeapNumber::kValueOffset); - __ vcmp(d1, 0.0); - __ vmrs(pc); - // "tos_" is a register, and contains a non zero value by default. - // Hence we only need to overwrite "tos_" with zero to return false for - // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. - __ mov(tos_, Operand(0), LeaveCC, eq); // for FP_ZERO - __ mov(tos_, Operand(0), LeaveCC, vs); // for FP_NAN - __ Ret(); - - __ bind(¬_heap_number); - - // Check if the value is 'null'. - // 'null' => false. - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(tos_, ip); - __ b(&false_result, eq); - - // It can be an undetectable object. - // Undetectable => false. - __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ ldrb(scratch0, FieldMemOperand(ip, Map::kBitFieldOffset)); - __ and_(scratch0, scratch0, Operand(1 << Map::kIsUndetectable)); - __ cmp(scratch0, Operand(1 << Map::kIsUndetectable)); - __ b(&false_result, eq); - - // JavaScript object => true. - __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); - __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE)); - // "tos_" is a register and contains a non-zero value. - // Hence we implicitly return true if the greater than - // condition is satisfied. - __ Ret(gt); - - // Check for string - __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); - __ cmp(scratch0, Operand(FIRST_NONSTRING_TYPE)); - // "tos_" is a register and contains a non-zero value. - // Hence we implicitly return true if the greater than - // condition is satisfied. - __ Ret(gt); - - // String value => false iff empty, i.e., length is zero - __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset)); - // If length is zero, "tos_" contains zero ==> false. - // If length is not zero, "tos_" contains a non-zero value ==> true. - __ Ret(); - - // Return 0 in "tos_" for false . 
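// Summary, illustrative only: the stub above covers the ToBoolean cases that
// the caller does not handle inline (Smis, Booleans, undefined):
//   heap number          ->  false iff +0, -0 or NaN
//   null                 ->  false
//   undetectable object  ->  false
//   JS object            ->  true
//   string               ->  false iff its length is zero
// The register convention keeps this cheap: tos_ already holds a non-zero
// (true) value, so only the false cases overwrite it with zero.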
- __ bind(&false_result); - __ mov(tos_, Operand(0)); - __ Ret(); -} - - -// We fall into this code if the operands were Smis, but the result was -// not (eg. overflow). We branch into this code (to the not_smi label) if -// the operands were not both Smi. The operands are in r0 and r1. In order -// to call the C-implemented binary fp operation routines we need to end up -// with the double precision floating point operands in r0 and r1 (for the -// value in r1) and r2 and r3 (for the value in r0). -void GenericBinaryOpStub::HandleBinaryOpSlowCases( - MacroAssembler* masm, - Label* not_smi, - Register lhs, - Register rhs, - const Builtins::JavaScript& builtin) { - Label slow, slow_reverse, do_the_call; - bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_; - - ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); - Register heap_number_map = r6; - - if (ShouldGenerateSmiCode()) { - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - - // Smi-smi case (overflow). - // Since both are Smis there is no heap number to overwrite, so allocate. - // The new heap number is in r5. r3 and r7 are scratch. - __ AllocateHeapNumber( - r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow); - - // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, - // using registers d7 and d6 for the double values. - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - __ mov(r7, Operand(rhs, ASR, kSmiTagSize)); - __ vmov(s15, r7); - __ vcvt_f64_s32(d7, s15); - __ mov(r7, Operand(lhs, ASR, kSmiTagSize)); - __ vmov(s13, r7); - __ vcvt_f64_s32(d6, s13); - if (!use_fp_registers) { - __ vmov(r2, r3, d7); - __ vmov(r0, r1, d6); - } - } else { - // Write Smi from rhs to r3 and r2 in double format. r9 is scratch. - __ mov(r7, Operand(rhs)); - ConvertToDoubleStub stub1(r3, r2, r7, r9); - __ push(lr); - __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); - // Write Smi from lhs to r1 and r0 in double format. r9 is scratch. - __ mov(r7, Operand(lhs)); - ConvertToDoubleStub stub2(r1, r0, r7, r9); - __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); - } - __ jmp(&do_the_call); // Tail call. No return. - } - - // We branch here if at least one of r0 and r1 is not a Smi. - __ bind(not_smi); - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - - // After this point we have the left hand side in r1 and the right hand side - // in r0. - if (lhs.is(r0)) { - __ Swap(r0, r1, ip); - } - - // The type transition also calculates the answer. - bool generate_code_to_calculate_answer = true; - - if (ShouldGenerateFPCode()) { - if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - GenerateTypeTransition(masm); // Tail call. - generate_code_to_calculate_answer = false; - break; - - default: - break; - } - } - - if (generate_code_to_calculate_answer) { - Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; - if (mode_ == NO_OVERWRITE) { - // In the case where there is no chance of an overwritable float we may - // as well do the allocation immediately while r0 and r1 are untouched. - __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow); - } - - // Move r0 to a double in r2-r3. - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. 
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - __ cmp(r4, heap_number_map); - __ b(ne, &slow); - if (mode_ == OVERWRITE_RIGHT) { - __ mov(r5, Operand(r0)); // Overwrite this heap number. - } - if (use_fp_registers) { - CpuFeatures::Scope scope(VFP3); - // Load the double from tagged HeapNumber r0 to d7. - __ sub(r7, r0, Operand(kHeapObjectTag)); - __ vldr(d7, r7, HeapNumber::kValueOffset); - } else { - // Calling convention says that second double is in r2 and r3. - __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); - } - __ jmp(&finished_loading_r0); - __ bind(&r0_is_smi); - if (mode_ == OVERWRITE_RIGHT) { - // We can't overwrite a Smi so get address of new heap number into r5. - __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); - } - - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - // Convert smi in r0 to double in d7. - __ mov(r7, Operand(r0, ASR, kSmiTagSize)); - __ vmov(s15, r7); - __ vcvt_f64_s32(d7, s15); - if (!use_fp_registers) { - __ vmov(r2, r3, d7); - } - } else { - // Write Smi from r0 to r3 and r2 in double format. - __ mov(r7, Operand(r0)); - ConvertToDoubleStub stub3(r3, r2, r7, r4); - __ push(lr); - __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); - } - - // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis. - // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC. - Label r1_is_not_smi; - if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) { - __ tst(r1, Operand(kSmiTagMask)); - __ b(ne, &r1_is_not_smi); - GenerateTypeTransition(masm); // Tail call. - } - - __ bind(&finished_loading_r0); - - // Move r1 to a double in r0-r1. - __ tst(r1, Operand(kSmiTagMask)); - __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. - __ bind(&r1_is_not_smi); - __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset)); - __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - __ cmp(r4, heap_number_map); - __ b(ne, &slow); - if (mode_ == OVERWRITE_LEFT) { - __ mov(r5, Operand(r1)); // Overwrite this heap number. - } - if (use_fp_registers) { - CpuFeatures::Scope scope(VFP3); - // Load the double from tagged HeapNumber r1 to d6. - __ sub(r7, r1, Operand(kHeapObjectTag)); - __ vldr(d6, r7, HeapNumber::kValueOffset); - } else { - // Calling convention says that first double is in r0 and r1. - __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); - } - __ jmp(&finished_loading_r1); - __ bind(&r1_is_smi); - if (mode_ == OVERWRITE_LEFT) { - // We can't overwrite a Smi so get address of new heap number into r5. - __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); - } - - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - // Convert smi in r1 to double in d6. - __ mov(r7, Operand(r1, ASR, kSmiTagSize)); - __ vmov(s13, r7); - __ vcvt_f64_s32(d6, s13); - if (!use_fp_registers) { - __ vmov(r0, r1, d6); - } - } else { - // Write Smi from r1 to r1 and r0 in double format. - __ mov(r7, Operand(r1)); - ConvertToDoubleStub stub4(r1, r0, r7, r9); - __ push(lr); - __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); - } - - __ bind(&finished_loading_r1); - } - - if (generate_code_to_calculate_answer || do_the_call.is_linked()) { - __ bind(&do_the_call); - // If we are inlining the operation using VFP3 instructions for - // add, subtract, multiply, or divide, the arguments are in d6 and d7. 
- if (use_fp_registers) { - CpuFeatures::Scope scope(VFP3); - // ARMv7 VFP3 instructions to implement - // double precision, add, subtract, multiply, divide. - - if (Token::MUL == op_) { - __ vmul(d5, d6, d7); - } else if (Token::DIV == op_) { - __ vdiv(d5, d6, d7); - } else if (Token::ADD == op_) { - __ vadd(d5, d6, d7); - } else if (Token::SUB == op_) { - __ vsub(d5, d6, d7); - } else { - UNREACHABLE(); - } - __ sub(r0, r5, Operand(kHeapObjectTag)); - __ vstr(d5, r0, HeapNumber::kValueOffset); - __ add(r0, r0, Operand(kHeapObjectTag)); - __ mov(pc, lr); - } else { - // If we did not inline the operation, then the arguments are in: - // r0: Left value (least significant part of mantissa). - // r1: Left value (sign, exponent, top of mantissa). - // r2: Right value (least significant part of mantissa). - // r3: Right value (sign, exponent, top of mantissa). - // r5: Address of heap number for result. - - __ push(lr); // For later. - __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. - // Call C routine that may not cause GC or other trouble. r5 is callee - // save. - __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); - // Store answer in the overwritable heap number. - #if !defined(USE_ARM_EABI) - // Double returned in fp coprocessor register 0 and 1, encoded as - // register cr8. Offsets must be divisible by 4 for coprocessor so we - // need to substract the tag from r5. - __ sub(r4, r5, Operand(kHeapObjectTag)); - __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); - #else - // Double returned in registers 0 and 1. - __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset)); - #endif - __ mov(r0, Operand(r5)); - // And we are done. - __ pop(pc); - } - } - } - - if (!generate_code_to_calculate_answer && - !slow_reverse.is_linked() && - !slow.is_linked()) { - return; - } - - if (lhs.is(r0)) { - __ b(&slow); - __ bind(&slow_reverse); - __ Swap(r0, r1, ip); - } - - heap_number_map = no_reg; // Don't use this any more from here on. - - // We jump to here if something goes wrong (one param is not a number of any - // sort or new-space allocation fails). - __ bind(&slow); - - // Push arguments to the stack - __ Push(r1, r0); - - if (Token::ADD == op_) { - // Test for string arguments before calling runtime. - // r1 : first argument - // r0 : second argument - // sp[0] : second argument - // sp[4] : first argument - - Label not_strings, not_string1, string1, string1_smi2; - __ tst(r1, Operand(kSmiTagMask)); - __ b(eq, ¬_string1); - __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, ¬_string1); - - // First argument is a a string, test second. - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, &string1_smi2); - __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, &string1); - - // First and second argument are strings. - StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); - __ TailCallStub(&string_add_stub); - - __ bind(&string1_smi2); - // First argument is a string, second is a smi. Try to lookup the number - // string for the smi in the number string cache. - NumberToStringStub::GenerateLookupNumberStringCache( - masm, r0, r2, r4, r5, r6, true, &string1); - - // Replace second argument on stack and tailcall string add stub to make - // the result. - __ str(r2, MemOperand(sp, 0)); - __ TailCallStub(&string_add_stub); - - // Only first argument is a string. - __ bind(&string1); - __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS); - - // First argument was not a string, test second. 
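// ---- Editor's aside: illustrative sketch, not part of this patch ----
// The branch structure of the Token::ADD string handling above, restated as
// plain control flow. The enum and function are stand-ins; the actual checks
// are the Smi tag test and CompareObjectType against FIRST_NONSTRING_TYPE.
enum AddPath {
  kStringAddStub,    // both operands are strings (a Smi second operand is
                     // first converted via the number string cache)
  kStringAddLeft,    // only the first operand is a string
  kStringAddRight,   // only the second operand is a string
  kGenericAdd        // neither is a string: fall through to Builtins::ADD
};

static AddPath ClassifyAdd(bool first_is_string, bool second_is_string) {
  if (first_is_string) {
    return second_is_string ? kStringAddStub : kStringAddLeft;
  }
  return second_is_string ? kStringAddRight : kGenericAdd;
}
// ---- end aside ----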
- __ bind(¬_string1); - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, ¬_strings); - __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, ¬_strings); - - // Only second argument is a string. - __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS); - - __ bind(¬_strings); - } - - __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. -} - - -// Tries to get a signed int32 out of a double precision floating point heap -// number. Rounds towards 0. Fastest for doubles that are in the ranges -// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds -// almost to the range of signed int32 values that are not Smis. Jumps to the -// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0 -// (excluding the endpoints). -static void GetInt32(MacroAssembler* masm, - Register source, - Register dest, - Register scratch, - Register scratch2, - Label* slow) { - Label right_exponent, done; - // Get exponent word. - __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); - // Get exponent alone in scratch2. - __ Ubfx(scratch2, - scratch, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - // Load dest with zero. We use this either for the final shift or - // for the answer. - __ mov(dest, Operand(0)); - // Check whether the exponent matches a 32 bit signed int that is not a Smi. - // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is - // the exponent that we are fastest at and also the highest exponent we can - // handle here. - const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30; - // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we - // split it up to avoid a constant pool entry. You can't do that in general - // for cmp because of the overflow flag, but we know the exponent is in the - // range 0-2047 so there is no overflow. - int fudge_factor = 0x400; - __ sub(scratch2, scratch2, Operand(fudge_factor)); - __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor)); - // If we have a match of the int32-but-not-Smi exponent then skip some logic. - __ b(eq, &right_exponent); - // If the exponent is higher than that then go to slow case. This catches - // numbers that don't fit in a signed int32, infinities and NaNs. - __ b(gt, slow); - - // We know the exponent is smaller than 30 (biased). If it is less than - // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie - // it rounds to zero. - const uint32_t zero_exponent = HeapNumber::kExponentBias + 0; - __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC); - // Dest already has a Smi zero. - __ b(lt, &done); - if (!CpuFeatures::IsSupported(VFP3)) { - // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to - // get how much to shift down. - __ rsb(dest, scratch2, Operand(30)); - } - __ bind(&right_exponent); - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - // ARMv7 VFP3 instructions implementing double precision to integer - // conversion using round to zero. - __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); - __ vmov(d7, scratch2, scratch); - __ vcvt_s32_f64(s15, d7); - __ vmov(dest, s15); - } else { - // Get the top bits of the mantissa. - __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); - // Put back the implicit 1. - __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); - // Shift up the mantissa bits to take up the space the exponent used to - // take. 
We just orred in the implicit bit so that took care of one and - // we want to leave the sign bit 0 so we subtract 2 bits from the shift - // distance. - const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; - __ mov(scratch2, Operand(scratch2, LSL, shift_distance)); - // Put sign in zero flag. - __ tst(scratch, Operand(HeapNumber::kSignMask)); - // Get the second half of the double. For some exponents we don't - // actually need this because the bits get shifted out again, but - // it's probably slower to test than just to do it. - __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); - // Shift down 22 bits to get the last 10 bits. - __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); - // Move down according to the exponent. - __ mov(dest, Operand(scratch, LSR, dest)); - // Fix sign if sign bit was set. - __ rsb(dest, dest, Operand(0), LeaveCC, ne); - } - __ bind(&done); -} - -// For bitwise ops where the inputs are not both Smis we here try to determine -// whether both inputs are either Smis or at least heap numbers that can be -// represented by a 32 bit signed value. We truncate towards zero as required -// by the ES spec. If this is the case we do the bitwise op and see if the -// result is a Smi. If so, great, otherwise we try to find a heap number to -// write the answer into (either by allocating or by overwriting). -// On entry the operands are in lhs and rhs. On exit the answer is in r0. -void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, - Register lhs, - Register rhs) { - Label slow, result_not_a_smi; - Label rhs_is_smi, lhs_is_smi; - Label done_checking_rhs, done_checking_lhs; - - Register heap_number_map = r6; - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - - __ tst(lhs, Operand(kSmiTagMask)); - __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number. - __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset)); - __ cmp(r4, heap_number_map); - __ b(ne, &slow); - GetInt32(masm, lhs, r3, r5, r4, &slow); - __ jmp(&done_checking_lhs); - __ bind(&lhs_is_smi); - __ mov(r3, Operand(lhs, ASR, 1)); - __ bind(&done_checking_lhs); - - __ tst(rhs, Operand(kSmiTagMask)); - __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number. - __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset)); - __ cmp(r4, heap_number_map); - __ b(ne, &slow); - GetInt32(masm, rhs, r2, r5, r4, &slow); - __ jmp(&done_checking_rhs); - __ bind(&rhs_is_smi); - __ mov(r2, Operand(rhs, ASR, 1)); - __ bind(&done_checking_rhs); - - ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)))); - - // r0 and r1: Original operands (Smi or heap numbers). - // r2 and r3: Signed int32 operands. - switch (op_) { - case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break; - case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; - case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break; - case Token::SAR: - // Use only the 5 least significant bits of the shift count. - __ and_(r2, r2, Operand(0x1f)); - __ mov(r2, Operand(r3, ASR, r2)); - break; - case Token::SHR: - // Use only the 5 least significant bits of the shift count. - __ and_(r2, r2, Operand(0x1f)); - __ mov(r2, Operand(r3, LSR, r2), SetCC); - // SHR is special because it is required to produce a positive answer. - // The code below for writing into heap numbers isn't capable of writing - // the register as an unsigned int so we go to slow case if we hit this - // case. 
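// ---- Editor's aside: illustrative sketch, not part of this patch ----
// The non-VFP path of GetInt32 above, restated in host C++: truncate a double
// toward zero using only its bit pattern. Assumes the value is already known
// to lie strictly between -2^31 and 2^31, as the exponent checks above
// guarantee before this path is reached.
#include <stdint.h>
#include <string.h>

static int32_t TruncateDoubleToInt32(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;
  if (exponent < 0) return 0;                       // |value| < 1 truncates to 0
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint64_t magnitude = mantissa >> (52 - exponent); // exponent <= 30 here
  int32_t result = static_cast<int32_t>(magnitude);
  return (bits >> 63) ? -result : result;           // apply the sign bit
}
// ---- end aside ----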
- if (CpuFeatures::IsSupported(VFP3)) { - __ b(mi, &result_not_a_smi); - } else { - __ b(mi, &slow); - } - break; - case Token::SHL: - // Use only the 5 least significant bits of the shift count. - __ and_(r2, r2, Operand(0x1f)); - __ mov(r2, Operand(r3, LSL, r2)); - break; - default: UNREACHABLE(); - } - // check that the *signed* result fits in a smi - __ add(r3, r2, Operand(0x40000000), SetCC); - __ b(mi, &result_not_a_smi); - __ mov(r0, Operand(r2, LSL, kSmiTagSize)); - __ Ret(); - - Label have_to_allocate, got_a_heap_number; - __ bind(&result_not_a_smi); +const char* GenericBinaryOpStub::GetName() { + if (name_ != NULL) return name_; + const int len = 100; + name_ = Bootstrapper::AllocateAutoDeletedArray(len); + if (name_ == NULL) return "OOM"; + const char* op_name = Token::Name(op_); + const char* overwrite_name; switch (mode_) { - case OVERWRITE_RIGHT: { - __ tst(rhs, Operand(kSmiTagMask)); - __ b(eq, &have_to_allocate); - __ mov(r5, Operand(rhs)); - break; - } - case OVERWRITE_LEFT: { - __ tst(lhs, Operand(kSmiTagMask)); - __ b(eq, &have_to_allocate); - __ mov(r5, Operand(lhs)); - break; - } - case NO_OVERWRITE: { - // Get a new heap number in r5. r4 and r7 are scratch. - __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); - } - default: break; - } - __ bind(&got_a_heap_number); - // r2: Answer as signed int32. - // r5: Heap number to write answer into. - - // Nothing can go wrong now, so move the heap number to r0, which is the - // result. - __ mov(r0, Operand(r5)); - - if (CpuFeatures::IsSupported(VFP3)) { - // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. - CpuFeatures::Scope scope(VFP3); - __ vmov(s0, r2); - if (op_ == Token::SHR) { - __ vcvt_f64_u32(d0, s0); - } else { - __ vcvt_f64_s32(d0, s0); - } - __ sub(r3, r0, Operand(kHeapObjectTag)); - __ vstr(d0, r3, HeapNumber::kValueOffset); - __ Ret(); - } else { - // Tail call that writes the int32 in r2 to the heap number in r0, using - // r3 as scratch. r0 is preserved and returned. - WriteInt32ToHeapNumberStub stub(r2, r0, r3); - __ TailCallStub(&stub); - } - - if (mode_ != NO_OVERWRITE) { - __ bind(&have_to_allocate); - // Get a new heap number in r5. r4 and r7 are scratch. - __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); - __ jmp(&got_a_heap_number); - } - - // If all else failed then we go to the runtime system. - __ bind(&slow); - __ Push(lhs, rhs); // Restore stack. - switch (op_) { - case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); - break; - case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); - break; - case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); - break; - case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_JS); - break; - case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_JS); - break; - case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_JS); - break; - default: - UNREACHABLE(); - } -} - - -// Can we multiply by x with max two shifts and an add. -// This answers yes to all integers from 2 to 10. -static bool IsEasyToMultiplyBy(int x) { - if (x < 2) return false; // Avoid special cases. - if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows. - if (IsPowerOf2(x)) return true; // Simple shift. - if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift. - if (IsPowerOf2(x + 1)) return true; // Patterns like 11111. - return false; -} - - -// Can multiply by anything that IsEasyToMultiplyBy returns true for. -// Source and destination may be the same register. 
This routine does -// not set carry and overflow the way a mul instruction would. -static void MultiplyByKnownInt(MacroAssembler* masm, - Register source, - Register destination, - int known_int) { - if (IsPowerOf2(known_int)) { - __ mov(destination, Operand(source, LSL, BitPosition(known_int))); - } else if (PopCountLessThanEqual2(known_int)) { - int first_bit = BitPosition(known_int); - int second_bit = BitPosition(known_int ^ (1 << first_bit)); - __ add(destination, source, Operand(source, LSL, second_bit - first_bit)); - if (first_bit != 0) { - __ mov(destination, Operand(destination, LSL, first_bit)); - } - } else { - ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111. - int the_bit = BitPosition(known_int + 1); - __ rsb(destination, source, Operand(source, LSL, the_bit)); - } -} - - -// This function (as opposed to MultiplyByKnownInt) takes the known int in a -// a register for the cases where it doesn't know a good trick, and may deliver -// a result that needs shifting. -static void MultiplyByKnownInt2( - MacroAssembler* masm, - Register result, - Register source, - Register known_int_register, // Smi tagged. - int known_int, - int* required_shift) { // Including Smi tag shift - switch (known_int) { - case 3: - __ add(result, source, Operand(source, LSL, 1)); - *required_shift = 1; - break; - case 5: - __ add(result, source, Operand(source, LSL, 2)); - *required_shift = 1; - break; - case 6: - __ add(result, source, Operand(source, LSL, 1)); - *required_shift = 2; - break; - case 7: - __ rsb(result, source, Operand(source, LSL, 3)); - *required_shift = 1; - break; - case 9: - __ add(result, source, Operand(source, LSL, 3)); - *required_shift = 1; - break; - case 10: - __ add(result, source, Operand(source, LSL, 2)); - *required_shift = 2; - break; - default: - ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient. - __ mul(result, source, known_int_register); - *required_shift = 0; - } -} - - -// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3 -// trick. See http://en.wikipedia.org/wiki/Divisibility_rule -// Takes the sum of the digits base (mask + 1) repeatedly until we have a -// number from 0 to mask. On exit the 'eq' condition flags are set if the -// answer is exactly the mask. -void IntegerModStub::DigitSum(MacroAssembler* masm, - Register lhs, - int mask, - int shift, - Label* entry) { - ASSERT(mask > 0); - ASSERT(mask <= 0xff); // This ensures we don't need ip to use it. - Label loop; - __ bind(&loop); - __ and_(ip, lhs, Operand(mask)); - __ add(lhs, ip, Operand(lhs, LSR, shift)); - __ bind(entry); - __ cmp(lhs, Operand(mask)); - __ b(gt, &loop); -} - - -void IntegerModStub::DigitSum(MacroAssembler* masm, - Register lhs, - Register scratch, - int mask, - int shift1, - int shift2, - Label* entry) { - ASSERT(mask > 0); - ASSERT(mask <= 0xff); // This ensures we don't need ip to use it. - Label loop; - __ bind(&loop); - __ bic(scratch, lhs, Operand(mask)); - __ and_(ip, lhs, Operand(mask)); - __ add(lhs, ip, Operand(lhs, LSR, shift1)); - __ add(lhs, lhs, Operand(scratch, LSR, shift2)); - __ bind(entry); - __ cmp(lhs, Operand(mask)); - __ b(gt, &loop); -} - - -// Splits the number into two halves (bottom half has shift bits). The top -// half is subtracted from the bottom half. If the result is negative then -// rhs is added. 
-void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm, - Register lhs, - int shift, - int rhs) { - int mask = (1 << shift) - 1; - __ and_(ip, lhs, Operand(mask)); - __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC); - __ add(lhs, lhs, Operand(rhs), LeaveCC, mi); -} - - -void IntegerModStub::ModReduce(MacroAssembler* masm, - Register lhs, - int max, - int denominator) { - int limit = denominator; - while (limit * 2 <= max) limit *= 2; - while (limit >= denominator) { - __ cmp(lhs, Operand(limit)); - __ sub(lhs, lhs, Operand(limit), LeaveCC, ge); - limit >>= 1; - } -} - - -void IntegerModStub::ModAnswer(MacroAssembler* masm, - Register result, - Register shift_distance, - Register mask_bits, - Register sum_of_digits) { - __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance)); - __ Ret(); -} - - -// See comment for class. -void IntegerModStub::Generate(MacroAssembler* masm) { - __ mov(lhs_, Operand(lhs_, LSR, shift_distance_)); - __ bic(odd_number_, odd_number_, Operand(1)); - __ mov(odd_number_, Operand(odd_number_, LSL, 1)); - // We now have (odd_number_ - 1) * 2 in the register. - // Build a switch out of branches instead of data because it avoids - // having to teach the assembler about intra-code-object pointers - // that are not in relative branch instructions. - Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19; - Label mod21, mod23, mod25; - { Assembler::BlockConstPoolScope block_const_pool(masm); - __ add(pc, pc, Operand(odd_number_)); - // When you read pc it is always 8 ahead, but when you write it you always - // write the actual value. So we put in two nops to take up the slack. - __ nop(); - __ nop(); - __ b(&mod3); - __ b(&mod5); - __ b(&mod7); - __ b(&mod9); - __ b(&mod11); - __ b(&mod13); - __ b(&mod15); - __ b(&mod17); - __ b(&mod19); - __ b(&mod21); - __ b(&mod23); - __ b(&mod25); - } - - // For each denominator we find a multiple that is almost only ones - // when expressed in binary. Then we do the sum-of-digits trick for - // that number. If the multiple is not 1 then we have to do a little - // more work afterwards to get the answer into the 0-denominator-1 - // range. - DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11. - __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111. - ModGetInRangeBySubtraction(masm, lhs_, 2, 5); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111. - __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111. - ModGetInRangeBySubtraction(masm, lhs_, 3, 9); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111. - ModReduce(masm, lhs_, 0x3f, 11); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111. - ModReduce(masm, lhs_, 0xff, 13); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111. - __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111. 
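// ---- Editor's aside: illustrative sketch, not part of this patch ----
// The digit-sum reduction driving the DigitSum/ModReduce calls above, in host
// C++ (the simple single-shift variant). Pick 2^shift so that 2^shift - 1 is
// a multiple of the divisor m; summing base-2^shift digits preserves the value
// modulo 2^shift - 1, and therefore modulo m. Names are illustrative only.
#include <stdint.h>

static uint32_t DigitSumMod(uint32_t x, int shift, uint32_t m) {
  uint32_t mask = (1u << shift) - 1;   // must be a multiple of m, e.g. 15 = 3 * 5
  while (x > mask) {
    x = (x & mask) + (x >> shift);     // sum of base-2^shift digits
  }
  // x is now in [0, mask] and congruent to the input mod m; finish with a
  // small subtraction loop (the role of ModGetInRangeBySubtraction/ModReduce).
  while (x >= m) x -= m;
  return x;
}
// Example: DigitSumMod(n, 4, 5) matches the mod5 case above (mask 0xf = 3 * 5).
// ---- end aside ----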
- ModGetInRangeBySubtraction(masm, lhs_, 4, 17); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111. - ModReduce(masm, lhs_, 0xff, 19); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111. - ModReduce(masm, lhs_, 0x3f, 21); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101. - ModReduce(masm, lhs_, 0xff, 23); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101. - ModReduce(masm, lhs_, 0x7f, 25); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); -} - - -const char* GenericBinaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int len = 100; - name_ = Bootstrapper::AllocateAutoDeletedArray(len); - if (name_ == NULL) return "OOM"; - const char* op_name = Token::Name(op_); - const char* overwrite_name; - switch (mode_) { - case NO_OVERWRITE: overwrite_name = "Alloc"; break; - case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; - case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; - default: overwrite_name = "UnknownOverwrite"; break; + case NO_OVERWRITE: overwrite_name = "Alloc"; break; + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; + default: overwrite_name = "UnknownOverwrite"; break; } OS::SNPrintF(Vector(name_, len), @@ -8985,2787 +7097,6 @@ const char* GenericBinaryOpStub::GetName() { } - -void GenericBinaryOpStub::Generate(MacroAssembler* masm) { - // lhs_ : x - // rhs_ : y - // r0 : result - - Register result = r0; - Register lhs = lhs_; - Register rhs = rhs_; - - // This code can't cope with other register allocations yet. - ASSERT(result.is(r0) && - ((lhs.is(r0) && rhs.is(r1)) || - (lhs.is(r1) && rhs.is(r0)))); - - Register smi_test_reg = VirtualFrame::scratch0(); - Register scratch = VirtualFrame::scratch1(); - - // All ops need to know whether we are dealing with two Smis. Set up - // smi_test_reg to tell us that. - if (ShouldGenerateSmiCode()) { - __ orr(smi_test_reg, lhs, Operand(rhs)); - } - - switch (op_) { - case Token::ADD: { - Label not_smi; - // Fast path. - if (ShouldGenerateSmiCode()) { - STATIC_ASSERT(kSmiTag == 0); // Adjust code below. - __ tst(smi_test_reg, Operand(kSmiTagMask)); - __ b(ne, ¬_smi); - __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. - // Return if no overflow. - __ Ret(vc); - __ sub(r0, r0, Operand(r1)); // Revert optimistic add. - } - HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::ADD); - break; - } - - case Token::SUB: { - Label not_smi; - // Fast path. - if (ShouldGenerateSmiCode()) { - STATIC_ASSERT(kSmiTag == 0); // Adjust code below. - __ tst(smi_test_reg, Operand(kSmiTagMask)); - __ b(ne, ¬_smi); - if (lhs.is(r1)) { - __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. - // Return if no overflow. - __ Ret(vc); - __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. - } else { - __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically. - // Return if no overflow. - __ Ret(vc); - __ add(r0, r0, Operand(r1)); // Revert optimistic subtract. 
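// ---- Editor's aside: illustrative sketch, not part of this patch ----
// Two Smi fast-path tricks used at the top of Generate above, in host C++.
// With a zero tag bit, OR-ing the two tagged words tests "are both Smis" in a
// single instruction, and ADD/SUB are done optimistically and reverted when
// the overflow (V) flag is set. Names are illustrative only.
#include <stdint.h>

static inline bool BothSmis(int32_t a, int32_t b) {
  const int32_t kSmiTagMask = 1;       // Smi tag bit is 0
  return ((a | b) & kSmiTagMask) == 0;
}

static inline bool TaggedAddOverflows(int32_t a, int32_t b) {
  // Equivalent of "add ... SetCC; Ret(vc)": the optimistic add is kept only
  // if no signed 32-bit overflow occurred, otherwise it is reverted.
  int64_t wide = static_cast<int64_t>(a) + b;
  return wide != static_cast<int32_t>(wide);
}
// ---- end aside ----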
- } - } - HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::SUB); - break; - } - - case Token::MUL: { - Label not_smi, slow; - if (ShouldGenerateSmiCode()) { - STATIC_ASSERT(kSmiTag == 0); // adjust code below - __ tst(smi_test_reg, Operand(kSmiTagMask)); - Register scratch2 = smi_test_reg; - smi_test_reg = no_reg; - __ b(ne, ¬_smi); - // Remove tag from one operand (but keep sign), so that result is Smi. - __ mov(ip, Operand(rhs, ASR, kSmiTagSize)); - // Do multiplication - // scratch = lower 32 bits of ip * lhs. - __ smull(scratch, scratch2, lhs, ip); - // Go slow on overflows (overflow bit is not set). - __ mov(ip, Operand(scratch, ASR, 31)); - // No overflow if higher 33 bits are identical. - __ cmp(ip, Operand(scratch2)); - __ b(ne, &slow); - // Go slow on zero result to handle -0. - __ tst(scratch, Operand(scratch)); - __ mov(result, Operand(scratch), LeaveCC, ne); - __ Ret(ne); - // We need -0 if we were multiplying a negative number with 0 to get 0. - // We know one of them was zero. - __ add(scratch2, rhs, Operand(lhs), SetCC); - __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl); - __ Ret(pl); // Return Smi 0 if the non-zero one was positive. - // Slow case. We fall through here if we multiplied a negative number - // with 0, because that would mean we should produce -0. - __ bind(&slow); - } - HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::MUL); - break; - } - - case Token::DIV: - case Token::MOD: { - Label not_smi; - if (ShouldGenerateSmiCode() && specialized_on_rhs_) { - Label lhs_is_unsuitable; - __ BranchOnNotSmi(lhs, ¬_smi); - if (IsPowerOf2(constant_rhs_)) { - if (op_ == Token::MOD) { - __ and_(rhs, - lhs, - Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), - SetCC); - // We now have the answer, but if the input was negative we also - // have the sign bit. Our work is done if the result is - // positive or zero: - if (!rhs.is(r0)) { - __ mov(r0, rhs, LeaveCC, pl); - } - __ Ret(pl); - // A mod of a negative left hand side must return a negative number. - // Unfortunately if the answer is 0 then we must return -0. And we - // already optimistically trashed rhs so we may need to restore it. - __ eor(rhs, rhs, Operand(0x80000000u), SetCC); - // Next two instructions are conditional on the answer being -0. - __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); - __ b(eq, &lhs_is_unsuitable); - // We need to subtract the dividend. Eg. -3 % 4 == -3. - __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_))); - } else { - ASSERT(op_ == Token::DIV); - __ tst(lhs, - Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1))); - __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder. - int shift = 0; - int d = constant_rhs_; - while ((d & 1) == 0) { - d >>= 1; - shift++; - } - __ mov(r0, Operand(lhs, LSR, shift)); - __ bic(r0, r0, Operand(kSmiTagMask)); - } - } else { - // Not a power of 2. - __ tst(lhs, Operand(0x80000000u)); - __ b(ne, &lhs_is_unsuitable); - // Find a fixed point reciprocal of the divisor so we can divide by - // multiplying. - double divisor = 1.0 / constant_rhs_; - int shift = 32; - double scale = 4294967296.0; // 1 << 32. - uint32_t mul; - // Maximise the precision of the fixed point reciprocal. 
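// ---- Editor's aside: illustrative sketch, not part of this patch ----
// The fixed-point-reciprocal division prepared in the loop that follows, in
// host C++. mul is chosen just above 2^shift / d, so (mul * x) >> shift never
// under-estimates x / d; as in the stub, the result is verified by multiplying
// back, and anything inexact simply falls back to the generic path. Assumes
// 2 <= d and x < 2^31 (a Smi-ranged value); names are illustrative only.
#include <stdint.h>

static bool TryConstantDiv(uint32_t x, uint32_t d, uint32_t* quotient) {
  int shift = 32;
  double scale = 4294967296.0;                 // 2^32
  uint32_t mul;
  while (true) {                               // maximise reciprocal precision
    mul = static_cast<uint32_t>(scale / d);    // ~ 2^shift / d
    if (mul >= 0x7fffffffu) break;
    scale *= 2.0;
    shift++;
  }
  mul++;                                       // never under-estimate
  uint32_t q = static_cast<uint32_t>((static_cast<uint64_t>(mul) * x) >> shift);
  if (q * d != x) return false;                // remainder (or off-by-one): go slow
  *quotient = q;
  return true;
}
// ---- end aside ----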
- while (true) { - mul = static_cast(scale * divisor); - if (mul >= 0x7fffffff) break; - scale *= 2.0; - shift++; - } - mul++; - Register scratch2 = smi_test_reg; - smi_test_reg = no_reg; - __ mov(scratch2, Operand(mul)); - __ umull(scratch, scratch2, scratch2, lhs); - __ mov(scratch2, Operand(scratch2, LSR, shift - 31)); - // scratch2 is lhs / rhs. scratch2 is not Smi tagged. - // rhs is still the known rhs. rhs is Smi tagged. - // lhs is still the unkown lhs. lhs is Smi tagged. - int required_scratch_shift = 0; // Including the Smi tag shift of 1. - // scratch = scratch2 * rhs. - MultiplyByKnownInt2(masm, - scratch, - scratch2, - rhs, - constant_rhs_, - &required_scratch_shift); - // scratch << required_scratch_shift is now the Smi tagged rhs * - // (lhs / rhs) where / indicates integer division. - if (op_ == Token::DIV) { - __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift)); - __ b(ne, &lhs_is_unsuitable); // There was a remainder. - __ mov(result, Operand(scratch2, LSL, kSmiTagSize)); - } else { - ASSERT(op_ == Token::MOD); - __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift)); - } - } - __ Ret(); - __ bind(&lhs_is_unsuitable); - } else if (op_ == Token::MOD && - runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && - runtime_operands_type_ != BinaryOpIC::STRINGS) { - // Do generate a bit of smi code for modulus even though the default for - // modulus is not to do it, but as the ARM processor has no coprocessor - // support for modulus checking for smis makes sense. We can handle - // 1 to 25 times any power of 2. This covers over half the numbers from - // 1 to 100 including all of the first 25. (Actually the constants < 10 - // are handled above by reciprocal multiplication. We only get here for - // those cases if the right hand side is not a constant or for cases - // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod - // stub.) - Label slow; - Label not_power_of_2; - ASSERT(!ShouldGenerateSmiCode()); - STATIC_ASSERT(kSmiTag == 0); // Adjust code below. - // Check for two positive smis. - __ orr(smi_test_reg, lhs, Operand(rhs)); - __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask)); - __ b(ne, &slow); - // Check that rhs is a power of two and not zero. - Register mask_bits = r3; - __ sub(scratch, rhs, Operand(1), SetCC); - __ b(mi, &slow); - __ and_(mask_bits, rhs, Operand(scratch), SetCC); - __ b(ne, ¬_power_of_2); - // Calculate power of two modulus. - __ and_(result, lhs, Operand(scratch)); - __ Ret(); - - __ bind(¬_power_of_2); - __ eor(scratch, scratch, Operand(mask_bits)); - // At least two bits are set in the modulus. The high one(s) are in - // mask_bits and the low one is scratch + 1. - __ and_(mask_bits, scratch, Operand(lhs)); - Register shift_distance = scratch; - scratch = no_reg; - - // The rhs consists of a power of 2 multiplied by some odd number. - // The power-of-2 part we handle by putting the corresponding bits - // from the lhs in the mask_bits register, and the power in the - // shift_distance register. Shift distance is never 0 due to Smi - // tagging. - __ CountLeadingZeros(r4, shift_distance, shift_distance); - __ rsb(shift_distance, r4, Operand(32)); - - // Now we need to find out what the odd number is. The last bit is - // always 1. 
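// ---- Editor's aside: illustrative sketch, not part of this patch ----
// The decomposition behind the hand-off to IntegerModStub above: a right-hand
// side of the form odd * 2^k is handled by setting the low k bits of the
// left-hand side aside ("mask_bits") and reducing the remaining high part
// modulo the odd factor; recombining them is exactly what ModAnswer does.
#include <stdint.h>

static uint32_t ModOddTimesPowerOfTwo(uint32_t x, uint32_t odd, int k) {
  uint32_t low_bits = x & ((1u << k) - 1);   // x mod 2^k
  uint32_t high_mod = (x >> k) % odd;        // what the digit-sum stub computes
  return (high_mod << k) + low_bits;         // == x mod (odd << k)
}
// ---- end aside ----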
- Register odd_number = r4; - __ mov(odd_number, Operand(rhs, LSR, shift_distance)); - __ cmp(odd_number, Operand(25)); - __ b(gt, &slow); - - IntegerModStub stub( - result, shift_distance, odd_number, mask_bits, lhs, r5); - __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call. - - __ bind(&slow); - } - HandleBinaryOpSlowCases( - masm, - ¬_smi, - lhs, - rhs, - op_ == Token::MOD ? Builtins::MOD : Builtins::DIV); - break; - } - - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHR: - case Token::SHL: { - Label slow; - STATIC_ASSERT(kSmiTag == 0); // adjust code below - __ tst(smi_test_reg, Operand(kSmiTagMask)); - __ b(ne, &slow); - Register scratch2 = smi_test_reg; - smi_test_reg = no_reg; - switch (op_) { - case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break; - case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break; - case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break; - case Token::SAR: - // Remove tags from right operand. - __ GetLeastBitsFromSmi(scratch2, rhs, 5); - __ mov(result, Operand(lhs, ASR, scratch2)); - // Smi tag result. - __ bic(result, result, Operand(kSmiTagMask)); - break; - case Token::SHR: - // Remove tags from operands. We can't do this on a 31 bit number - // because then the 0s get shifted into bit 30 instead of bit 31. - __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x - __ GetLeastBitsFromSmi(scratch2, rhs, 5); - __ mov(scratch, Operand(scratch, LSR, scratch2)); - // Unsigned shift is not allowed to produce a negative number, so - // check the sign bit and the sign bit after Smi tagging. - __ tst(scratch, Operand(0xc0000000)); - __ b(ne, &slow); - // Smi tag result. - __ mov(result, Operand(scratch, LSL, kSmiTagSize)); - break; - case Token::SHL: - // Remove tags from operands. - __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x - __ GetLeastBitsFromSmi(scratch2, rhs, 5); - __ mov(scratch, Operand(scratch, LSL, scratch2)); - // Check that the signed result fits in a Smi. - __ add(scratch2, scratch, Operand(0x40000000), SetCC); - __ b(mi, &slow); - __ mov(result, Operand(scratch, LSL, kSmiTagSize)); - break; - default: UNREACHABLE(); - } - __ Ret(); - __ bind(&slow); - HandleNonSmiBitwiseOp(masm, lhs, rhs); - break; - } - - default: UNREACHABLE(); - } - // This code should be unreachable. - __ stop("Unreachable"); - - // Generate an unreachable reference to the DEFAULT stub so that it can be - // found at the end of this stub when clearing ICs at GC. - // TODO(kaznacheev): Check performance impact and get rid of this. - if (runtime_operands_type_ != BinaryOpIC::DEFAULT) { - GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT); - __ CallStub(&uninit); - } -} - - -void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { - Label get_result; - - __ Push(r1, r0); - - __ mov(r2, Operand(Smi::FromInt(MinorKey()))); - __ mov(r1, Operand(Smi::FromInt(op_))); - __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_))); - __ Push(r2, r1, r0); - - __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), - 5, - 1); -} - - -Handle GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { - GenericBinaryOpStub stub(key, type_info); - return stub.GetCode(); -} - - -void TranscendentalCacheStub::Generate(MacroAssembler* masm) { - // Argument is a number and is on stack and in r0. - Label runtime_call; - Label input_not_smi; - Label loaded; - - if (CpuFeatures::IsSupported(VFP3)) { - // Load argument and check if it is a smi. 
- __ BranchOnNotSmi(r0, &input_not_smi); - - CpuFeatures::Scope scope(VFP3); - // Input is a smi. Convert to double and load the low and high words - // of the double into r2, r3. - __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); - __ b(&loaded); - - __ bind(&input_not_smi); - // Check if input is a HeapNumber. - __ CheckMap(r0, - r1, - Heap::kHeapNumberMapRootIndex, - &runtime_call, - true); - // Input is a HeapNumber. Load it to a double register and store the - // low and high words into r2, r3. - __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); - - __ bind(&loaded); - // r2 = low 32 bits of double value - // r3 = high 32 bits of double value - // Compute hash (the shifts are arithmetic): - // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); - __ eor(r1, r2, Operand(r3)); - __ eor(r1, r1, Operand(r1, ASR, 16)); - __ eor(r1, r1, Operand(r1, ASR, 8)); - ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); - __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1)); - - // r2 = low 32 bits of double value. - // r3 = high 32 bits of double value. - // r1 = TranscendentalCache::hash(double value). - __ mov(r0, - Operand(ExternalReference::transcendental_cache_array_address())); - // r0 points to cache array. - __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0]))); - // r0 points to the cache for the type type_. - // If NULL, the cache hasn't been initialized yet, so go through runtime. - __ cmp(r0, Operand(0)); - __ b(eq, &runtime_call); - -#ifdef DEBUG - // Check that the layout of cache elements match expectations. - { TranscendentalCache::Element test_elem[2]; - char* elem_start = reinterpret_cast(&test_elem[0]); - char* elem2_start = reinterpret_cast(&test_elem[1]); - char* elem_in0 = reinterpret_cast(&(test_elem[0].in[0])); - char* elem_in1 = reinterpret_cast(&(test_elem[0].in[1])); - char* elem_out = reinterpret_cast(&(test_elem[0].output)); - CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. - CHECK_EQ(0, elem_in0 - elem_start); - CHECK_EQ(kIntSize, elem_in1 - elem_start); - CHECK_EQ(2 * kIntSize, elem_out - elem_start); - } -#endif - - // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. - __ add(r1, r1, Operand(r1, LSL, 1)); - __ add(r0, r0, Operand(r1, LSL, 2)); - // Check if cache matches: Double value is stored in uint32_t[2] array. - __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit()); - __ cmp(r2, r4); - __ b(ne, &runtime_call); - __ cmp(r3, r5); - __ b(ne, &runtime_call); - // Cache hit. Load result, pop argument and return. - __ mov(r0, Operand(r6)); - __ pop(); - __ Ret(); - } - - __ bind(&runtime_call); - __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); -} - - -Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { - switch (type_) { - // Add more cases when necessary. - case TranscendentalCache::SIN: return Runtime::kMath_sin; - case TranscendentalCache::COS: return Runtime::kMath_cos; - default: - UNIMPLEMENTED(); - return Runtime::kAbort; - } -} - - -void StackCheckStub::Generate(MacroAssembler* masm) { - // Do tail-call to runtime routine. Runtime routines expect at least one - // argument, so give it a Smi. 
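// ---- Editor's aside: illustrative sketch, not part of this patch ----
// The transcendental cache probe above in host C++. Each element is the two
// 32-bit halves of the input double plus a pointer to the cached result
// (12 bytes on a 32-bit target, matching the CHECK_EQs in the DEBUG block);
// the hash mixes the two halves with arithmetic shifts, as the ASR operands
// above do. Names are illustrative only.
#include <stdint.h>
#include <string.h>

struct CacheElement { uint32_t in[2]; void* output; };

static uint32_t TranscendentalHash(double v, uint32_t cache_size /* power of 2 */) {
  uint32_t words[2];
  memcpy(words, &v, sizeof(words));              // low and high word of the double
  int32_t h = static_cast<int32_t>(words[0] ^ words[1]);
  h ^= h >> 16;
  h ^= h >> 8;
  return static_cast<uint32_t>(h) & (cache_size - 1);
}
// ---- end aside ----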
- __ mov(r0, Operand(Smi::FromInt(0))); - __ push(r0); - __ TailCallRuntime(Runtime::kStackGuard, 1, 1); - - __ StubReturn(1); -} - - -void GenericUnaryOpStub::Generate(MacroAssembler* masm) { - Label slow, done; - - Register heap_number_map = r6; - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - - if (op_ == Token::SUB) { - // Check whether the value is a smi. - Label try_float; - __ tst(r0, Operand(kSmiTagMask)); - __ b(ne, &try_float); - - // Go slow case if the value of the expression is zero - // to make sure that we switch between 0 and -0. - if (negative_zero_ == kStrictNegativeZero) { - // If we have to check for zero, then we can check for the max negative - // smi while we are at it. - __ bic(ip, r0, Operand(0x80000000), SetCC); - __ b(eq, &slow); - __ rsb(r0, r0, Operand(0)); - __ StubReturn(1); - } else { - // The value of the expression is a smi and 0 is OK for -0. Try - // optimistic subtraction '0 - value'. - __ rsb(r0, r0, Operand(0), SetCC); - __ StubReturn(1, vc); - // We don't have to reverse the optimistic neg since the only case - // where we fall through is the minimum negative Smi, which is the case - // where the neg leaves the register unchanged. - __ jmp(&slow); // Go slow on max negative Smi. - } - - __ bind(&try_float); - __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - __ cmp(r1, heap_number_map); - __ b(ne, &slow); - // r0 is a heap number. Get a new heap number in r1. - if (overwrite_ == UNARY_OVERWRITE) { - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. - __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - } else { - __ AllocateHeapNumber(r1, r2, r3, r6, &slow); - __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); - __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. - __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); - __ mov(r0, Operand(r1)); - } - } else if (op_ == Token::BIT_NOT) { - // Check if the operand is a heap number. - __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - __ cmp(r1, heap_number_map); - __ b(ne, &slow); - - // Convert the heap number is r0 to an untagged integer in r1. - GetInt32(masm, r0, r1, r2, r3, &slow); - - // Do the bitwise operation (move negated) and check if the result - // fits in a smi. - Label try_float; - __ mvn(r1, Operand(r1)); - __ add(r2, r1, Operand(0x40000000), SetCC); - __ b(mi, &try_float); - __ mov(r0, Operand(r1, LSL, kSmiTagSize)); - __ b(&done); - - __ bind(&try_float); - if (!overwrite_ == UNARY_OVERWRITE) { - // Allocate a fresh heap number, but don't overwrite r0 until - // we're sure we can do it without going through the slow case - // that needs the value in r0. - __ AllocateHeapNumber(r2, r3, r4, r6, &slow); - __ mov(r0, Operand(r2)); - } - - if (CpuFeatures::IsSupported(VFP3)) { - // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. - CpuFeatures::Scope scope(VFP3); - __ vmov(s0, r1); - __ vcvt_f64_s32(d0, s0); - __ sub(r2, r0, Operand(kHeapObjectTag)); - __ vstr(d0, r2, HeapNumber::kValueOffset); - } else { - // WriteInt32ToHeapNumberStub does not trigger GC, so we do not - // have to set up a frame. 
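// ---- Editor's aside: illustrative sketch, not part of this patch ----
// The BIT_NOT fast path above in host C++: invert the untagged 32-bit value,
// then re-tag it as a Smi only if it still fits in 31 signed bits. The
// "add 0x40000000, branch on minus" sequence above is exactly that range
// check. Names are illustrative only.
#include <stdint.h>

static inline bool FitsInSmi(int32_t v) {
  // v is Smi-taggable iff v + 2^30 leaves the sign bit clear.
  return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
}

static inline bool BitNotToSmi(int32_t untagged, int32_t* tagged_result) {
  int32_t inverted = ~untagged;
  if (!FitsInSmi(inverted)) return false;        // store into a heap number instead
  *tagged_result = static_cast<int32_t>(static_cast<uint32_t>(inverted) << 1);
  return true;                                   // Smi-tagged (kSmiTagSize == 1)
}
// ---- end aside ----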
- WriteInt32ToHeapNumberStub stub(r1, r0, r2); - __ push(lr); - __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); - } - } else { - UNIMPLEMENTED(); - } - - __ bind(&done); - __ StubReturn(1); - - // Handle the slow case by jumping to the JavaScript builtin. - __ bind(&slow); - __ push(r0); - switch (op_) { - case Token::SUB: - __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS); - break; - case Token::BIT_NOT: - __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS); - break; - default: - UNREACHABLE(); - } -} - - -void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { - // r0 holds the exception. - - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop the sp to the top of the handler. - __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); - __ ldr(sp, MemOperand(r3)); - - // Restore the next handler and frame pointer, discard handler state. - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(r2); - __ str(r2, MemOperand(r3)); - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); - __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. - - // Before returning we restore the context from the frame pointer if - // not NULL. The frame pointer is NULL in the exception handler of a - // JS entry frame. - __ cmp(fp, Operand(0)); - // Set cp to NULL if fp is NULL. - __ mov(cp, Operand(0), LeaveCC, eq); - // Restore cp otherwise. - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); -#ifdef DEBUG - if (FLAG_debug_code) { - __ mov(lr, Operand(pc)); - } -#endif - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ pop(pc); -} - - -void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, - UncatchableExceptionType type) { - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop sp to the top stack handler. - __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); - __ ldr(sp, MemOperand(r3)); - - // Unwind the handlers until the ENTRY handler is found. - Label loop, done; - __ bind(&loop); - // Load the type of the current stack handler. - const int kStateOffset = StackHandlerConstants::kStateOffset; - __ ldr(r2, MemOperand(sp, kStateOffset)); - __ cmp(r2, Operand(StackHandler::ENTRY)); - __ b(eq, &done); - // Fetch the next handler in the list. - const int kNextOffset = StackHandlerConstants::kNextOffset; - __ ldr(sp, MemOperand(sp, kNextOffset)); - __ jmp(&loop); - __ bind(&done); - - // Set the top handler address to next handler past the current ENTRY handler. - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(r2); - __ str(r2, MemOperand(r3)); - - if (type == OUT_OF_MEMORY) { - // Set external caught exception to false. - ExternalReference external_caught(Top::k_external_caught_exception_address); - __ mov(r0, Operand(false)); - __ mov(r2, Operand(external_caught)); - __ str(r0, MemOperand(r2)); - - // Set pending exception and r0 to out of memory exception. - Failure* out_of_memory = Failure::OutOfMemoryException(); - __ mov(r0, Operand(reinterpret_cast(out_of_memory))); - __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); - __ str(r0, MemOperand(r2)); - } - - // Stack layout at this point. See also StackHandlerConstants. - // sp -> state (ENTRY) - // fp - // lr - - // Discard handler state (r2 is not used) and restore frame pointer. 
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); - __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. - // Before returning we restore the context from the frame pointer if - // not NULL. The frame pointer is NULL in the exception handler of a - // JS entry frame. - __ cmp(fp, Operand(0)); - // Set cp to NULL if fp is NULL. - __ mov(cp, Operand(0), LeaveCC, eq); - // Restore cp otherwise. - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); -#ifdef DEBUG - if (FLAG_debug_code) { - __ mov(lr, Operand(pc)); - } -#endif - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ pop(pc); -} - - -void CEntryStub::GenerateCore(MacroAssembler* masm, - Label* throw_normal_exception, - Label* throw_termination_exception, - Label* throw_out_of_memory_exception, - bool do_gc, - bool always_allocate, - int frame_alignment_skew) { - // r0: result parameter for PerformGC, if any - // r4: number of arguments including receiver (C callee-saved) - // r5: pointer to builtin function (C callee-saved) - // r6: pointer to the first argument (C callee-saved) - - if (do_gc) { - // Passing r0. - __ PrepareCallCFunction(1, r1); - __ CallCFunction(ExternalReference::perform_gc_function(), 1); - } - - ExternalReference scope_depth = - ExternalReference::heap_always_allocate_scope_depth(); - if (always_allocate) { - __ mov(r0, Operand(scope_depth)); - __ ldr(r1, MemOperand(r0)); - __ add(r1, r1, Operand(1)); - __ str(r1, MemOperand(r0)); - } - - // Call C built-in. - // r0 = argc, r1 = argv - __ mov(r0, Operand(r4)); - __ mov(r1, Operand(r6)); - - int frame_alignment = MacroAssembler::ActivationFrameAlignment(); - int frame_alignment_mask = frame_alignment - 1; -#if defined(V8_HOST_ARCH_ARM) - if (FLAG_debug_code) { - if (frame_alignment > kPointerSize) { - Label alignment_as_expected; - ASSERT(IsPowerOf2(frame_alignment)); - __ sub(r2, sp, Operand(frame_alignment_skew)); - __ tst(r2, Operand(frame_alignment_mask)); - __ b(eq, &alignment_as_expected); - // Don't use Check here, as it will call Runtime_Abort re-entering here. - __ stop("Unexpected alignment"); - __ bind(&alignment_as_expected); - } - } -#endif - - // Just before the call (jump) below lr is pushed, so the actual alignment is - // adding one to the current skew. - int alignment_before_call = - (frame_alignment_skew + kPointerSize) & frame_alignment_mask; - if (alignment_before_call > 0) { - // Push until the alignment before the call is met. - __ mov(r2, Operand(0)); - for (int i = alignment_before_call; - (i & frame_alignment_mask) != 0; - i += kPointerSize) { - __ push(r2); - } - } - - // TODO(1242173): To let the GC traverse the return address of the exit - // frames, we need to know where the return address is. Right now, - // we push it on the stack to be able to find it again, but we never - // restore from it in case of changes, which makes it impossible to - // support moving the C entry code stub. This should be fixed, but currently - // this is OK because the CEntryStub gets generated so early in the V8 boot - // sequence that it is not moving ever. - masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4 - masm->push(lr); - masm->Jump(r5); - - // Restore sp back to before aligning the stack. - if (alignment_before_call > 0) { - __ add(sp, sp, Operand(alignment_before_call)); - } - - if (always_allocate) { - // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 - // though (contain the result). 
- __ mov(r2, Operand(scope_depth)); - __ ldr(r3, MemOperand(r2)); - __ sub(r3, r3, Operand(1)); - __ str(r3, MemOperand(r2)); - } - - // check for failure result - Label failure_returned; - STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); - // Lower 2 bits of r2 are 0 iff r0 has failure tag. - __ add(r2, r0, Operand(1)); - __ tst(r2, Operand(kFailureTagMask)); - __ b(eq, &failure_returned); - - // Exit C frame and return. - // r0:r1: result - // sp: stack pointer - // fp: frame pointer - __ LeaveExitFrame(mode_); - - // check if we should retry or throw exception - Label retry; - __ bind(&failure_returned); - STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); - __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); - __ b(eq, &retry); - - // Special handling of out of memory exceptions. - Failure* out_of_memory = Failure::OutOfMemoryException(); - __ cmp(r0, Operand(reinterpret_cast(out_of_memory))); - __ b(eq, throw_out_of_memory_exception); - - // Retrieve the pending exception and clear the variable. - __ mov(ip, Operand(ExternalReference::the_hole_value_location())); - __ ldr(r3, MemOperand(ip)); - __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); - __ ldr(r0, MemOperand(ip)); - __ str(r3, MemOperand(ip)); - - // Special handling of termination exceptions which are uncatchable - // by javascript code. - __ cmp(r0, Operand(Factory::termination_exception())); - __ b(eq, throw_termination_exception); - - // Handle normal exception. - __ jmp(throw_normal_exception); - - __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying -} - - -void CEntryStub::Generate(MacroAssembler* masm) { - // Called from JavaScript; parameters are on stack as if calling JS function - // r0: number of arguments including receiver - // r1: pointer to builtin function - // fp: frame pointer (restored after C call) - // sp: stack pointer (restored as callee's sp after C call) - // cp: current context (C callee-saved) - - // Result returned in r0 or r0+r1 by default. - - // NOTE: Invocations of builtins may return failure objects - // instead of a proper result. The builtin entry handles - // this by performing a garbage collection and retrying the - // builtin once. - - // Enter the exit frame that transitions from JavaScript to C++. - __ EnterExitFrame(mode_); - - // r4: number of arguments (C callee-saved) - // r5: pointer to builtin function (C callee-saved) - // r6: pointer to first argument (C callee-saved) - - Label throw_normal_exception; - Label throw_termination_exception; - Label throw_out_of_memory_exception; - - // Call into the runtime system. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - false, - false, - -kPointerSize); - - // Do space-specific GC and retry runtime call. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - true, - false, - 0); - - // Do full GC and retry runtime call one final time. 
- Failure* failure = Failure::InternalError(); - __ mov(r0, Operand(reinterpret_cast(failure))); - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - true, - true, - kPointerSize); - - __ bind(&throw_out_of_memory_exception); - GenerateThrowUncatchable(masm, OUT_OF_MEMORY); - - __ bind(&throw_termination_exception); - GenerateThrowUncatchable(masm, TERMINATION); - - __ bind(&throw_normal_exception); - GenerateThrowTOS(masm); -} - - -void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { - // r0: code entry - // r1: function - // r2: receiver - // r3: argc - // [sp+0]: argv - - Label invoke, exit; - - // Called from C, so do not pop argc and args on exit (preserve sp) - // No need to save register-passed args - // Save callee-saved registers (incl. cp and fp), sp, and lr - __ stm(db_w, sp, kCalleeSaved | lr.bit()); - - // Get address of argv, see stm above. - // r0: code entry - // r1: function - // r2: receiver - // r3: argc - __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv - - // Push a frame with special values setup to mark it as an entry frame. - // r0: code entry - // r1: function - // r2: receiver - // r3: argc - // r4: argv - __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. - int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; - __ mov(r7, Operand(Smi::FromInt(marker))); - __ mov(r6, Operand(Smi::FromInt(marker))); - __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address))); - __ ldr(r5, MemOperand(r5)); - __ Push(r8, r7, r6, r5); - - // Setup frame pointer for the frame to be pushed. - __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); - - // Call a faked try-block that does the invoke. - __ bl(&invoke); - - // Caught exception: Store result (exception) in the pending - // exception field in the JSEnv and return a failure sentinel. - // Coming in here the fp will be invalid because the PushTryHandler below - // sets it to 0 to signal the existence of the JSEntry frame. - __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); - __ str(r0, MemOperand(ip)); - __ mov(r0, Operand(reinterpret_cast(Failure::Exception()))); - __ b(&exit); - - // Invoke: Link this frame into the handler chain. - __ bind(&invoke); - // Must preserve r0-r4, r5-r7 are available. - __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); - // If an exception not caught by another handler occurs, this handler - // returns control to the code after the bl(&invoke) above, which - // restores all kCalleeSaved registers (including cp and fp) to their - // saved values before returning a failure to C. - - // Clear any pending exceptions. - __ mov(ip, Operand(ExternalReference::the_hole_value_location())); - __ ldr(r5, MemOperand(ip)); - __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); - __ str(r5, MemOperand(ip)); - - // Invoke the function by calling through JS entry trampoline builtin. - // Notice that we cannot store a reference to the trampoline code directly in - // this stub, because runtime stubs are not traversed when doing GC. 
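// ---- Editor's aside: illustrative sketch, not part of this patch ----
// Why the trampoline is reached through an ExternalReference and a load just
// below: the stub embeds the address of a fixed cell rather than the code
// address itself, so the GC can move the trampoline without patching this
// stub. The cell name and types here are hypothetical, not V8 API.
typedef void (*CodeEntry)();
extern CodeEntry* js_entry_trampoline_cell;  // fixed location; contents may move

static void CallTrampolineThroughCell() {
  CodeEntry entry = *js_entry_trampoline_cell;  // "deref address", as above
  entry();                                      // then branch-and-link to it
}
// ---- end aside ----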
- - // Expected registers by Builtins::JSEntryTrampoline - // r0: code entry - // r1: function - // r2: receiver - // r3: argc - // r4: argv - if (is_construct) { - ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline); - __ mov(ip, Operand(construct_entry)); - } else { - ExternalReference entry(Builtins::JSEntryTrampoline); - __ mov(ip, Operand(entry)); - } - __ ldr(ip, MemOperand(ip)); // deref address - - // Branch and link to JSEntryTrampoline. We don't use the double underscore - // macro for the add instruction because we don't want the coverage tool - // inserting instructions here after we read the pc. - __ mov(lr, Operand(pc)); - masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); - - // Unlink this frame from the handler chain. When reading the - // address of the next handler, there is no need to use the address - // displacement since the current stack pointer (sp) points directly - // to the stack handler. - __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset)); - __ mov(ip, Operand(ExternalReference(Top::k_handler_address))); - __ str(r3, MemOperand(ip)); - // No need to restore registers - __ add(sp, sp, Operand(StackHandlerConstants::kSize)); - - - __ bind(&exit); // r0 holds result - // Restore the top frame descriptors from the stack. - __ pop(r3); - __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); - __ str(r3, MemOperand(ip)); - - // Reset the stack to the callee saved registers. - __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); - - // Restore callee-saved registers and return. -#ifdef DEBUG - if (FLAG_debug_code) { - __ mov(lr, Operand(pc)); - } -#endif - __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); -} - - -// This stub performs an instanceof, calling the builtin function if -// necessary. Uses r1 for the object, r0 for the function that it may -// be an instance of (these are fetched from the stack). -void InstanceofStub::Generate(MacroAssembler* masm) { - // Get the object - slow case for smis (we may need to throw an exception - // depending on the rhs). - Label slow, loop, is_instance, is_not_instance; - __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); - __ BranchOnSmi(r0, &slow); - - // Check that the left hand is a JS object and put map in r3. - __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE); - __ b(lt, &slow); - __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE)); - __ b(gt, &slow); - - // Get the prototype of the function (r4 is result, r2 is scratch). - __ ldr(r1, MemOperand(sp, 0)); - // r1 is function, r3 is map. - - // Look up the function and the map in the instanceof cache. - Label miss; - __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); - __ cmp(r1, ip); - __ b(ne, &miss); - __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); - __ cmp(r3, ip); - __ b(ne, &miss); - __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); - __ pop(); - __ pop(); - __ mov(pc, Operand(lr)); - - __ bind(&miss); - __ TryGetFunctionPrototype(r1, r4, r2, &slow); - - // Check that the function prototype is a JS object. - __ BranchOnSmi(r4, &slow); - __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE); - __ b(lt, &slow); - __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE)); - __ b(gt, &slow); - - __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex); - __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex); - - // Register mapping: r3 is object map and r4 is function prototype. - // Get prototype of object into r2. 
- __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset)); - - // Loop through the prototype chain looking for the function prototype. - __ bind(&loop); - __ cmp(r2, Operand(r4)); - __ b(eq, &is_instance); - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(r2, ip); - __ b(eq, &is_not_instance); - __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); - __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset)); - __ jmp(&loop); - - __ bind(&is_instance); - __ mov(r0, Operand(Smi::FromInt(0))); - __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); - __ pop(); - __ pop(); - __ mov(pc, Operand(lr)); // Return. - - __ bind(&is_not_instance); - __ mov(r0, Operand(Smi::FromInt(1))); - __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); - __ pop(); - __ pop(); - __ mov(pc, Operand(lr)); // Return. - - // Slow-case. Tail call builtin. - __ bind(&slow); - __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS); -} - - -void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { - // The displacement is the offset of the last parameter (if any) - // relative to the frame pointer. - static const int kDisplacement = - StandardFrameConstants::kCallerSPOffset - kPointerSize; - - // Check that the key is a smi. - Label slow; - __ BranchOnNotSmi(r1, &slow); - - // Check if the calling frame is an arguments adaptor frame. - Label adaptor; - __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); - __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - __ b(eq, &adaptor); - - // Check index against formal parameters count limit passed in - // through register r0. Use unsigned comparison to get negative - // check for free. - __ cmp(r1, r0); - __ b(cs, &slow); - - // Read the argument from the stack and return it. - __ sub(r3, r0, r1); - __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ ldr(r0, MemOperand(r3, kDisplacement)); - __ Jump(lr); - - // Arguments adaptor case: Check index against actual arguments - // limit found in the arguments adaptor frame. Use unsigned - // comparison to get negative check for free. - __ bind(&adaptor); - __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ cmp(r1, r0); - __ b(cs, &slow); - - // Read the argument from the adaptor frame and return it. - __ sub(r3, r0, r1); - __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ ldr(r0, MemOperand(r3, kDisplacement)); - __ Jump(lr); - - // Slow-case: Handle non-smi or out-of-bounds access to arguments - // by calling the runtime system. - __ bind(&slow); - __ push(r1); - __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); -} - - -void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { - // sp[0] : number of parameters - // sp[4] : receiver displacement - // sp[8] : function - - // Check if the calling frame is an arguments adaptor frame. - Label adaptor_frame, try_allocate, runtime; - __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); - __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - __ b(eq, &adaptor_frame); - - // Get the length from the frame. - __ ldr(r1, MemOperand(sp, 0)); - __ b(&try_allocate); - - // Patch the arguments.length and the parameters pointer. 
- __ bind(&adaptor_frame); - __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ str(r1, MemOperand(sp, 0)); - __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); - __ str(r3, MemOperand(sp, 1 * kPointerSize)); - - // Try the new space allocation. Start out with computing the size - // of the arguments object and the elements array in words. - Label add_arguments_object; - __ bind(&try_allocate); - __ cmp(r1, Operand(0)); - __ b(eq, &add_arguments_object); - __ mov(r1, Operand(r1, LSR, kSmiTagSize)); - __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); - __ bind(&add_arguments_object); - __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize)); - - // Do the allocation of both objects in one go. - __ AllocateInNewSpace( - r1, - r0, - r2, - r3, - &runtime, - static_cast(TAG_OBJECT | SIZE_IN_WORDS)); - - // Get the arguments boilerplate from the current (global) context. - int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); - __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset)); - __ ldr(r4, MemOperand(r4, offset)); - - // Copy the JS object part. - __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize); - - // Setup the callee in-object property. - STATIC_ASSERT(Heap::arguments_callee_index == 0); - __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); - __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize)); - - // Get the length (smi tagged) and set that as an in-object property too. - STATIC_ASSERT(Heap::arguments_length_index == 1); - __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); - __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize)); - - // If there are no actual arguments, we're done. - Label done; - __ cmp(r1, Operand(0)); - __ b(eq, &done); - - // Get the parameters pointer from the stack. - __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); - - // Setup the elements pointer in the allocated arguments object and - // initialize the header in the elements fixed array. - __ add(r4, r0, Operand(Heap::kArgumentsObjectSize)); - __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); - __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); - __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); - __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); - __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop. - - // Copy the fixed array slots. - Label loop; - // Setup r4 to point to the first array slot. - __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ bind(&loop); - // Pre-decrement r2 with kPointerSize on each iteration. - // Pre-decrement in order to skip receiver. - __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); - // Post-increment r4 with kPointerSize on each iteration. - __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); - __ sub(r1, r1, Operand(1)); - __ cmp(r1, Operand(0)); - __ b(ne, &loop); - - // Return and remove the on-stack parameters. - __ bind(&done); - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Ret(); - - // Do the runtime call to allocate the arguments object. 
- __ bind(&runtime); - __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); -} - - -void RegExpExecStub::Generate(MacroAssembler* masm) { - // Just jump directly to runtime if native RegExp is not selected at compile - // time or if regexp entry in generated code is turned off runtime switch or - // at compilation. -#ifdef V8_INTERPRETED_REGEXP - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); -#else // V8_INTERPRETED_REGEXP - if (!FLAG_regexp_entry_native) { - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); - return; - } - - // Stack frame on entry. - // sp[0]: last_match_info (expected JSArray) - // sp[4]: previous index - // sp[8]: subject string - // sp[12]: JSRegExp object - - static const int kLastMatchInfoOffset = 0 * kPointerSize; - static const int kPreviousIndexOffset = 1 * kPointerSize; - static const int kSubjectOffset = 2 * kPointerSize; - static const int kJSRegExpOffset = 3 * kPointerSize; - - Label runtime, invoke_regexp; - - // Allocation of registers for this function. These are in callee save - // registers and will be preserved by the call to the native RegExp code, as - // this code is called using the normal C calling convention. When calling - // directly from generated code the native RegExp code will not do a GC and - // therefore the content of these registers are safe to use after the call. - Register subject = r4; - Register regexp_data = r5; - Register last_match_info_elements = r6; - - // Ensure that a RegExp stack is allocated. - ExternalReference address_of_regexp_stack_memory_address = - ExternalReference::address_of_regexp_stack_memory_address(); - ExternalReference address_of_regexp_stack_memory_size = - ExternalReference::address_of_regexp_stack_memory_size(); - __ mov(r0, Operand(address_of_regexp_stack_memory_size)); - __ ldr(r0, MemOperand(r0, 0)); - __ tst(r0, Operand(r0)); - __ b(eq, &runtime); - - // Check that the first argument is a JSRegExp object. - __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); - STATIC_ASSERT(kSmiTag == 0); - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, &runtime); - __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); - __ b(ne, &runtime); - - // Check that the RegExp has been compiled (data contains a fixed array). - __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); - if (FLAG_debug_code) { - __ tst(regexp_data, Operand(kSmiTagMask)); - __ Check(nz, "Unexpected type for RegExp data, FixedArray expected"); - __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); - __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); - } - - // regexp_data: RegExp data (FixedArray) - // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. - __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); - __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); - __ b(ne, &runtime); - - // regexp_data: RegExp data (FixedArray) - // Check that the number of captures fit in the static offsets vector buffer. - __ ldr(r2, - FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); - // Calculate number of capture registers (number_of_captures + 1) * 2. This - // uses the asumption that smis are 2 * their untagged value. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(r2, r2, Operand(2)); // r2 was a smi. - // Check that the static offsets vector buffer is large enough. 
- __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize)); - __ b(hi, &runtime); - - // r2: Number of capture registers - // regexp_data: RegExp data (FixedArray) - // Check that the second argument is a string. - __ ldr(subject, MemOperand(sp, kSubjectOffset)); - __ tst(subject, Operand(kSmiTagMask)); - __ b(eq, &runtime); - Condition is_string = masm->IsObjectStringType(subject, r0); - __ b(NegateCondition(is_string), &runtime); - // Get the length of the string to r3. - __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset)); - - // r2: Number of capture registers - // r3: Length of subject string as a smi - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check that the third argument is a positive smi less than the subject - // string length. A negative value will be greater (unsigned comparison). - __ ldr(r0, MemOperand(sp, kPreviousIndexOffset)); - __ tst(r0, Operand(kSmiTagMask)); - __ b(ne, &runtime); - __ cmp(r3, Operand(r0)); - __ b(ls, &runtime); - - // r2: Number of capture registers - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check that the fourth object is a JSArray object. - __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, &runtime); - __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); - __ b(ne, &runtime); - // Check that the JSArray is in fast case. - __ ldr(last_match_info_elements, - FieldMemOperand(r0, JSArray::kElementsOffset)); - __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); - __ cmp(r0, ip); - __ b(ne, &runtime); - // Check that the last match info has space for the capture registers and the - // additional information. - __ ldr(r0, - FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); - __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead)); - __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); - __ b(gt, &runtime); - - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check the representation and encoding of the subject string. - Label seq_string; - __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); - __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); - // First check for flat string. - __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask)); - STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); - __ b(eq, &seq_string); - - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check for flat cons string. - // A flat cons string is a cons string where the second part is the empty - // string. In that case the subject string is just the first part of the cons - // string. Also in this case the first part of the cons string is known to be - // a sequential string or an external string. - STATIC_ASSERT(kExternalStringTag !=0); - STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); - __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag)); - __ b(ne, &runtime); - __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); - __ LoadRoot(r1, Heap::kEmptyStringRootIndex); - __ cmp(r0, r1); - __ b(ne, &runtime); - __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); - __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); - __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); - // Is first part a flat string? 
- STATIC_ASSERT(kSeqStringTag == 0); - __ tst(r0, Operand(kStringRepresentationMask)); - __ b(nz, &runtime); - - __ bind(&seq_string); - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // r0: Instance type of subject string - STATIC_ASSERT(4 == kAsciiStringTag); - STATIC_ASSERT(kTwoByteStringTag == 0); - // Find the code object based on the assumptions above. - __ and_(r0, r0, Operand(kStringEncodingMask)); - __ mov(r3, Operand(r0, ASR, 2), SetCC); - __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); - __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); - - // Check that the irregexp code has been generated for the actual string - // encoding. If it has, the field contains a code object otherwise it contains - // the hole. - __ CompareObjectType(r7, r0, r0, CODE_TYPE); - __ b(ne, &runtime); - - // r3: encoding of subject string (1 if ascii, 0 if two_byte); - // r7: code - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Load used arguments before starting to push arguments for call to native - // RegExp code to avoid handling changing stack height. - __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); - __ mov(r1, Operand(r1, ASR, kSmiTagSize)); - - // r1: previous index - // r3: encoding of subject string (1 if ascii, 0 if two_byte); - // r7: code - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // All checks done. Now push arguments for native regexp code. - __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2); - - static const int kRegExpExecuteArguments = 7; - __ push(lr); - __ PrepareCallCFunction(kRegExpExecuteArguments, r0); - - // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript. - __ mov(r0, Operand(1)); - __ str(r0, MemOperand(sp, 2 * kPointerSize)); - - // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area. - __ mov(r0, Operand(address_of_regexp_stack_memory_address)); - __ ldr(r0, MemOperand(r0, 0)); - __ mov(r2, Operand(address_of_regexp_stack_memory_size)); - __ ldr(r2, MemOperand(r2, 0)); - __ add(r0, r0, Operand(r2)); - __ str(r0, MemOperand(sp, 1 * kPointerSize)); - - // Argument 5 (sp[0]): static offsets vector buffer. - __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector())); - __ str(r0, MemOperand(sp, 0 * kPointerSize)); - - // For arguments 4 and 3 get string length, calculate start of string data and - // calculate the shift of the index (0 for ASCII and 1 for two byte). - __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset)); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); - __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - __ eor(r3, r3, Operand(1)); - // Argument 4 (r3): End of string data - // Argument 3 (r2): Start of string data - __ add(r2, r9, Operand(r1, LSL, r3)); - __ add(r3, r9, Operand(r0, LSL, r3)); - - // Argument 2 (r1): Previous index. - // Already there - - // Argument 1 (r0): Subject string. - __ mov(r0, subject); - - // Locate the code entry and call it. - __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ CallCFunction(r7, kRegExpExecuteArguments); - __ pop(lr); - - // r0: result - // subject: subject string (callee saved) - // regexp_data: RegExp data (callee saved) - // last_match_info_elements: Last match info elements (callee saved) - - // Check the result. 
- Label success; - __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); - __ b(eq, &success); - Label failure; - __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); - __ b(eq, &failure); - __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); - // If not exception it can only be retry. Handle that in the runtime system. - __ b(ne, &runtime); - // Result must now be exception. If there is no pending exception already a - // stack overflow (on the backtrack stack) was detected in RegExp code but - // haven't created the exception yet. Handle that in the runtime system. - // TODO(592): Rerunning the RegExp to get the stack overflow exception. - __ mov(r0, Operand(ExternalReference::the_hole_value_location())); - __ ldr(r0, MemOperand(r0, 0)); - __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address))); - __ ldr(r1, MemOperand(r1, 0)); - __ cmp(r0, r1); - __ b(eq, &runtime); - __ bind(&failure); - // For failure and exception return null. - __ mov(r0, Operand(Factory::null_value())); - __ add(sp, sp, Operand(4 * kPointerSize)); - __ Ret(); - - // Process the result from the native regexp code. - __ bind(&success); - __ ldr(r1, - FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); - // Calculate number of capture registers (number_of_captures + 1) * 2. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(r1, r1, Operand(2)); // r1 was a smi. - - // r1: number of capture registers - // r4: subject string - // Store the capture count. - __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi. - __ str(r2, FieldMemOperand(last_match_info_elements, - RegExpImpl::kLastCaptureCountOffset)); - // Store last subject and last input. - __ mov(r3, last_match_info_elements); // Moved up to reduce latency. - __ str(subject, - FieldMemOperand(last_match_info_elements, - RegExpImpl::kLastSubjectOffset)); - __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7); - __ str(subject, - FieldMemOperand(last_match_info_elements, - RegExpImpl::kLastInputOffset)); - __ mov(r3, last_match_info_elements); - __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7); - - // Get the static offsets vector filled by the native regexp code. - ExternalReference address_of_static_offsets_vector = - ExternalReference::address_of_static_offsets_vector(); - __ mov(r2, Operand(address_of_static_offsets_vector)); - - // r1: number of capture registers - // r2: offsets vector - Label next_capture, done; - // Capture register counter starts from number of capture registers and - // counts down until wraping after zero. - __ add(r0, - last_match_info_elements, - Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); - __ bind(&next_capture); - __ sub(r1, r1, Operand(1), SetCC); - __ b(mi, &done); - // Read the value from the static offsets vector buffer. - __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex)); - // Store the smi value in the last match info. - __ mov(r3, Operand(r3, LSL, kSmiTagSize)); - __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); - __ jmp(&next_capture); - __ bind(&done); - - // Return last match info. - __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); - __ add(sp, sp, Operand(4 * kPointerSize)); - __ Ret(); - - // Do the runtime call to execute the regexp. 
-  __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif  // V8_INTERPRETED_REGEXP
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
-  Label slow;
-
-  // If the receiver might be a value (string, number or boolean) check for this
-  // and box it if it is.
-  if (ReceiverMightBeValue()) {
-    // Get the receiver from the stack.
-    // function, receiver [, arguments]
-    Label receiver_is_value, receiver_is_js_object;
-    __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
-
-    // Check if receiver is a smi (which is a number value).
-    __ BranchOnSmi(r1, &receiver_is_value);
-
-    // Check if the receiver is a valid JS object.
-    __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
-    __ b(ge, &receiver_is_js_object);
-
-    // Call the runtime to box the value.
-    __ bind(&receiver_is_value);
-    __ EnterInternalFrame();
-    __ push(r1);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
-    __ LeaveInternalFrame();
-    __ str(r0, MemOperand(sp, argc_ * kPointerSize));
-
-    __ bind(&receiver_is_js_object);
-  }
-
-  // Get the function to call from the stack.
-  // function, receiver [, arguments]
-  __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
-  // Check that the function is really a JavaScript function.
-  // r1: pushed function (to be verified)
-  __ BranchOnSmi(r1, &slow);
-  // Get the map of the function object.
-  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &slow);
-
-  // Fast-case: Invoke the function now.
-  // r1: pushed function
-  ParameterCount actual(argc_);
-  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
-
-  // Slow-case: Non-function called.
-  __ bind(&slow);
-  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
-  // of the original receiver from the call site).
-  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
-  __ mov(r0, Operand(argc_));  // Setup the number of arguments.
-  __ mov(r2, Operand(0));
-  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
-  __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
-          RelocInfo::CODE_TARGET);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
-  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
-         (lhs_.is(r1) && rhs_.is(r0)));
-
-  if (name_ != NULL) return name_;
-  const int kMaxNameLength = 100;
-  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
-  if (name_ == NULL) return "OOM";
-
-  const char* cc_name;
-  switch (cc_) {
-    case lt: cc_name = "LT"; break;
-    case gt: cc_name = "GT"; break;
-    case le: cc_name = "LE"; break;
-    case ge: cc_name = "GE"; break;
-    case eq: cc_name = "EQ"; break;
-    case ne: cc_name = "NE"; break;
-    default: cc_name = "UnknownCondition"; break;
-  }
-
-  const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
-  const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
-
-  const char* strict_name = "";
-  if (strict_ && (cc_ == eq || cc_ == ne)) {
-    strict_name = "_STRICT";
-  }
-
-  const char* never_nan_nan_name = "";
-  if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
-    never_nan_nan_name = "_NO_NAN";
-  }
-
-  const char* include_number_compare_name = "";
-  if (!include_number_compare_) {
-    include_number_compare_name = "_NO_NUMBER";
-  }
-
-  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "CompareStub_%s%s%s%s%s%s",
-               cc_name,
-               lhs_name,
-               rhs_name,
-               strict_name,
-               never_nan_nan_name,
-               include_number_compare_name);
-  return name_;
-}
-
-
-int CompareStub::MinorKey() {
-  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
-  // stubs the never NaN NaN condition is only taken into account if the
-  // condition is equals.
-  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
-  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
-         (lhs_.is(r1) && rhs_.is(r0)));
-  return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
-         | RegisterField::encode(lhs_.is(r0))
-         | StrictField::encode(strict_)
-         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
-         | IncludeNumberCompareField::encode(include_number_compare_);
-}
-
-
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
-  Label flat_string;
-  Label ascii_string;
-  Label got_char_code;
-
-  // If the receiver is a smi trigger the non-string case.
-  __ BranchOnSmi(object_, receiver_not_string_);
-
-  // Fetch the instance type of the receiver into result register.
-  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
-  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
-  // If the receiver is not a string trigger the non-string case.
-  __ tst(result_, Operand(kIsNotStringMask));
-  __ b(ne, receiver_not_string_);
-
-  // If the index is non-smi trigger the non-smi case.
-  __ BranchOnNotSmi(index_, &index_not_smi_);
-
-  // Put smi-tagged index into scratch register.
-  __ mov(scratch_, index_);
-  __ bind(&got_smi_index_);
-
-  // Check for index out of range.
-  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
-  __ cmp(ip, Operand(scratch_));
-  __ b(ls, index_out_of_range_);
-
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(result_, Operand(kStringRepresentationMask));
-  __ b(eq, &flat_string);
-
-  // Handle non-flat strings.
-  __ tst(result_, Operand(kIsConsStringMask));
-  __ b(eq, &call_runtime_);
-
-  // ConsString.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
-  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
-  __ cmp(result_, Operand(ip));
-  __ b(ne, &call_runtime_);
-  // Get the first of the two strings and load its instance type.
-  __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
-  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
-  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(result_, Operand(kStringRepresentationMask));
-  __ b(nz, &call_runtime_);
-
-  // Check for 1-byte or 2-byte string.
- __ bind(&flat_string); - STATIC_ASSERT(kAsciiStringTag != 0); - __ tst(result_, Operand(kStringEncodingMask)); - __ b(nz, &ascii_string); - - // 2-byte string. - // Load the 2-byte character code into the result register. We can - // add without shifting since the smi tag size is the log2 of the - // number of bytes in a two-byte character. - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); - __ add(scratch_, object_, Operand(scratch_)); - __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); - __ jmp(&got_char_code); - - // ASCII string. - // Load the byte into the result register. - __ bind(&ascii_string); - __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize)); - __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize)); - - __ bind(&got_char_code); - __ mov(result_, Operand(result_, LSL, kSmiTagSize)); - __ bind(&exit_); -} - - -void StringCharCodeAtGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - __ Abort("Unexpected fallthrough to CharCodeAt slow case"); - - // Index is not a smi. - __ bind(&index_not_smi_); - // If index is a heap number, try converting it to an integer. - __ CheckMap(index_, - scratch_, - Heap::kHeapNumberMapRootIndex, - index_not_number_, - true); - call_helper.BeforeCall(masm); - __ Push(object_, index_); - __ push(index_); // Consumed by runtime conversion function. - if (index_flags_ == STRING_INDEX_IS_NUMBER) { - __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); - } else { - ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); - // NumberToSmi discards numbers that are not exact integers. - __ CallRuntime(Runtime::kNumberToSmi, 1); - } - // Save the conversion result before the pop instructions below - // have a chance to overwrite it. - __ Move(scratch_, r0); - __ pop(index_); - __ pop(object_); - // Reload the instance type. - __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); - __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); - call_helper.AfterCall(masm); - // If index is still not a smi, it must be out of range. - __ BranchOnNotSmi(scratch_, index_out_of_range_); - // Otherwise, return to the fast path. - __ jmp(&got_smi_index_); - - // Call runtime. We get here when the receiver is a string and the - // index is a number, but the code of getting the actual character - // is too complex (e.g., when the string needs to be flattened). - __ bind(&call_runtime_); - call_helper.BeforeCall(masm); - __ Push(object_, index_); - __ CallRuntime(Runtime::kStringCharCodeAt, 2); - __ Move(result_, r0); - call_helper.AfterCall(masm); - __ jmp(&exit_); - - __ Abort("Unexpected fallthrough from CharCodeAt slow case"); -} - - -// ------------------------------------------------------------------------- -// StringCharFromCodeGenerator - -void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { - // Fast case of Heap::LookupSingleCharacterStringFromCode. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiShiftSize == 0); - ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); - __ tst(code_, - Operand(kSmiTagMask | - ((~String::kMaxAsciiCharCode) << kSmiTagSize))); - __ b(nz, &slow_case_); - - __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); - // At this point code register contains smi tagged ascii char code. 
- STATIC_ASSERT(kSmiTag == 0); - __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(result_, Operand(ip)); - __ b(eq, &slow_case_); - __ bind(&exit_); -} - - -void StringCharFromCodeGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - __ Abort("Unexpected fallthrough to CharFromCode slow case"); - - __ bind(&slow_case_); - call_helper.BeforeCall(masm); - __ push(code_); - __ CallRuntime(Runtime::kCharFromCode, 1); - __ Move(result_, r0); - call_helper.AfterCall(masm); - __ jmp(&exit_); - - __ Abort("Unexpected fallthrough from CharFromCode slow case"); -} - - -// ------------------------------------------------------------------------- -// StringCharAtGenerator - -void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { - char_code_at_generator_.GenerateFast(masm); - char_from_code_generator_.GenerateFast(masm); -} - - -void StringCharAtGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - char_code_at_generator_.GenerateSlow(masm, call_helper); - char_from_code_generator_.GenerateSlow(masm, call_helper); -} - - -void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - bool ascii) { - Label loop; - Label done; - // This loop just copies one character at a time, as it is only used for very - // short strings. - if (!ascii) { - __ add(count, count, Operand(count), SetCC); - } else { - __ cmp(count, Operand(0)); - } - __ b(eq, &done); - - __ bind(&loop); - __ ldrb(scratch, MemOperand(src, 1, PostIndex)); - // Perform sub between load and dependent store to get the load time to - // complete. - __ sub(count, count, Operand(1), SetCC); - __ strb(scratch, MemOperand(dest, 1, PostIndex)); - // last iteration. - __ b(gt, &loop); - - __ bind(&done); -} - - -enum CopyCharactersFlags { - COPY_ASCII = 1, - DEST_ALWAYS_ALIGNED = 2 -}; - - -void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - int flags) { - bool ascii = (flags & COPY_ASCII) != 0; - bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; - - if (dest_always_aligned && FLAG_debug_code) { - // Check that destination is actually word aligned if the flag says - // that it is. - __ tst(dest, Operand(kPointerAlignmentMask)); - __ Check(eq, "Destination of copy not aligned."); - } - - const int kReadAlignment = 4; - const int kReadAlignmentMask = kReadAlignment - 1; - // Ensure that reading an entire aligned word containing the last character - // of a string will not read outside the allocated area (because we pad up - // to kObjectAlignment). - STATIC_ASSERT(kObjectAlignment >= kReadAlignment); - // Assumes word reads and writes are little endian. - // Nothing to do for zero characters. - Label done; - if (!ascii) { - __ add(count, count, Operand(count), SetCC); - } else { - __ cmp(count, Operand(0)); - } - __ b(eq, &done); - - // Assume that you cannot read (or write) unaligned. - Label byte_loop; - // Must copy at least eight bytes, otherwise just do it one byte at a time. - __ cmp(count, Operand(8)); - __ add(count, dest, Operand(count)); - Register limit = count; // Read until src equals this. 
- __ b(lt, &byte_loop); - - if (!dest_always_aligned) { - // Align dest by byte copying. Copies between zero and three bytes. - __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC); - Label dest_aligned; - __ b(eq, &dest_aligned); - __ cmp(scratch4, Operand(2)); - __ ldrb(scratch1, MemOperand(src, 1, PostIndex)); - __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le); - __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt); - __ strb(scratch1, MemOperand(dest, 1, PostIndex)); - __ strb(scratch2, MemOperand(dest, 1, PostIndex), le); - __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt); - __ bind(&dest_aligned); - } - - Label simple_loop; - - __ sub(scratch4, dest, Operand(src)); - __ and_(scratch4, scratch4, Operand(0x03), SetCC); - __ b(eq, &simple_loop); - // Shift register is number of bits in a source word that - // must be combined with bits in the next source word in order - // to create a destination word. - - // Complex loop for src/dst that are not aligned the same way. - { - Label loop; - __ mov(scratch4, Operand(scratch4, LSL, 3)); - Register left_shift = scratch4; - __ and_(src, src, Operand(~3)); // Round down to load previous word. - __ ldr(scratch1, MemOperand(src, 4, PostIndex)); - // Store the "shift" most significant bits of scratch in the least - // signficant bits (i.e., shift down by (32-shift)). - __ rsb(scratch2, left_shift, Operand(32)); - Register right_shift = scratch2; - __ mov(scratch1, Operand(scratch1, LSR, right_shift)); - - __ bind(&loop); - __ ldr(scratch3, MemOperand(src, 4, PostIndex)); - __ sub(scratch5, limit, Operand(dest)); - __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); - __ str(scratch1, MemOperand(dest, 4, PostIndex)); - __ mov(scratch1, Operand(scratch3, LSR, right_shift)); - // Loop if four or more bytes left to copy. - // Compare to eight, because we did the subtract before increasing dst. - __ sub(scratch5, scratch5, Operand(8), SetCC); - __ b(ge, &loop); - } - // There is now between zero and three bytes left to copy (negative that - // number is in scratch5), and between one and three bytes already read into - // scratch1 (eight times that number in scratch4). We may have read past - // the end of the string, but because objects are aligned, we have not read - // past the end of the object. - // Find the minimum of remaining characters to move and preloaded characters - // and write those as bytes. - __ add(scratch5, scratch5, Operand(4), SetCC); - __ b(eq, &done); - __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); - // Move minimum of bytes read and bytes left to copy to scratch4. - __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); - // Between one and three (value in scratch5) characters already read into - // scratch ready to write. - __ cmp(scratch5, Operand(2)); - __ strb(scratch1, MemOperand(dest, 1, PostIndex)); - __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); - __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); - __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); - __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); - // Copy any remaining bytes. - __ b(&byte_loop); - - // Simple loop. - // Copy words from src to dst, until less than four bytes left. - // Both src and dest are word aligned. - __ bind(&simple_loop); - { - Label loop; - __ bind(&loop); - __ ldr(scratch1, MemOperand(src, 4, PostIndex)); - __ sub(scratch3, limit, Operand(dest)); - __ str(scratch1, MemOperand(dest, 4, PostIndex)); - // Compare to 8, not 4, because we do the substraction before increasing - // dest. 
-    __ cmp(scratch3, Operand(8));
-    __ b(ge, &loop);
-  }
-
-  // Copy bytes from src to dst until dst hits limit.
-  __ bind(&byte_loop);
-  __ cmp(dest, Operand(limit));
-  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
-  __ b(ge, &done);
-  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
-  __ b(&byte_loop);
-
-  __ bind(&done);
-}
-
-
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
-                                                        Register c1,
-                                                        Register c2,
-                                                        Register scratch1,
-                                                        Register scratch2,
-                                                        Register scratch3,
-                                                        Register scratch4,
-                                                        Register scratch5,
-                                                        Label* not_found) {
-  // Register scratch3 is the general scratch register in this function.
-  Register scratch = scratch3;
-
-  // Make sure that both characters are not digits as such strings have a
-  // different hash algorithm. Don't try to look for these in the symbol table.
-  Label not_array_index;
-  __ sub(scratch, c1, Operand(static_cast<int>('0')));
-  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
-  __ b(hi, &not_array_index);
-  __ sub(scratch, c2, Operand(static_cast<int>('0')));
-  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
-
-  // If check failed combine both characters into single halfword.
-  // This is required by the contract of the method: code at the
-  // not_found branch expects this combination in c1 register
-  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
-  __ b(ls, not_found);
-
-  __ bind(&not_array_index);
-  // Calculate the two character string hash.
-  Register hash = scratch1;
-  StringHelper::GenerateHashInit(masm, hash, c1);
-  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
-  StringHelper::GenerateHashGetHash(masm, hash);
-
-  // Collect the two characters in a register.
-  Register chars = c1;
-  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
-
-  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
-  // hash: hash of two character string.
-
-  // Load symbol table
-  // Load address of first element of the symbol table.
-  Register symbol_table = c2;
-  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
-
-  // Load undefined value
-  Register undefined = scratch4;
-  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
-  // Calculate capacity mask from the symbol table capacity.
-  Register mask = scratch2;
-  __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
-  __ mov(mask, Operand(mask, ASR, 1));
-  __ sub(mask, mask, Operand(1));
-
-  // Calculate untagged address of the first element of the symbol table.
-  Register first_symbol_table_element = symbol_table;
-  __ add(first_symbol_table_element, symbol_table,
-         Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
-
-  // Registers
-  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
-  // hash: hash of two character string
-  // mask: capacity mask
-  // first_symbol_table_element: address of the first element of
-  //                             the symbol table
-  // scratch: -
-
-  // Perform a number of probes in the symbol table.
-  static const int kProbes = 4;
-  Label found_in_symbol_table;
-  Label next_probe[kProbes];
-  for (int i = 0; i < kProbes; i++) {
-    Register candidate = scratch5;  // Scratch register contains candidate.
-
-    // Calculate entry in symbol table.
-    if (i > 0) {
-      __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
-    } else {
-      __ mov(candidate, hash);
-    }
-
-    __ and_(candidate, candidate, Operand(mask));
-
-    // Load the entry from the symbol table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1); - __ ldr(candidate, - MemOperand(first_symbol_table_element, - candidate, - LSL, - kPointerSizeLog2)); - - // If entry is undefined no string with this hash can be found. - __ cmp(candidate, undefined); - __ b(eq, not_found); - - // If length is not 2 the string is not a candidate. - __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset)); - __ cmp(scratch, Operand(Smi::FromInt(2))); - __ b(ne, &next_probe[i]); - - // Check that the candidate is a non-external ascii string. - __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset)); - __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, - &next_probe[i]); - - // Check if the two characters match. - // Assumes that word load is little endian. - __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); - __ cmp(chars, scratch); - __ b(eq, &found_in_symbol_table); - __ bind(&next_probe[i]); - } - - // No matching 2 character string found by probing. - __ jmp(not_found); - - // Scratch register contains result when we fall through to here. - Register result = scratch; - __ bind(&found_in_symbol_table); - __ Move(r0, result); -} - - -void StringHelper::GenerateHashInit(MacroAssembler* masm, - Register hash, - Register character) { - // hash = character + (character << 10); - __ add(hash, character, Operand(character, LSL, 10)); - // hash ^= hash >> 6; - __ eor(hash, hash, Operand(hash, ASR, 6)); -} - - -void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, - Register hash, - Register character) { - // hash += character; - __ add(hash, hash, Operand(character)); - // hash += hash << 10; - __ add(hash, hash, Operand(hash, LSL, 10)); - // hash ^= hash >> 6; - __ eor(hash, hash, Operand(hash, ASR, 6)); -} - - -void StringHelper::GenerateHashGetHash(MacroAssembler* masm, - Register hash) { - // hash += hash << 3; - __ add(hash, hash, Operand(hash, LSL, 3)); - // hash ^= hash >> 11; - __ eor(hash, hash, Operand(hash, ASR, 11)); - // hash += hash << 15; - __ add(hash, hash, Operand(hash, LSL, 15), SetCC); - - // if (hash == 0) hash = 27; - __ mov(hash, Operand(27), LeaveCC, nz); -} - - -void SubStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - // Stack frame on entry. - // lr: return address - // sp[0]: to - // sp[4]: from - // sp[8]: string - - // This stub is called from the native-call %_SubString(...), so - // nothing can be assumed about the arguments. It is tested that: - // "string" is a sequential string, - // both "from" and "to" are smis, and - // 0 <= from <= to <= string.length. - // If any of these assumptions fail, we call the runtime system. - - static const int kToOffset = 0 * kPointerSize; - static const int kFromOffset = 1 * kPointerSize; - static const int kStringOffset = 2 * kPointerSize; - - - // Check bounds and smi-ness. - __ ldr(r7, MemOperand(sp, kToOffset)); - __ ldr(r6, MemOperand(sp, kFromOffset)); - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - // I.e., arithmetic shift right by one un-smi-tags. - __ mov(r2, Operand(r7, ASR, 1), SetCC); - __ mov(r3, Operand(r6, ASR, 1), SetCC, cc); - // If either r2 or r6 had the smi tag bit set, then carry is set now. - __ b(cs, &runtime); // Either "from" or "to" is not a smi. - __ b(mi, &runtime); // From is negative. - - __ sub(r2, r2, Operand(r3), SetCC); - __ b(mi, &runtime); // Fail if from > to. - // Special handling of sub-strings of length 1 and 2. 
One character strings
-  // are handled in the runtime system (looked up in the single character
-  // cache). Two character strings are looked for in the symbol cache.
-  __ cmp(r2, Operand(2));
-  __ b(lt, &runtime);
-
-  // r2: length
-  // r3: from index (untagged smi)
-  // r6: from (smi)
-  // r7: to (smi)
-
-  // Make sure first argument is a sequential (or flat) string.
-  __ ldr(r5, MemOperand(sp, kStringOffset));
-  STATIC_ASSERT(kSmiTag == 0);
-  __ tst(r5, Operand(kSmiTagMask));
-  __ b(eq, &runtime);
-  Condition is_string = masm->IsObjectStringType(r5, r1);
-  __ b(NegateCondition(is_string), &runtime);
-
-  // r1: instance type
-  // r2: length
-  // r3: from index (untagged smi)
-  // r5: string
-  // r6: from (smi)
-  // r7: to (smi)
-  Label seq_string;
-  __ and_(r4, r1, Operand(kStringRepresentationMask));
-  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
-  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
-  __ cmp(r4, Operand(kConsStringTag));
-  __ b(gt, &runtime);  // External strings go to runtime.
-  __ b(lt, &seq_string);  // Sequential strings are handled directly.
-
-  // Cons string. Try to recurse (once) on the first substring.
-  // (This adds a little more generality than necessary to handle flattened
-  // cons strings, but not much).
-  __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
-  __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
-  __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
-  __ tst(r1, Operand(kStringRepresentationMask));
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ b(ne, &runtime);  // Cons and External strings go to runtime.
-
-  // Definitely a sequential string.
-  __ bind(&seq_string);
-
-  // r1: instance type.
-  // r2: length
-  // r3: from index (untagged smi)
-  // r5: string
-  // r6: from (smi)
-  // r7: to (smi)
-  __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
-  __ cmp(r4, Operand(r7));
-  __ b(lt, &runtime);  // Fail if to > length.
-
-  // r1: instance type.
-  // r2: result string length.
-  // r3: from index (untagged smi)
-  // r5: string.
-  // r6: from offset (smi)
-  // Check for flat ascii string.
-  Label non_ascii_flat;
-  __ tst(r1, Operand(kStringEncodingMask));
-  STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ b(eq, &non_ascii_flat);
-
-  Label result_longer_than_two;
-  __ cmp(r2, Operand(2));
-  __ b(gt, &result_longer_than_two);
-
-  // Sub string of length 2 requested.
-  // Get the two characters forming the sub string.
-  __ add(r5, r5, Operand(r3));
-  __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
-  __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
-
-  // Try to lookup two character string in symbol table.
-  Label make_two_character_string;
-  StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
-  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
-  __ add(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
-
-  // r2: result string length.
-  // r3: two characters combined into halfword in little endian byte order.
-  __ bind(&make_two_character_string);
-  __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
-  __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
-  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
-  __ add(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
-
-  __ bind(&result_longer_than_two);
-
-  // Allocate the result.
-  __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
-
-  // r0: result string.
-  // r2: result string length.
-  // r5: string.
- // r6: from offset (smi) - // Locate first character of result. - __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // Locate 'from' character of string. - __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - __ add(r5, r5, Operand(r6, ASR, 1)); - - // r0: result string. - // r1: first character of result string. - // r2: result string length. - // r5: first character of sub string to copy. - STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); - StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, - COPY_ASCII | DEST_ALWAYS_ALIGNED); - __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Ret(); - - __ bind(&non_ascii_flat); - // r2: result string length. - // r5: string. - // r6: from offset (smi) - // Check for flat two byte string. - - // Allocate the result. - __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime); - - // r0: result string. - // r2: result string length. - // r5: string. - // Locate first character of result. - __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // Locate 'from' character of string. - __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // As "from" is a smi it is 2 times the value which matches the size of a two - // byte character. - __ add(r5, r5, Operand(r6)); - - // r0: result string. - // r1: first character of result. - // r2: result length. - // r5: first character of string to copy. - STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); - StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, - DEST_ALWAYS_ALIGNED); - __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Ret(); - - // Just jump to runtime to create the sub string. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kSubString, 3, 1); -} - - -void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, - Register left, - Register right, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4) { - Label compare_lengths; - // Find minimum length and length difference. - __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); - __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); - __ sub(scratch3, scratch1, Operand(scratch2), SetCC); - Register length_delta = scratch3; - __ mov(scratch1, scratch2, LeaveCC, gt); - Register min_length = scratch1; - STATIC_ASSERT(kSmiTag == 0); - __ tst(min_length, Operand(min_length)); - __ b(eq, &compare_lengths); - - // Untag smi. - __ mov(min_length, Operand(min_length, ASR, kSmiTagSize)); - - // Setup registers so that we only need to increment one register - // in the loop. - __ add(scratch2, min_length, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - __ add(left, left, Operand(scratch2)); - __ add(right, right, Operand(scratch2)); - // Registers left and right points to the min_length character of strings. - __ rsb(min_length, min_length, Operand(-1)); - Register index = min_length; - // Index starts at -min_length. - - { - // Compare loop. - Label loop; - __ bind(&loop); - // Compare characters. - __ add(index, index, Operand(1), SetCC); - __ ldrb(scratch2, MemOperand(left, index), ne); - __ ldrb(scratch4, MemOperand(right, index), ne); - // Skip to compare lengths with eq condition true. 
-    __ b(eq, &compare_lengths);
-    __ cmp(scratch2, scratch4);
-    __ b(eq, &loop);
-    // Fallthrough with eq condition false.
-  }
-  // Compare lengths - strings up to min-length are equal.
-  __ bind(&compare_lengths);
-  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
-  // Use zero length_delta as result.
-  __ mov(r0, Operand(length_delta), SetCC, eq);
-  // Fall through to here if characters compare not-equal.
-  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
-  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
-  __ Ret();
-}
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-
-  // Stack frame on entry.
-  // sp[0]: right string
-  // sp[4]: left string
-  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // left
-  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // right
-
-  Label not_same;
-  __ cmp(r0, r1);
-  __ b(ne, &not_same);
-  STATIC_ASSERT(EQUAL == 0);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
-  __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
-  __ add(sp, sp, Operand(2 * kPointerSize));
-  __ Ret();
-
-  __ bind(&not_same);
-
-  // Check that both objects are sequential ascii strings.
-  __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
-
-  // Compare flat ascii strings natively. Remove arguments from stack first.
-  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
-  __ add(sp, sp, Operand(2 * kPointerSize));
-  GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
-
-  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
-  // tagged as a small integer.
-  __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
-  Label string_add_runtime;
-  // Stack on entry:
-  // sp[0]: second argument.
-  // sp[4]: first argument.
-
-  // Load the two arguments.
-  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
-  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
-
-  // Make sure that both arguments are strings if not known in advance.
-  if (string_check_) {
-    STATIC_ASSERT(kSmiTag == 0);
-    __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
-    // Load instance types.
-    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
-    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
-    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
-    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
-    STATIC_ASSERT(kStringTag == 0);
-    // If either is not a string, go to runtime.
-    __ tst(r4, Operand(kIsNotStringMask));
-    __ tst(r5, Operand(kIsNotStringMask), eq);
-    __ b(ne, &string_add_runtime);
-  }
-
-  // Both arguments are strings.
-  // r0: first string
-  // r1: second string
-  // r4: first string instance type (if string_check_)
-  // r5: second string instance type (if string_check_)
-  {
-    Label strings_not_empty;
-    // Check if either of the strings are empty. In that case return the other.
-    __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
-    __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
-    STATIC_ASSERT(kSmiTag == 0);
-    __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
-    __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
-    STATIC_ASSERT(kSmiTag == 0);
-    // Else test if second string is empty.
-    __ cmp(r3, Operand(Smi::FromInt(0)), ne);
-    __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
- - __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - __ bind(&strings_not_empty); - } - - __ mov(r2, Operand(r2, ASR, kSmiTagSize)); - __ mov(r3, Operand(r3, ASR, kSmiTagSize)); - // Both strings are non-empty. - // r0: first string - // r1: second string - // r2: length of first string - // r3: length of second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) - // Look at the length of the result of adding the two strings. - Label string_add_flat_result, longer_than_two; - // Adding two lengths can't overflow. - STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); - __ add(r6, r2, Operand(r3)); - // Use the runtime system when adding two one character strings, as it - // contains optimizations for this specific case using the symbol table. - __ cmp(r6, Operand(2)); - __ b(ne, &longer_than_two); - - // Check that both strings are non-external ascii strings. - if (!string_check_) { - __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); - __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); - } - __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, - &string_add_runtime); - - // Get the two characters forming the sub string. - __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); - __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); - - // Try to lookup two character string in symbol table. If it is not found - // just allocate a new one. - Label make_two_character_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( - masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); - __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - __ bind(&make_two_character_string); - // Resulting string has length 2 and first chars of two strings - // are combined into single halfword in r2 register. - // So we can fill resulting string without two loops by a single - // halfword store instruction (which assumes that processor is - // in a little endian mode) - __ mov(r6, Operand(2)); - __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime); - __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); - __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - __ bind(&longer_than_two); - // Check if resulting string will be flat. - __ cmp(r6, Operand(String::kMinNonFlatLength)); - __ b(lt, &string_add_flat_result); - // Handle exceptionally long strings in the runtime system. - STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); - ASSERT(IsPowerOf2(String::kMaxLength + 1)); - // kMaxLength + 1 is representable as shifted literal, kMaxLength is not. - __ cmp(r6, Operand(String::kMaxLength + 1)); - __ b(hs, &string_add_runtime); - - // If result is not supposed to be flat, allocate a cons string object. - // If both strings are ascii the result is an ascii cons string. 
- if (!string_check_) { - __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); - __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); - } - Label non_ascii, allocated, ascii_data; - STATIC_ASSERT(kTwoByteStringTag == 0); - __ tst(r4, Operand(kStringEncodingMask)); - __ tst(r5, Operand(kStringEncodingMask), ne); - __ b(eq, &non_ascii); - - // Allocate an ASCII cons string. - __ bind(&ascii_data); - __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime); - __ bind(&allocated); - // Fill the fields of the cons string. - __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); - __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); - __ mov(r0, Operand(r7)); - __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - __ bind(&non_ascii); - // At least one of the strings is two-byte. Check whether it happens - // to contain only ascii characters. - // r4: first instance type. - // r5: second instance type. - __ tst(r4, Operand(kAsciiDataHintMask)); - __ tst(r5, Operand(kAsciiDataHintMask), ne); - __ b(ne, &ascii_data); - __ eor(r4, r4, Operand(r5)); - STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); - __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); - __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); - __ b(eq, &ascii_data); - - // Allocate a two byte cons string. - __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime); - __ jmp(&allocated); - - // Handle creating a flat result. First check that both strings are - // sequential and that they have the same encoding. - // r0: first string - // r1: second string - // r2: length of first string - // r3: length of second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) - // r6: sum of lengths. - __ bind(&string_add_flat_result); - if (!string_check_) { - __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); - __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); - } - // Check that both strings are sequential. - STATIC_ASSERT(kSeqStringTag == 0); - __ tst(r4, Operand(kStringRepresentationMask)); - __ tst(r5, Operand(kStringRepresentationMask), eq); - __ b(ne, &string_add_runtime); - // Now check if both strings have the same encoding (ASCII/Two-byte). - // r0: first string. - // r1: second string. - // r2: length of first string. - // r3: length of second string. - // r6: sum of lengths.. - Label non_ascii_string_add_flat_result; - ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test. - __ eor(r7, r4, Operand(r5)); - __ tst(r7, Operand(kStringEncodingMask)); - __ b(ne, &string_add_runtime); - // And see if it's ASCII or two-byte. - __ tst(r4, Operand(kStringEncodingMask)); - __ b(eq, &non_ascii_string_add_flat_result); - - // Both strings are sequential ASCII strings. We also know that they are - // short (since the sum of the lengths is less than kMinNonFlatLength). - // r6: length of resulting flat string - __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime); - // Locate first character of result. - __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // Locate first character of first argument. 
- __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // r0: first character of first string. - // r1: second string. - // r2: length of first string. - // r3: length of second string. - // r6: first character of result. - // r7: result string. - StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true); - - // Load second argument and locate first character. - __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // r1: first character of second string. - // r3: length of second string. - // r6: next character of result. - // r7: result string. - StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); - __ mov(r0, Operand(r7)); - __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - __ bind(&non_ascii_string_add_flat_result); - // Both strings are sequential two byte strings. - // r0: first string. - // r1: second string. - // r2: length of first string. - // r3: length of second string. - // r6: sum of length of strings. - __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime); - // r0: first string. - // r1: second string. - // r2: length of first string. - // r3: length of second string. - // r7: result string. - - // Locate first character of result. - __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // Locate first character of first argument. - __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - - // r0: first character of first string. - // r1: second string. - // r2: length of first string. - // r3: length of second string. - // r6: first character of result. - // r7: result string. - StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false); - - // Locate first character of second argument. - __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - - // r1: first character of second string. - // r3: length of second string. - // r6: next character of result (after copy of first string). - // r7: result string. - StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); - - __ mov(r0, Operand(r7)); - __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - // Just jump to runtime to add the two strings. - __ bind(&string_add_runtime); - __ TailCallRuntime(Runtime::kStringAdd, 2, 1); -} - - #undef __ } } // namespace v8::internal diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h index e550a62..85b27b4 100644 --- a/src/arm/codegen-arm.h +++ b/src/arm/codegen-arm.h @@ -612,510 +612,6 @@ class CodeGenerator: public AstVisitor { }; -// Compute a transcendental math function natively, or call the -// TranscendentalCache runtime function. 
-class TranscendentalCacheStub: public CodeStub { - public: - explicit TranscendentalCacheStub(TranscendentalCache::Type type) - : type_(type) {} - void Generate(MacroAssembler* masm); - private: - TranscendentalCache::Type type_; - Major MajorKey() { return TranscendentalCache; } - int MinorKey() { return type_; } - Runtime::FunctionId RuntimeFunction(); -}; - - -class ToBooleanStub: public CodeStub { - public: - explicit ToBooleanStub(Register tos) : tos_(tos) { } - - void Generate(MacroAssembler* masm); - - private: - Register tos_; - Major MajorKey() { return ToBoolean; } - int MinorKey() { return tos_.code(); } -}; - - -class GenericBinaryOpStub : public CodeStub { - public: - GenericBinaryOpStub(Token::Value op, - OverwriteMode mode, - Register lhs, - Register rhs, - int constant_rhs = CodeGenerator::kUnknownIntValue) - : op_(op), - mode_(mode), - lhs_(lhs), - rhs_(rhs), - constant_rhs_(constant_rhs), - specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)), - runtime_operands_type_(BinaryOpIC::DEFAULT), - name_(NULL) { } - - GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) - : op_(OpBits::decode(key)), - mode_(ModeBits::decode(key)), - lhs_(LhsRegister(RegisterBits::decode(key))), - rhs_(RhsRegister(RegisterBits::decode(key))), - constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))), - specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)), - runtime_operands_type_(type_info), - name_(NULL) { } - - private: - Token::Value op_; - OverwriteMode mode_; - Register lhs_; - Register rhs_; - int constant_rhs_; - bool specialized_on_rhs_; - BinaryOpIC::TypeInfo runtime_operands_type_; - char* name_; - - static const int kMaxKnownRhs = 0x40000000; - static const int kKnownRhsKeyBits = 6; - - // Minor key encoding in 17 bits. - class ModeBits: public BitField {}; - class OpBits: public BitField {}; - class TypeInfoBits: public BitField {}; - class RegisterBits: public BitField {}; - class KnownIntBits: public BitField {}; - - Major MajorKey() { return GenericBinaryOp; } - int MinorKey() { - ASSERT((lhs_.is(r0) && rhs_.is(r1)) || - (lhs_.is(r1) && rhs_.is(r0))); - // Encode the parameters in a unique 18 bit value. 
- return OpBits::encode(op_) - | ModeBits::encode(mode_) - | KnownIntBits::encode(MinorKeyForKnownInt()) - | TypeInfoBits::encode(runtime_operands_type_) - | RegisterBits::encode(lhs_.is(r0)); - } - - void Generate(MacroAssembler* masm); - void HandleNonSmiBitwiseOp(MacroAssembler* masm, - Register lhs, - Register rhs); - void HandleBinaryOpSlowCases(MacroAssembler* masm, - Label* not_smi, - Register lhs, - Register rhs, - const Builtins::JavaScript& builtin); - void GenerateTypeTransition(MacroAssembler* masm); - - static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) { - if (constant_rhs == CodeGenerator::kUnknownIntValue) return false; - if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3; - if (op == Token::MOD) { - if (constant_rhs <= 1) return false; - if (constant_rhs <= 10) return true; - if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true; - return false; - } - return false; - } - - int MinorKeyForKnownInt() { - if (!specialized_on_rhs_) return 0; - if (constant_rhs_ <= 10) return constant_rhs_ + 1; - ASSERT(IsPowerOf2(constant_rhs_)); - int key = 12; - int d = constant_rhs_; - while ((d & 1) == 0) { - key++; - d >>= 1; - } - ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits)); - return key; - } - - int KnownBitsForMinorKey(int key) { - if (!key) return 0; - if (key <= 11) return key - 1; - int d = 1; - while (key != 12) { - key--; - d <<= 1; - } - return d; - } - - Register LhsRegister(bool lhs_is_r0) { - return lhs_is_r0 ? r0 : r1; - } - - Register RhsRegister(bool lhs_is_r0) { - return lhs_is_r0 ? r1 : r0; - } - - bool ShouldGenerateSmiCode() { - return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) && - runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && - runtime_operands_type_ != BinaryOpIC::STRINGS; - } - - bool ShouldGenerateFPCode() { - return runtime_operands_type_ != BinaryOpIC::STRINGS; - } - - virtual int GetCodeKind() { return Code::BINARY_OP_IC; } - - virtual InlineCacheState GetICState() { - return BinaryOpIC::ToState(runtime_operands_type_); - } - - const char* GetName(); - -#ifdef DEBUG - void Print() { - if (!specialized_on_rhs_) { - PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); - } else { - PrintF("GenericBinaryOpStub (%s by %d)\n", - Token::String(op_), - constant_rhs_); - } - } -#endif -}; - - -class StringHelper : public AllStatic { - public: - // Generate code for copying characters using a simple loop. This should only - // be used in places where the number of characters is small and the - // additional setup and checking in GenerateCopyCharactersLong adds too much - // overhead. Copying of overlapping regions is not supported. - // Dest register ends at the position after the last character written. - static void GenerateCopyCharacters(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - bool ascii); - - // Generate code for copying a large number of characters. This function - // is allowed to spend extra time setting up conditions to make copying - // faster. Copying of overlapping regions is not supported. - // Dest register ends at the position after the last character written. - static void GenerateCopyCharactersLong(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - int flags); - - - // Probe the symbol table for a two character string. 
If the string is - // not found by probing a jump to the label not_found is performed. This jump - // does not guarantee that the string is not in the symbol table. If the - // string is found the code falls through with the string in register r0. - // Contents of both c1 and c2 registers are modified. At the exit c1 is - // guaranteed to contain halfword with low and high bytes equal to - // initial contents of c1 and c2 respectively. - static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, - Register c1, - Register c2, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - Label* not_found); - - // Generate string hash. - static void GenerateHashInit(MacroAssembler* masm, - Register hash, - Register character); - - static void GenerateHashAddCharacter(MacroAssembler* masm, - Register hash, - Register character); - - static void GenerateHashGetHash(MacroAssembler* masm, - Register hash); - - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); -}; - - -// Flag that indicates how to generate code for the stub StringAddStub. -enum StringAddFlags { - NO_STRING_ADD_FLAGS = 0, - NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. -}; - - -class StringAddStub: public CodeStub { - public: - explicit StringAddStub(StringAddFlags flags) { - string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); - } - - private: - Major MajorKey() { return StringAdd; } - int MinorKey() { return string_check_ ? 0 : 1; } - - void Generate(MacroAssembler* masm); - - // Should the stub check whether arguments are strings? - bool string_check_; -}; - - -class SubStringStub: public CodeStub { - public: - SubStringStub() {} - - private: - Major MajorKey() { return SubString; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); -}; - - - -class StringCompareStub: public CodeStub { - public: - StringCompareStub() { } - - // Compare two flat ASCII strings and returns result in r0. - // Does not use the stack. - static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, - Register left, - Register right, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4); - - private: - Major MajorKey() { return StringCompare; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); -}; - - -// This stub can do a fast mod operation without using fp. -// It is tail called from the GenericBinaryOpStub and it always -// returns an answer. It never causes GC so it doesn't need a real frame. -// -// The inputs are always positive Smis. This is never called -// where the denominator is a power of 2. We handle that separately. -// -// If we consider the denominator as an odd number multiplied by a power of 2, -// then: -// * The exponent (power of 2) is in the shift_distance register. -// * The odd number is in the odd_number register. It is always in the range -// of 3 to 25. -// * The bits from the numerator that are to be copied to the answer (there are -// shift_distance of them) are in the mask_bits register. -// * The other bits of the numerator have been shifted down and are in the lhs -// register. 
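The comment above describes the decomposition the stub relies on: with the denominator written as odd_number * 2^shift_distance, the low shift_distance bits of the numerator (mask_bits) pass straight into the answer, and only the shifted-down part (lhs) still needs reducing modulo the odd factor. A small standalone C++ check of that identity (plain integers; the register names from the comment are used only as variable names):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t odd_number = 7;      // always in the range 3..25 in the stub
  const uint32_t shift_distance = 4;  // exponent of the power-of-two factor
  const uint32_t denominator = odd_number << shift_distance;
  for (uint32_t n = 0; n < 100000; n++) {
    uint32_t mask_bits = n & ((1u << shift_distance) - 1);  // copied unchanged
    uint32_t lhs = n >> shift_distance;                     // still to be reduced
    uint32_t answer = ((lhs % odd_number) << shift_distance) | mask_bits;
    assert(answer == n % denominator);  // n mod (odd * 2^k) splits as claimed
  }
  return 0;
}

The stub itself computes lhs % odd_number without a divide (via the DigitSum / ModReduce / ModGetInRangeBySubtraction helpers declared below); the sketch only checks the decomposition that makes copying the low bits valid.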
-class IntegerModStub : public CodeStub { - public: - IntegerModStub(Register result, - Register shift_distance, - Register odd_number, - Register mask_bits, - Register lhs, - Register scratch) - : result_(result), - shift_distance_(shift_distance), - odd_number_(odd_number), - mask_bits_(mask_bits), - lhs_(lhs), - scratch_(scratch) { - // We don't code these in the minor key, so they should always be the same. - // We don't really want to fix that since this stub is rather large and we - // don't want many copies of it. - ASSERT(shift_distance_.is(r9)); - ASSERT(odd_number_.is(r4)); - ASSERT(mask_bits_.is(r3)); - ASSERT(scratch_.is(r5)); - } - - private: - Register result_; - Register shift_distance_; - Register odd_number_; - Register mask_bits_; - Register lhs_; - Register scratch_; - - // Minor key encoding in 16 bits. - class ResultRegisterBits: public BitField {}; - class LhsRegisterBits: public BitField {}; - - Major MajorKey() { return IntegerMod; } - int MinorKey() { - // Encode the parameters in a unique 16 bit value. - return ResultRegisterBits::encode(result_.code()) - | LhsRegisterBits::encode(lhs_.code()); - } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "IntegerModStub"; } - - // Utility functions. - void DigitSum(MacroAssembler* masm, - Register lhs, - int mask, - int shift, - Label* entry); - void DigitSum(MacroAssembler* masm, - Register lhs, - Register scratch, - int mask, - int shift1, - int shift2, - Label* entry); - void ModGetInRangeBySubtraction(MacroAssembler* masm, - Register lhs, - int shift, - int rhs); - void ModReduce(MacroAssembler* masm, - Register lhs, - int max, - int denominator); - void ModAnswer(MacroAssembler* masm, - Register result, - Register shift_distance, - Register mask_bits, - Register sum_of_digits); - - -#ifdef DEBUG - void Print() { PrintF("IntegerModStub\n"); } -#endif -}; - - -// This stub can convert a signed int32 to a heap number (double). It does -// not work for int32s that are in Smi range! No GC occurs during this stub -// so you don't have to set up the frame. -class WriteInt32ToHeapNumberStub : public CodeStub { - public: - WriteInt32ToHeapNumberStub(Register the_int, - Register the_heap_number, - Register scratch) - : the_int_(the_int), - the_heap_number_(the_heap_number), - scratch_(scratch) { } - - private: - Register the_int_; - Register the_heap_number_; - Register scratch_; - - // Minor key encoding in 16 bits. - class IntRegisterBits: public BitField {}; - class HeapNumberRegisterBits: public BitField {}; - class ScratchRegisterBits: public BitField {}; - - Major MajorKey() { return WriteInt32ToHeapNumber; } - int MinorKey() { - // Encode the parameters in a unique 16 bit value. - return IntRegisterBits::encode(the_int_.code()) - | HeapNumberRegisterBits::encode(the_heap_number_.code()) - | ScratchRegisterBits::encode(scratch_.code()); - } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "WriteInt32ToHeapNumberStub"; } - -#ifdef DEBUG - void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); } -#endif -}; - - -class NumberToStringStub: public CodeStub { - public: - NumberToStringStub() { } - - // Generate code to do a lookup in the number string cache. If the number in - // the register object is found in the cache the generated code falls through - // with the result in the result register. The object and the result register - // can be the same. 
If the number is not found in the cache the code jumps to - // the label not_found with only the content of register object unchanged. - static void GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - Register scratch3, - bool object_is_smi, - Label* not_found); - - private: - Major MajorKey() { return NumberToString; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "NumberToStringStub"; } - -#ifdef DEBUG - void Print() { - PrintF("NumberToStringStub\n"); - } -#endif -}; - - -class RecordWriteStub : public CodeStub { - public: - RecordWriteStub(Register object, Register offset, Register scratch) - : object_(object), offset_(offset), scratch_(scratch) { } - - void Generate(MacroAssembler* masm); - - private: - Register object_; - Register offset_; - Register scratch_; - -#ifdef DEBUG - void Print() { - PrintF("RecordWriteStub (object reg %d), (offset reg %d)," - " (scratch reg %d)\n", - object_.code(), offset_.code(), scratch_.code()); - } -#endif - - // Minor key encoding in 12 bits. 4 bits for each of the three - // registers (object, offset and scratch) OOOOAAAASSSS. - class ScratchBits: public BitField {}; - class OffsetBits: public BitField {}; - class ObjectBits: public BitField {}; - - Major MajorKey() { return RecordWrite; } - - int MinorKey() { - // Encode the registers. - return ObjectBits::encode(object_.code()) | - OffsetBits::encode(offset_.code()) | - ScratchBits::encode(scratch_.code()); - } -}; - - } } // namespace v8::internal #endif // V8_ARM_CODEGEN_ARM_H_ diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc index e0caa91..3c48dbd 100644 --- a/src/arm/full-codegen-arm.cc +++ b/src/arm/full-codegen-arm.cc @@ -29,6 +29,7 @@ #if defined(V8_TARGET_ARCH_ARM) +#include "code-stubs-arm.h" #include "codegen-inl.h" #include "compiler.h" #include "debug.h" diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc index abc0922..816aba0 100644 --- a/src/arm/ic-arm.cc +++ b/src/arm/ic-arm.cc @@ -30,7 +30,7 @@ #if defined(V8_TARGET_ARCH_ARM) #include "assembler-arm.h" -#include "codegen.h" +#include "code-stubs-arm.h" #include "codegen-inl.h" #include "disasm.h" #include "ic-inl.h" diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc new file mode 100644 index 0000000..3fbf18a --- /dev/null +++ b/src/ia32/code-stubs-ia32.cc @@ -0,0 +1,4539 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#if defined(V8_TARGET_ARCH_IA32) + +#include "bootstrapper.h" +#include "code-stubs-ia32.h" +#include "codegen-inl.h" +#include "regexp-macro-assembler.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm) +void FastNewClosureStub::Generate(MacroAssembler* masm) { + // Create a new closure from the given function info in new + // space. Set the context to the current context in esi. + Label gc; + __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT); + + // Get the function info from the stack. + __ mov(edx, Operand(esp, 1 * kPointerSize)); + + // Compute the function map in the current global context and set that + // as the map of the allocated object. + __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset)); + __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); + __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx); + + // Initialize the rest of the function. We don't have to update the + // write barrier because the allocated object is in new space. + __ mov(ebx, Immediate(Factory::empty_fixed_array())); + __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx); + __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx); + __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset), + Immediate(Factory::the_hole_value())); + __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx); + __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi); + __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx); + + // Initialize the code pointer in the function to be the one + // found in the shared function info object. + __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); + __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); + __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx); + + // Return and remove the on-stack parameter. + __ ret(1 * kPointerSize); + + // Create a new closure through the slower runtime call. + __ bind(&gc); + __ pop(ecx); // Temporarily remove return address. + __ pop(edx); + __ push(esi); + __ push(edx); + __ push(ecx); // Restore return address. + __ TailCallRuntime(Runtime::kNewClosure, 2, 1); +} + + +void FastNewContextStub::Generate(MacroAssembler* masm) { + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, + eax, ebx, ecx, &gc, TAG_OBJECT); + + // Get the function from the stack. + __ mov(ecx, Operand(esp, 1 * kPointerSize)); + + // Setup the object header. + __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map()); + __ mov(FieldOperand(eax, Context::kLengthOffset), + Immediate(Smi::FromInt(length))); + + // Setup the fixed slots. + __ xor_(ebx, Operand(ebx)); // Set to NULL. 
+ __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx); + __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax); + __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx); + __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx); + + // Copy the global object from the surrounding context. We go through the + // context in the function (ecx) to match the allocation behavior we have + // in the runtime system (see Heap::AllocateFunctionContext). + __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset)); + __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx); + + // Initialize the rest of the slots to undefined. + __ mov(ebx, Factory::undefined_value()); + for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { + __ mov(Operand(eax, Context::SlotOffset(i)), ebx); + } + + // Return and remove the on-stack parameter. + __ mov(esi, Operand(eax)); + __ ret(1 * kPointerSize); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kNewContext, 1, 1); +} + + +void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [esp + kPointerSize]: constant elements. + // [esp + (2 * kPointerSize)]: literal index. + // [esp + (3 * kPointerSize)]: literals array. + + // All sizes here are multiples of kPointerSize. + int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0; + int size = JSArray::kSize + elements_size; + + // Load boilerplate object into ecx and check if we need to create a + // boilerplate. + Label slow_case; + __ mov(ecx, Operand(esp, 3 * kPointerSize)); + __ mov(eax, Operand(esp, 2 * kPointerSize)); + STATIC_ASSERT(kPointerSize == 4); + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); + __ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax)); + __ cmp(ecx, Factory::undefined_value()); + __ j(equal, &slow_case); + + if (FLAG_debug_code) { + const char* message; + Handle expected_map; + if (mode_ == CLONE_ELEMENTS) { + message = "Expected (writable) fixed array"; + expected_map = Factory::fixed_array_map(); + } else { + ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS); + message = "Expected copy-on-write fixed array"; + expected_map = Factory::fixed_cow_array_map(); + } + __ push(ecx); + __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); + __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map); + __ Assert(equal, message); + __ pop(ecx); + } + + // Allocate both the JS array and the elements array in one big + // allocation. This avoids multiple limit checks. + __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT); + + // Copy the JS array part. + for (int i = 0; i < JSArray::kSize; i += kPointerSize) { + if ((i != JSArray::kElementsOffset) || (length_ == 0)) { + __ mov(ebx, FieldOperand(ecx, i)); + __ mov(FieldOperand(eax, i), ebx); + } + } + + if (length_ > 0) { + // Get hold of the elements array of the boilerplate and setup the + // elements pointer in the resulting object. + __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); + __ lea(edx, Operand(eax, JSArray::kSize)); + __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx); + + // Copy the elements array. + for (int i = 0; i < elements_size; i += kPointerSize) { + __ mov(ebx, FieldOperand(ecx, i)); + __ mov(FieldOperand(edx, i), ebx); + } + } + + // Return and remove the on-stack parameters. 
+ __ ret(3 * kPointerSize); + + __ bind(&slow_case); + __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); +} + + +// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined). +void ToBooleanStub::Generate(MacroAssembler* masm) { + Label false_result, true_result, not_string; + __ mov(eax, Operand(esp, 1 * kPointerSize)); + + // 'null' => false. + __ cmp(eax, Factory::null_value()); + __ j(equal, &false_result); + + // Get the map and type of the heap object. + __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset)); + + // Undetectable => false. + __ test_b(FieldOperand(edx, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + __ j(not_zero, &false_result); + + // JavaScript object => true. + __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE); + __ j(above_equal, &true_result); + + // String value => false iff empty. + __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE); + __ j(above_equal, ¬_string); + STATIC_ASSERT(kSmiTag == 0); + __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0)); + __ j(zero, &false_result); + __ jmp(&true_result); + + __ bind(¬_string); + // HeapNumber => false iff +0, -0, or NaN. + __ cmp(edx, Factory::heap_number_map()); + __ j(not_equal, &true_result); + __ fldz(); + __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); + __ FCmp(); + __ j(zero, &false_result); + // Fall through to |true_result|. + + // Return 1/0 for true/false in eax. + __ bind(&true_result); + __ mov(eax, 1); + __ ret(1 * kPointerSize); + __ bind(&false_result); + __ mov(eax, 0); + __ ret(1 * kPointerSize); +} + + +const char* GenericBinaryOpStub::GetName() { + if (name_ != NULL) return name_; + const int kMaxNameLength = 100; + name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); + if (name_ == NULL) return "OOM"; + const char* op_name = Token::Name(op_); + const char* overwrite_name; + switch (mode_) { + case NO_OVERWRITE: overwrite_name = "Alloc"; break; + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; + default: overwrite_name = "UnknownOverwrite"; break; + } + + OS::SNPrintF(Vector(name_, kMaxNameLength), + "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s", + op_name, + overwrite_name, + (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", + args_in_registers_ ? "RegArgs" : "StackArgs", + args_reversed_ ? "_R" : "", + static_operands_type_.ToString(), + BinaryOpIC::GetName(runtime_operands_type_)); + return name_; +} + + +void GenericBinaryOpStub::GenerateCall( + MacroAssembler* masm, + Register left, + Register right) { + if (!ArgsInRegistersSupported()) { + // Pass arguments on the stack. + __ push(left); + __ push(right); + } else { + // The calling convention with registers is left in edx and right in eax. + Register left_arg = edx; + Register right_arg = eax; + if (!(left.is(left_arg) && right.is(right_arg))) { + if (left.is(right_arg) && right.is(left_arg)) { + if (IsOperationCommutative()) { + SetArgsReversed(); + } else { + __ xchg(left, right); + } + } else if (left.is(left_arg)) { + __ mov(right_arg, right); + } else if (right.is(right_arg)) { + __ mov(left_arg, left); + } else if (left.is(right_arg)) { + if (IsOperationCommutative()) { + __ mov(left_arg, right); + SetArgsReversed(); + } else { + // Order of moves important to avoid destroying left argument. 
+ __ mov(left_arg, left); + __ mov(right_arg, right); + } + } else if (right.is(left_arg)) { + if (IsOperationCommutative()) { + __ mov(right_arg, left); + SetArgsReversed(); + } else { + // Order of moves important to avoid destroying right argument. + __ mov(right_arg, right); + __ mov(left_arg, left); + } + } else { + // Order of moves is not important. + __ mov(left_arg, left); + __ mov(right_arg, right); + } + } + + // Update flags to indicate that arguments are in registers. + SetArgsInRegisters(); + __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); + } + + // Call the stub. + __ CallStub(this); +} + + +void GenericBinaryOpStub::GenerateCall( + MacroAssembler* masm, + Register left, + Smi* right) { + if (!ArgsInRegistersSupported()) { + // Pass arguments on the stack. + __ push(left); + __ push(Immediate(right)); + } else { + // The calling convention with registers is left in edx and right in eax. + Register left_arg = edx; + Register right_arg = eax; + if (left.is(left_arg)) { + __ mov(right_arg, Immediate(right)); + } else if (left.is(right_arg) && IsOperationCommutative()) { + __ mov(left_arg, Immediate(right)); + SetArgsReversed(); + } else { + // For non-commutative operations, left and right_arg might be + // the same register. Therefore, the order of the moves is + // important here in order to not overwrite left before moving + // it to left_arg. + __ mov(left_arg, left); + __ mov(right_arg, Immediate(right)); + } + + // Update flags to indicate that arguments are in registers. + SetArgsInRegisters(); + __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); + } + + // Call the stub. + __ CallStub(this); +} + + +void GenericBinaryOpStub::GenerateCall( + MacroAssembler* masm, + Smi* left, + Register right) { + if (!ArgsInRegistersSupported()) { + // Pass arguments on the stack. + __ push(Immediate(left)); + __ push(right); + } else { + // The calling convention with registers is left in edx and right in eax. + Register left_arg = edx; + Register right_arg = eax; + if (right.is(right_arg)) { + __ mov(left_arg, Immediate(left)); + } else if (right.is(left_arg) && IsOperationCommutative()) { + __ mov(right_arg, Immediate(left)); + SetArgsReversed(); + } else { + // For non-commutative operations, right and left_arg might be + // the same register. Therefore, the order of the moves is + // important here in order to not overwrite right before moving + // it to right_arg. + __ mov(right_arg, right); + __ mov(left_arg, Immediate(left)); + } + // Update flags to indicate that arguments are in registers. + SetArgsInRegisters(); + __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); + } + + // Call the stub. + __ CallStub(this); +} + + +class FloatingPointHelper : public AllStatic { + public: + + enum ArgLocation { + ARGS_ON_STACK, + ARGS_IN_REGISTERS + }; + + // Code pattern for loading a floating point value. Input value must + // be either a smi or a heap number object (fp value). Requirements: + // operand in register number. Returns operand as floating point number + // on FPU stack. + static void LoadFloatOperand(MacroAssembler* masm, Register number); + + // Code pattern for loading floating point values. Input values must + // be either smi or heap number objects (fp values). Requirements: + // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax. + // Returns operands as floating point numbers on FPU stack. 
+ static void LoadFloatOperands(MacroAssembler* masm, + Register scratch, + ArgLocation arg_location = ARGS_ON_STACK); + + // Similar to LoadFloatOperand but assumes that both operands are smis. + // Expects operands in edx, eax. + static void LoadFloatSmis(MacroAssembler* masm, Register scratch); + + // Test if operands are smi or number objects (fp). Requirements: + // operand_1 in eax, operand_2 in edx; falls through on float + // operands, jumps to the non_float label otherwise. + static void CheckFloatOperands(MacroAssembler* masm, + Label* non_float, + Register scratch); + + // Takes the operands in edx and eax and loads them as integers in eax + // and ecx. + static void LoadAsIntegers(MacroAssembler* masm, + TypeInfo type_info, + bool use_sse3, + Label* operand_conversion_failure); + static void LoadNumbersAsIntegers(MacroAssembler* masm, + TypeInfo type_info, + bool use_sse3, + Label* operand_conversion_failure); + static void LoadUnknownsAsIntegers(MacroAssembler* masm, + bool use_sse3, + Label* operand_conversion_failure); + + // Test if operands are smis or heap numbers and load them + // into xmm0 and xmm1 if they are. Operands are in edx and eax. + // Leaves operands unchanged. + static void LoadSSE2Operands(MacroAssembler* masm); + + // Test if operands are numbers (smi or HeapNumber objects), and load + // them into xmm0 and xmm1 if they are. Jump to label not_numbers if + // either operand is not a number. Operands are in edx and eax. + // Leaves operands unchanged. + static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers); + + // Similar to LoadSSE2Operands but assumes that both operands are smis. + // Expects operands in edx, eax. + static void LoadSSE2Smis(MacroAssembler* masm, Register scratch); +}; + + +void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { + // 1. Move arguments into edx, eax except for DIV and MOD, which need the + // dividend in eax and edx free for the division. Use eax, ebx for those. + Comment load_comment(masm, "-- Load arguments"); + Register left = edx; + Register right = eax; + if (op_ == Token::DIV || op_ == Token::MOD) { + left = eax; + right = ebx; + if (HasArgsInRegisters()) { + __ mov(ebx, eax); + __ mov(eax, edx); + } + } + if (!HasArgsInRegisters()) { + __ mov(right, Operand(esp, 1 * kPointerSize)); + __ mov(left, Operand(esp, 2 * kPointerSize)); + } + + if (static_operands_type_.IsSmi()) { + if (FLAG_debug_code) { + __ AbortIfNotSmi(left); + __ AbortIfNotSmi(right); + } + if (op_ == Token::BIT_OR) { + __ or_(right, Operand(left)); + GenerateReturn(masm); + return; + } else if (op_ == Token::BIT_AND) { + __ and_(right, Operand(left)); + GenerateReturn(masm); + return; + } else if (op_ == Token::BIT_XOR) { + __ xor_(right, Operand(left)); + GenerateReturn(masm); + return; + } + } + + // 2. Prepare the smi check of both operands by oring them together. + Comment smi_check_comment(masm, "-- Smi check arguments"); + Label not_smis; + Register combined = ecx; + ASSERT(!left.is(combined) && !right.is(combined)); + switch (op_) { + case Token::BIT_OR: + // Perform the operation into eax and smi check the result. Preserve + // eax in case the result is not a smi. + ASSERT(!left.is(ecx) && !right.is(ecx)); + __ mov(ecx, right); + __ or_(right, Operand(left)); // Bitwise or is commutative. 
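GenerateSmiCode, which continues below, leans on the 32-bit smi representation: kSmiTag == 0 with a one-bit tag, so a smi is its 31-bit payload shifted up by one, both operands are smis exactly when the bitwise OR of the two values has the tag bit clear (the single test performed on `combined`), and an untagged result can only be retagged while it stays in the 31-bit range; for the unsigned SHR result that means neither of the two top bits may be set. A standalone sketch of those checks using plain constants instead of the V8 headers (illustrative only, not the stub's code):

#include <cassert>
#include <cstdint>

const int32_t kSmiTagMask = 1;  // kSmiTag == 0, one tag bit

// A smi is the 31-bit payload shifted up by one; the low bit is the tag.
int32_t SmiTag(int32_t value) { return value * 2; }
int32_t SmiUntag(int32_t smi) { return smi / 2; }  // tagged values are even

// Both values are smis iff the OR of the two has the tag bit clear.
bool BothSmis(int32_t a, int32_t b) { return ((a | b) & kSmiTagMask) == 0; }

// An unsigned result (the SHR case) can be retagged only if neither of the two
// high-order bits is set: bit 31 would be lost by tagging and bit 30 would make
// the tagged value negative. This mirrors `test(left, Immediate(0xc0000000))`.
bool UnsignedResultFitsInSmi(uint32_t value) { return (value & 0xC0000000u) == 0; }

int main() {
  assert(BothSmis(SmiTag(2), SmiTag(-7)));
  assert(!BothSmis(SmiTag(2), SmiTag(-7) | 1));   // second value has the tag bit set
  assert(SmiUntag(SmiTag(-7)) == -7);
  assert(UnsignedResultFitsInSmi(0x3FFFFFFFu));   // largest smi payload
  assert(!UnsignedResultFitsInSmi(0x40000000u));  // one past the smi range
  return 0;
}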
+ combined = right; + break; + + case Token::BIT_XOR: + case Token::BIT_AND: + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: + case Token::MOD: + __ mov(combined, right); + __ or_(combined, Operand(left)); + break; + + case Token::SHL: + case Token::SAR: + case Token::SHR: + // Move the right operand into ecx for the shift operation, use eax + // for the smi check register. + ASSERT(!left.is(ecx) && !right.is(ecx)); + __ mov(ecx, right); + __ or_(right, Operand(left)); + combined = right; + break; + + default: + break; + } + + // 3. Perform the smi check of the operands. + STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case. + __ test(combined, Immediate(kSmiTagMask)); + __ j(not_zero, ¬_smis, not_taken); + + // 4. Operands are both smis, perform the operation leaving the result in + // eax and check the result if necessary. + Comment perform_smi(masm, "-- Perform smi operation"); + Label use_fp_on_smis; + switch (op_) { + case Token::BIT_OR: + // Nothing to do. + break; + + case Token::BIT_XOR: + ASSERT(right.is(eax)); + __ xor_(right, Operand(left)); // Bitwise xor is commutative. + break; + + case Token::BIT_AND: + ASSERT(right.is(eax)); + __ and_(right, Operand(left)); // Bitwise and is commutative. + break; + + case Token::SHL: + // Remove tags from operands (but keep sign). + __ SmiUntag(left); + __ SmiUntag(ecx); + // Perform the operation. + __ shl_cl(left); + // Check that the *signed* result fits in a smi. + __ cmp(left, 0xc0000000); + __ j(sign, &use_fp_on_smis, not_taken); + // Tag the result and store it in register eax. + __ SmiTag(left); + __ mov(eax, left); + break; + + case Token::SAR: + // Remove tags from operands (but keep sign). + __ SmiUntag(left); + __ SmiUntag(ecx); + // Perform the operation. + __ sar_cl(left); + // Tag the result and store it in register eax. + __ SmiTag(left); + __ mov(eax, left); + break; + + case Token::SHR: + // Remove tags from operands (but keep sign). + __ SmiUntag(left); + __ SmiUntag(ecx); + // Perform the operation. + __ shr_cl(left); + // Check that the *unsigned* result fits in a smi. + // Neither of the two high-order bits can be set: + // - 0x80000000: high bit would be lost when smi tagging. + // - 0x40000000: this number would convert to negative when + // Smi tagging these two cases can only happen with shifts + // by 0 or 1 when handed a valid smi. + __ test(left, Immediate(0xc0000000)); + __ j(not_zero, slow, not_taken); + // Tag the result and store it in register eax. + __ SmiTag(left); + __ mov(eax, left); + break; + + case Token::ADD: + ASSERT(right.is(eax)); + __ add(right, Operand(left)); // Addition is commutative. + __ j(overflow, &use_fp_on_smis, not_taken); + break; + + case Token::SUB: + __ sub(left, Operand(right)); + __ j(overflow, &use_fp_on_smis, not_taken); + __ mov(eax, left); + break; + + case Token::MUL: + // If the smi tag is 0 we can just leave the tag on one operand. + STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case. + // We can't revert the multiplication if the result is not a smi + // so save the right operand. + __ mov(ebx, right); + // Remove tag from one of the operands (but keep sign). + __ SmiUntag(right); + // Do multiplication. + __ imul(right, Operand(left)); // Multiplication is commutative. + __ j(overflow, &use_fp_on_smis, not_taken); + // Check for negative zero result. Use combined = left | right. 
+ __ NegativeZeroTest(right, combined, &use_fp_on_smis); + break; + + case Token::DIV: + // We can't revert the division if the result is not a smi so + // save the left operand. + __ mov(edi, left); + // Check for 0 divisor. + __ test(right, Operand(right)); + __ j(zero, &use_fp_on_smis, not_taken); + // Sign extend left into edx:eax. + ASSERT(left.is(eax)); + __ cdq(); + // Divide edx:eax by right. + __ idiv(right); + // Check for the corner case of dividing the most negative smi by + // -1. We cannot use the overflow flag, since it is not set by idiv + // instruction. + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + __ cmp(eax, 0x40000000); + __ j(equal, &use_fp_on_smis); + // Check for negative zero result. Use combined = left | right. + __ NegativeZeroTest(eax, combined, &use_fp_on_smis); + // Check that the remainder is zero. + __ test(edx, Operand(edx)); + __ j(not_zero, &use_fp_on_smis); + // Tag the result and store it in register eax. + __ SmiTag(eax); + break; + + case Token::MOD: + // Check for 0 divisor. + __ test(right, Operand(right)); + __ j(zero, ¬_smis, not_taken); + + // Sign extend left into edx:eax. + ASSERT(left.is(eax)); + __ cdq(); + // Divide edx:eax by right. + __ idiv(right); + // Check for negative zero result. Use combined = left | right. + __ NegativeZeroTest(edx, combined, slow); + // Move remainder to register eax. + __ mov(eax, edx); + break; + + default: + UNREACHABLE(); + } + + // 5. Emit return of result in eax. + GenerateReturn(masm); + + // 6. For some operations emit inline code to perform floating point + // operations on known smis (e.g., if the result of the operation + // overflowed the smi range). + switch (op_) { + case Token::SHL: { + Comment perform_float(masm, "-- Perform float operation on smis"); + __ bind(&use_fp_on_smis); + // Result we want is in left == edx, so we can put the allocated heap + // number in eax. + __ AllocateHeapNumber(eax, ecx, ebx, slow); + // Store the result in the HeapNumber and return. + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + __ cvtsi2sd(xmm0, Operand(left)); + __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); + } else { + // It's OK to overwrite the right argument on the stack because we + // are about to return. + __ mov(Operand(esp, 1 * kPointerSize), left); + __ fild_s(Operand(esp, 1 * kPointerSize)); + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); + } + GenerateReturn(masm); + break; + } + + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: { + Comment perform_float(masm, "-- Perform float operation on smis"); + __ bind(&use_fp_on_smis); + // Restore arguments to edx, eax. + switch (op_) { + case Token::ADD: + // Revert right = right + left. + __ sub(right, Operand(left)); + break; + case Token::SUB: + // Revert left = left - right. + __ add(left, Operand(right)); + break; + case Token::MUL: + // Right was clobbered but a copy is in ebx. + __ mov(right, ebx); + break; + case Token::DIV: + // Left was clobbered but a copy is in edi. Right is in ebx for + // division. 
+ __ mov(edx, edi); + __ mov(eax, right); + break; + default: UNREACHABLE(); + break; + } + __ AllocateHeapNumber(ecx, ebx, no_reg, slow); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + FloatingPointHelper::LoadSSE2Smis(masm, ebx); + switch (op_) { + case Token::ADD: __ addsd(xmm0, xmm1); break; + case Token::SUB: __ subsd(xmm0, xmm1); break; + case Token::MUL: __ mulsd(xmm0, xmm1); break; + case Token::DIV: __ divsd(xmm0, xmm1); break; + default: UNREACHABLE(); + } + __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0); + } else { // SSE2 not available, use FPU. + FloatingPointHelper::LoadFloatSmis(masm, ebx); + switch (op_) { + case Token::ADD: __ faddp(1); break; + case Token::SUB: __ fsubp(1); break; + case Token::MUL: __ fmulp(1); break; + case Token::DIV: __ fdivp(1); break; + default: UNREACHABLE(); + } + __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset)); + } + __ mov(eax, ecx); + GenerateReturn(masm); + break; + } + + default: + break; + } + + // 7. Non-smi operands, fall out to the non-smi code with the operands in + // edx and eax. + Comment done_comment(masm, "-- Enter non-smi code"); + __ bind(¬_smis); + switch (op_) { + case Token::BIT_OR: + case Token::SHL: + case Token::SAR: + case Token::SHR: + // Right operand is saved in ecx and eax was destroyed by the smi + // check. + __ mov(eax, ecx); + break; + + case Token::DIV: + case Token::MOD: + // Operands are in eax, ebx at this point. + __ mov(edx, eax); + __ mov(eax, ebx); + break; + + default: + break; + } +} + + +void GenericBinaryOpStub::Generate(MacroAssembler* masm) { + Label call_runtime; + + __ IncrementCounter(&Counters::generic_binary_stub_calls, 1); + + // Generate fast case smi code if requested. This flag is set when the fast + // case smi code is not generated by the caller. Generating it here will speed + // up common operations. + if (ShouldGenerateSmiCode()) { + GenerateSmiCode(masm, &call_runtime); + } else if (op_ != Token::MOD) { // MOD goes straight to runtime. + if (!HasArgsInRegisters()) { + GenerateLoadArguments(masm); + } + } + + // Floating point case. + if (ShouldGenerateFPCode()) { + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: { + if (runtime_operands_type_ == BinaryOpIC::DEFAULT && + HasSmiCodeInStub()) { + // Execution reaches this point when the first non-smi argument occurs + // (and only if smi code is generated). This is the right moment to + // patch to HEAP_NUMBERS state. The transition is attempted only for + // the four basic operations. The stub stays in the DEFAULT state + // forever for all other operations (also if smi code is skipped). + GenerateTypeTransition(masm); + break; + } + + Label not_floats; + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + if (static_operands_type_.IsNumber()) { + if (FLAG_debug_code) { + // Assert at runtime that inputs are only numbers. 
+ __ AbortIfNotNumber(edx); + __ AbortIfNotNumber(eax); + } + if (static_operands_type_.IsSmi()) { + if (FLAG_debug_code) { + __ AbortIfNotSmi(edx); + __ AbortIfNotSmi(eax); + } + FloatingPointHelper::LoadSSE2Smis(masm, ecx); + } else { + FloatingPointHelper::LoadSSE2Operands(masm); + } + } else { + FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime); + } + + switch (op_) { + case Token::ADD: __ addsd(xmm0, xmm1); break; + case Token::SUB: __ subsd(xmm0, xmm1); break; + case Token::MUL: __ mulsd(xmm0, xmm1); break; + case Token::DIV: __ divsd(xmm0, xmm1); break; + default: UNREACHABLE(); + } + GenerateHeapResultAllocation(masm, &call_runtime); + __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); + GenerateReturn(masm); + } else { // SSE2 not available, use FPU. + if (static_operands_type_.IsNumber()) { + if (FLAG_debug_code) { + // Assert at runtime that inputs are only numbers. + __ AbortIfNotNumber(edx); + __ AbortIfNotNumber(eax); + } + } else { + FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx); + } + FloatingPointHelper::LoadFloatOperands( + masm, + ecx, + FloatingPointHelper::ARGS_IN_REGISTERS); + switch (op_) { + case Token::ADD: __ faddp(1); break; + case Token::SUB: __ fsubp(1); break; + case Token::MUL: __ fmulp(1); break; + case Token::DIV: __ fdivp(1); break; + default: UNREACHABLE(); + } + Label after_alloc_failure; + GenerateHeapResultAllocation(masm, &after_alloc_failure); + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); + GenerateReturn(masm); + __ bind(&after_alloc_failure); + __ ffree(); + __ jmp(&call_runtime); + } + __ bind(¬_floats); + if (runtime_operands_type_ == BinaryOpIC::DEFAULT && + !HasSmiCodeInStub()) { + // Execution reaches this point when the first non-number argument + // occurs (and only if smi code is skipped from the stub, otherwise + // the patching has already been done earlier in this case branch). + // Try patching to STRINGS for ADD operation. + if (op_ == Token::ADD) { + GenerateTypeTransition(masm); + } + } + break; + } + case Token::MOD: { + // For MOD we go directly to runtime in the non-smi case. + break; + } + case Token::BIT_OR: + case Token::BIT_AND: + case Token::BIT_XOR: + case Token::SAR: + case Token::SHL: + case Token::SHR: { + Label non_smi_result; + FloatingPointHelper::LoadAsIntegers(masm, + static_operands_type_, + use_sse3_, + &call_runtime); + switch (op_) { + case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; + case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; + case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; + case Token::SAR: __ sar_cl(eax); break; + case Token::SHL: __ shl_cl(eax); break; + case Token::SHR: __ shr_cl(eax); break; + default: UNREACHABLE(); + } + if (op_ == Token::SHR) { + // Check if result is non-negative and fits in a smi. + __ test(eax, Immediate(0xc0000000)); + __ j(not_zero, &call_runtime); + } else { + // Check if result fits in a smi. + __ cmp(eax, 0xc0000000); + __ j(negative, &non_smi_result); + } + // Tag smi result and return. + __ SmiTag(eax); + GenerateReturn(masm); + + // All ops except SHR return a signed int32 that we load in + // a HeapNumber. + if (op_ != Token::SHR) { + __ bind(&non_smi_result); + // Allocate a heap number if needed. + __ mov(ebx, Operand(eax)); // ebx: result + Label skip_allocation; + switch (mode_) { + case OVERWRITE_LEFT: + case OVERWRITE_RIGHT: + // If the operand was an object, we skip the + // allocation of a heap number. + __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? 
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ebx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ GenerateReturn(masm);
+ }
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+ }
+
+ // If all else fails, use the runtime system to get the correct
+ // result. If arguments was passed in registers now place them on the
+ // stack in the correct order below the return address.
+ __ bind(&call_runtime);
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ switch (op_) {
+ case Token::ADD: {
+ // Test for string arguments before calling runtime.
+ Label not_strings, not_string1, string1, string1_smi2;
+
+ // If this stub has already generated FP-specific code then the arguments
+ // are already in edx, eax
+ if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+
+ // Registers containing left and right operands respectively.
+ Register lhs, rhs;
+ if (HasArgsReversed()) {
+ lhs = eax;
+ rhs = edx;
+ } else {
+ lhs = edx;
+ rhs = eax;
+ }
+
+ // Test if first argument is a string.
+ __ test(lhs, Immediate(kSmiTagMask));
+ __ j(zero, &not_string1);
+ __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &not_string1);
+
+ // First argument is a string, test second.
+ __ test(rhs, Immediate(kSmiTagMask));
+ __ j(zero, &string1_smi2);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &string1);
+
+ // First and second argument are strings. Jump to the string add stub.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to lookup the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, rhs, edi, ebx, ecx, true, &string1);
+
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ mov(Operand(esp, 1 * kPointerSize), edi);
+ __ TailCallStub(&string_add_stub);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ test(rhs, Immediate(kSmiTagMask));
+ __ j(zero, &not_strings);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); + break; + } + case Token::SUB: + __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); + break; + case Token::MUL: + __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); + break; + case Token::DIV: + __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); + break; + case Token::MOD: + __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); + break; + case Token::BIT_OR: + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); + break; + case Token::BIT_AND: + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); + break; + case Token::BIT_XOR: + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); + break; + case Token::SAR: + __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); + break; + case Token::SHL: + __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); + break; + case Token::SHR: + __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); + break; + default: + UNREACHABLE(); + } +} + + +void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, + Label* alloc_failure) { + Label skip_allocation; + OverwriteMode mode = mode_; + if (HasArgsReversed()) { + if (mode == OVERWRITE_RIGHT) { + mode = OVERWRITE_LEFT; + } else if (mode == OVERWRITE_LEFT) { + mode = OVERWRITE_RIGHT; + } + } + switch (mode) { + case OVERWRITE_LEFT: { + // If the argument in edx is already an object, we skip the + // allocation of a heap number. + __ test(edx, Immediate(kSmiTagMask)); + __ j(not_zero, &skip_allocation, not_taken); + // Allocate a heap number for the result. Keep eax and edx intact + // for the possible runtime call. + __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); + // Now edx can be overwritten losing one of the arguments as we are + // now done and will not need it any more. + __ mov(edx, Operand(ebx)); + __ bind(&skip_allocation); + // Use object in edx as a result holder + __ mov(eax, Operand(edx)); + break; + } + case OVERWRITE_RIGHT: + // If the argument in eax is already an object, we skip the + // allocation of a heap number. + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &skip_allocation, not_taken); + // Fall through! + case NO_OVERWRITE: + // Allocate a heap number for the result. Keep eax and edx intact + // for the possible runtime call. + __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); + // Now eax can be overwritten losing one of the arguments as we are + // now done and will not need it any more. + __ mov(eax, ebx); + __ bind(&skip_allocation); + break; + default: UNREACHABLE(); + } +} + + +void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { + // If arguments are not passed in registers read them from the stack. + ASSERT(!HasArgsInRegisters()); + __ mov(eax, Operand(esp, 1 * kPointerSize)); + __ mov(edx, Operand(esp, 2 * kPointerSize)); +} + + +void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { + // If arguments are not passed in registers remove them from the stack before + // returning. + if (!HasArgsInRegisters()) { + __ ret(2 * kPointerSize); // Remove both operands + } else { + __ ret(0); + } +} + + +void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { + ASSERT(HasArgsInRegisters()); + __ pop(ecx); + if (HasArgsReversed()) { + __ push(eax); + __ push(edx); + } else { + __ push(edx); + __ push(eax); + } + __ push(ecx); +} + + +void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { + // Ensure the operands are on the stack. + if (HasArgsInRegisters()) { + GenerateRegisterArgsPush(masm); + } + + __ pop(ecx); // Save return address. 
+
+ // Left and right arguments are now on top.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ push(Immediate(Smi::FromInt(MinorKey())));
+ __ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
+
+ __ push(ecx); // Push return address.
+
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ GenericBinaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Input on stack:
+ // esp[4]: argument (should be number).
+ // esp[0]: return address.
+ // Test that eax is a number.
+ Label runtime_call;
+ Label runtime_call_clear_stack;
+ Label input_not_smi;
+ Label loaded;
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the low and high words of the double into ebx, edx.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ sar(eax, 1);
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ mov(Operand(esp, 0), eax);
+ __ fild_s(Operand(esp, 0));
+ __ fst_d(Operand(esp, 0));
+ __ pop(edx);
+ __ pop(ebx);
+ __ jmp(&loaded);
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // low and high words into ebx, edx.
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+ __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
+
+ __ bind(&loaded);
+ // ST[0] == double value
+ // ebx = low 32 bits of double value
+ // edx = high 32 bits of double value
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ mov(ecx, ebx);
+ __ xor_(ecx, Operand(edx));
+ __ mov(eax, ecx);
+ __ sar(eax, 16);
+ __ xor_(ecx, Operand(eax));
+ __ mov(eax, ecx);
+ __ sar(eax, 8);
+ __ xor_(ecx, Operand(eax));
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
+
+ // ST[0] == double value.
+ // ebx = low 32 bits of double value.
+ // edx = high 32 bits of double value.
+ // ecx = TranscendentalCache::hash(double value).
+ __ mov(eax,
+ Immediate(ExternalReference::transcendental_cache_array_address()));
+ // Eax points to cache array.
+ __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // Eax points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ test(eax, Operand(eax));
+ __ j(zero, &runtime_call_clear_stack);
+#ifdef DEBUG
+ // Check that the layout of cache elements match expectations.
+ { TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+ // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
+ __ lea(ecx, Operand(ecx, ecx, times_2, 0));
+ __ lea(ecx, Operand(eax, ecx, times_4, 0));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ Label cache_miss;
+ __ cmp(ebx, Operand(ecx, 0));
+ __ j(not_equal, &cache_miss);
+ __ cmp(edx, Operand(ecx, kIntSize));
+ __ j(not_equal, &cache_miss);
+ // Cache hit!
+ __ mov(eax, Operand(ecx, 2 * kIntSize));
+ __ fstp(0);
+ __ ret(kPointerSize);
+
+ __ bind(&cache_miss);
+ // Update cache with new value.
+ // We are short on registers, so use no_reg as scratch.
+ // This gives slightly larger code.
+ __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
+ GenerateOperation(masm);
+ __ mov(Operand(ecx, 0), ebx);
+ __ mov(Operand(ecx, kIntSize), edx);
+ __ mov(Operand(ecx, 2 * kIntSize), eax);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(kPointerSize);
+
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
+ // Only free register is edi.
+ Label done;
+ ASSERT(type_ == TranscendentalCache::SIN ||
+ type_ == TranscendentalCache::COS);
+ // More transcendental types can be added later.
+
+ // Both fsin and fcos require arguments in the range +/-2^63 and
+ // return NaN for infinities and NaN. They can share all code except
+ // the actual fsin/fcos operation.
+ Label in_range;
+ // If argument is outside the range -2^63..2^63, fsin/cos doesn't
+ // work. We must reduce it to the appropriate range.
+ __ mov(edi, edx);
+ __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
+ int supported_exponent_limit =
+ (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
+ __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+ __ j(below, &in_range, taken);
+ // Check for infinity and NaN. Both return NaN for sin.
+ __ cmp(Operand(edi), Immediate(0x7ff00000));
+ Label non_nan_result;
+ __ j(not_equal, &non_nan_result, taken);
+ // Input is +/-Infinity or NaN. Result is NaN.
+ __ fstp(0);
+ // NaN is represented by 0x7ff8000000000000.
+ __ push(Immediate(0x7ff80000));
+ __ push(Immediate(0));
+ __ fld_d(Operand(esp, 0));
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ jmp(&done);
+
+ __ bind(&non_nan_result);
+
+ // Use fpmod to restrict argument to the range +/-2*PI.
+ __ mov(edi, eax); // Save eax before using fnstsw_ax.
+ __ fldpi();
+ __ fadd(0);
+ __ fld(1);
+ // FPU Stack: input, 2*pi, input.
+ { + Label no_exceptions; + __ fwait(); + __ fnstsw_ax(); + // Clear if Illegal Operand or Zero Division exceptions are set. + __ test(Operand(eax), Immediate(5)); + __ j(zero, &no_exceptions); + __ fnclex(); + __ bind(&no_exceptions); + } + + // Compute st(0) % st(1) + { + Label partial_remainder_loop; + __ bind(&partial_remainder_loop); + __ fprem1(); + __ fwait(); + __ fnstsw_ax(); + __ test(Operand(eax), Immediate(0x400 /* C2 */)); + // If C2 is set, computation only has partial result. Loop to + // continue computation. + __ j(not_zero, &partial_remainder_loop); + } + // FPU Stack: input, 2*pi, input % 2*pi + __ fstp(2); + __ fstp(0); + __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer). + + // FPU Stack: input % 2*pi + __ bind(&in_range); + switch (type_) { + case TranscendentalCache::SIN: + __ fsin(); + break; + case TranscendentalCache::COS: + __ fcos(); + break; + default: + UNREACHABLE(); + } + __ bind(&done); +} + + +// Get the integer part of a heap number. Surprisingly, all this bit twiddling +// is faster than using the built-in instructions on floating point registers. +// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the +// trashed registers. +void IntegerConvert(MacroAssembler* masm, + Register source, + TypeInfo type_info, + bool use_sse3, + Label* conversion_failure) { + ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx)); + Label done, right_exponent, normal_exponent; + Register scratch = ebx; + Register scratch2 = edi; + if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) { + CpuFeatures::Scope scope(SSE2); + __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset)); + return; + } + if (!type_info.IsInteger32() || !use_sse3) { + // Get exponent word. + __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); + // Get exponent alone in scratch2. + __ mov(scratch2, scratch); + __ and_(scratch2, HeapNumber::kExponentMask); + } + if (use_sse3) { + CpuFeatures::Scope scope(SSE3); + if (!type_info.IsInteger32()) { + // Check whether the exponent is too big for a 64 bit signed integer. + static const uint32_t kTooBigExponent = + (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; + __ cmp(Operand(scratch2), Immediate(kTooBigExponent)); + __ j(greater_equal, conversion_failure); + } + // Load x87 register with heap number. + __ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); + // Reserve space for 64 bit answer. + __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. + // Do conversion, which cannot fail because we checked the exponent. + __ fisttp_d(Operand(esp, 0)); + __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. + __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. + } else { + // Load ecx with zero. We use this either for the final shift or + // for the answer. + __ xor_(ecx, Operand(ecx)); + // Check whether the exponent matches a 32 bit signed int that cannot be + // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the + // exponent is 30 (biased). This is the exponent that we are fastest at and + // also the highest exponent we can handle here. + const uint32_t non_smi_exponent = + (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; + __ cmp(Operand(scratch2), Immediate(non_smi_exponent)); + // If we have a match of the int32-but-not-Smi exponent then skip some + // logic. + __ j(equal, &right_exponent); + // If the exponent is higher than that then go to slow case. 
This catches + // numbers that don't fit in a signed int32, infinities and NaNs. + __ j(less, &normal_exponent); + + { + // Handle a big exponent. The only reason we have this code is that the + // >>> operator has a tendency to generate numbers with an exponent of 31. + const uint32_t big_non_smi_exponent = + (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; + __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent)); + __ j(not_equal, conversion_failure); + // We have the big exponent, typically from >>>. This means the number is + // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. + __ mov(scratch2, scratch); + __ and_(scratch2, HeapNumber::kMantissaMask); + // Put back the implicit 1. + __ or_(scratch2, 1 << HeapNumber::kExponentShift); + // Shift up the mantissa bits to take up the space the exponent used to + // take. We just orred in the implicit bit so that took care of one and + // we want to use the full unsigned range so we subtract 1 bit from the + // shift distance. + const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1; + __ shl(scratch2, big_shift_distance); + // Get the second half of the double. + __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset)); + // Shift down 21 bits to get the most significant 11 bits or the low + // mantissa word. + __ shr(ecx, 32 - big_shift_distance); + __ or_(ecx, Operand(scratch2)); + // We have the answer in ecx, but we may need to negate it. + __ test(scratch, Operand(scratch)); + __ j(positive, &done); + __ neg(ecx); + __ jmp(&done); + } + + __ bind(&normal_exponent); + // Exponent word in scratch, exponent part of exponent word in scratch2. + // Zero in ecx. + // We know the exponent is smaller than 30 (biased). If it is less than + // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie + // it rounds to zero. + const uint32_t zero_exponent = + (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; + __ sub(Operand(scratch2), Immediate(zero_exponent)); + // ecx already has a Smi zero. + __ j(less, &done); + + // We have a shifted exponent between 0 and 30 in scratch2. + __ shr(scratch2, HeapNumber::kExponentShift); + __ mov(ecx, Immediate(30)); + __ sub(ecx, Operand(scratch2)); + + __ bind(&right_exponent); + // Here ecx is the shift, scratch is the exponent word. + // Get the top bits of the mantissa. + __ and_(scratch, HeapNumber::kMantissaMask); + // Put back the implicit 1. + __ or_(scratch, 1 << HeapNumber::kExponentShift); + // Shift up the mantissa bits to take up the space the exponent used to + // take. We have kExponentShift + 1 significant bits int he low end of the + // word. Shift them to the top bits. + const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; + __ shl(scratch, shift_distance); + // Get the second half of the double. For some exponents we don't + // actually need this because the bits get shifted out again, but + // it's probably slower to test than just to do it. + __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset)); + // Shift down 22 bits to get the most significant 10 bits or the low + // mantissa word. + __ shr(scratch2, 32 - shift_distance); + __ or_(scratch2, Operand(scratch)); + // Move down according to the exponent. + __ shr_cl(scratch2); + // Now the unsigned answer is in scratch2. We need to move it to ecx and + // we may need to fix the sign. 
+ Label negative; + __ xor_(ecx, Operand(ecx)); + __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset)); + __ j(greater, &negative); + __ mov(ecx, scratch2); + __ jmp(&done); + __ bind(&negative); + __ sub(ecx, Operand(scratch2)); + __ bind(&done); + } +} + + +// Input: edx, eax are the left and right objects of a bit op. +// Output: eax, ecx are left and right integers for a bit op. +void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm, + TypeInfo type_info, + bool use_sse3, + Label* conversion_failure) { + // Check float operands. + Label arg1_is_object, check_undefined_arg1; + Label arg2_is_object, check_undefined_arg2; + Label load_arg2, done; + + if (!type_info.IsDouble()) { + if (!type_info.IsSmi()) { + __ test(edx, Immediate(kSmiTagMask)); + __ j(not_zero, &arg1_is_object); + } else { + if (FLAG_debug_code) __ AbortIfNotSmi(edx); + } + __ SmiUntag(edx); + __ jmp(&load_arg2); + } + + __ bind(&arg1_is_object); + + // Get the untagged integer version of the edx heap number in ecx. + IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure); + __ mov(edx, ecx); + + // Here edx has the untagged integer, eax has a Smi or a heap number. + __ bind(&load_arg2); + if (!type_info.IsDouble()) { + // Test if arg2 is a Smi. + if (!type_info.IsSmi()) { + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &arg2_is_object); + } else { + if (FLAG_debug_code) __ AbortIfNotSmi(eax); + } + __ SmiUntag(eax); + __ mov(ecx, eax); + __ jmp(&done); + } + + __ bind(&arg2_is_object); + + // Get the untagged integer version of the eax heap number in ecx. + IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure); + __ bind(&done); + __ mov(eax, edx); +} + + +// Input: edx, eax are the left and right objects of a bit op. +// Output: eax, ecx are left and right integers for a bit op. +void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm, + bool use_sse3, + Label* conversion_failure) { + // Check float operands. + Label arg1_is_object, check_undefined_arg1; + Label arg2_is_object, check_undefined_arg2; + Label load_arg2, done; + + // Test if arg1 is a Smi. + __ test(edx, Immediate(kSmiTagMask)); + __ j(not_zero, &arg1_is_object); + + __ SmiUntag(edx); + __ jmp(&load_arg2); + + // If the argument is undefined it converts to zero (ECMA-262, section 9.5). + __ bind(&check_undefined_arg1); + __ cmp(edx, Factory::undefined_value()); + __ j(not_equal, conversion_failure); + __ mov(edx, Immediate(0)); + __ jmp(&load_arg2); + + __ bind(&arg1_is_object); + __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); + __ cmp(ebx, Factory::heap_number_map()); + __ j(not_equal, &check_undefined_arg1); + + // Get the untagged integer version of the edx heap number in ecx. + IntegerConvert(masm, + edx, + TypeInfo::Unknown(), + use_sse3, + conversion_failure); + __ mov(edx, ecx); + + // Here edx has the untagged integer, eax has a Smi or a heap number. + __ bind(&load_arg2); + + // Test if arg2 is a Smi. + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &arg2_is_object); + + __ SmiUntag(eax); + __ mov(ecx, eax); + __ jmp(&done); + + // If the argument is undefined it converts to zero (ECMA-262, section 9.5). 
+ __ bind(&check_undefined_arg2); + __ cmp(eax, Factory::undefined_value()); + __ j(not_equal, conversion_failure); + __ mov(ecx, Immediate(0)); + __ jmp(&done); + + __ bind(&arg2_is_object); + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + __ cmp(ebx, Factory::heap_number_map()); + __ j(not_equal, &check_undefined_arg2); + + // Get the untagged integer version of the eax heap number in ecx. + IntegerConvert(masm, + eax, + TypeInfo::Unknown(), + use_sse3, + conversion_failure); + __ bind(&done); + __ mov(eax, edx); +} + + +void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, + TypeInfo type_info, + bool use_sse3, + Label* conversion_failure) { + if (type_info.IsNumber()) { + LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure); + } else { + LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure); + } +} + + +void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, + Register number) { + Label load_smi, done; + + __ test(number, Immediate(kSmiTagMask)); + __ j(zero, &load_smi, not_taken); + __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); + __ jmp(&done); + + __ bind(&load_smi); + __ SmiUntag(number); + __ push(number); + __ fild_s(Operand(esp, 0)); + __ pop(number); + + __ bind(&done); +} + + +void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) { + Label load_smi_edx, load_eax, load_smi_eax, done; + // Load operand in edx into xmm0. + __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi. + __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); + + __ bind(&load_eax); + // Load operand in eax into xmm1. + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi. + __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); + __ jmp(&done); + + __ bind(&load_smi_edx); + __ SmiUntag(edx); // Untag smi before converting to float. + __ cvtsi2sd(xmm0, Operand(edx)); + __ SmiTag(edx); // Retag smi for heap number overwriting test. + __ jmp(&load_eax); + + __ bind(&load_smi_eax); + __ SmiUntag(eax); // Untag smi before converting to float. + __ cvtsi2sd(xmm1, Operand(eax)); + __ SmiTag(eax); // Retag smi for heap number overwriting test. + + __ bind(&done); +} + + +void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm, + Label* not_numbers) { + Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done; + // Load operand in edx into xmm0, or branch to not_numbers. + __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi. + __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map()); + __ j(not_equal, not_numbers); // Argument in edx is not a number. + __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); + __ bind(&load_eax); + // Load operand in eax into xmm1, or branch to not_numbers. + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi. + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map()); + __ j(equal, &load_float_eax); + __ jmp(not_numbers); // Argument in eax is not a number. + __ bind(&load_smi_edx); + __ SmiUntag(edx); // Untag smi before converting to float. + __ cvtsi2sd(xmm0, Operand(edx)); + __ SmiTag(edx); // Retag smi for heap number overwriting test. + __ jmp(&load_eax); + __ bind(&load_smi_eax); + __ SmiUntag(eax); // Untag smi before converting to float. 
+ __ cvtsi2sd(xmm1, Operand(eax)); + __ SmiTag(eax); // Retag smi for heap number overwriting test. + __ jmp(&done); + __ bind(&load_float_eax); + __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); + __ bind(&done); +} + + +void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm, + Register scratch) { + const Register left = edx; + const Register right = eax; + __ mov(scratch, left); + ASSERT(!scratch.is(right)); // We're about to clobber scratch. + __ SmiUntag(scratch); + __ cvtsi2sd(xmm0, Operand(scratch)); + + __ mov(scratch, right); + __ SmiUntag(scratch); + __ cvtsi2sd(xmm1, Operand(scratch)); +} + + +void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, + Register scratch, + ArgLocation arg_location) { + Label load_smi_1, load_smi_2, done_load_1, done; + if (arg_location == ARGS_IN_REGISTERS) { + __ mov(scratch, edx); + } else { + __ mov(scratch, Operand(esp, 2 * kPointerSize)); + } + __ test(scratch, Immediate(kSmiTagMask)); + __ j(zero, &load_smi_1, not_taken); + __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); + __ bind(&done_load_1); + + if (arg_location == ARGS_IN_REGISTERS) { + __ mov(scratch, eax); + } else { + __ mov(scratch, Operand(esp, 1 * kPointerSize)); + } + __ test(scratch, Immediate(kSmiTagMask)); + __ j(zero, &load_smi_2, not_taken); + __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); + __ jmp(&done); + + __ bind(&load_smi_1); + __ SmiUntag(scratch); + __ push(scratch); + __ fild_s(Operand(esp, 0)); + __ pop(scratch); + __ jmp(&done_load_1); + + __ bind(&load_smi_2); + __ SmiUntag(scratch); + __ push(scratch); + __ fild_s(Operand(esp, 0)); + __ pop(scratch); + + __ bind(&done); +} + + +void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm, + Register scratch) { + const Register left = edx; + const Register right = eax; + __ mov(scratch, left); + ASSERT(!scratch.is(right)); // We're about to clobber scratch. + __ SmiUntag(scratch); + __ push(scratch); + __ fild_s(Operand(esp, 0)); + + __ mov(scratch, right); + __ SmiUntag(scratch); + __ mov(Operand(esp, 0), scratch); + __ fild_s(Operand(esp, 0)); + __ pop(scratch); +} + + +void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, + Label* non_float, + Register scratch) { + Label test_other, done; + // Test if both operands are floats or smi -> scratch=k_is_float; + // Otherwise scratch = k_not_float. + __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, &test_other, not_taken); // argument in edx is OK + __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); + __ cmp(scratch, Factory::heap_number_map()); + __ j(not_equal, non_float); // argument in edx is not a number -> NaN + + __ bind(&test_other); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &done); // argument in eax is OK + __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset)); + __ cmp(scratch, Factory::heap_number_map()); + __ j(not_equal, non_float); // argument in eax is not a number -> NaN + + // Fall-through: Both operands are numbers. + __ bind(&done); +} + + +void GenericUnaryOpStub::Generate(MacroAssembler* masm) { + Label slow, done; + + if (op_ == Token::SUB) { + // Check whether the value is a smi. + Label try_float; + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &try_float, not_taken); + + if (negative_zero_ == kStrictNegativeZero) { + // Go slow case if the value of the expression is zero + // to make sure that we switch between 0 and -0. 
+ __ test(eax, Operand(eax)); + __ j(zero, &slow, not_taken); + } + + // The value of the expression is a smi that is not zero. Try + // optimistic subtraction '0 - value'. + Label undo; + __ mov(edx, Operand(eax)); + __ Set(eax, Immediate(0)); + __ sub(eax, Operand(edx)); + __ j(no_overflow, &done, taken); + + // Restore eax and go slow case. + __ bind(&undo); + __ mov(eax, Operand(edx)); + __ jmp(&slow); + + // Try floating point case. + __ bind(&try_float); + __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); + __ cmp(edx, Factory::heap_number_map()); + __ j(not_equal, &slow); + if (overwrite_ == UNARY_OVERWRITE) { + __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); + __ xor_(edx, HeapNumber::kSignMask); // Flip sign. + __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx); + } else { + __ mov(edx, Operand(eax)); + // edx: operand + __ AllocateHeapNumber(eax, ebx, ecx, &undo); + // eax: allocated 'empty' number + __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset)); + __ xor_(ecx, HeapNumber::kSignMask); // Flip sign. + __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx); + __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset)); + __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx); + } + } else if (op_ == Token::BIT_NOT) { + // Check if the operand is a heap number. + __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); + __ cmp(edx, Factory::heap_number_map()); + __ j(not_equal, &slow, not_taken); + + // Convert the heap number in eax to an untagged integer in ecx. + IntegerConvert(masm, + eax, + TypeInfo::Unknown(), + CpuFeatures::IsSupported(SSE3), + &slow); + + // Do the bitwise operation and check if the result fits in a smi. + Label try_float; + __ not_(ecx); + __ cmp(ecx, 0xc0000000); + __ j(sign, &try_float, not_taken); + + // Tag the result as a smi and we're done. + STATIC_ASSERT(kSmiTagSize == 1); + __ lea(eax, Operand(ecx, times_2, kSmiTag)); + __ jmp(&done); + + // Try to store the result in a heap number. + __ bind(&try_float); + if (overwrite_ == UNARY_NO_OVERWRITE) { + // Allocate a fresh heap number, but don't overwrite eax until + // we're sure we can do it without going through the slow case + // that needs the value in eax. + __ AllocateHeapNumber(ebx, edx, edi, &slow); + __ mov(eax, Operand(ebx)); + } + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + __ cvtsi2sd(xmm0, Operand(ecx)); + __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); + } else { + __ push(ecx); + __ fild_s(Operand(esp, 0)); + __ pop(ecx); + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); + } + } else { + UNIMPLEMENTED(); + } + + // Return from the stub. + __ bind(&done); + __ StubReturn(1); + + // Handle the slow case by jumping to the JavaScript builtin. + __ bind(&slow); + __ pop(ecx); // pop return address. + __ push(eax); + __ push(ecx); // push return address + switch (op_) { + case Token::SUB: + __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); + break; + case Token::BIT_NOT: + __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); + break; + default: + UNREACHABLE(); + } +} + + +void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { + // The key is in edx and the parameter count is in eax. + + // The displacement is used for skipping the frame pointer on the + // stack. It is the offset of the last parameter (if any) relative + // to the frame pointer. + static const int kDisplacement = 1 * kPointerSize; + + // Check that the key is a smi. 
+ Label slow; + __ test(edx, Immediate(kSmiTagMask)); + __ j(not_zero, &slow, not_taken); + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor; + __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset)); + __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(equal, &adaptor); + + // Check index against formal parameters count limit passed in + // through register eax. Use unsigned comparison to get negative + // check for free. + __ cmp(edx, Operand(eax)); + __ j(above_equal, &slow, not_taken); + + // Read the argument from the stack and return it. + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. + __ lea(ebx, Operand(ebp, eax, times_2, 0)); + __ neg(edx); + __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); + __ ret(0); + + // Arguments adaptor case: Check index against actual arguments + // limit found in the arguments adaptor frame. Use unsigned + // comparison to get negative check for free. + __ bind(&adaptor); + __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ cmp(edx, Operand(ecx)); + __ j(above_equal, &slow, not_taken); + + // Read the argument from the stack and return it. + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. + __ lea(ebx, Operand(ebx, ecx, times_2, 0)); + __ neg(edx); + __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); + __ ret(0); + + // Slow-case: Handle non-smi or out-of-bounds access to arguments + // by calling the runtime system. + __ bind(&slow); + __ pop(ebx); // Return address. + __ push(edx); + __ push(ebx); + __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); +} + + +void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { + // esp[0] : return address + // esp[4] : number of parameters + // esp[8] : receiver displacement + // esp[16] : function + + // The displacement is used for skipping the return address and the + // frame pointer on the stack. It is the offset of the last + // parameter (if any) relative to the frame pointer. + static const int kDisplacement = 2 * kPointerSize; + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor_frame, try_allocate, runtime; + __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); + __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(equal, &adaptor_frame); + + // Get the length from the frame. + __ mov(ecx, Operand(esp, 1 * kPointerSize)); + __ jmp(&try_allocate); + + // Patch the arguments.length and the parameters pointer. + __ bind(&adaptor_frame); + __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ mov(Operand(esp, 1 * kPointerSize), ecx); + __ lea(edx, Operand(edx, ecx, times_2, kDisplacement)); + __ mov(Operand(esp, 2 * kPointerSize), edx); + + // Try the new space allocation. Start out with computing the size of + // the arguments object and the elements array. + Label add_arguments_object; + __ bind(&try_allocate); + __ test(ecx, Operand(ecx)); + __ j(zero, &add_arguments_object); + __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize)); + __ bind(&add_arguments_object); + __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize)); + + // Do the allocation of both objects in one go. 
+ __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT); + + // Get the arguments boilerplate from the current (global) context. + int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); + __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset)); + __ mov(edi, Operand(edi, offset)); + + // Copy the JS object part. + for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { + __ mov(ebx, FieldOperand(edi, i)); + __ mov(FieldOperand(eax, i), ebx); + } + + // Setup the callee in-object property. + STATIC_ASSERT(Heap::arguments_callee_index == 0); + __ mov(ebx, Operand(esp, 3 * kPointerSize)); + __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx); + + // Get the length (smi tagged) and set that as an in-object property too. + STATIC_ASSERT(Heap::arguments_length_index == 1); + __ mov(ecx, Operand(esp, 1 * kPointerSize)); + __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx); + + // If there are no actual arguments, we're done. + Label done; + __ test(ecx, Operand(ecx)); + __ j(zero, &done); + + // Get the parameters pointer from the stack. + __ mov(edx, Operand(esp, 2 * kPointerSize)); + + // Setup the elements pointer in the allocated arguments object and + // initialize the header in the elements fixed array. + __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize)); + __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi); + __ mov(FieldOperand(edi, FixedArray::kMapOffset), + Immediate(Factory::fixed_array_map())); + __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx); + // Untag the length for the loop below. + __ SmiUntag(ecx); + + // Copy the fixed array slots. + Label loop; + __ bind(&loop); + __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver. + __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx); + __ add(Operand(edi), Immediate(kPointerSize)); + __ sub(Operand(edx), Immediate(kPointerSize)); + __ dec(ecx); + __ j(not_zero, &loop); + + // Return and remove the on-stack parameters. + __ bind(&done); + __ ret(3 * kPointerSize); + + // Do the runtime call to allocate the arguments object. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); +} + + +void RegExpExecStub::Generate(MacroAssembler* masm) { + // Just jump directly to runtime if native RegExp is not selected at compile + // time or if regexp entry in generated code is turned off runtime switch or + // at compilation. +#ifdef V8_INTERPRETED_REGEXP + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); +#else // V8_INTERPRETED_REGEXP + if (!FLAG_regexp_entry_native) { + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + return; + } + + // Stack frame on entry. + // esp[0]: return address + // esp[4]: last_match_info (expected JSArray) + // esp[8]: previous index + // esp[12]: subject string + // esp[16]: JSRegExp object + + static const int kLastMatchInfoOffset = 1 * kPointerSize; + static const int kPreviousIndexOffset = 2 * kPointerSize; + static const int kSubjectOffset = 3 * kPointerSize; + static const int kJSRegExpOffset = 4 * kPointerSize; + + Label runtime, invoke_regexp; + + // Ensure that a RegExp stack is allocated. 
+ ExternalReference address_of_regexp_stack_memory_address = + ExternalReference::address_of_regexp_stack_memory_address(); + ExternalReference address_of_regexp_stack_memory_size = + ExternalReference::address_of_regexp_stack_memory_size(); + __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size)); + __ test(ebx, Operand(ebx)); + __ j(zero, &runtime, not_taken); + + // Check that the first argument is a JSRegExp object. + __ mov(eax, Operand(esp, kJSRegExpOffset)); + STATIC_ASSERT(kSmiTag == 0); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &runtime); + __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx); + __ j(not_equal, &runtime); + // Check that the RegExp has been compiled (data contains a fixed array). + __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset)); + if (FLAG_debug_code) { + __ test(ecx, Immediate(kSmiTagMask)); + __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected"); + __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx); + __ Check(equal, "Unexpected type for RegExp data, FixedArray expected"); + } + + // ecx: RegExp data (FixedArray) + // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. + __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset)); + __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP))); + __ j(not_equal, &runtime); + + // ecx: RegExp data (FixedArray) + // Check that the number of captures fit in the static offsets vector buffer. + __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset)); + // Calculate number of capture registers (number_of_captures + 1) * 2. This + // uses the asumption that smis are 2 * their untagged value. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + __ add(Operand(edx), Immediate(2)); // edx was a smi. + // Check that the static offsets vector buffer is large enough. + __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize); + __ j(above, &runtime); + + // ecx: RegExp data (FixedArray) + // edx: Number of capture registers + // Check that the second argument is a string. + __ mov(eax, Operand(esp, kSubjectOffset)); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &runtime); + Condition is_string = masm->IsObjectStringType(eax, ebx, ebx); + __ j(NegateCondition(is_string), &runtime); + // Get the length of the string to ebx. + __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); + + // ebx: Length of subject string as a smi + // ecx: RegExp data (FixedArray) + // edx: Number of capture registers + // Check that the third argument is a positive smi less than the subject + // string length. A negative value will be greater (unsigned comparison). + __ mov(eax, Operand(esp, kPreviousIndexOffset)); + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &runtime); + __ cmp(eax, Operand(ebx)); + __ j(above_equal, &runtime); + + // ecx: RegExp data (FixedArray) + // edx: Number of capture registers + // Check that the fourth object is a JSArray object. + __ mov(eax, Operand(esp, kLastMatchInfoOffset)); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &runtime); + __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx); + __ j(not_equal, &runtime); + // Check that the JSArray is in fast case. + __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset)); + __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset)); + __ cmp(eax, Factory::fixed_array_map()); + __ j(not_equal, &runtime); + // Check that the last match info has space for the capture registers and the + // additional information. 
+ __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset)); + __ SmiUntag(eax); + __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead)); + __ cmp(edx, Operand(eax)); + __ j(greater, &runtime); + + // ecx: RegExp data (FixedArray) + // Check the representation and encoding of the subject string. + Label seq_ascii_string, seq_two_byte_string, check_code; + __ mov(eax, Operand(esp, kSubjectOffset)); + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); + // First check for flat two byte string. + __ and_(ebx, + kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask); + STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0); + __ j(zero, &seq_two_byte_string); + // Any other flat string must be a flat ascii string. + __ test(Operand(ebx), + Immediate(kIsNotStringMask | kStringRepresentationMask)); + __ j(zero, &seq_ascii_string); + + // Check for flat cons string. + // A flat cons string is a cons string where the second part is the empty + // string. In that case the subject string is just the first part of the cons + // string. Also in this case the first part of the cons string is known to be + // a sequential string or an external string. + STATIC_ASSERT(kExternalStringTag != 0); + STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); + __ test(Operand(ebx), + Immediate(kIsNotStringMask | kExternalStringTag)); + __ j(not_zero, &runtime); + // String is a cons string. + __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset)); + __ cmp(Operand(edx), Factory::empty_string()); + __ j(not_equal, &runtime); + __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset)); + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + // String is a cons string with empty second part. + // eax: first part of cons string. + // ebx: map of first part of cons string. + // Is first part a flat two byte string? + __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), + kStringRepresentationMask | kStringEncodingMask); + STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0); + __ j(zero, &seq_two_byte_string); + // Any other flat string must be ascii. + __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), + kStringRepresentationMask); + __ j(not_zero, &runtime); + + __ bind(&seq_ascii_string); + // eax: subject string (flat ascii) + // ecx: RegExp data (FixedArray) + __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset)); + __ Set(edi, Immediate(1)); // Type is ascii. + __ jmp(&check_code); + + __ bind(&seq_two_byte_string); + // eax: subject string (flat two byte) + // ecx: RegExp data (FixedArray) + __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset)); + __ Set(edi, Immediate(0)); // Type is two byte. + + __ bind(&check_code); + // Check that the irregexp code has been generated for the actual string + // encoding. If it has, the field contains a code object otherwise it contains + // the hole. + __ CmpObjectType(edx, CODE_TYPE, ebx); + __ j(not_equal, &runtime); + + // eax: subject string + // edx: code + // edi: encoding of subject string (1 if ascii, 0 if two_byte); + // Load used arguments before starting to push arguments for call to native + // RegExp code to avoid handling changing stack height. + __ mov(ebx, Operand(esp, kPreviousIndexOffset)); + __ SmiUntag(ebx); // Previous index from smi. + + // eax: subject string + // ebx: previous index + // edx: code + // edi: encoding of subject string (1 if ascii 0 if two_byte); + // All checks done. 
Now push arguments for native regexp code. + __ IncrementCounter(&Counters::regexp_entry_native, 1); + + static const int kRegExpExecuteArguments = 7; + __ PrepareCallCFunction(kRegExpExecuteArguments, ecx); + + // Argument 7: Indicate that this is a direct call from JavaScript. + __ mov(Operand(esp, 6 * kPointerSize), Immediate(1)); + + // Argument 6: Start (high end) of backtracking stack memory area. + __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address)); + __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size)); + __ mov(Operand(esp, 5 * kPointerSize), ecx); + + // Argument 5: static offsets vector buffer. + __ mov(Operand(esp, 4 * kPointerSize), + Immediate(ExternalReference::address_of_static_offsets_vector())); + + // Argument 4: End of string data + // Argument 3: Start of string data + Label setup_two_byte, setup_rest; + __ test(edi, Operand(edi)); + __ mov(edi, FieldOperand(eax, String::kLengthOffset)); + __ j(zero, &setup_two_byte); + __ SmiUntag(edi); + __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize)); + __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4. + __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize)); + __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3. + __ jmp(&setup_rest); + + __ bind(&setup_two_byte); + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); // edi is smi (powered by 2). + __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize)); + __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4. + __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize)); + __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3. + + __ bind(&setup_rest); + + // Argument 2: Previous index. + __ mov(Operand(esp, 1 * kPointerSize), ebx); + + // Argument 1: Subject string. + __ mov(Operand(esp, 0 * kPointerSize), eax); + + // Locate the code entry and call it. + __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ CallCFunction(edx, kRegExpExecuteArguments); + + // Check the result. + Label success; + __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS); + __ j(equal, &success, taken); + Label failure; + __ cmp(eax, NativeRegExpMacroAssembler::FAILURE); + __ j(equal, &failure, taken); + __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION); + // If not exception it can only be retry. Handle that in the runtime system. + __ j(not_equal, &runtime); + // Result must now be exception. If there is no pending exception already a + // stack overflow (on the backtrack stack) was detected in RegExp code but + // haven't created the exception yet. Handle that in the runtime system. + // TODO(592): Rerunning the RegExp to get the stack overflow exception. + ExternalReference pending_exception(Top::k_pending_exception_address); + __ mov(eax, + Operand::StaticVariable(ExternalReference::the_hole_value_location())); + __ cmp(eax, Operand::StaticVariable(pending_exception)); + __ j(equal, &runtime); + __ bind(&failure); + // For failure and exception return null. + __ mov(Operand(eax), Factory::null_value()); + __ ret(4 * kPointerSize); + + // Load RegExp data. + __ bind(&success); + __ mov(eax, Operand(esp, kJSRegExpOffset)); + __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset)); + __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset)); + // Calculate number of capture registers (number_of_captures + 1) * 2. 
+ STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + __ add(Operand(edx), Immediate(2)); // edx was a smi. + + // edx: Number of capture registers + // Load last_match_info which is still known to be a fast case JSArray. + __ mov(eax, Operand(esp, kLastMatchInfoOffset)); + __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset)); + + // ebx: last_match_info backing store (FixedArray) + // edx: number of capture registers + // Store the capture count. + __ SmiTag(edx); // Number of capture registers to smi. + __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx); + __ SmiUntag(edx); // Number of capture registers back from smi. + // Store last subject and last input. + __ mov(eax, Operand(esp, kSubjectOffset)); + __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax); + __ mov(ecx, ebx); + __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi); + __ mov(eax, Operand(esp, kSubjectOffset)); + __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax); + __ mov(ecx, ebx); + __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi); + + // Get the static offsets vector filled by the native regexp code. + ExternalReference address_of_static_offsets_vector = + ExternalReference::address_of_static_offsets_vector(); + __ mov(ecx, Immediate(address_of_static_offsets_vector)); + + // ebx: last_match_info backing store (FixedArray) + // ecx: offsets vector + // edx: number of capture registers + Label next_capture, done; + // Capture register counter starts from number of capture registers and + // counts down until wraping after zero. + __ bind(&next_capture); + __ sub(Operand(edx), Immediate(1)); + __ j(negative, &done); + // Read the value from the static offsets vector buffer. + __ mov(edi, Operand(ecx, edx, times_int_size, 0)); + __ SmiTag(edi); + // Store the smi value in the last match info. + __ mov(FieldOperand(ebx, + edx, + times_pointer_size, + RegExpImpl::kFirstCaptureOffset), + edi); + __ jmp(&next_capture); + __ bind(&done); + + // Return last match info. + __ mov(eax, Operand(esp, kLastMatchInfoOffset)); + __ ret(4 * kPointerSize); + + // Do the runtime call to execute the regexp. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); +#endif // V8_INTERPRETED_REGEXP +} + + +void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, + Register object, + Register result, + Register scratch1, + Register scratch2, + bool object_is_smi, + Label* not_found) { + // Use of registers. Register result is used as a temporary. + Register number_string_cache = result; + Register mask = scratch1; + Register scratch = scratch2; + + // Load the number string cache. + ExternalReference roots_address = ExternalReference::roots_address(); + __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex)); + __ mov(number_string_cache, + Operand::StaticArray(scratch, times_pointer_size, roots_address)); + // Make the hash mask from the length of the number string cache. It + // contains two elements (number and string) for each cache entry. + __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); + __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two. + __ sub(Operand(mask), Immediate(1)); // Make mask. + + // Calculate the entry in the number string cache. The hash value in the + // number string cache for smis is just the smi value, and the hash for + // doubles is the xor of the upper and lower words. See + // Heap::GetNumberStringCache. 
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
+ if (object_is_smi) {
+ __ mov(scratch, object);
+ __ SmiUntag(scratch);
+ } else {
+ Label not_smi, hash_calculated;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(object, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smi);
+ __ mov(scratch, object);
+ __ SmiUntag(scratch);
+ __ jmp(&smi_hash_calculated);
+ __ bind(&not_smi);
+ __ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ __ and_(scratch, Operand(mask));
+ Register index = scratch;
+ Register probe = mask;
+ __ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ __ test(probe, Immediate(kSmiTagMask));
+ __ j(zero, not_found);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope fscope(SSE2);
+ __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm1);
+ } else {
+ __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ __ FCmp();
+ }
+ __ j(parity_even, not_found); // Bail out if NaN is involved.
+ __ j(not_equal, not_found); // The cache did not contain this value.
+ __ jmp(&load_result_from_cache);
+ }
+
+ __ bind(&smi_hash_calculated);
+ // Object is smi and hash is now in scratch. Calculate cache index.
+ __ and_(scratch, Operand(mask));
+ Register index = scratch;
+ // Check if the entry is the smi we are looking for.
+ __ cmp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ __ bind(&load_result_from_cache);
+ __ mov(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ IncrementCounter(&Counters::number_to_string_native, 1);
+}
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ __ mov(ebx, Operand(esp, kPointerSize));
+
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
+ __ ret(1 * kPointerSize);
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
+
+
+static int NegativeComparisonResult(Condition cc) {
+ ASSERT(cc != equal);
+ ASSERT((cc == less) || (cc == less_equal)
+ || (cc == greater) || (cc == greater_equal));
+ return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+}
+
+void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ Label check_unequal_objects, done;
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Identical objects can be compared fast, but there are some tricky cases
+ // for NaN and undefined.
+ {
+ Label not_identical;
+ __ cmp(eax, Operand(edx));
+ __ j(not_equal, &not_identical);
+
+ if (cc_ != equal) {
+ // Check for undefined. undefined OP undefined is false even though
+ // undefined == undefined.
+ Label check_for_nan;
+ __ cmp(edx, Factory::undefined_value());
+ __ j(not_equal, &check_for_nan);
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // Note: if cc_ != equal, never_nan_nan_ is not used.
+ if (never_nan_nan_ && (cc_ == equal)) {
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+ } else {
+ Label heap_number;
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ __ j(equal, &heap_number);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &not_identical);
+ }
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if
+ // it's not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
+ // Read top bits of double representation (second word of value).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(eax, Operand(eax));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+ // bits.
+ __ add(edx, Operand(edx));
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ if (cc_ == equal) {
+ STATIC_ASSERT(EQUAL != 1);
+ __ setcc(above_equal, eax);
+ __ ret(0);
+ } else {
+ Label nan;
+ __ j(above_equal, &nan);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+ __ bind(&nan);
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ ret(0);
+ }
+ }
+
+ __ bind(&not_identical);
+ }
+
+ // Strict equality can quickly decide whether objects are equal.
+ // Non-strict object equality is slower, so it is handled later in the stub.
+ if (cc_ == equal && strict_) {
+ Label slow; // Fallthrough label.
+ Label not_smis;
+ // If we're doing a strict equality comparison, we don't have to do
+ // type conversion, so we generate code to do fast comparison for objects
+ // and oddballs. Non-smi numbers and strings still go through the usual
+ // slow-case code.
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ mov(ecx, Immediate(kSmiTagMask));
+ __ and_(ecx, Operand(eax));
+ __ test(ecx, Operand(edx));
+ __ j(not_zero, &not_smis);
+ // One operand is a smi.
+
+ // Check whether the non-smi is a heap number.
+ STATIC_ASSERT(kSmiTagMask == 1);
+ // ecx still holds eax & kSmiTag, which is either zero or one.
+ __ sub(Operand(ecx), Immediate(0x01));
+ __ mov(ebx, edx);
+ __ xor_(ebx, Operand(eax));
+ __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
+ __ xor_(ebx, Operand(eax));
+ // if eax was smi, ebx is now edx, else eax.
+
+ // Check if the non-smi operand is a heap number.
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+ // Return non-equal (ebx is not zero)
+ __ mov(eax, ebx);
+ __ ret(0);
+
+ __ bind(&not_smis);
+ // If either operand is a JSObject or an oddball value, then they are not
+ // equal since their pointers are different
+ // There is no test for undetectability in strict equality.
+
+ // Get the type of the first operand.
+ // If the first object is a JS object, we have done pointer comparison.
+ Label first_non_object;
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &first_non_object);
+
+ // Return non-zero (eax is not zero)
+ Label return_not_equal;
+ STATIC_ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ // Fall through to the general case.
+ __ bind(&slow);
+ }
+
+ // Generate the number comparison code.
+ if (include_number_compare_) {
+ Label non_number_comparison;
+ Label unordered;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ CpuFeatures::Scope use_cmov(CMOV);
+
+ FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+ __ ucomisd(xmm0, xmm1);
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, not_taken);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, Operand(ecx));
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, Operand(ecx));
+ __ ret(0);
+ } else {
+ FloatingPointHelper::CheckFloatOperands(
+ masm, &non_number_comparison, ebx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
+ __ FCmp();
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, not_taken);
+
+ Label below_label, above_label;
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ j(below, &below_label, not_taken);
+ __ j(above, &above_label, not_taken);
+
+ __ xor_(eax, Operand(eax));
+ __ ret(0);
+
+ __ bind(&below_label);
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ __ ret(0);
+
+ __ bind(&above_label);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ ret(0);
+ }
+
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc_ != not_equal);
+ if (cc_ == less || cc_ == less_equal) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ }
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+ }
+
+ // Fast negative check for symbol-to-symbol equality.
+ Label check_for_strings;
+ if (cc_ == equal) {
+ BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
+ BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
+
+ // We've already checked for object identity, so if both operands
+ // are symbols they aren't equal. Register eax already holds a
+ // non-zero value, which indicates not equal, so just return.
+    __ ret(0);
+  }
+
+  __ bind(&check_for_strings);
+
+  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
+                                         &check_unequal_objects);
+
+  // Inline comparison of ascii strings.
+  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+                                                     edx,
+                                                     eax,
+                                                     ecx,
+                                                     ebx,
+                                                     edi);
+#ifdef DEBUG
+  __ Abort("Unexpected fall-through from string comparison");
+#endif
+
+  __ bind(&check_unequal_objects);
+  if (cc_ == equal && !strict_) {
+    // Non-strict equality. Objects are unequal if
+    // they are both JSObjects and not undetectable,
+    // and their pointers are different.
+    Label not_both_objects;
+    Label return_unequal;
+    // At most one is a smi, so we can test for smi by adding the two.
+    // A smi plus a heap object has the low bit set, a heap object plus
+    // a heap object has the low bit clear.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagMask == 1);
+    __ lea(ecx, Operand(eax, edx, times_1, 0));
+    __ test(ecx, Immediate(kSmiTagMask));
+    __ j(not_zero, &not_both_objects);
+    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+    __ j(below, &not_both_objects);
+    __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
+    __ j(below, &not_both_objects);
+    // We do not bail out after this point. Both are JSObjects, and
+    // they are equal if and only if both are undetectable.
+    // The and of the undetectable flags is 1 if and only if they are equal.
+    __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(zero, &return_unequal);
+    __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(zero, &return_unequal);
+    // The objects are both undetectable, so they both compare as the value
+    // undefined, and are equal.
+    __ Set(eax, Immediate(EQUAL));
+    __ bind(&return_unequal);
+    // Return non-equal by returning the non-zero object pointer in eax,
+    // or return equal if we fell through to here.
+    __ ret(0);  // rax, rdx were pushed
+    __ bind(&not_both_objects);
+  }
+
+  // Push arguments below the return address.
+  __ pop(ecx);
+  __ push(edx);
+  __ push(eax);
+
+  // Figure out which native to call and setup the arguments.
+  Builtins::JavaScript builtin;
+  if (cc_ == equal) {
+    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+  } else {
+    builtin = Builtins::COMPARE;
+    __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+  }
+
+  // Restore return address on the stack.
+  __ push(ecx);
+
+  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+                                    Label* label,
+                                    Register object,
+                                    Register scratch) {
+  __ test(object, Immediate(kSmiTagMask));
+  __ j(zero, label);
+  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+  __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+  __ cmp(scratch, kSymbolTag | kStringTag);
+  __ j(not_equal, label);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+  // Because builtins always remove the receiver from the stack, we
+  // have to fake one to avoid underflowing the stack. The receiver
+  // must be inserted below the return address on the stack so we
+  // temporarily store that in a register.
+  __ pop(eax);
+  __ push(Immediate(Smi::FromInt(0)));
+  __ push(eax);
+
+  // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1); +} + + +void CallFunctionStub::Generate(MacroAssembler* masm) { + Label slow; + + // If the receiver might be a value (string, number or boolean) check for this + // and box it if it is. + if (ReceiverMightBeValue()) { + // Get the receiver from the stack. + // +1 ~ return address + Label receiver_is_value, receiver_is_js_object; + __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize)); + + // Check if receiver is a smi (which is a number value). + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &receiver_is_value, not_taken); + + // Check if the receiver is a valid JS object. + __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi); + __ j(above_equal, &receiver_is_js_object); + + // Call the runtime to box the value. + __ bind(&receiver_is_value); + __ EnterInternalFrame(); + __ push(eax); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ LeaveInternalFrame(); + __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax); + + __ bind(&receiver_is_js_object); + } + + // Get the function to call from the stack. + // +2 ~ receiver, return address + __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize)); + + // Check that the function really is a JavaScript function. + __ test(edi, Immediate(kSmiTagMask)); + __ j(zero, &slow, not_taken); + // Goto slow case if we do not have a function. + __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &slow, not_taken); + + // Fast-case: Just invoke the function. + ParameterCount actual(argc_); + __ InvokeFunction(edi, actual, JUMP_FUNCTION); + + // Slow-case: Non-function called. + __ bind(&slow); + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead + // of the original receiver from the call site). + __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi); + __ Set(eax, Immediate(argc_)); + __ Set(ebx, Immediate(0)); + __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION); + Handle adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)); + __ jmp(adaptor, RelocInfo::CODE_TARGET); +} + + +void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { + // eax holds the exception. + + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // Drop the sp to the top of the handler. + ExternalReference handler_address(Top::k_handler_address); + __ mov(esp, Operand::StaticVariable(handler_address)); + + // Restore next handler and frame pointer, discard handler state. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + __ pop(Operand::StaticVariable(handler_address)); + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize); + __ pop(ebp); + __ pop(edx); // Remove state. + + // Before returning we restore the context from the frame pointer if + // not NULL. The frame pointer is NULL in the exception handler of + // a JS entry frame. + __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL. + Label skip; + __ cmp(ebp, 0); + __ j(equal, &skip, not_taken); + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ bind(&skip); + + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + __ ret(0); +} + + +// If true, a Handle passed by value is passed and returned by +// using the location_ field directly. If false, it is passed and +// returned as a pointer to a handle. 
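+// In the indirect case, ApiGetterEntryStub::Generate below reserves an extra
+// stack slot for the result handle and dereferences the returned pointer
+// after the API call.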
+#ifdef USING_BSD_ABI +static const bool kPassHandlesDirectly = true; +#else +static const bool kPassHandlesDirectly = false; +#endif + + +void ApiGetterEntryStub::Generate(MacroAssembler* masm) { + Label empty_handle; + Label prologue; + Label promote_scheduled_exception; + __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc); + STATIC_ASSERT(kArgc == 4); + if (kPassHandlesDirectly) { + // When handles as passed directly we don't have to allocate extra + // space for and pass an out parameter. + __ mov(Operand(esp, 0 * kPointerSize), ebx); // name. + __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer. + } else { + // The function expects three arguments to be passed but we allocate + // four to get space for the output cell. The argument slots are filled + // as follows: + // + // 3: output cell + // 2: arguments pointer + // 1: name + // 0: pointer to the output cell + // + // Note that this is one more "argument" than the function expects + // so the out cell will have to be popped explicitly after returning + // from the function. + __ mov(Operand(esp, 1 * kPointerSize), ebx); // name. + __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer. + __ mov(ebx, esp); + __ add(Operand(ebx), Immediate(3 * kPointerSize)); + __ mov(Operand(esp, 0 * kPointerSize), ebx); // output + __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell. + } + // Call the api function! + __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY); + // Check if the function scheduled an exception. + ExternalReference scheduled_exception_address = + ExternalReference::scheduled_exception_address(); + __ cmp(Operand::StaticVariable(scheduled_exception_address), + Immediate(Factory::the_hole_value())); + __ j(not_equal, &promote_scheduled_exception, not_taken); + if (!kPassHandlesDirectly) { + // The returned value is a pointer to the handle holding the result. + // Dereference this to get to the location. + __ mov(eax, Operand(eax, 0)); + } + // Check if the result handle holds 0. + __ test(eax, Operand(eax)); + __ j(zero, &empty_handle, not_taken); + // It was non-zero. Dereference to get the result value. + __ mov(eax, Operand(eax, 0)); + __ bind(&prologue); + __ LeaveExitFrame(ExitFrame::MODE_NORMAL); + __ ret(0); + __ bind(&promote_scheduled_exception); + __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1); + __ bind(&empty_handle); + // It was zero; the result is undefined. + __ mov(eax, Factory::undefined_value()); + __ jmp(&prologue); +} + + +void CEntryStub::GenerateCore(MacroAssembler* masm, + Label* throw_normal_exception, + Label* throw_termination_exception, + Label* throw_out_of_memory_exception, + bool do_gc, + bool always_allocate_scope, + int /* alignment_skew */) { + // eax: result parameter for PerformGC, if any + // ebx: pointer to C function (C callee-saved) + // ebp: frame pointer (restored after C call) + // esp: stack pointer (restored after C call) + // edi: number of arguments including receiver (C callee-saved) + // esi: pointer to the first argument (C callee-saved) + + // Result returned in eax, or eax+edx if result_size_ is 2. + + // Check stack alignment. + if (FLAG_debug_code) { + __ CheckStackAlignment(); + } + + if (do_gc) { + // Pass failure code returned from last attempt as first argument to + // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the + // stack alignment is known to be correct. 
+    // This function takes one argument
+    // which is passed on the stack, and we know that the stack has been
+    // prepared to pass at least one argument.
+    __ mov(Operand(esp, 0 * kPointerSize), eax);  // Result.
+    __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
+  }
+
+  ExternalReference scope_depth =
+      ExternalReference::heap_always_allocate_scope_depth();
+  if (always_allocate_scope) {
+    __ inc(Operand::StaticVariable(scope_depth));
+  }
+
+  // Call C function.
+  __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
+  __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
+  __ call(Operand(ebx));
+  // Result is in eax or edx:eax - do not destroy these registers!
+
+  if (always_allocate_scope) {
+    __ dec(Operand::StaticVariable(scope_depth));
+  }
+
+  // Make sure we're not trying to return 'the hole' from the runtime
+  // call as this may lead to crashes in the IC code later.
+  if (FLAG_debug_code) {
+    Label okay;
+    __ cmp(eax, Factory::the_hole_value());
+    __ j(not_equal, &okay);
+    __ int3();
+    __ bind(&okay);
+  }
+
+  // Check for failure result.
+  Label failure_returned;
+  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+  __ lea(ecx, Operand(eax, 1));
+  // Lower 2 bits of ecx are 0 iff eax has failure tag.
+  __ test(ecx, Immediate(kFailureTagMask));
+  __ j(zero, &failure_returned, not_taken);
+
+  // Exit the JavaScript to C++ exit frame.
+  __ LeaveExitFrame(mode_);
+  __ ret(0);
+
+  // Handling of failure.
+  __ bind(&failure_returned);
+
+  Label retry;
+  // If the returned exception is RETRY_AFTER_GC continue at retry label
+  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+  __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+  __ j(zero, &retry, taken);
+
+  // Special handling of out of memory exceptions.
+  __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+  __ j(equal, throw_out_of_memory_exception);
+
+  // Retrieve the pending exception and clear the variable.
+  ExternalReference pending_exception_address(Top::k_pending_exception_address);
+  __ mov(eax, Operand::StaticVariable(pending_exception_address));
+  __ mov(edx,
+         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+  __ mov(Operand::StaticVariable(pending_exception_address), edx);
+
+  // Special handling of termination exceptions which are uncatchable
+  // by javascript code.
+  __ cmp(eax, Factory::termination_exception());
+  __ j(equal, throw_termination_exception);
+
+  // Handle normal exception.
+  __ jmp(throw_normal_exception);
+
+  // Retry.
+  __ bind(&retry);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+                                          UncatchableExceptionType type) {
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // Drop sp to the top stack handler.
+  ExternalReference handler_address(Top::k_handler_address);
+  __ mov(esp, Operand::StaticVariable(handler_address));
+
+  // Unwind the handlers until the ENTRY handler is found.
+  Label loop, done;
+  __ bind(&loop);
+  // Load the type of the current stack handler.
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
+  __ j(equal, &done);
+  // Fetch the next handler in the list.
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  __ mov(esp, Operand(esp, kNextOffset));
+  __ jmp(&loop);
+  __ bind(&done);
+
+  // Set the top handler address to next handler past the current ENTRY handler.
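+  // (Since kNextOffset is zero, the next handler's address is the word at the
+  // top of the stack, so the pop below both reads it into the handler address
+  // and removes that word from the stack.)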
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + __ pop(Operand::StaticVariable(handler_address)); + + if (type == OUT_OF_MEMORY) { + // Set external caught exception to false. + ExternalReference external_caught(Top::k_external_caught_exception_address); + __ mov(eax, false); + __ mov(Operand::StaticVariable(external_caught), eax); + + // Set pending exception and eax to out of memory exception. + ExternalReference pending_exception(Top::k_pending_exception_address); + __ mov(eax, reinterpret_cast(Failure::OutOfMemoryException())); + __ mov(Operand::StaticVariable(pending_exception), eax); + } + + // Clear the context pointer. + __ xor_(esi, Operand(esi)); + + // Restore fp from handler and discard handler state. + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize); + __ pop(ebp); + __ pop(edx); // State. + + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + __ ret(0); +} + + +void CEntryStub::Generate(MacroAssembler* masm) { + // eax: number of arguments including receiver + // ebx: pointer to C function (C callee-saved) + // ebp: frame pointer (restored after C call) + // esp: stack pointer (restored after C call) + // esi: current context (C callee-saved) + // edi: JS function of the caller (C callee-saved) + + // NOTE: Invocations of builtins may return failure objects instead + // of a proper result. The builtin entry handles this by performing + // a garbage collection and retrying the builtin (twice). + + // Enter the exit frame that transitions from JavaScript to C++. + __ EnterExitFrame(mode_); + + // eax: result parameter for PerformGC, if any (setup below) + // ebx: pointer to builtin function (C callee-saved) + // ebp: frame pointer (restored after C call) + // esp: stack pointer (restored after C call) + // edi: number of arguments including receiver (C callee-saved) + // esi: argv pointer (C callee-saved) + + Label throw_normal_exception; + Label throw_termination_exception; + Label throw_out_of_memory_exception; + + // Call into the runtime system. + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + false, + false); + + // Do space-specific GC and retry runtime call. + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + true, + false); + + // Do full GC and retry runtime call one final time. + Failure* failure = Failure::InternalError(); + __ mov(eax, Immediate(reinterpret_cast(failure))); + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + true, + true); + + __ bind(&throw_out_of_memory_exception); + GenerateThrowUncatchable(masm, OUT_OF_MEMORY); + + __ bind(&throw_termination_exception); + GenerateThrowUncatchable(masm, TERMINATION); + + __ bind(&throw_normal_exception); + GenerateThrowTOS(masm); +} + + +void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { + Label invoke, exit; +#ifdef ENABLE_LOGGING_AND_PROFILING + Label not_outermost_js, not_outermost_js_2; +#endif + + // Setup frame. + __ push(ebp); + __ mov(ebp, Operand(esp)); + + // Push marker in two places. + int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; + __ push(Immediate(Smi::FromInt(marker))); // context slot + __ push(Immediate(Smi::FromInt(marker))); // function slot + // Save callee-saved registers (C calling conventions). 
+  __ push(edi);
+  __ push(esi);
+  __ push(ebx);
+
+  // Save copies of the top frame descriptor on the stack.
+  ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+  __ push(Operand::StaticVariable(c_entry_fp));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If this is the outermost JS call, set js_entry_sp value.
+  ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+  __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
+  __ j(not_equal, &not_outermost_js);
+  __ mov(Operand::StaticVariable(js_entry_sp), ebp);
+  __ bind(&not_outermost_js);
+#endif
+
+  // Call a faked try-block that does the invoke.
+  __ call(&invoke);
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  ExternalReference pending_exception(Top::k_pending_exception_address);
+  __ mov(Operand::StaticVariable(pending_exception), eax);
+  __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
+  __ jmp(&exit);
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+
+  // Clear any pending exceptions.
+  __ mov(edx,
+         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+  __ mov(Operand::StaticVariable(pending_exception), edx);
+
+  // Fake a receiver (NULL).
+  __ push(Immediate(0));  // receiver
+
+  // Invoke the function by calling through JS entry trampoline
+  // builtin and pop the faked function when we return. Notice that we
+  // cannot store a reference to the trampoline code directly in this
+  // stub, because the builtin stubs may not have been generated yet.
+  if (is_construct) {
+    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+    __ mov(edx, Immediate(construct_entry));
+  } else {
+    ExternalReference entry(Builtins::JSEntryTrampoline);
+    __ mov(edx, Immediate(entry));
+  }
+  __ mov(edx, Operand(edx, 0));  // deref address
+  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+  __ call(Operand(edx));
+
+  // Unlink this frame from the handler chain.
+  __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+  // Pop next_sp.
+  __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If current EBP value is the same as js_entry_sp value, it means that
+  // the current function is the outermost.
+  __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
+  __ j(not_equal, &not_outermost_js_2);
+  __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
+  __ bind(&not_outermost_js_2);
+#endif
+
+  // Restore the top frame descriptor from the stack.
+  __ bind(&exit);
+  __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
+
+  // Restore callee-saved registers (C calling conventions).
+  __ pop(ebx);
+  __ pop(esi);
+  __ pop(edi);
+  __ add(Operand(esp), Immediate(2 * kPointerSize));  // remove markers
+
+  // Restore frame pointer and return.
+  __ pop(ebp);
+  __ ret(0);
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+  // Get the object - go slow case if it's a smi.
+  Label slow;
+  __ mov(eax, Operand(esp, 2 * kPointerSize));  // 2 ~ return address, function
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+
+  // Check that the left hand is a JS object.
+  __ IsObjectJSObjectType(eax, eax, edx, &slow);
+
+  // Get the prototype of the function.
+  __ mov(edx, Operand(esp, 1 * kPointerSize));  // 1 ~ return address
+  // edx is function, eax is map.
+
+  // Look up the function and the map in the instanceof cache.
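+  // (On a cache hit the previously computed answer is returned right away;
+  // the prototype chain walk below only runs on a cache miss.)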
+ Label miss; + ExternalReference roots_address = ExternalReference::roots_address(); + __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); + __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address)); + __ j(not_equal, &miss); + __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex)); + __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address)); + __ j(not_equal, &miss); + __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); + __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address)); + __ ret(2 * kPointerSize); + + __ bind(&miss); + __ TryGetFunctionPrototype(edx, ebx, ecx, &slow); + + // Check that the function prototype is a JS object. + __ test(ebx, Immediate(kSmiTagMask)); + __ j(zero, &slow, not_taken); + __ IsObjectJSObjectType(ebx, ecx, ecx, &slow); + + // Register mapping: + // eax is object map. + // edx is function. + // ebx is function prototype. + __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex)); + __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); + __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); + __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx); + + __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset)); + + // Loop through the prototype chain looking for the function prototype. + Label loop, is_instance, is_not_instance; + __ bind(&loop); + __ cmp(ecx, Operand(ebx)); + __ j(equal, &is_instance); + __ cmp(Operand(ecx), Immediate(Factory::null_value())); + __ j(equal, &is_not_instance); + __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset)); + __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset)); + __ jmp(&loop); + + __ bind(&is_instance); + __ Set(eax, Immediate(0)); + __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); + __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); + __ ret(2 * kPointerSize); + + __ bind(&is_not_instance); + __ Set(eax, Immediate(Smi::FromInt(1))); + __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); + __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); + __ ret(2 * kPointerSize); + + // Slow-case: Go through the JavaScript implementation. + __ bind(&slow); + __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); +} + + +int CompareStub::MinorKey() { + // Encode the three parameters in a unique 16 bit value. To avoid duplicate + // stubs the never NaN NaN condition is only taken into account if the + // condition is equals. + ASSERT(static_cast(cc_) < (1 << 12)); + ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); + return ConditionField::encode(static_cast(cc_)) + | RegisterField::encode(false) // lhs_ and rhs_ are not used + | StrictField::encode(strict_) + | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false) + | IncludeNumberCompareField::encode(include_number_compare_); +} + + +// Unfortunately you have to run without snapshots to see most of these +// names in the profile since most compare stubs end up in the snapshot. 
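+// For example, a strict equality stub with never_nan_nan_ set and the number
+// comparison omitted gets the name "CompareStub_EQ_STRICT_NO_NAN_NO_NUMBER".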
+const char* CompareStub::GetName() { + ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); + + if (name_ != NULL) return name_; + const int kMaxNameLength = 100; + name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); + if (name_ == NULL) return "OOM"; + + const char* cc_name; + switch (cc_) { + case less: cc_name = "LT"; break; + case greater: cc_name = "GT"; break; + case less_equal: cc_name = "LE"; break; + case greater_equal: cc_name = "GE"; break; + case equal: cc_name = "EQ"; break; + case not_equal: cc_name = "NE"; break; + default: cc_name = "UnknownCondition"; break; + } + + const char* strict_name = ""; + if (strict_ && (cc_ == equal || cc_ == not_equal)) { + strict_name = "_STRICT"; + } + + const char* never_nan_nan_name = ""; + if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) { + never_nan_nan_name = "_NO_NAN"; + } + + const char* include_number_compare_name = ""; + if (!include_number_compare_) { + include_number_compare_name = "_NO_NUMBER"; + } + + OS::SNPrintF(Vector(name_, kMaxNameLength), + "CompareStub_%s%s%s%s", + cc_name, + strict_name, + never_nan_nan_name, + include_number_compare_name); + return name_; +} + + +// ------------------------------------------------------------------------- +// StringCharCodeAtGenerator + +void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { + Label flat_string; + Label ascii_string; + Label got_char_code; + + // If the receiver is a smi trigger the non-string case. + STATIC_ASSERT(kSmiTag == 0); + __ test(object_, Immediate(kSmiTagMask)); + __ j(zero, receiver_not_string_); + + // Fetch the instance type of the receiver into result register. + __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); + __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); + // If the receiver is not a string trigger the non-string case. + __ test(result_, Immediate(kIsNotStringMask)); + __ j(not_zero, receiver_not_string_); + + // If the index is non-smi trigger the non-smi case. + STATIC_ASSERT(kSmiTag == 0); + __ test(index_, Immediate(kSmiTagMask)); + __ j(not_zero, &index_not_smi_); + + // Put smi-tagged index into scratch register. + __ mov(scratch_, index_); + __ bind(&got_smi_index_); + + // Check for index out of range. + __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset)); + __ j(above_equal, index_out_of_range_); + + // We need special handling for non-flat strings. + STATIC_ASSERT(kSeqStringTag == 0); + __ test(result_, Immediate(kStringRepresentationMask)); + __ j(zero, &flat_string); + + // Handle non-flat strings. + __ test(result_, Immediate(kIsConsStringMask)); + __ j(zero, &call_runtime_); + + // ConsString. + // Check whether the right hand side is the empty string (i.e. if + // this is really a flat string in a cons string). If that is not + // the case we would rather go to the runtime system now to flatten + // the string. + __ cmp(FieldOperand(object_, ConsString::kSecondOffset), + Immediate(Factory::empty_string())); + __ j(not_equal, &call_runtime_); + // Get the first of the two strings and load its instance type. + __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset)); + __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); + __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); + // If the first cons component is also non-flat, then go to runtime. + STATIC_ASSERT(kSeqStringTag == 0); + __ test(result_, Immediate(kStringRepresentationMask)); + __ j(not_zero, &call_runtime_); + + // Check for 1-byte or 2-byte string. 
+ __ bind(&flat_string); + STATIC_ASSERT(kAsciiStringTag != 0); + __ test(result_, Immediate(kStringEncodingMask)); + __ j(not_zero, &ascii_string); + + // 2-byte string. + // Load the 2-byte character code into the result register. + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + __ movzx_w(result_, FieldOperand(object_, + scratch_, times_1, // Scratch is smi-tagged. + SeqTwoByteString::kHeaderSize)); + __ jmp(&got_char_code); + + // ASCII string. + // Load the byte into the result register. + __ bind(&ascii_string); + __ SmiUntag(scratch_); + __ movzx_b(result_, FieldOperand(object_, + scratch_, times_1, + SeqAsciiString::kHeaderSize)); + __ bind(&got_char_code); + __ SmiTag(result_); + __ bind(&exit_); +} + + +void StringCharCodeAtGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + __ Abort("Unexpected fallthrough to CharCodeAt slow case"); + + // Index is not a smi. + __ bind(&index_not_smi_); + // If index is a heap number, try converting it to an integer. + __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true); + call_helper.BeforeCall(masm); + __ push(object_); + __ push(index_); + __ push(index_); // Consumed by runtime conversion function. + if (index_flags_ == STRING_INDEX_IS_NUMBER) { + __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); + } else { + ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); + // NumberToSmi discards numbers that are not exact integers. + __ CallRuntime(Runtime::kNumberToSmi, 1); + } + if (!scratch_.is(eax)) { + // Save the conversion result before the pop instructions below + // have a chance to overwrite it. + __ mov(scratch_, eax); + } + __ pop(index_); + __ pop(object_); + // Reload the instance type. + __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); + __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); + call_helper.AfterCall(masm); + // If index is still not a smi, it must be out of range. + STATIC_ASSERT(kSmiTag == 0); + __ test(scratch_, Immediate(kSmiTagMask)); + __ j(not_zero, index_out_of_range_); + // Otherwise, return to the fast path. + __ jmp(&got_smi_index_); + + // Call runtime. We get here when the receiver is a string and the + // index is a number, but the code of getting the actual character + // is too complex (e.g., when the string needs to be flattened). + __ bind(&call_runtime_); + call_helper.BeforeCall(masm); + __ push(object_); + __ push(index_); + __ CallRuntime(Runtime::kStringCharCodeAt, 2); + if (!result_.is(eax)) { + __ mov(result_, eax); + } + call_helper.AfterCall(masm); + __ jmp(&exit_); + + __ Abort("Unexpected fallthrough from CharCodeAt slow case"); +} + + +// ------------------------------------------------------------------------- +// StringCharFromCodeGenerator + +void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { + // Fast case of Heap::LookupSingleCharacterStringFromCode. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiShiftSize == 0); + ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); + __ test(code_, + Immediate(kSmiTagMask | + ((~String::kMaxAsciiCharCode) << kSmiTagSize))); + __ j(not_zero, &slow_case_, not_taken); + + __ Set(result_, Immediate(Factory::single_character_string_cache())); + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiShiftSize == 0); + // At this point code register contains smi tagged ascii char code. 
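+  // The code register therefore holds the character code shifted left by one,
+  // so scaling it by times_half_pointer_size yields code * kPointerSize, the
+  // byte offset of the matching FixedArray element in the cache.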
+ __ mov(result_, FieldOperand(result_, + code_, times_half_pointer_size, + FixedArray::kHeaderSize)); + __ cmp(result_, Factory::undefined_value()); + __ j(equal, &slow_case_, not_taken); + __ bind(&exit_); +} + + +void StringCharFromCodeGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + __ Abort("Unexpected fallthrough to CharFromCode slow case"); + + __ bind(&slow_case_); + call_helper.BeforeCall(masm); + __ push(code_); + __ CallRuntime(Runtime::kCharFromCode, 1); + if (!result_.is(eax)) { + __ mov(result_, eax); + } + call_helper.AfterCall(masm); + __ jmp(&exit_); + + __ Abort("Unexpected fallthrough from CharFromCode slow case"); +} + + +// ------------------------------------------------------------------------- +// StringCharAtGenerator + +void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { + char_code_at_generator_.GenerateFast(masm); + char_from_code_generator_.GenerateFast(masm); +} + + +void StringCharAtGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + char_code_at_generator_.GenerateSlow(masm, call_helper); + char_from_code_generator_.GenerateSlow(masm, call_helper); +} + + +void StringAddStub::Generate(MacroAssembler* masm) { + Label string_add_runtime; + + // Load the two arguments. + __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument. + __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument. + + // Make sure that both arguments are strings if not known in advance. + if (string_check_) { + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &string_add_runtime); + __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx); + __ j(above_equal, &string_add_runtime); + + // First argument is a a string, test second. + __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, &string_add_runtime); + __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx); + __ j(above_equal, &string_add_runtime); + } + + // Both arguments are strings. + // eax: first string + // edx: second string + // Check if either of the strings are empty. In that case return the other. + Label second_not_zero_length, both_not_zero_length; + __ mov(ecx, FieldOperand(edx, String::kLengthOffset)); + STATIC_ASSERT(kSmiTag == 0); + __ test(ecx, Operand(ecx)); + __ j(not_zero, &second_not_zero_length); + // Second string is empty, result is first string which is already in eax. + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + __ bind(&second_not_zero_length); + __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); + STATIC_ASSERT(kSmiTag == 0); + __ test(ebx, Operand(ebx)); + __ j(not_zero, &both_not_zero_length); + // First string is empty, result is second string which is in edx. + __ mov(eax, edx); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + + // Both strings are non-empty. + // eax: first string + // ebx: length of first string as a smi + // ecx: length of second string as a smi + // edx: second string + // Look at the length of the result of adding the two strings. + Label string_add_flat_result, longer_than_two; + __ bind(&both_not_zero_length); + __ add(ebx, Operand(ecx)); + STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength); + // Handle exceptionally long strings in the runtime system. + __ j(overflow, &string_add_runtime); + // Use the runtime system when adding two one character strings, as it + // contains optimizations for this specific case using the symbol table. 
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(2))); + __ j(not_equal, &longer_than_two); + + // Check that both strings are non-external ascii strings. + __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, + &string_add_runtime); + + // Get the two characters forming the sub string. + __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize)); + __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize)); + + // Try to lookup two character string in symbol table. If it is not found + // just allocate a new one. + Label make_two_character_string, make_flat_ascii_string; + StringHelper::GenerateTwoCharacterSymbolTableProbe( + masm, ebx, ecx, eax, edx, edi, &make_two_character_string); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + + __ bind(&make_two_character_string); + __ Set(ebx, Immediate(Smi::FromInt(2))); + __ jmp(&make_flat_ascii_string); + + __ bind(&longer_than_two); + // Check if resulting string will be flat. + __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength))); + __ j(below, &string_add_flat_result); + + // If result is not supposed to be flat allocate a cons string object. If both + // strings are ascii the result is an ascii cons string. + Label non_ascii, allocated, ascii_data; + __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset)); + __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); + __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset)); + __ and_(ecx, Operand(edi)); + STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); + __ test(ecx, Immediate(kAsciiStringTag)); + __ j(zero, &non_ascii); + __ bind(&ascii_data); + // Allocate an acsii cons string. + __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime); + __ bind(&allocated); + // Fill the fields of the cons string. + if (FLAG_debug_code) __ AbortIfNotSmi(ebx); + __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx); + __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset), + Immediate(String::kEmptyHashField)); + __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax); + __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx); + __ mov(eax, ecx); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + __ bind(&non_ascii); + // At least one of the strings is two-byte. Check whether it happens + // to contain only ascii characters. + // ecx: first instance type AND second instance type. + // edi: second instance type. + __ test(ecx, Immediate(kAsciiDataHintMask)); + __ j(not_zero, &ascii_data); + __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); + __ xor_(edi, Operand(ecx)); + STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); + __ and_(edi, kAsciiStringTag | kAsciiDataHintTag); + __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag); + __ j(equal, &ascii_data); + // Allocate a two byte cons string. + __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime); + __ jmp(&allocated); + + // Handle creating a flat result. First check that both strings are not + // external strings. 
+ // eax: first string + // ebx: length of resulting flat string as a smi + // edx: second string + __ bind(&string_add_flat_result); + __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); + __ and_(ecx, kStringRepresentationMask); + __ cmp(ecx, kExternalStringTag); + __ j(equal, &string_add_runtime); + __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); + __ and_(ecx, kStringRepresentationMask); + __ cmp(ecx, kExternalStringTag); + __ j(equal, &string_add_runtime); + // Now check if both strings are ascii strings. + // eax: first string + // ebx: length of resulting flat string as a smi + // edx: second string + Label non_ascii_string_add_flat_result; + STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); + __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); + __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); + __ j(zero, &non_ascii_string_add_flat_result); + __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); + __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); + __ j(zero, &string_add_runtime); + + __ bind(&make_flat_ascii_string); + // Both strings are ascii strings. As they are short they are both flat. + // ebx: length of resulting flat string as a smi + __ SmiUntag(ebx); + __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime); + // eax: result string + __ mov(ecx, eax); + // Locate first character of result. + __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // Load first argument and locate first character. + __ mov(edx, Operand(esp, 2 * kPointerSize)); + __ mov(edi, FieldOperand(edx, String::kLengthOffset)); + __ SmiUntag(edi); + __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // eax: result string + // ecx: first character of result + // edx: first char of first argument + // edi: length of first argument + StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true); + // Load second argument and locate first character. + __ mov(edx, Operand(esp, 1 * kPointerSize)); + __ mov(edi, FieldOperand(edx, String::kLengthOffset)); + __ SmiUntag(edi); + __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // eax: result string + // ecx: next character of result + // edx: first char of second argument + // edi: length of second argument + StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + + // Handle creating a flat two byte result. + // eax: first string - known to be two byte + // ebx: length of resulting flat string as a smi + // edx: second string + __ bind(&non_ascii_string_add_flat_result); + __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); + __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); + __ j(not_zero, &string_add_runtime); + // Both strings are two byte strings. As they are short they are both + // flat. + __ SmiUntag(ebx); + __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime); + // eax: result string + __ mov(ecx, eax); + // Locate first character of result. + __ add(Operand(ecx), + Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // Load first argument and locate first character. 
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); + __ mov(edi, FieldOperand(edx, String::kLengthOffset)); + __ SmiUntag(edi); + __ add(Operand(edx), + Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // eax: result string + // ecx: first character of result + // edx: first char of first argument + // edi: length of first argument + StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false); + // Load second argument and locate first character. + __ mov(edx, Operand(esp, 1 * kPointerSize)); + __ mov(edi, FieldOperand(edx, String::kLengthOffset)); + __ SmiUntag(edi); + __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // eax: result string + // ecx: next character of result + // edx: first char of second argument + // edi: length of second argument + StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + + // Just jump to runtime to add the two strings. + __ bind(&string_add_runtime); + __ TailCallRuntime(Runtime::kStringAdd, 2, 1); +} + + +void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + bool ascii) { + Label loop; + __ bind(&loop); + // This loop just copies one character at a time, as it is only used for very + // short strings. + if (ascii) { + __ mov_b(scratch, Operand(src, 0)); + __ mov_b(Operand(dest, 0), scratch); + __ add(Operand(src), Immediate(1)); + __ add(Operand(dest), Immediate(1)); + } else { + __ mov_w(scratch, Operand(src, 0)); + __ mov_w(Operand(dest, 0), scratch); + __ add(Operand(src), Immediate(2)); + __ add(Operand(dest), Immediate(2)); + } + __ sub(Operand(count), Immediate(1)); + __ j(not_zero, &loop); +} + + +void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + bool ascii) { + // Copy characters using rep movs of doublewords. + // The destination is aligned on a 4 byte boundary because we are + // copying to the beginning of a newly allocated string. + ASSERT(dest.is(edi)); // rep movs destination + ASSERT(src.is(esi)); // rep movs source + ASSERT(count.is(ecx)); // rep movs count + ASSERT(!scratch.is(dest)); + ASSERT(!scratch.is(src)); + ASSERT(!scratch.is(count)); + + // Nothing to do for zero characters. + Label done; + __ test(count, Operand(count)); + __ j(zero, &done); + + // Make count the number of bytes to copy. + if (!ascii) { + __ shl(count, 1); + } + + // Don't enter the rep movs if there are less than 4 bytes to copy. + Label last_bytes; + __ test(count, Immediate(~3)); + __ j(zero, &last_bytes); + + // Copy from edi to esi using rep movs instruction. + __ mov(scratch, count); + __ sar(count, 2); // Number of doublewords to copy. + __ cld(); + __ rep_movs(); + + // Find number of bytes left. + __ mov(count, scratch); + __ and_(count, 3); + + // Check if there are more bytes to copy. + __ bind(&last_bytes); + __ test(count, Operand(count)); + __ j(zero, &done); + + // Copy remaining characters. 
+  Label loop;
+  __ bind(&loop);
+  __ mov_b(scratch, Operand(src, 0));
+  __ mov_b(Operand(dest, 0), scratch);
+  __ add(Operand(src), Immediate(1));
+  __ add(Operand(dest), Immediate(1));
+  __ sub(Operand(count), Immediate(1));
+  __ j(not_zero, &loop);
+
+  __ bind(&done);
+}
+
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                        Register c1,
+                                                        Register c2,
+                                                        Register scratch1,
+                                                        Register scratch2,
+                                                        Register scratch3,
+                                                        Label* not_found) {
+  // Register scratch3 is the general scratch register in this function.
+  Register scratch = scratch3;
+
+  // Make sure that both characters are not digits as such strings have a
+  // different hash algorithm. Don't try to look for these in the symbol table.
+  Label not_array_index;
+  __ mov(scratch, c1);
+  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+  __ j(above, &not_array_index);
+  __ mov(scratch, c2);
+  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+  __ j(below_equal, not_found);
+
+  __ bind(&not_array_index);
+  // Calculate the two character string hash.
+  Register hash = scratch1;
+  GenerateHashInit(masm, hash, c1, scratch);
+  GenerateHashAddCharacter(masm, hash, c2, scratch);
+  GenerateHashGetHash(masm, hash, scratch);
+
+  // Collect the two characters in a register.
+  Register chars = c1;
+  __ shl(c2, kBitsPerByte);
+  __ or_(chars, Operand(c2));
+
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash: hash of two character string.
+
+  // Load the symbol table.
+  Register symbol_table = c2;
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
+  __ mov(symbol_table,
+         Operand::StaticArray(scratch, times_pointer_size, roots_address));
+
+  // Calculate capacity mask from the symbol table capacity.
+  Register mask = scratch2;
+  __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
+  __ SmiUntag(mask);
+  __ sub(Operand(mask), Immediate(1));
+
+  // Registers
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash: hash of two character string
+  // symbol_table: symbol table
+  // mask: capacity mask
+  // scratch: -
+
+  // Perform a number of probes in the symbol table.
+  static const int kProbes = 4;
+  Label found_in_symbol_table;
+  Label next_probe[kProbes], next_probe_pop_mask[kProbes];
+  for (int i = 0; i < kProbes; i++) {
+    // Calculate entry in symbol table.
+    __ mov(scratch, hash);
+    if (i > 0) {
+      __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
+    }
+    __ and_(scratch, Operand(mask));
+
+    // Load the entry from the symbol table.
+    Register candidate = scratch;  // Scratch register contains candidate.
+    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+    __ mov(candidate,
+           FieldOperand(symbol_table,
+                        scratch,
+                        times_pointer_size,
+                        SymbolTable::kElementsStartOffset));
+
+    // If entry is undefined no string with this hash can be found.
+    __ cmp(candidate, Factory::undefined_value());
+    __ j(equal, not_found);
+
+    // If length is not 2 the string is not a candidate.
+    __ cmp(FieldOperand(candidate, String::kLengthOffset),
+           Immediate(Smi::FromInt(2)));
+    __ j(not_equal, &next_probe[i]);
+
+    // As we are out of registers save the mask on the stack and use that
+    // register as a temporary.
+    __ push(mask);
+    Register temp = mask;
+
+    // Check that the candidate is a non-external ascii string.
+ __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset)); + __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); + __ JumpIfInstanceTypeIsNotSequentialAscii( + temp, temp, &next_probe_pop_mask[i]); + + // Check if the two characters match. + __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize)); + __ and_(temp, 0x0000ffff); + __ cmp(chars, Operand(temp)); + __ j(equal, &found_in_symbol_table); + __ bind(&next_probe_pop_mask[i]); + __ pop(mask); + __ bind(&next_probe[i]); + } + + // No matching 2 character string found by probing. + __ jmp(not_found); + + // Scratch register contains result when we fall through to here. + Register result = scratch; + __ bind(&found_in_symbol_table); + __ pop(mask); // Pop saved mask from the stack. + if (!result.is(eax)) { + __ mov(eax, result); + } +} + + +void StringHelper::GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character, + Register scratch) { + // hash = character + (character << 10); + __ mov(hash, character); + __ shl(hash, 10); + __ add(hash, Operand(character)); + // hash ^= hash >> 6; + __ mov(scratch, hash); + __ sar(scratch, 6); + __ xor_(hash, Operand(scratch)); +} + + +void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character, + Register scratch) { + // hash += character; + __ add(hash, Operand(character)); + // hash += hash << 10; + __ mov(scratch, hash); + __ shl(scratch, 10); + __ add(hash, Operand(scratch)); + // hash ^= hash >> 6; + __ mov(scratch, hash); + __ sar(scratch, 6); + __ xor_(hash, Operand(scratch)); +} + + +void StringHelper::GenerateHashGetHash(MacroAssembler* masm, + Register hash, + Register scratch) { + // hash += hash << 3; + __ mov(scratch, hash); + __ shl(scratch, 3); + __ add(hash, Operand(scratch)); + // hash ^= hash >> 11; + __ mov(scratch, hash); + __ sar(scratch, 11); + __ xor_(hash, Operand(scratch)); + // hash += hash << 15; + __ mov(scratch, hash); + __ shl(scratch, 15); + __ add(hash, Operand(scratch)); + + // if (hash == 0) hash = 27; + Label hash_not_zero; + __ test(hash, Operand(hash)); + __ j(not_zero, &hash_not_zero); + __ mov(hash, Immediate(27)); + __ bind(&hash_not_zero); +} + + +void SubStringStub::Generate(MacroAssembler* masm) { + Label runtime; + + // Stack frame on entry. + // esp[0]: return address + // esp[4]: to + // esp[8]: from + // esp[12]: string + + // Make sure first argument is a string. + __ mov(eax, Operand(esp, 3 * kPointerSize)); + STATIC_ASSERT(kSmiTag == 0); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &runtime); + Condition is_string = masm->IsObjectStringType(eax, ebx, ebx); + __ j(NegateCondition(is_string), &runtime); + + // eax: string + // ebx: instance type + + // Calculate length of sub string using the smi values. + Label result_longer_than_two; + __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index. + __ test(ecx, Immediate(kSmiTagMask)); + __ j(not_zero, &runtime); + __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index. + __ test(edx, Immediate(kSmiTagMask)); + __ j(not_zero, &runtime); + __ sub(ecx, Operand(edx)); + __ cmp(ecx, FieldOperand(eax, String::kLengthOffset)); + Label return_eax; + __ j(equal, &return_eax); + // Special handling of sub-strings of length 1 and 2. One character strings + // are handled in the runtime system (looked up in the single character + // cache). Two character strings are looked for in the symbol cache. + __ SmiUntag(ecx); // Result length is no longer smi. 
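+  // ecx now holds the untagged result length: lengths below two go to the
+  // runtime, a length of exactly two is looked up in the symbol table below,
+  // and longer sub strings are handled by the general case.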
+ __ cmp(ecx, 2); + __ j(greater, &result_longer_than_two); + __ j(less, &runtime); + + // Sub string of length 2 requested. + // eax: string + // ebx: instance type + // ecx: sub string length (value is 2) + // edx: from index (smi) + __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime); + + // Get the two characters forming the sub string. + __ SmiUntag(edx); // From index is no longer smi. + __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize)); + __ movzx_b(ecx, + FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1)); + + // Try to lookup two character string in symbol table. + Label make_two_character_string; + StringHelper::GenerateTwoCharacterSymbolTableProbe( + masm, ebx, ecx, eax, edx, edi, &make_two_character_string); + __ ret(3 * kPointerSize); + + __ bind(&make_two_character_string); + // Setup registers for allocating the two character string. + __ mov(eax, Operand(esp, 3 * kPointerSize)); + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); + __ Set(ecx, Immediate(2)); + + __ bind(&result_longer_than_two); + // eax: string + // ebx: instance type + // ecx: result string length + // Check for flat ascii string + Label non_ascii_flat; + __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat); + + // Allocate the result. + __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime); + + // eax: result string + // ecx: result string length + __ mov(edx, esi); // esi used by following code. + // Locate first character of result. + __ mov(edi, eax); + __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // Load string argument and locate character of sub string start. + __ mov(esi, Operand(esp, 3 * kPointerSize)); + __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from + __ SmiUntag(ebx); + __ add(esi, Operand(ebx)); + + // eax: result string + // ecx: result length + // edx: original value of esi + // edi: first character of result + // esi: character of sub string start + StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true); + __ mov(esi, edx); // Restore esi. + __ IncrementCounter(&Counters::sub_string_native, 1); + __ ret(3 * kPointerSize); + + __ bind(&non_ascii_flat); + // eax: string + // ebx: instance type & kStringRepresentationMask | kStringEncodingMask + // ecx: result string length + // Check for flat two byte string + __ cmp(ebx, kSeqStringTag | kTwoByteStringTag); + __ j(not_equal, &runtime); + + // Allocate the result. + __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime); + + // eax: result string + // ecx: result string length + __ mov(edx, esi); // esi used by following code. + // Locate first character of result. + __ mov(edi, eax); + __ add(Operand(edi), + Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // Load string argument and locate character of sub string start. + __ mov(esi, Operand(esp, 3 * kPointerSize)); + __ add(Operand(esi), + Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from + // As from is a smi it is 2 times the value which matches the size of a two + // byte character. 
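+  // (On ia32 a smi is the value shifted left by one, as the asserts below
+  // check, so adding the smi-tagged 'from' index advances esi by 2 * from
+  // bytes, i.e. by 'from' two-byte characters.)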
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+  __ add(esi, Operand(ebx));
+
+  // eax: result string
+  // ecx: result length
+  // edx: original value of esi
+  // edi: first character of result
+  // esi: character of sub string start
+  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
+  __ mov(esi, edx);  // Restore esi.
+
+  __ bind(&return_eax);
+  __ IncrementCounter(&Counters::sub_string_native, 1);
+  __ ret(3 * kPointerSize);
+
+  // Just jump to runtime to create the sub string.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+                                                        Register left,
+                                                        Register right,
+                                                        Register scratch1,
+                                                        Register scratch2,
+                                                        Register scratch3) {
+  Label result_not_equal;
+  Label result_greater;
+  Label compare_lengths;
+
+  __ IncrementCounter(&Counters::string_compare_native, 1);
+
+  // Find minimum length.
+  Label left_shorter;
+  __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
+  __ mov(scratch3, scratch1);
+  __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
+
+  Register length_delta = scratch3;
+
+  __ j(less_equal, &left_shorter);
+  // Right string is shorter. Change scratch1 to be length of right string.
+  __ sub(scratch1, Operand(length_delta));
+  __ bind(&left_shorter);
+
+  Register min_length = scratch1;
+
+  // If either length is zero, just compare lengths.
+  __ test(min_length, Operand(min_length));
+  __ j(zero, &compare_lengths);
+
+  // Change index to run from -min_length to -1 by adding min_length
+  // to string start. This means that loop ends when index reaches zero,
+  // which doesn't need an additional compare.
+  __ SmiUntag(min_length);
+  __ lea(left,
+         FieldOperand(left,
+                      min_length, times_1,
+                      SeqAsciiString::kHeaderSize));
+  __ lea(right,
+         FieldOperand(right,
+                      min_length, times_1,
+                      SeqAsciiString::kHeaderSize));
+  __ neg(min_length);
+
+  Register index = min_length;  // index = -min_length;
+
+  {
+    // Compare loop.
+    Label loop;
+    __ bind(&loop);
+    // Compare characters.
+    __ mov_b(scratch2, Operand(left, index, times_1, 0));
+    __ cmpb(scratch2, Operand(right, index, times_1, 0));
+    __ j(not_equal, &result_not_equal);
+    __ add(Operand(index), Immediate(1));
+    __ j(not_zero, &loop);
+  }
+
+  // Compare lengths - strings up to min-length are equal.
+  __ bind(&compare_lengths);
+  __ test(length_delta, Operand(length_delta));
+  __ j(not_zero, &result_not_equal);
+
+  // Result is EQUAL.
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+  __ ret(0);
+
+  __ bind(&result_not_equal);
+  __ j(greater, &result_greater);
+
+  // Result is LESS.
+  __ Set(eax, Immediate(Smi::FromInt(LESS)));
+  __ ret(0);
+
+  // Result is GREATER.
+  __ bind(&result_greater);
+  __ Set(eax, Immediate(Smi::FromInt(GREATER)));
+  __ ret(0);
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  // Stack frame on entry.
+  // esp[0]: return address
+  // esp[4]: right string
+  // esp[8]: left string
+
+  __ mov(edx, Operand(esp, 2 * kPointerSize));  // left
+  __ mov(eax, Operand(esp, 1 * kPointerSize));  // right
+
+  Label not_same;
+  __ cmp(edx, Operand(eax));
+  __ j(not_equal, &not_same);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+  __ IncrementCounter(&Counters::string_compare_native, 1);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&not_same);
+
+  // Check that both objects are sequential ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime); + + // Compare flat ascii strings. + // Drop arguments from the stack. + __ pop(ecx); + __ add(Operand(esp), Immediate(2 * kPointerSize)); + __ push(ecx); + GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi); + + // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) + // tagged as a small integer. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); +} + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h new file mode 100644 index 0000000..c258302 --- /dev/null +++ b/src/ia32/code-stubs-ia32.h @@ -0,0 +1,362 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_IA32_CODE_STUBS_IA32_H_ +#define V8_IA32_CODE_STUBS_IA32_H_ + +#include "codegen-inl.h" +#include "ast.h" +#include "ic-inl.h" + +namespace v8 { +namespace internal { + + +// Compute a transcendental math function natively, or call the +// TranscendentalCache runtime function. +class TranscendentalCacheStub: public CodeStub { + public: + explicit TranscendentalCacheStub(TranscendentalCache::Type type) + : type_(type) {} + void Generate(MacroAssembler* masm); + private: + TranscendentalCache::Type type_; + Major MajorKey() { return TranscendentalCache; } + int MinorKey() { return type_; } + Runtime::FunctionId RuntimeFunction(); + void GenerateOperation(MacroAssembler* masm); +}; + + +class ToBooleanStub: public CodeStub { + public: + ToBooleanStub() { } + + void Generate(MacroAssembler* masm); + + private: + Major MajorKey() { return ToBoolean; } + int MinorKey() { return 0; } +}; + + +// Flag that indicates how to generate code for the stub GenericBinaryOpStub. +enum GenericBinaryFlags { + NO_GENERIC_BINARY_FLAGS = 0, + NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub. 
+}; + + +class GenericBinaryOpStub: public CodeStub { + public: + GenericBinaryOpStub(Token::Value op, + OverwriteMode mode, + GenericBinaryFlags flags, + TypeInfo operands_type) + : op_(op), + mode_(mode), + flags_(flags), + args_in_registers_(false), + args_reversed_(false), + static_operands_type_(operands_type), + runtime_operands_type_(BinaryOpIC::DEFAULT), + name_(NULL) { + if (static_operands_type_.IsSmi()) { + mode_ = NO_OVERWRITE; + } + use_sse3_ = CpuFeatures::IsSupported(SSE3); + ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); + } + + GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type) + : op_(OpBits::decode(key)), + mode_(ModeBits::decode(key)), + flags_(FlagBits::decode(key)), + args_in_registers_(ArgsInRegistersBits::decode(key)), + args_reversed_(ArgsReversedBits::decode(key)), + use_sse3_(SSE3Bits::decode(key)), + static_operands_type_(TypeInfo::ExpandedRepresentation( + StaticTypeInfoBits::decode(key))), + runtime_operands_type_(runtime_operands_type), + name_(NULL) { + } + + // Generate code to call the stub with the supplied arguments. This will add + // code at the call site to prepare arguments either in registers or on the + // stack together with the actual call. + void GenerateCall(MacroAssembler* masm, Register left, Register right); + void GenerateCall(MacroAssembler* masm, Register left, Smi* right); + void GenerateCall(MacroAssembler* masm, Smi* left, Register right); + + Result GenerateCall(MacroAssembler* masm, + VirtualFrame* frame, + Result* left, + Result* right); + + private: + Token::Value op_; + OverwriteMode mode_; + GenericBinaryFlags flags_; + bool args_in_registers_; // Arguments passed in registers not on the stack. + bool args_reversed_; // Left and right argument are swapped. + bool use_sse3_; + + // Number type information of operands, determined by code generator. + TypeInfo static_operands_type_; + + // Operand type information determined at runtime. + BinaryOpIC::TypeInfo runtime_operands_type_; + + char* name_; + + const char* GetName(); + +#ifdef DEBUG + void Print() { + PrintF("GenericBinaryOpStub %d (op %s), " + "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n", + MinorKey(), + Token::String(op_), + static_cast(mode_), + static_cast(flags_), + static_cast(args_in_registers_), + static_cast(args_reversed_), + static_operands_type_.ToString()); + } +#endif + + // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM. + class ModeBits: public BitField {}; + class OpBits: public BitField {}; + class SSE3Bits: public BitField {}; + class ArgsInRegistersBits: public BitField {}; + class ArgsReversedBits: public BitField {}; + class FlagBits: public BitField {}; + class StaticTypeInfoBits: public BitField {}; + class RuntimeTypeInfoBits: public BitField {}; + + Major MajorKey() { return GenericBinaryOp; } + int MinorKey() { + // Encode the parameters in a unique 18 bit value. 
+ return OpBits::encode(op_) + | ModeBits::encode(mode_) + | FlagBits::encode(flags_) + | SSE3Bits::encode(use_sse3_) + | ArgsInRegistersBits::encode(args_in_registers_) + | ArgsReversedBits::encode(args_reversed_) + | StaticTypeInfoBits::encode( + static_operands_type_.ThreeBitRepresentation()) + | RuntimeTypeInfoBits::encode(runtime_operands_type_); + } + + void Generate(MacroAssembler* masm); + void GenerateSmiCode(MacroAssembler* masm, Label* slow); + void GenerateLoadArguments(MacroAssembler* masm); + void GenerateReturn(MacroAssembler* masm); + void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure); + void GenerateRegisterArgsPush(MacroAssembler* masm); + void GenerateTypeTransition(MacroAssembler* masm); + + bool ArgsInRegistersSupported() { + return op_ == Token::ADD || op_ == Token::SUB + || op_ == Token::MUL || op_ == Token::DIV; + } + bool IsOperationCommutative() { + return (op_ == Token::ADD) || (op_ == Token::MUL); + } + + void SetArgsInRegisters() { args_in_registers_ = true; } + void SetArgsReversed() { args_reversed_ = true; } + bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; } + bool HasArgsInRegisters() { return args_in_registers_; } + bool HasArgsReversed() { return args_reversed_; } + + bool ShouldGenerateSmiCode() { + return HasSmiCodeInStub() && + runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && + runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + bool ShouldGenerateFPCode() { + return runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + virtual int GetCodeKind() { return Code::BINARY_OP_IC; } + + virtual InlineCacheState GetICState() { + return BinaryOpIC::ToState(runtime_operands_type_); + } +}; + + +class StringHelper : public AllStatic { + public: + // Generate code for copying characters using a simple loop. This should only + // be used in places where the number of characters is small and the + // additional setup and checking in GenerateCopyCharactersREP adds too much + // overhead. Copying of overlapping regions is not supported. + static void GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + bool ascii); + + // Generate code for copying characters using the rep movs instruction. + // Copies ecx characters from esi to edi. Copying of overlapping regions is + // not supported. + static void GenerateCopyCharactersREP(MacroAssembler* masm, + Register dest, // Must be edi. + Register src, // Must be esi. + Register count, // Must be ecx. + Register scratch, // Neither of above. + bool ascii); + + // Probe the symbol table for a two character string. If the string is + // not found by probing a jump to the label not_found is performed. This jump + // does not guarantee that the string is not in the symbol table. If the + // string is found the code falls through with the string in register eax. + static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + Register c1, + Register c2, + Register scratch1, + Register scratch2, + Register scratch3, + Label* not_found); + + // Generate string hash. 
+ static void GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character, + Register scratch); + static void GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character, + Register scratch); + static void GenerateHashGetHash(MacroAssembler* masm, + Register hash, + Register scratch); + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); +}; + + +// Flag that indicates how to generate code for the stub StringAddStub. +enum StringAddFlags { + NO_STRING_ADD_FLAGS = 0, + NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. +}; + + +class StringAddStub: public CodeStub { + public: + explicit StringAddStub(StringAddFlags flags) { + string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); + } + + private: + Major MajorKey() { return StringAdd; } + int MinorKey() { return string_check_ ? 0 : 1; } + + void Generate(MacroAssembler* masm); + + // Should the stub check whether arguments are strings? + bool string_check_; +}; + + +class SubStringStub: public CodeStub { + public: + SubStringStub() {} + + private: + Major MajorKey() { return SubString; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); +}; + + +class StringCompareStub: public CodeStub { + public: + explicit StringCompareStub() { + } + + // Compare two flat ascii strings and returns result in eax after popping two + // arguments from the stack. + static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3); + + private: + Major MajorKey() { return StringCompare; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); +}; + + +class NumberToStringStub: public CodeStub { + public: + NumberToStringStub() { } + + // Generate code to do a lookup in the number string cache. If the number in + // the register object is found in the cache the generated code falls through + // with the result in the result register. The object and the result register + // can be the same. If the number is not found in the cache the code jumps to + // the label not_found with only the content of register object unchanged. + static void GenerateLookupNumberStringCache(MacroAssembler* masm, + Register object, + Register result, + Register scratch1, + Register scratch2, + bool object_is_smi, + Label* not_found); + + private: + Major MajorKey() { return NumberToString; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); + + const char* GetName() { return "NumberToStringStub"; } + +#ifdef DEBUG + void Print() { + PrintF("NumberToStringStub\n"); + } +#endif +}; + + +} } // namespace v8::internal + +#endif // V8_IA32_CODE_STUBS_IA32_H_ diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc index 922998d..39208b8 100644 --- a/src/ia32/codegen-ia32.cc +++ b/src/ia32/codegen-ia32.cc @@ -30,6 +30,7 @@ #if defined(V8_TARGET_ARCH_IA32) #include "bootstrapper.h" +#include "code-stubs-ia32.h" #include "codegen-inl.h" #include "compiler.h" #include "debug.h" @@ -934,97 +935,6 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) { } -class FloatingPointHelper : public AllStatic { - public: - - enum ArgLocation { - ARGS_ON_STACK, - ARGS_IN_REGISTERS - }; - - // Code pattern for loading a floating point value. Input value must - // be either a smi or a heap number object (fp value). Requirements: - // operand in register number. Returns operand as floating point number - // on FPU stack. 
- static void LoadFloatOperand(MacroAssembler* masm, Register number); - - // Code pattern for loading floating point values. Input values must - // be either smi or heap number objects (fp values). Requirements: - // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax. - // Returns operands as floating point numbers on FPU stack. - static void LoadFloatOperands(MacroAssembler* masm, - Register scratch, - ArgLocation arg_location = ARGS_ON_STACK); - - // Similar to LoadFloatOperand but assumes that both operands are smis. - // Expects operands in edx, eax. - static void LoadFloatSmis(MacroAssembler* masm, Register scratch); - - // Test if operands are smi or number objects (fp). Requirements: - // operand_1 in eax, operand_2 in edx; falls through on float - // operands, jumps to the non_float label otherwise. - static void CheckFloatOperands(MacroAssembler* masm, - Label* non_float, - Register scratch); - - // Takes the operands in edx and eax and loads them as integers in eax - // and ecx. - static void LoadAsIntegers(MacroAssembler* masm, - TypeInfo type_info, - bool use_sse3, - Label* operand_conversion_failure); - static void LoadNumbersAsIntegers(MacroAssembler* masm, - TypeInfo type_info, - bool use_sse3, - Label* operand_conversion_failure); - static void LoadUnknownsAsIntegers(MacroAssembler* masm, - bool use_sse3, - Label* operand_conversion_failure); - - // Test if operands are smis or heap numbers and load them - // into xmm0 and xmm1 if they are. Operands are in edx and eax. - // Leaves operands unchanged. - static void LoadSSE2Operands(MacroAssembler* masm); - - // Test if operands are numbers (smi or HeapNumber objects), and load - // them into xmm0 and xmm1 if they are. Jump to label not_numbers if - // either operand is not a number. Operands are in edx and eax. - // Leaves operands unchanged. - static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers); - - // Similar to LoadSSE2Operands but assumes that both operands are smis. - // Expects operands in edx, eax. - static void LoadSSE2Smis(MacroAssembler* masm, Register scratch); -}; - - -const char* GenericBinaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); - if (name_ == NULL) return "OOM"; - const char* op_name = Token::Name(op_); - const char* overwrite_name; - switch (mode_) { - case NO_OVERWRITE: overwrite_name = "Alloc"; break; - case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; - case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; - default: overwrite_name = "UnknownOverwrite"; break; - } - - OS::SNPrintF(Vector(name_, kMaxNameLength), - "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s", - op_name, - overwrite_name, - (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", - args_in_registers_ ? "RegArgs" : "StackArgs", - args_reversed_ ? "_R" : "", - static_operands_type_.ToString(), - BinaryOpIC::GetName(runtime_operands_type_)); - return name_; -} - - // Perform or call the specialized stub for a binary operation. Requires the // three registers left, right and dst to be distinct and spilled. This // deferred operation has up to three entry points: The main one calls the @@ -9880,352 +9790,6 @@ void Reference::SetValue(InitState init_state) { } -void FastNewClosureStub::Generate(MacroAssembler* masm) { - // Create a new closure from the given function info in new - // space. Set the context to the current context in esi. 
- Label gc; - __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT); - - // Get the function info from the stack. - __ mov(edx, Operand(esp, 1 * kPointerSize)); - - // Compute the function map in the current global context and set that - // as the map of the allocated object. - __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset)); - __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); - __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx); - - // Initialize the rest of the function. We don't have to update the - // write barrier because the allocated object is in new space. - __ mov(ebx, Immediate(Factory::empty_fixed_array())); - __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx); - __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx); - __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset), - Immediate(Factory::the_hole_value())); - __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx); - __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi); - __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx); - - // Initialize the code pointer in the function to be the one - // found in the shared function info object. - __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); - __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); - __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx); - - // Return and remove the on-stack parameter. - __ ret(1 * kPointerSize); - - // Create a new closure through the slower runtime call. - __ bind(&gc); - __ pop(ecx); // Temporarily remove return address. - __ pop(edx); - __ push(esi); - __ push(edx); - __ push(ecx); // Restore return address. - __ TailCallRuntime(Runtime::kNewClosure, 2, 1); -} - - -void FastNewContextStub::Generate(MacroAssembler* masm) { - // Try to allocate the context in new space. - Label gc; - int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, - eax, ebx, ecx, &gc, TAG_OBJECT); - - // Get the function from the stack. - __ mov(ecx, Operand(esp, 1 * kPointerSize)); - - // Setup the object header. - __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map()); - __ mov(FieldOperand(eax, Context::kLengthOffset), - Immediate(Smi::FromInt(length))); - - // Setup the fixed slots. - __ xor_(ebx, Operand(ebx)); // Set to NULL. - __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx); - __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax); - __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx); - __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx); - - // Copy the global object from the surrounding context. We go through the - // context in the function (ecx) to match the allocation behavior we have - // in the runtime system (see Heap::AllocateFunctionContext). - __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset)); - __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx); - - // Initialize the rest of the slots to undefined. - __ mov(ebx, Factory::undefined_value()); - for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { - __ mov(Operand(eax, Context::SlotOffset(i)), ebx); - } - - // Return and remove the on-stack parameter. 
- __ mov(esi, Operand(eax)); - __ ret(1 * kPointerSize); - - // Need to collect. Call into runtime system. - __ bind(&gc); - __ TailCallRuntime(Runtime::kNewContext, 1, 1); -} - - -void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { - // Stack layout on entry: - // - // [esp + kPointerSize]: constant elements. - // [esp + (2 * kPointerSize)]: literal index. - // [esp + (3 * kPointerSize)]: literals array. - - // All sizes here are multiples of kPointerSize. - int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0; - int size = JSArray::kSize + elements_size; - - // Load boilerplate object into ecx and check if we need to create a - // boilerplate. - Label slow_case; - __ mov(ecx, Operand(esp, 3 * kPointerSize)); - __ mov(eax, Operand(esp, 2 * kPointerSize)); - STATIC_ASSERT(kPointerSize == 4); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); - __ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax)); - __ cmp(ecx, Factory::undefined_value()); - __ j(equal, &slow_case); - - if (FLAG_debug_code) { - const char* message; - Handle expected_map; - if (mode_ == CLONE_ELEMENTS) { - message = "Expected (writable) fixed array"; - expected_map = Factory::fixed_array_map(); - } else { - ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS); - message = "Expected copy-on-write fixed array"; - expected_map = Factory::fixed_cow_array_map(); - } - __ push(ecx); - __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); - __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map); - __ Assert(equal, message); - __ pop(ecx); - } - - // Allocate both the JS array and the elements array in one big - // allocation. This avoids multiple limit checks. - __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT); - - // Copy the JS array part. - for (int i = 0; i < JSArray::kSize; i += kPointerSize) { - if ((i != JSArray::kElementsOffset) || (length_ == 0)) { - __ mov(ebx, FieldOperand(ecx, i)); - __ mov(FieldOperand(eax, i), ebx); - } - } - - if (length_ > 0) { - // Get hold of the elements array of the boilerplate and setup the - // elements pointer in the resulting object. - __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); - __ lea(edx, Operand(eax, JSArray::kSize)); - __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx); - - // Copy the elements array. - for (int i = 0; i < elements_size; i += kPointerSize) { - __ mov(ebx, FieldOperand(ecx, i)); - __ mov(FieldOperand(edx, i), ebx); - } - } - - // Return and remove the on-stack parameters. - __ ret(3 * kPointerSize); - - __ bind(&slow_case); - __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); -} - - -// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined). -void ToBooleanStub::Generate(MacroAssembler* masm) { - Label false_result, true_result, not_string; - __ mov(eax, Operand(esp, 1 * kPointerSize)); - - // 'null' => false. - __ cmp(eax, Factory::null_value()); - __ j(equal, &false_result); - - // Get the map and type of the heap object. - __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset)); - - // Undetectable => false. - __ test_b(FieldOperand(edx, Map::kBitFieldOffset), - 1 << Map::kIsUndetectable); - __ j(not_zero, &false_result); - - // JavaScript object => true. - __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE); - __ j(above_equal, &true_result); - - // String value => false iff empty. 
- __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE); - __ j(above_equal, ¬_string); - STATIC_ASSERT(kSmiTag == 0); - __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0)); - __ j(zero, &false_result); - __ jmp(&true_result); - - __ bind(¬_string); - // HeapNumber => false iff +0, -0, or NaN. - __ cmp(edx, Factory::heap_number_map()); - __ j(not_equal, &true_result); - __ fldz(); - __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ FCmp(); - __ j(zero, &false_result); - // Fall through to |true_result|. - - // Return 1/0 for true/false in eax. - __ bind(&true_result); - __ mov(eax, 1); - __ ret(1 * kPointerSize); - __ bind(&false_result); - __ mov(eax, 0); - __ ret(1 * kPointerSize); -} - - -void GenericBinaryOpStub::GenerateCall( - MacroAssembler* masm, - Register left, - Register right) { - if (!ArgsInRegistersSupported()) { - // Pass arguments on the stack. - __ push(left); - __ push(right); - } else { - // The calling convention with registers is left in edx and right in eax. - Register left_arg = edx; - Register right_arg = eax; - if (!(left.is(left_arg) && right.is(right_arg))) { - if (left.is(right_arg) && right.is(left_arg)) { - if (IsOperationCommutative()) { - SetArgsReversed(); - } else { - __ xchg(left, right); - } - } else if (left.is(left_arg)) { - __ mov(right_arg, right); - } else if (right.is(right_arg)) { - __ mov(left_arg, left); - } else if (left.is(right_arg)) { - if (IsOperationCommutative()) { - __ mov(left_arg, right); - SetArgsReversed(); - } else { - // Order of moves important to avoid destroying left argument. - __ mov(left_arg, left); - __ mov(right_arg, right); - } - } else if (right.is(left_arg)) { - if (IsOperationCommutative()) { - __ mov(right_arg, left); - SetArgsReversed(); - } else { - // Order of moves important to avoid destroying right argument. - __ mov(right_arg, right); - __ mov(left_arg, left); - } - } else { - // Order of moves is not important. - __ mov(left_arg, left); - __ mov(right_arg, right); - } - } - - // Update flags to indicate that arguments are in registers. - SetArgsInRegisters(); - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); - } - - // Call the stub. - __ CallStub(this); -} - - -void GenericBinaryOpStub::GenerateCall( - MacroAssembler* masm, - Register left, - Smi* right) { - if (!ArgsInRegistersSupported()) { - // Pass arguments on the stack. - __ push(left); - __ push(Immediate(right)); - } else { - // The calling convention with registers is left in edx and right in eax. - Register left_arg = edx; - Register right_arg = eax; - if (left.is(left_arg)) { - __ mov(right_arg, Immediate(right)); - } else if (left.is(right_arg) && IsOperationCommutative()) { - __ mov(left_arg, Immediate(right)); - SetArgsReversed(); - } else { - // For non-commutative operations, left and right_arg might be - // the same register. Therefore, the order of the moves is - // important here in order to not overwrite left before moving - // it to left_arg. - __ mov(left_arg, left); - __ mov(right_arg, Immediate(right)); - } - - // Update flags to indicate that arguments are in registers. - SetArgsInRegisters(); - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); - } - - // Call the stub. - __ CallStub(this); -} - - -void GenericBinaryOpStub::GenerateCall( - MacroAssembler* masm, - Smi* left, - Register right) { - if (!ArgsInRegistersSupported()) { - // Pass arguments on the stack. 
- __ push(Immediate(left)); - __ push(right); - } else { - // The calling convention with registers is left in edx and right in eax. - Register left_arg = edx; - Register right_arg = eax; - if (right.is(right_arg)) { - __ mov(left_arg, Immediate(left)); - } else if (right.is(left_arg) && IsOperationCommutative()) { - __ mov(right_arg, Immediate(left)); - SetArgsReversed(); - } else { - // For non-commutative operations, right and left_arg might be - // the same register. Therefore, the order of the moves is - // important here in order to not overwrite right before moving - // it to right_arg. - __ mov(right_arg, right); - __ mov(left_arg, Immediate(left)); - } - // Update flags to indicate that arguments are in registers. - SetArgsInRegisters(); - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); - } - - // Call the stub. - __ CallStub(this); -} - - Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm, VirtualFrame* frame, Result* left, @@ -10241,4064 +9805,6 @@ Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm, } -void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { - // 1. Move arguments into edx, eax except for DIV and MOD, which need the - // dividend in eax and edx free for the division. Use eax, ebx for those. - Comment load_comment(masm, "-- Load arguments"); - Register left = edx; - Register right = eax; - if (op_ == Token::DIV || op_ == Token::MOD) { - left = eax; - right = ebx; - if (HasArgsInRegisters()) { - __ mov(ebx, eax); - __ mov(eax, edx); - } - } - if (!HasArgsInRegisters()) { - __ mov(right, Operand(esp, 1 * kPointerSize)); - __ mov(left, Operand(esp, 2 * kPointerSize)); - } - - if (static_operands_type_.IsSmi()) { - if (FLAG_debug_code) { - __ AbortIfNotSmi(left); - __ AbortIfNotSmi(right); - } - if (op_ == Token::BIT_OR) { - __ or_(right, Operand(left)); - GenerateReturn(masm); - return; - } else if (op_ == Token::BIT_AND) { - __ and_(right, Operand(left)); - GenerateReturn(masm); - return; - } else if (op_ == Token::BIT_XOR) { - __ xor_(right, Operand(left)); - GenerateReturn(masm); - return; - } - } - - // 2. Prepare the smi check of both operands by oring them together. - Comment smi_check_comment(masm, "-- Smi check arguments"); - Label not_smis; - Register combined = ecx; - ASSERT(!left.is(combined) && !right.is(combined)); - switch (op_) { - case Token::BIT_OR: - // Perform the operation into eax and smi check the result. Preserve - // eax in case the result is not a smi. - ASSERT(!left.is(ecx) && !right.is(ecx)); - __ mov(ecx, right); - __ or_(right, Operand(left)); // Bitwise or is commutative. - combined = right; - break; - - case Token::BIT_XOR: - case Token::BIT_AND: - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - case Token::MOD: - __ mov(combined, right); - __ or_(combined, Operand(left)); - break; - - case Token::SHL: - case Token::SAR: - case Token::SHR: - // Move the right operand into ecx for the shift operation, use eax - // for the smi check register. - ASSERT(!left.is(ecx) && !right.is(ecx)); - __ mov(ecx, right); - __ or_(right, Operand(left)); - combined = right; - break; - - default: - break; - } - - // 3. Perform the smi check of the operands. - STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case. - __ test(combined, Immediate(kSmiTagMask)); - __ j(not_zero, ¬_smis, not_taken); - - // 4. Operands are both smis, perform the operation leaving the result in - // eax and check the result if necessary. 
- Comment perform_smi(masm, "-- Perform smi operation"); - Label use_fp_on_smis; - switch (op_) { - case Token::BIT_OR: - // Nothing to do. - break; - - case Token::BIT_XOR: - ASSERT(right.is(eax)); - __ xor_(right, Operand(left)); // Bitwise xor is commutative. - break; - - case Token::BIT_AND: - ASSERT(right.is(eax)); - __ and_(right, Operand(left)); // Bitwise and is commutative. - break; - - case Token::SHL: - // Remove tags from operands (but keep sign). - __ SmiUntag(left); - __ SmiUntag(ecx); - // Perform the operation. - __ shl_cl(left); - // Check that the *signed* result fits in a smi. - __ cmp(left, 0xc0000000); - __ j(sign, &use_fp_on_smis, not_taken); - // Tag the result and store it in register eax. - __ SmiTag(left); - __ mov(eax, left); - break; - - case Token::SAR: - // Remove tags from operands (but keep sign). - __ SmiUntag(left); - __ SmiUntag(ecx); - // Perform the operation. - __ sar_cl(left); - // Tag the result and store it in register eax. - __ SmiTag(left); - __ mov(eax, left); - break; - - case Token::SHR: - // Remove tags from operands (but keep sign). - __ SmiUntag(left); - __ SmiUntag(ecx); - // Perform the operation. - __ shr_cl(left); - // Check that the *unsigned* result fits in a smi. - // Neither of the two high-order bits can be set: - // - 0x80000000: high bit would be lost when smi tagging. - // - 0x40000000: this number would convert to negative when - // Smi tagging these two cases can only happen with shifts - // by 0 or 1 when handed a valid smi. - __ test(left, Immediate(0xc0000000)); - __ j(not_zero, slow, not_taken); - // Tag the result and store it in register eax. - __ SmiTag(left); - __ mov(eax, left); - break; - - case Token::ADD: - ASSERT(right.is(eax)); - __ add(right, Operand(left)); // Addition is commutative. - __ j(overflow, &use_fp_on_smis, not_taken); - break; - - case Token::SUB: - __ sub(left, Operand(right)); - __ j(overflow, &use_fp_on_smis, not_taken); - __ mov(eax, left); - break; - - case Token::MUL: - // If the smi tag is 0 we can just leave the tag on one operand. - STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case. - // We can't revert the multiplication if the result is not a smi - // so save the right operand. - __ mov(ebx, right); - // Remove tag from one of the operands (but keep sign). - __ SmiUntag(right); - // Do multiplication. - __ imul(right, Operand(left)); // Multiplication is commutative. - __ j(overflow, &use_fp_on_smis, not_taken); - // Check for negative zero result. Use combined = left | right. - __ NegativeZeroTest(right, combined, &use_fp_on_smis); - break; - - case Token::DIV: - // We can't revert the division if the result is not a smi so - // save the left operand. - __ mov(edi, left); - // Check for 0 divisor. - __ test(right, Operand(right)); - __ j(zero, &use_fp_on_smis, not_taken); - // Sign extend left into edx:eax. - ASSERT(left.is(eax)); - __ cdq(); - // Divide edx:eax by right. - __ idiv(right); - // Check for the corner case of dividing the most negative smi by - // -1. We cannot use the overflow flag, since it is not set by idiv - // instruction. - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); - __ cmp(eax, 0x40000000); - __ j(equal, &use_fp_on_smis); - // Check for negative zero result. Use combined = left | right. - __ NegativeZeroTest(eax, combined, &use_fp_on_smis); - // Check that the remainder is zero. - __ test(edx, Operand(edx)); - __ j(not_zero, &use_fp_on_smis); - // Tag the result and store it in register eax. 
- __ SmiTag(eax); - break; - - case Token::MOD: - // Check for 0 divisor. - __ test(right, Operand(right)); - __ j(zero, ¬_smis, not_taken); - - // Sign extend left into edx:eax. - ASSERT(left.is(eax)); - __ cdq(); - // Divide edx:eax by right. - __ idiv(right); - // Check for negative zero result. Use combined = left | right. - __ NegativeZeroTest(edx, combined, slow); - // Move remainder to register eax. - __ mov(eax, edx); - break; - - default: - UNREACHABLE(); - } - - // 5. Emit return of result in eax. - GenerateReturn(masm); - - // 6. For some operations emit inline code to perform floating point - // operations on known smis (e.g., if the result of the operation - // overflowed the smi range). - switch (op_) { - case Token::SHL: { - Comment perform_float(masm, "-- Perform float operation on smis"); - __ bind(&use_fp_on_smis); - // Result we want is in left == edx, so we can put the allocated heap - // number in eax. - __ AllocateHeapNumber(eax, ecx, ebx, slow); - // Store the result in the HeapNumber and return. - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(left)); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - } else { - // It's OK to overwrite the right argument on the stack because we - // are about to return. - __ mov(Operand(esp, 1 * kPointerSize), left); - __ fild_s(Operand(esp, 1 * kPointerSize)); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - } - GenerateReturn(masm); - break; - } - - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: { - Comment perform_float(masm, "-- Perform float operation on smis"); - __ bind(&use_fp_on_smis); - // Restore arguments to edx, eax. - switch (op_) { - case Token::ADD: - // Revert right = right + left. - __ sub(right, Operand(left)); - break; - case Token::SUB: - // Revert left = left - right. - __ add(left, Operand(right)); - break; - case Token::MUL: - // Right was clobbered but a copy is in ebx. - __ mov(right, ebx); - break; - case Token::DIV: - // Left was clobbered but a copy is in edi. Right is in ebx for - // division. - __ mov(edx, edi); - __ mov(eax, right); - break; - default: UNREACHABLE(); - break; - } - __ AllocateHeapNumber(ecx, ebx, no_reg, slow); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - FloatingPointHelper::LoadSSE2Smis(masm, ebx); - switch (op_) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0); - } else { // SSE2 not available, use FPU. - FloatingPointHelper::LoadFloatSmis(masm, ebx); - switch (op_) { - case Token::ADD: __ faddp(1); break; - case Token::SUB: __ fsubp(1); break; - case Token::MUL: __ fmulp(1); break; - case Token::DIV: __ fdivp(1); break; - default: UNREACHABLE(); - } - __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset)); - } - __ mov(eax, ecx); - GenerateReturn(masm); - break; - } - - default: - break; - } - - // 7. Non-smi operands, fall out to the non-smi code with the operands in - // edx and eax. - Comment done_comment(masm, "-- Enter non-smi code"); - __ bind(¬_smis); - switch (op_) { - case Token::BIT_OR: - case Token::SHL: - case Token::SAR: - case Token::SHR: - // Right operand is saved in ecx and eax was destroyed by the smi - // check. 
- __ mov(eax, ecx); - break; - - case Token::DIV: - case Token::MOD: - // Operands are in eax, ebx at this point. - __ mov(edx, eax); - __ mov(eax, ebx); - break; - - default: - break; - } -} - - -void GenericBinaryOpStub::Generate(MacroAssembler* masm) { - Label call_runtime; - - __ IncrementCounter(&Counters::generic_binary_stub_calls, 1); - - // Generate fast case smi code if requested. This flag is set when the fast - // case smi code is not generated by the caller. Generating it here will speed - // up common operations. - if (ShouldGenerateSmiCode()) { - GenerateSmiCode(masm, &call_runtime); - } else if (op_ != Token::MOD) { // MOD goes straight to runtime. - if (!HasArgsInRegisters()) { - GenerateLoadArguments(masm); - } - } - - // Floating point case. - if (ShouldGenerateFPCode()) { - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: { - if (runtime_operands_type_ == BinaryOpIC::DEFAULT && - HasSmiCodeInStub()) { - // Execution reaches this point when the first non-smi argument occurs - // (and only if smi code is generated). This is the right moment to - // patch to HEAP_NUMBERS state. The transition is attempted only for - // the four basic operations. The stub stays in the DEFAULT state - // forever for all other operations (also if smi code is skipped). - GenerateTypeTransition(masm); - break; - } - - Label not_floats; - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - if (static_operands_type_.IsNumber()) { - if (FLAG_debug_code) { - // Assert at runtime that inputs are only numbers. - __ AbortIfNotNumber(edx); - __ AbortIfNotNumber(eax); - } - if (static_operands_type_.IsSmi()) { - if (FLAG_debug_code) { - __ AbortIfNotSmi(edx); - __ AbortIfNotSmi(eax); - } - FloatingPointHelper::LoadSSE2Smis(masm, ecx); - } else { - FloatingPointHelper::LoadSSE2Operands(masm); - } - } else { - FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime); - } - - switch (op_) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - GenerateHeapResultAllocation(masm, &call_runtime); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - GenerateReturn(masm); - } else { // SSE2 not available, use FPU. - if (static_operands_type_.IsNumber()) { - if (FLAG_debug_code) { - // Assert at runtime that inputs are only numbers. - __ AbortIfNotNumber(edx); - __ AbortIfNotNumber(eax); - } - } else { - FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx); - } - FloatingPointHelper::LoadFloatOperands( - masm, - ecx, - FloatingPointHelper::ARGS_IN_REGISTERS); - switch (op_) { - case Token::ADD: __ faddp(1); break; - case Token::SUB: __ fsubp(1); break; - case Token::MUL: __ fmulp(1); break; - case Token::DIV: __ fdivp(1); break; - default: UNREACHABLE(); - } - Label after_alloc_failure; - GenerateHeapResultAllocation(masm, &after_alloc_failure); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - GenerateReturn(masm); - __ bind(&after_alloc_failure); - __ ffree(); - __ jmp(&call_runtime); - } - __ bind(¬_floats); - if (runtime_operands_type_ == BinaryOpIC::DEFAULT && - !HasSmiCodeInStub()) { - // Execution reaches this point when the first non-number argument - // occurs (and only if smi code is skipped from the stub, otherwise - // the patching has already been done earlier in this case branch). - // Try patching to STRINGS for ADD operation. 
- if (op_ == Token::ADD) { - GenerateTypeTransition(masm); - } - } - break; - } - case Token::MOD: { - // For MOD we go directly to runtime in the non-smi case. - break; - } - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: { - Label non_smi_result; - FloatingPointHelper::LoadAsIntegers(masm, - static_operands_type_, - use_sse3_, - &call_runtime); - switch (op_) { - case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; - case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; - case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; - case Token::SAR: __ sar_cl(eax); break; - case Token::SHL: __ shl_cl(eax); break; - case Token::SHR: __ shr_cl(eax); break; - default: UNREACHABLE(); - } - if (op_ == Token::SHR) { - // Check if result is non-negative and fits in a smi. - __ test(eax, Immediate(0xc0000000)); - __ j(not_zero, &call_runtime); - } else { - // Check if result fits in a smi. - __ cmp(eax, 0xc0000000); - __ j(negative, &non_smi_result); - } - // Tag smi result and return. - __ SmiTag(eax); - GenerateReturn(masm); - - // All ops except SHR return a signed int32 that we load in - // a HeapNumber. - if (op_ != Token::SHR) { - __ bind(&non_smi_result); - // Allocate a heap number if needed. - __ mov(ebx, Operand(eax)); // ebx: result - Label skip_allocation; - switch (mode_) { - case OVERWRITE_LEFT: - case OVERWRITE_RIGHT: - // If the operand was an object, we skip the - // allocation of a heap number. - __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? - 1 * kPointerSize : 2 * kPointerSize)); - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &skip_allocation, not_taken); - // Fall through! - case NO_OVERWRITE: - __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); - __ bind(&skip_allocation); - break; - default: UNREACHABLE(); - } - // Store the result in the HeapNumber and return. - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(ebx)); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - } else { - __ mov(Operand(esp, 1 * kPointerSize), ebx); - __ fild_s(Operand(esp, 1 * kPointerSize)); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - } - GenerateReturn(masm); - } - break; - } - default: UNREACHABLE(); break; - } - } - - // If all else fails, use the runtime system to get the correct - // result. If arguments was passed in registers now place them on the - // stack in the correct order below the return address. - __ bind(&call_runtime); - if (HasArgsInRegisters()) { - GenerateRegisterArgsPush(masm); - } - - switch (op_) { - case Token::ADD: { - // Test for string arguments before calling runtime. - Label not_strings, not_string1, string1, string1_smi2; - - // If this stub has already generated FP-specific code then the arguments - // are already in edx, eax - if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) { - GenerateLoadArguments(masm); - } - - // Registers containing left and right operands respectively. - Register lhs, rhs; - if (HasArgsReversed()) { - lhs = eax; - rhs = edx; - } else { - lhs = edx; - rhs = eax; - } - - // Test if first argument is a string. - __ test(lhs, Immediate(kSmiTagMask)); - __ j(zero, ¬_string1); - __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx); - __ j(above_equal, ¬_string1); - - // First argument is a string, test second. 
- __ test(rhs, Immediate(kSmiTagMask)); - __ j(zero, &string1_smi2); - __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx); - __ j(above_equal, &string1); - - // First and second argument are strings. Jump to the string add stub. - StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); - __ TailCallStub(&string_add_stub); - - __ bind(&string1_smi2); - // First argument is a string, second is a smi. Try to lookup the number - // string for the smi in the number string cache. - NumberToStringStub::GenerateLookupNumberStringCache( - masm, rhs, edi, ebx, ecx, true, &string1); - - // Replace second argument on stack and tailcall string add stub to make - // the result. - __ mov(Operand(esp, 1 * kPointerSize), edi); - __ TailCallStub(&string_add_stub); - - // Only first argument is a string. - __ bind(&string1); - __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION); - - // First argument was not a string, test second. - __ bind(¬_string1); - __ test(rhs, Immediate(kSmiTagMask)); - __ j(zero, ¬_strings); - __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx); - __ j(above_equal, ¬_strings); - - // Only second argument is a string. - __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION); - - __ bind(¬_strings); - // Neither argument is a string. - __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); - break; - } - case Token::SUB: - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); - break; - case Token::MUL: - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); - break; - case Token::DIV: - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); - break; - case Token::MOD: - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); - break; - case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); - break; - case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); - break; - case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); - break; - case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); - break; - case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); - break; - case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); - break; - default: - UNREACHABLE(); - } -} - - -void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, - Label* alloc_failure) { - Label skip_allocation; - OverwriteMode mode = mode_; - if (HasArgsReversed()) { - if (mode == OVERWRITE_RIGHT) { - mode = OVERWRITE_LEFT; - } else if (mode == OVERWRITE_LEFT) { - mode = OVERWRITE_RIGHT; - } - } - switch (mode) { - case OVERWRITE_LEFT: { - // If the argument in edx is already an object, we skip the - // allocation of a heap number. - __ test(edx, Immediate(kSmiTagMask)); - __ j(not_zero, &skip_allocation, not_taken); - // Allocate a heap number for the result. Keep eax and edx intact - // for the possible runtime call. - __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); - // Now edx can be overwritten losing one of the arguments as we are - // now done and will not need it any more. - __ mov(edx, Operand(ebx)); - __ bind(&skip_allocation); - // Use object in edx as a result holder - __ mov(eax, Operand(edx)); - break; - } - case OVERWRITE_RIGHT: - // If the argument in eax is already an object, we skip the - // allocation of a heap number. - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &skip_allocation, not_taken); - // Fall through! - case NO_OVERWRITE: - // Allocate a heap number for the result. Keep eax and edx intact - // for the possible runtime call. 
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); - // Now eax can be overwritten losing one of the arguments as we are - // now done and will not need it any more. - __ mov(eax, ebx); - __ bind(&skip_allocation); - break; - default: UNREACHABLE(); - } -} - - -void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { - // If arguments are not passed in registers read them from the stack. - ASSERT(!HasArgsInRegisters()); - __ mov(eax, Operand(esp, 1 * kPointerSize)); - __ mov(edx, Operand(esp, 2 * kPointerSize)); -} - - -void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { - // If arguments are not passed in registers remove them from the stack before - // returning. - if (!HasArgsInRegisters()) { - __ ret(2 * kPointerSize); // Remove both operands - } else { - __ ret(0); - } -} - - -void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { - ASSERT(HasArgsInRegisters()); - __ pop(ecx); - if (HasArgsReversed()) { - __ push(eax); - __ push(edx); - } else { - __ push(edx); - __ push(eax); - } - __ push(ecx); -} - - -void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { - // Ensure the operands are on the stack. - if (HasArgsInRegisters()) { - GenerateRegisterArgsPush(masm); - } - - __ pop(ecx); // Save return address. - - // Left and right arguments are now on top. - // Push this stub's key. Although the operation and the type info are - // encoded into the key, the encoding is opaque, so push them too. - __ push(Immediate(Smi::FromInt(MinorKey()))); - __ push(Immediate(Smi::FromInt(op_))); - __ push(Immediate(Smi::FromInt(runtime_operands_type_))); - - __ push(ecx); // Push return address. - - // Patch the caller to an appropriate specialized stub and return the - // operation result to the caller of the stub. - __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), - 5, - 1); -} - - -Handle GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { - GenericBinaryOpStub stub(key, type_info); - return stub.GetCode(); -} - - -void TranscendentalCacheStub::Generate(MacroAssembler* masm) { - // Input on stack: - // esp[4]: argument (should be number). - // esp[0]: return address. - // Test that eax is a number. - Label runtime_call; - Label runtime_call_clear_stack; - Label input_not_smi; - Label loaded; - __ mov(eax, Operand(esp, kPointerSize)); - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &input_not_smi); - // Input is a smi. Untag and load it onto the FPU stack. - // Then load the low and high words of the double into ebx, edx. - STATIC_ASSERT(kSmiTagSize == 1); - __ sar(eax, 1); - __ sub(Operand(esp), Immediate(2 * kPointerSize)); - __ mov(Operand(esp, 0), eax); - __ fild_s(Operand(esp, 0)); - __ fst_d(Operand(esp, 0)); - __ pop(edx); - __ pop(ebx); - __ jmp(&loaded); - __ bind(&input_not_smi); - // Check if input is a HeapNumber. - __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); - __ cmp(Operand(ebx), Immediate(Factory::heap_number_map())); - __ j(not_equal, &runtime_call); - // Input is a HeapNumber. Push it on the FPU stack and load its - // low and high words into ebx, edx. 
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); - __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset)); - - __ bind(&loaded); - // ST[0] == double value - // ebx = low 32 bits of double value - // edx = high 32 bits of double value - // Compute hash (the shifts are arithmetic): - // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); - __ mov(ecx, ebx); - __ xor_(ecx, Operand(edx)); - __ mov(eax, ecx); - __ sar(eax, 16); - __ xor_(ecx, Operand(eax)); - __ mov(eax, ecx); - __ sar(eax, 8); - __ xor_(ecx, Operand(eax)); - ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); - __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1)); - - // ST[0] == double value. - // ebx = low 32 bits of double value. - // edx = high 32 bits of double value. - // ecx = TranscendentalCache::hash(double value). - __ mov(eax, - Immediate(ExternalReference::transcendental_cache_array_address())); - // Eax points to cache array. - __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0]))); - // Eax points to the cache for the type type_. - // If NULL, the cache hasn't been initialized yet, so go through runtime. - __ test(eax, Operand(eax)); - __ j(zero, &runtime_call_clear_stack); -#ifdef DEBUG - // Check that the layout of cache elements match expectations. - { TranscendentalCache::Element test_elem[2]; - char* elem_start = reinterpret_cast(&test_elem[0]); - char* elem2_start = reinterpret_cast(&test_elem[1]); - char* elem_in0 = reinterpret_cast(&(test_elem[0].in[0])); - char* elem_in1 = reinterpret_cast(&(test_elem[0].in[1])); - char* elem_out = reinterpret_cast(&(test_elem[0].output)); - CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. - CHECK_EQ(0, elem_in0 - elem_start); - CHECK_EQ(kIntSize, elem_in1 - elem_start); - CHECK_EQ(2 * kIntSize, elem_out - elem_start); - } -#endif - // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12]. - __ lea(ecx, Operand(ecx, ecx, times_2, 0)); - __ lea(ecx, Operand(eax, ecx, times_4, 0)); - // Check if cache matches: Double value is stored in uint32_t[2] array. - Label cache_miss; - __ cmp(ebx, Operand(ecx, 0)); - __ j(not_equal, &cache_miss); - __ cmp(edx, Operand(ecx, kIntSize)); - __ j(not_equal, &cache_miss); - // Cache hit! - __ mov(eax, Operand(ecx, 2 * kIntSize)); - __ fstp(0); - __ ret(kPointerSize); - - __ bind(&cache_miss); - // Update cache with new value. - // We are short on registers, so use no_reg as scratch. - // This gives slightly larger code. - __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); - GenerateOperation(masm); - __ mov(Operand(ecx, 0), ebx); - __ mov(Operand(ecx, kIntSize), edx); - __ mov(Operand(ecx, 2 * kIntSize), eax); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ ret(kPointerSize); - - __ bind(&runtime_call_clear_stack); - __ fstp(0); - __ bind(&runtime_call); - __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); -} - - -Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { - switch (type_) { - // Add more cases when necessary. - case TranscendentalCache::SIN: return Runtime::kMath_sin; - case TranscendentalCache::COS: return Runtime::kMath_cos; - default: - UNIMPLEMENTED(); - return Runtime::kAbort; - } -} - - -void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { - // Only free register is edi. 
- Label done; - ASSERT(type_ == TranscendentalCache::SIN || - type_ == TranscendentalCache::COS); - // More transcendental types can be added later. - - // Both fsin and fcos require arguments in the range +/-2^63 and - // return NaN for infinities and NaN. They can share all code except - // the actual fsin/fcos operation. - Label in_range; - // If argument is outside the range -2^63..2^63, fsin/cos doesn't - // work. We must reduce it to the appropriate range. - __ mov(edi, edx); - __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only. - int supported_exponent_limit = - (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift; - __ cmp(Operand(edi), Immediate(supported_exponent_limit)); - __ j(below, &in_range, taken); - // Check for infinity and NaN. Both return NaN for sin. - __ cmp(Operand(edi), Immediate(0x7ff00000)); - Label non_nan_result; - __ j(not_equal, &non_nan_result, taken); - // Input is +/-Infinity or NaN. Result is NaN. - __ fstp(0); - // NaN is represented by 0x7ff8000000000000. - __ push(Immediate(0x7ff80000)); - __ push(Immediate(0)); - __ fld_d(Operand(esp, 0)); - __ add(Operand(esp), Immediate(2 * kPointerSize)); - __ jmp(&done); - - __ bind(&non_nan_result); - - // Use fpmod to restrict argument to the range +/-2*PI. - __ mov(edi, eax); // Save eax before using fnstsw_ax. - __ fldpi(); - __ fadd(0); - __ fld(1); - // FPU Stack: input, 2*pi, input. - { - Label no_exceptions; - __ fwait(); - __ fnstsw_ax(); - // Clear if Illegal Operand or Zero Division exceptions are set. - __ test(Operand(eax), Immediate(5)); - __ j(zero, &no_exceptions); - __ fnclex(); - __ bind(&no_exceptions); - } - - // Compute st(0) % st(1) - { - Label partial_remainder_loop; - __ bind(&partial_remainder_loop); - __ fprem1(); - __ fwait(); - __ fnstsw_ax(); - __ test(Operand(eax), Immediate(0x400 /* C2 */)); - // If C2 is set, computation only has partial result. Loop to - // continue computation. - __ j(not_zero, &partial_remainder_loop); - } - // FPU Stack: input, 2*pi, input % 2*pi - __ fstp(2); - __ fstp(0); - __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer). - - // FPU Stack: input % 2*pi - __ bind(&in_range); - switch (type_) { - case TranscendentalCache::SIN: - __ fsin(); - break; - case TranscendentalCache::COS: - __ fcos(); - break; - default: - UNREACHABLE(); - } - __ bind(&done); -} - - -// Get the integer part of a heap number. Surprisingly, all this bit twiddling -// is faster than using the built-in instructions on floating point registers. -// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the -// trashed registers. -void IntegerConvert(MacroAssembler* masm, - Register source, - TypeInfo type_info, - bool use_sse3, - Label* conversion_failure) { - ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx)); - Label done, right_exponent, normal_exponent; - Register scratch = ebx; - Register scratch2 = edi; - if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) { - CpuFeatures::Scope scope(SSE2); - __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset)); - return; - } - if (!type_info.IsInteger32() || !use_sse3) { - // Get exponent word. - __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); - // Get exponent alone in scratch2. - __ mov(scratch2, scratch); - __ and_(scratch2, HeapNumber::kExponentMask); - } - if (use_sse3) { - CpuFeatures::Scope scope(SSE3); - if (!type_info.IsInteger32()) { - // Check whether the exponent is too big for a 64 bit signed integer. 
- static const uint32_t kTooBigExponent = - (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; - __ cmp(Operand(scratch2), Immediate(kTooBigExponent)); - __ j(greater_equal, conversion_failure); - } - // Load x87 register with heap number. - __ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); - // Reserve space for 64 bit answer. - __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. - // Do conversion, which cannot fail because we checked the exponent. - __ fisttp_d(Operand(esp, 0)); - __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. - __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. - } else { - // Load ecx with zero. We use this either for the final shift or - // for the answer. - __ xor_(ecx, Operand(ecx)); - // Check whether the exponent matches a 32 bit signed int that cannot be - // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the - // exponent is 30 (biased). This is the exponent that we are fastest at and - // also the highest exponent we can handle here. - const uint32_t non_smi_exponent = - (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; - __ cmp(Operand(scratch2), Immediate(non_smi_exponent)); - // If we have a match of the int32-but-not-Smi exponent then skip some - // logic. - __ j(equal, &right_exponent); - // If the exponent is higher than that then go to slow case. This catches - // numbers that don't fit in a signed int32, infinities and NaNs. - __ j(less, &normal_exponent); - - { - // Handle a big exponent. The only reason we have this code is that the - // >>> operator has a tendency to generate numbers with an exponent of 31. - const uint32_t big_non_smi_exponent = - (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; - __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent)); - __ j(not_equal, conversion_failure); - // We have the big exponent, typically from >>>. This means the number is - // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. - __ mov(scratch2, scratch); - __ and_(scratch2, HeapNumber::kMantissaMask); - // Put back the implicit 1. - __ or_(scratch2, 1 << HeapNumber::kExponentShift); - // Shift up the mantissa bits to take up the space the exponent used to - // take. We just orred in the implicit bit so that took care of one and - // we want to use the full unsigned range so we subtract 1 bit from the - // shift distance. - const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1; - __ shl(scratch2, big_shift_distance); - // Get the second half of the double. - __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset)); - // Shift down 21 bits to get the most significant 11 bits or the low - // mantissa word. - __ shr(ecx, 32 - big_shift_distance); - __ or_(ecx, Operand(scratch2)); - // We have the answer in ecx, but we may need to negate it. - __ test(scratch, Operand(scratch)); - __ j(positive, &done); - __ neg(ecx); - __ jmp(&done); - } - - __ bind(&normal_exponent); - // Exponent word in scratch, exponent part of exponent word in scratch2. - // Zero in ecx. - // We know the exponent is smaller than 30 (biased). If it is less than - // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie - // it rounds to zero. - const uint32_t zero_exponent = - (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; - __ sub(Operand(scratch2), Immediate(zero_exponent)); - // ecx already has a Smi zero. - __ j(less, &done); - - // We have a shifted exponent between 0 and 30 in scratch2. 
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ mov(ecx, Immediate(30));
- __ sub(ecx, Operand(scratch2));
-
- __ bind(&right_exponent);
- // Here ecx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We have kExponentShift + 1 significant bits in the low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, shift_distance);
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits or the low
- // mantissa word.
- __ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, Operand(scratch));
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to ecx and
- // we may need to fix the sign.
- Label negative;
- __ xor_(ecx, Operand(ecx));
- __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative);
- __ mov(ecx, scratch2);
- __ jmp(&done);
- __ bind(&negative);
- __ sub(ecx, Operand(scratch2));
- __ bind(&done);
- }
-}
-
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- if (!type_info.IsDouble()) {
- if (!type_info.IsSmi()) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(edx);
- }
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
- }
-
- __ bind(&arg1_is_object);
-
- // Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
- if (!type_info.IsDouble()) {
- // Test if arg2 is a Smi.
- if (!type_info.IsSmi()) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(eax);
- }
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
- }
-
- __ bind(&arg2_is_object);
-
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- // Test if arg1 is a Smi.
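For reference, the non-SSE3 truncation path of IntegerConvert above corresponds roughly to the following C++ sketch. The helper name is illustrative, and it only covers doubles whose magnitude is below 2^32; larger exponents take the conversion_failure path in the stub.

#include <cstdint>
#include <cstring>

// Recover the integer part of a small double straight from its IEEE-754
// bit pattern: unbias the exponent, reattach the implicit mantissa bit,
// shift the mantissa into place, then negate if the sign bit was set.
static int32_t TruncateSmallDoubleBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0) return 0;  // |value| < 1 truncates to zero.
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
  // The stub uses a neg instruction; 0u - magnitude wraps the same way.
  uint32_t result = (bits >> 63) ? 0u - magnitude : magnitude;
  return static_cast<int32_t>(result);
}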
- __ test(edx, Immediate(kSmiTagMask)); - __ j(not_zero, &arg1_is_object); - - __ SmiUntag(edx); - __ jmp(&load_arg2); - - // If the argument is undefined it converts to zero (ECMA-262, section 9.5). - __ bind(&check_undefined_arg1); - __ cmp(edx, Factory::undefined_value()); - __ j(not_equal, conversion_failure); - __ mov(edx, Immediate(0)); - __ jmp(&load_arg2); - - __ bind(&arg1_is_object); - __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); - __ cmp(ebx, Factory::heap_number_map()); - __ j(not_equal, &check_undefined_arg1); - - // Get the untagged integer version of the edx heap number in ecx. - IntegerConvert(masm, - edx, - TypeInfo::Unknown(), - use_sse3, - conversion_failure); - __ mov(edx, ecx); - - // Here edx has the untagged integer, eax has a Smi or a heap number. - __ bind(&load_arg2); - - // Test if arg2 is a Smi. - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &arg2_is_object); - - __ SmiUntag(eax); - __ mov(ecx, eax); - __ jmp(&done); - - // If the argument is undefined it converts to zero (ECMA-262, section 9.5). - __ bind(&check_undefined_arg2); - __ cmp(eax, Factory::undefined_value()); - __ j(not_equal, conversion_failure); - __ mov(ecx, Immediate(0)); - __ jmp(&done); - - __ bind(&arg2_is_object); - __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); - __ cmp(ebx, Factory::heap_number_map()); - __ j(not_equal, &check_undefined_arg2); - - // Get the untagged integer version of the eax heap number in ecx. - IntegerConvert(masm, - eax, - TypeInfo::Unknown(), - use_sse3, - conversion_failure); - __ bind(&done); - __ mov(eax, edx); -} - - -void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, - TypeInfo type_info, - bool use_sse3, - Label* conversion_failure) { - if (type_info.IsNumber()) { - LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure); - } else { - LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure); - } -} - - -void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, - Register number) { - Label load_smi, done; - - __ test(number, Immediate(kSmiTagMask)); - __ j(zero, &load_smi, not_taken); - __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); - __ jmp(&done); - - __ bind(&load_smi); - __ SmiUntag(number); - __ push(number); - __ fild_s(Operand(esp, 0)); - __ pop(number); - - __ bind(&done); -} - - -void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) { - Label load_smi_edx, load_eax, load_smi_eax, done; - // Load operand in edx into xmm0. - __ test(edx, Immediate(kSmiTagMask)); - __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi. - __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); - - __ bind(&load_eax); - // Load operand in eax into xmm1. - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi. - __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); - __ jmp(&done); - - __ bind(&load_smi_edx); - __ SmiUntag(edx); // Untag smi before converting to float. - __ cvtsi2sd(xmm0, Operand(edx)); - __ SmiTag(edx); // Retag smi for heap number overwriting test. - __ jmp(&load_eax); - - __ bind(&load_smi_eax); - __ SmiUntag(eax); // Untag smi before converting to float. - __ cvtsi2sd(xmm1, Operand(eax)); - __ SmiTag(eax); // Retag smi for heap number overwriting test. - - __ bind(&done); -} - - -void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm, - Label* not_numbers) { - Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done; - // Load operand in edx into xmm0, or branch to not_numbers. 
- __ test(edx, Immediate(kSmiTagMask)); - __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi. - __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map()); - __ j(not_equal, not_numbers); // Argument in edx is not a number. - __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); - __ bind(&load_eax); - // Load operand in eax into xmm1, or branch to not_numbers. - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi. - __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map()); - __ j(equal, &load_float_eax); - __ jmp(not_numbers); // Argument in eax is not a number. - __ bind(&load_smi_edx); - __ SmiUntag(edx); // Untag smi before converting to float. - __ cvtsi2sd(xmm0, Operand(edx)); - __ SmiTag(edx); // Retag smi for heap number overwriting test. - __ jmp(&load_eax); - __ bind(&load_smi_eax); - __ SmiUntag(eax); // Untag smi before converting to float. - __ cvtsi2sd(xmm1, Operand(eax)); - __ SmiTag(eax); // Retag smi for heap number overwriting test. - __ jmp(&done); - __ bind(&load_float_eax); - __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); - __ bind(&done); -} - - -void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm, - Register scratch) { - const Register left = edx; - const Register right = eax; - __ mov(scratch, left); - ASSERT(!scratch.is(right)); // We're about to clobber scratch. - __ SmiUntag(scratch); - __ cvtsi2sd(xmm0, Operand(scratch)); - - __ mov(scratch, right); - __ SmiUntag(scratch); - __ cvtsi2sd(xmm1, Operand(scratch)); -} - - -void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, - Register scratch, - ArgLocation arg_location) { - Label load_smi_1, load_smi_2, done_load_1, done; - if (arg_location == ARGS_IN_REGISTERS) { - __ mov(scratch, edx); - } else { - __ mov(scratch, Operand(esp, 2 * kPointerSize)); - } - __ test(scratch, Immediate(kSmiTagMask)); - __ j(zero, &load_smi_1, not_taken); - __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); - __ bind(&done_load_1); - - if (arg_location == ARGS_IN_REGISTERS) { - __ mov(scratch, eax); - } else { - __ mov(scratch, Operand(esp, 1 * kPointerSize)); - } - __ test(scratch, Immediate(kSmiTagMask)); - __ j(zero, &load_smi_2, not_taken); - __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); - __ jmp(&done); - - __ bind(&load_smi_1); - __ SmiUntag(scratch); - __ push(scratch); - __ fild_s(Operand(esp, 0)); - __ pop(scratch); - __ jmp(&done_load_1); - - __ bind(&load_smi_2); - __ SmiUntag(scratch); - __ push(scratch); - __ fild_s(Operand(esp, 0)); - __ pop(scratch); - - __ bind(&done); -} - - -void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm, - Register scratch) { - const Register left = edx; - const Register right = eax; - __ mov(scratch, left); - ASSERT(!scratch.is(right)); // We're about to clobber scratch. - __ SmiUntag(scratch); - __ push(scratch); - __ fild_s(Operand(esp, 0)); - - __ mov(scratch, right); - __ SmiUntag(scratch); - __ mov(Operand(esp, 0), scratch); - __ fild_s(Operand(esp, 0)); - __ pop(scratch); -} - - -void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, - Label* non_float, - Register scratch) { - Label test_other, done; - // Test if both operands are floats or smi -> scratch=k_is_float; - // Otherwise scratch = k_not_float. 
- __ test(edx, Immediate(kSmiTagMask)); - __ j(zero, &test_other, not_taken); // argument in edx is OK - __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); - __ cmp(scratch, Factory::heap_number_map()); - __ j(not_equal, non_float); // argument in edx is not a number -> NaN - - __ bind(&test_other); - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &done); // argument in eax is OK - __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset)); - __ cmp(scratch, Factory::heap_number_map()); - __ j(not_equal, non_float); // argument in eax is not a number -> NaN - - // Fall-through: Both operands are numbers. - __ bind(&done); -} - - -void GenericUnaryOpStub::Generate(MacroAssembler* masm) { - Label slow, done; - - if (op_ == Token::SUB) { - // Check whether the value is a smi. - Label try_float; - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &try_float, not_taken); - - if (negative_zero_ == kStrictNegativeZero) { - // Go slow case if the value of the expression is zero - // to make sure that we switch between 0 and -0. - __ test(eax, Operand(eax)); - __ j(zero, &slow, not_taken); - } - - // The value of the expression is a smi that is not zero. Try - // optimistic subtraction '0 - value'. - Label undo; - __ mov(edx, Operand(eax)); - __ Set(eax, Immediate(0)); - __ sub(eax, Operand(edx)); - __ j(no_overflow, &done, taken); - - // Restore eax and go slow case. - __ bind(&undo); - __ mov(eax, Operand(edx)); - __ jmp(&slow); - - // Try floating point case. - __ bind(&try_float); - __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); - __ cmp(edx, Factory::heap_number_map()); - __ j(not_equal, &slow); - if (overwrite_ == UNARY_OVERWRITE) { - __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); - __ xor_(edx, HeapNumber::kSignMask); // Flip sign. - __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx); - } else { - __ mov(edx, Operand(eax)); - // edx: operand - __ AllocateHeapNumber(eax, ebx, ecx, &undo); - // eax: allocated 'empty' number - __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset)); - __ xor_(ecx, HeapNumber::kSignMask); // Flip sign. - __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx); - __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset)); - __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx); - } - } else if (op_ == Token::BIT_NOT) { - // Check if the operand is a heap number. - __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); - __ cmp(edx, Factory::heap_number_map()); - __ j(not_equal, &slow, not_taken); - - // Convert the heap number in eax to an untagged integer in ecx. - IntegerConvert(masm, - eax, - TypeInfo::Unknown(), - CpuFeatures::IsSupported(SSE3), - &slow); - - // Do the bitwise operation and check if the result fits in a smi. - Label try_float; - __ not_(ecx); - __ cmp(ecx, 0xc0000000); - __ j(sign, &try_float, not_taken); - - // Tag the result as a smi and we're done. - STATIC_ASSERT(kSmiTagSize == 1); - __ lea(eax, Operand(ecx, times_2, kSmiTag)); - __ jmp(&done); - - // Try to store the result in a heap number. - __ bind(&try_float); - if (overwrite_ == UNARY_NO_OVERWRITE) { - // Allocate a fresh heap number, but don't overwrite eax until - // we're sure we can do it without going through the slow case - // that needs the value in eax. 
- __ AllocateHeapNumber(ebx, edx, edi, &slow); - __ mov(eax, Operand(ebx)); - } - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(ecx)); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - } else { - __ push(ecx); - __ fild_s(Operand(esp, 0)); - __ pop(ecx); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - } - } else { - UNIMPLEMENTED(); - } - - // Return from the stub. - __ bind(&done); - __ StubReturn(1); - - // Handle the slow case by jumping to the JavaScript builtin. - __ bind(&slow); - __ pop(ecx); // pop return address. - __ push(eax); - __ push(ecx); // push return address - switch (op_) { - case Token::SUB: - __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); - break; - case Token::BIT_NOT: - __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); - break; - default: - UNREACHABLE(); - } -} - - -void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { - // The key is in edx and the parameter count is in eax. - - // The displacement is used for skipping the frame pointer on the - // stack. It is the offset of the last parameter (if any) relative - // to the frame pointer. - static const int kDisplacement = 1 * kPointerSize; - - // Check that the key is a smi. - Label slow; - __ test(edx, Immediate(kSmiTagMask)); - __ j(not_zero, &slow, not_taken); - - // Check if the calling frame is an arguments adaptor frame. - Label adaptor; - __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset)); - __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - __ j(equal, &adaptor); - - // Check index against formal parameters count limit passed in - // through register eax. Use unsigned comparison to get negative - // check for free. - __ cmp(edx, Operand(eax)); - __ j(above_equal, &slow, not_taken); - - // Read the argument from the stack and return it. - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. - __ lea(ebx, Operand(ebp, eax, times_2, 0)); - __ neg(edx); - __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); - __ ret(0); - - // Arguments adaptor case: Check index against actual arguments - // limit found in the arguments adaptor frame. Use unsigned - // comparison to get negative check for free. - __ bind(&adaptor); - __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ cmp(edx, Operand(ecx)); - __ j(above_equal, &slow, not_taken); - - // Read the argument from the stack and return it. - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. - __ lea(ebx, Operand(ebx, ecx, times_2, 0)); - __ neg(edx); - __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); - __ ret(0); - - // Slow-case: Handle non-smi or out-of-bounds access to arguments - // by calling the runtime system. - __ bind(&slow); - __ pop(ebx); // Return address. - __ push(edx); - __ push(ebx); - __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); -} - - -void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { - // esp[0] : return address - // esp[4] : number of parameters - // esp[8] : receiver displacement - // esp[16] : function - - // The displacement is used for skipping the return address and the - // frame pointer on the stack. It is the offset of the last - // parameter (if any) relative to the frame pointer. 
- static const int kDisplacement = 2 * kPointerSize; - - // Check if the calling frame is an arguments adaptor frame. - Label adaptor_frame, try_allocate, runtime; - __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); - __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - __ j(equal, &adaptor_frame); - - // Get the length from the frame. - __ mov(ecx, Operand(esp, 1 * kPointerSize)); - __ jmp(&try_allocate); - - // Patch the arguments.length and the parameters pointer. - __ bind(&adaptor_frame); - __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ mov(Operand(esp, 1 * kPointerSize), ecx); - __ lea(edx, Operand(edx, ecx, times_2, kDisplacement)); - __ mov(Operand(esp, 2 * kPointerSize), edx); - - // Try the new space allocation. Start out with computing the size of - // the arguments object and the elements array. - Label add_arguments_object; - __ bind(&try_allocate); - __ test(ecx, Operand(ecx)); - __ j(zero, &add_arguments_object); - __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize)); - __ bind(&add_arguments_object); - __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize)); - - // Do the allocation of both objects in one go. - __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT); - - // Get the arguments boilerplate from the current (global) context. - int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); - __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset)); - __ mov(edi, Operand(edi, offset)); - - // Copy the JS object part. - for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { - __ mov(ebx, FieldOperand(edi, i)); - __ mov(FieldOperand(eax, i), ebx); - } - - // Setup the callee in-object property. - STATIC_ASSERT(Heap::arguments_callee_index == 0); - __ mov(ebx, Operand(esp, 3 * kPointerSize)); - __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx); - - // Get the length (smi tagged) and set that as an in-object property too. - STATIC_ASSERT(Heap::arguments_length_index == 1); - __ mov(ecx, Operand(esp, 1 * kPointerSize)); - __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx); - - // If there are no actual arguments, we're done. - Label done; - __ test(ecx, Operand(ecx)); - __ j(zero, &done); - - // Get the parameters pointer from the stack. - __ mov(edx, Operand(esp, 2 * kPointerSize)); - - // Setup the elements pointer in the allocated arguments object and - // initialize the header in the elements fixed array. - __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize)); - __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi); - __ mov(FieldOperand(edi, FixedArray::kMapOffset), - Immediate(Factory::fixed_array_map())); - __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx); - // Untag the length for the loop below. - __ SmiUntag(ecx); - - // Copy the fixed array slots. - Label loop; - __ bind(&loop); - __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver. - __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx); - __ add(Operand(edi), Immediate(kPointerSize)); - __ sub(Operand(edx), Immediate(kPointerSize)); - __ dec(ecx); - __ j(not_zero, &loop); - - // Return and remove the on-stack parameters. - __ bind(&done); - __ ret(3 * kPointerSize); - - // Do the runtime call to allocate the arguments object. 
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time, or if the regexp entry in generated code has been turned off by a
- // runtime switch or at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: last_match_info (expected JSArray)
- // esp[8]: previous index
- // esp[12]: subject string
- // esp[16]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime, invoke_regexp;
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(ebx, Operand(ebx));
- __ j(zero, &runtime, not_taken);
-
- // Check that the first argument is a JSRegExp object.
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
- __ j(not_equal, &runtime);
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ test(ecx, Immediate(kSmiTagMask));
- __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // ecx: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
- __ j(not_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // Check that the number of captures fits in the static offsets vector buffer.
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the assumption that smis are 2 * their untagged value.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
- __ j(above, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the second argument is a string.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
- // Get the length of the string to ebx.
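The capture-register arithmetic above leans on the smi encoding: a tagged smi is its untagged value shifted left by one, so the tagged capture count already equals 2 * number_of_captures and adding 2 yields (number_of_captures + 1) * 2. A hypothetical helper spelling this out:

// A regexp with n capture groups needs (n + 1) * 2 offsets: one pair for
// the whole match plus one pair per group. With the smi encoding the input
// is n << 1, so adding 2 gives the register count directly.
static inline int NumCaptureRegistersFromSmi(int smi_encoded_capture_count) {
  return smi_encoded_capture_count + 2;
}
// Example: 2 capture groups -> smi encoding 4 -> 6 capture registers.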
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); - - // ebx: Length of subject string as a smi - // ecx: RegExp data (FixedArray) - // edx: Number of capture registers - // Check that the third argument is a positive smi less than the subject - // string length. A negative value will be greater (unsigned comparison). - __ mov(eax, Operand(esp, kPreviousIndexOffset)); - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &runtime); - __ cmp(eax, Operand(ebx)); - __ j(above_equal, &runtime); - - // ecx: RegExp data (FixedArray) - // edx: Number of capture registers - // Check that the fourth object is a JSArray object. - __ mov(eax, Operand(esp, kLastMatchInfoOffset)); - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &runtime); - __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx); - __ j(not_equal, &runtime); - // Check that the JSArray is in fast case. - __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset)); - __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset)); - __ cmp(eax, Factory::fixed_array_map()); - __ j(not_equal, &runtime); - // Check that the last match info has space for the capture registers and the - // additional information. - __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset)); - __ SmiUntag(eax); - __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead)); - __ cmp(edx, Operand(eax)); - __ j(greater, &runtime); - - // ecx: RegExp data (FixedArray) - // Check the representation and encoding of the subject string. - Label seq_ascii_string, seq_two_byte_string, check_code; - __ mov(eax, Operand(esp, kSubjectOffset)); - __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); - // First check for flat two byte string. - __ and_(ebx, - kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask); - STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0); - __ j(zero, &seq_two_byte_string); - // Any other flat string must be a flat ascii string. - __ test(Operand(ebx), - Immediate(kIsNotStringMask | kStringRepresentationMask)); - __ j(zero, &seq_ascii_string); - - // Check for flat cons string. - // A flat cons string is a cons string where the second part is the empty - // string. In that case the subject string is just the first part of the cons - // string. Also in this case the first part of the cons string is known to be - // a sequential string or an external string. - STATIC_ASSERT(kExternalStringTag != 0); - STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); - __ test(Operand(ebx), - Immediate(kIsNotStringMask | kExternalStringTag)); - __ j(not_zero, &runtime); - // String is a cons string. - __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset)); - __ cmp(Operand(edx), Factory::empty_string()); - __ j(not_equal, &runtime); - __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset)); - __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); - // String is a cons string with empty second part. - // eax: first part of cons string. - // ebx: map of first part of cons string. - // Is first part a flat two byte string? - __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), - kStringRepresentationMask | kStringEncodingMask); - STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0); - __ j(zero, &seq_two_byte_string); - // Any other flat string must be ascii. 
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), - kStringRepresentationMask); - __ j(not_zero, &runtime); - - __ bind(&seq_ascii_string); - // eax: subject string (flat ascii) - // ecx: RegExp data (FixedArray) - __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset)); - __ Set(edi, Immediate(1)); // Type is ascii. - __ jmp(&check_code); - - __ bind(&seq_two_byte_string); - // eax: subject string (flat two byte) - // ecx: RegExp data (FixedArray) - __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset)); - __ Set(edi, Immediate(0)); // Type is two byte. - - __ bind(&check_code); - // Check that the irregexp code has been generated for the actual string - // encoding. If it has, the field contains a code object otherwise it contains - // the hole. - __ CmpObjectType(edx, CODE_TYPE, ebx); - __ j(not_equal, &runtime); - - // eax: subject string - // edx: code - // edi: encoding of subject string (1 if ascii, 0 if two_byte); - // Load used arguments before starting to push arguments for call to native - // RegExp code to avoid handling changing stack height. - __ mov(ebx, Operand(esp, kPreviousIndexOffset)); - __ SmiUntag(ebx); // Previous index from smi. - - // eax: subject string - // ebx: previous index - // edx: code - // edi: encoding of subject string (1 if ascii 0 if two_byte); - // All checks done. Now push arguments for native regexp code. - __ IncrementCounter(&Counters::regexp_entry_native, 1); - - static const int kRegExpExecuteArguments = 7; - __ PrepareCallCFunction(kRegExpExecuteArguments, ecx); - - // Argument 7: Indicate that this is a direct call from JavaScript. - __ mov(Operand(esp, 6 * kPointerSize), Immediate(1)); - - // Argument 6: Start (high end) of backtracking stack memory area. - __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address)); - __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size)); - __ mov(Operand(esp, 5 * kPointerSize), ecx); - - // Argument 5: static offsets vector buffer. - __ mov(Operand(esp, 4 * kPointerSize), - Immediate(ExternalReference::address_of_static_offsets_vector())); - - // Argument 4: End of string data - // Argument 3: Start of string data - Label setup_two_byte, setup_rest; - __ test(edi, Operand(edi)); - __ mov(edi, FieldOperand(eax, String::kLengthOffset)); - __ j(zero, &setup_two_byte); - __ SmiUntag(edi); - __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize)); - __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4. - __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize)); - __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3. - __ jmp(&setup_rest); - - __ bind(&setup_two_byte); - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize == 1); // edi is smi (powered by 2). - __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize)); - __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4. - __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize)); - __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3. - - __ bind(&setup_rest); - - // Argument 2: Previous index. - __ mov(Operand(esp, 1 * kPointerSize), ebx); - - // Argument 1: Subject string. - __ mov(Operand(esp, 0 * kPointerSize), eax); - - // Locate the code entry and call it. - __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ CallCFunction(edx, kRegExpExecuteArguments); - - // Check the result. 
- Label success;
- __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
- __ j(equal, &success, taken);
- Label failure;
- __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
- __ j(equal, &failure, taken);
- __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
- // If it is not an exception, it can only be a retry. Handle that in the runtime system.
- __ j(not_equal, &runtime);
- // Result must now be exception. If there is no pending exception, a stack
- // overflow (on the backtrack stack) was detected in RegExp code but the
- // exception has not been created yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ mov(eax,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
- __ cmp(eax, Operand::StaticVariable(pending_exception));
- __ j(equal, &runtime);
- __ bind(&failure);
- // For failure and exception return null.
- __ mov(Operand(eax), Factory::null_value());
- __ ret(4 * kPointerSize);
-
- // Load RegExp data.
- __ bind(&success);
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
-
- // edx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
-
- // ebx: last_match_info backing store (FixedArray)
- // edx: number of capture registers
- // Store the capture count.
- __ SmiTag(edx); // Number of capture registers to smi.
- __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
- __ SmiUntag(edx); // Number of capture registers back from smi.
- // Store last subject and last input.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
- __ mov(ecx, ebx);
- __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
- __ mov(ecx, ebx);
- __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector();
- __ mov(ecx, Immediate(address_of_static_offsets_vector));
-
- // ebx: last_match_info backing store (FixedArray)
- // ecx: offsets vector
- // edx: number of capture registers
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wrapping after zero.
- __ bind(&next_capture);
- __ sub(Operand(edx), Immediate(1));
- __ j(negative, &done);
- // Read the value from the static offsets vector buffer.
- __ mov(edi, Operand(ecx, edx, times_int_size, 0));
- __ SmiTag(edi);
- // Store the smi value in the last match info.
- __ mov(FieldOperand(ebx,
- edx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- edi);
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ ret(4 * kPointerSize);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime); - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); -#endif // V8_INTERPRETED_REGEXP -} - - -void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - bool object_is_smi, - Label* not_found) { - // Use of registers. Register result is used as a temporary. - Register number_string_cache = result; - Register mask = scratch1; - Register scratch = scratch2; - - // Load the number string cache. - ExternalReference roots_address = ExternalReference::roots_address(); - __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex)); - __ mov(number_string_cache, - Operand::StaticArray(scratch, times_pointer_size, roots_address)); - // Make the hash mask from the length of the number string cache. It - // contains two elements (number and string) for each cache entry. - __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); - __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two. - __ sub(Operand(mask), Immediate(1)); // Make mask. - - // Calculate the entry in the number string cache. The hash value in the - // number string cache for smis is just the smi value, and the hash for - // doubles is the xor of the upper and lower words. See - // Heap::GetNumberStringCache. - Label smi_hash_calculated; - Label load_result_from_cache; - if (object_is_smi) { - __ mov(scratch, object); - __ SmiUntag(scratch); - } else { - Label not_smi, hash_calculated; - STATIC_ASSERT(kSmiTag == 0); - __ test(object, Immediate(kSmiTagMask)); - __ j(not_zero, ¬_smi); - __ mov(scratch, object); - __ SmiUntag(scratch); - __ jmp(&smi_hash_calculated); - __ bind(¬_smi); - __ cmp(FieldOperand(object, HeapObject::kMapOffset), - Factory::heap_number_map()); - __ j(not_equal, not_found); - STATIC_ASSERT(8 == kDoubleSize); - __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset)); - __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); - // Object is heap number and hash is now in scratch. Calculate cache index. - __ and_(scratch, Operand(mask)); - Register index = scratch; - Register probe = mask; - __ mov(probe, - FieldOperand(number_string_cache, - index, - times_twice_pointer_size, - FixedArray::kHeaderSize)); - __ test(probe, Immediate(kSmiTagMask)); - __ j(zero, not_found); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope fscope(SSE2); - __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); - __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset)); - __ ucomisd(xmm0, xmm1); - } else { - __ fld_d(FieldOperand(object, HeapNumber::kValueOffset)); - __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset)); - __ FCmp(); - } - __ j(parity_even, not_found); // Bail out if NaN is involved. - __ j(not_equal, not_found); // The cache did not contain this value. - __ jmp(&load_result_from_cache); - } - - __ bind(&smi_hash_calculated); - // Object is smi and hash is now in scratch. Calculate cache index. - __ and_(scratch, Operand(mask)); - Register index = scratch; - // Check if the entry is the smi we are looking for. - __ cmp(object, - FieldOperand(number_string_cache, - index, - times_twice_pointer_size, - FixedArray::kHeaderSize)); - __ j(not_equal, not_found); - - // Get the result from the cache. 
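The lookup above uses the same hash as Heap::GetNumberStringCache: a smi hashes to its untagged value, and a heap number to the xor of the two 32-bit halves of its double, both masked with the number of cache entries minus one. A C++ sketch of the heap-number case (helper name illustrative):

#include <cstdint>
#include <cstring>

// mask is the entry count minus one; the backing FixedArray holds two slots
// (number, string) per entry, hence the shr by kSmiTagSize + 1 above.
static inline uint32_t HeapNumberStringCacheIndex(double value, uint32_t mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return (static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32)) & mask;
}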
- __ bind(&load_result_from_cache); - __ mov(result, - FieldOperand(number_string_cache, - index, - times_twice_pointer_size, - FixedArray::kHeaderSize + kPointerSize)); - __ IncrementCounter(&Counters::number_to_string_native, 1); -} - - -void NumberToStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - __ mov(ebx, Operand(esp, kPointerSize)); - - // Generate code to lookup number in the number string cache. - GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime); - __ ret(1 * kPointerSize); - - __ bind(&runtime); - // Handle number to string in the runtime system if not found in the cache. - __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); -} - - -static int NegativeComparisonResult(Condition cc) { - ASSERT(cc != equal); - ASSERT((cc == less) || (cc == less_equal) - || (cc == greater) || (cc == greater_equal)); - return (cc == greater || cc == greater_equal) ? LESS : GREATER; -} - - -void CompareStub::Generate(MacroAssembler* masm) { - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - - Label check_unequal_objects, done; - - // NOTICE! This code is only reached after a smi-fast-case check, so - // it is certain that at least one operand isn't a smi. - - // Identical objects can be compared fast, but there are some tricky cases - // for NaN and undefined. - { - Label not_identical; - __ cmp(eax, Operand(edx)); - __ j(not_equal, ¬_identical); - - if (cc_ != equal) { - // Check for undefined. undefined OP undefined is false even though - // undefined == undefined. - Label check_for_nan; - __ cmp(edx, Factory::undefined_value()); - __ j(not_equal, &check_for_nan); - __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); - __ ret(0); - __ bind(&check_for_nan); - } - - // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), - // so we do the second best thing - test it ourselves. - // Note: if cc_ != equal, never_nan_nan_ is not used. - if (never_nan_nan_ && (cc_ == equal)) { - __ Set(eax, Immediate(Smi::FromInt(EQUAL))); - __ ret(0); - } else { - Label heap_number; - __ cmp(FieldOperand(edx, HeapObject::kMapOffset), - Immediate(Factory::heap_number_map())); - __ j(equal, &heap_number); - if (cc_ != equal) { - // Call runtime on identical JSObjects. Otherwise return equal. - __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx); - __ j(above_equal, ¬_identical); - } - __ Set(eax, Immediate(Smi::FromInt(EQUAL))); - __ ret(0); - - __ bind(&heap_number); - // It is a heap number, so return non-equal if it's NaN and equal if - // it's not NaN. - // The representation of NaN values has all exponent bits (52..62) set, - // and not all mantissa bits (0..51) clear. - // We only accept QNaNs, which have bit 51 set. - // Read top bits of double representation (second word of value). - - // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e., - // all bits in the mask are set. We only need to check the word - // that contains the exponent and high bit of the mantissa. - STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0); - __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset)); - __ xor_(eax, Operand(eax)); - // Shift value and mask so kQuietNaNHighBitsMask applies to topmost - // bits. 
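The two instructions that follow implement that comment with a single unsigned comparison: shifting the sign bit out of the high word leaves the exponent bits and mantissa bit 51 as the most significant bits, so one compare tests "exponent all ones and quiet bit set". As a sketch, where the 0xFFF00000 literal is simply those twelve bits shifted left by one, playing the role of kQuietNaNHighBitsMask << 1:

#include <cstdint>

// True when the high word of a double has all exponent bits (62..52) and
// the quiet-NaN bit (51) set, i.e. the value is a QNaN.
static inline bool IsQuietNaNHighWord(uint32_t high_word) {
  return (high_word << 1) >= 0xFFF00000u;
}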
- __ add(edx, Operand(edx)); - __ cmp(edx, kQuietNaNHighBitsMask << 1); - if (cc_ == equal) { - STATIC_ASSERT(EQUAL != 1); - __ setcc(above_equal, eax); - __ ret(0); - } else { - Label nan; - __ j(above_equal, &nan); - __ Set(eax, Immediate(Smi::FromInt(EQUAL))); - __ ret(0); - __ bind(&nan); - __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); - __ ret(0); - } - } - - __ bind(¬_identical); - } - - // Strict equality can quickly decide whether objects are equal. - // Non-strict object equality is slower, so it is handled later in the stub. - if (cc_ == equal && strict_) { - Label slow; // Fallthrough label. - Label not_smis; - // If we're doing a strict equality comparison, we don't have to do - // type conversion, so we generate code to do fast comparison for objects - // and oddballs. Non-smi numbers and strings still go through the usual - // slow-case code. - // If either is a Smi (we know that not both are), then they can only - // be equal if the other is a HeapNumber. If so, use the slow case. - STATIC_ASSERT(kSmiTag == 0); - ASSERT_EQ(0, Smi::FromInt(0)); - __ mov(ecx, Immediate(kSmiTagMask)); - __ and_(ecx, Operand(eax)); - __ test(ecx, Operand(edx)); - __ j(not_zero, ¬_smis); - // One operand is a smi. - - // Check whether the non-smi is a heap number. - STATIC_ASSERT(kSmiTagMask == 1); - // ecx still holds eax & kSmiTag, which is either zero or one. - __ sub(Operand(ecx), Immediate(0x01)); - __ mov(ebx, edx); - __ xor_(ebx, Operand(eax)); - __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx. - __ xor_(ebx, Operand(eax)); - // if eax was smi, ebx is now edx, else eax. - - // Check if the non-smi operand is a heap number. - __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), - Immediate(Factory::heap_number_map())); - // If heap number, handle it in the slow case. - __ j(equal, &slow); - // Return non-equal (ebx is not zero) - __ mov(eax, ebx); - __ ret(0); - - __ bind(¬_smis); - // If either operand is a JSObject or an oddball value, then they are not - // equal since their pointers are different - // There is no test for undetectability in strict equality. - - // Get the type of the first operand. - // If the first object is a JS object, we have done pointer comparison. - Label first_non_object; - STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); - __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx); - __ j(below, &first_non_object); - - // Return non-zero (eax is not zero) - Label return_not_equal; - STATIC_ASSERT(kHeapObjectTag != 0); - __ bind(&return_not_equal); - __ ret(0); - - __ bind(&first_non_object); - // Check for oddballs: true, false, null, undefined. - __ CmpInstanceType(ecx, ODDBALL_TYPE); - __ j(equal, &return_not_equal); - - __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx); - __ j(above_equal, &return_not_equal); - - // Check for oddballs: true, false, null, undefined. - __ CmpInstanceType(ecx, ODDBALL_TYPE); - __ j(equal, &return_not_equal); - - // Fall through to the general case. - __ bind(&slow); - } - - // Generate the number comparison code. - if (include_number_compare_) { - Label non_number_comparison; - Label unordered; - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - CpuFeatures::Scope use_cmov(CMOV); - - FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); - __ ucomisd(xmm0, xmm1); - - // Don't base result on EFLAGS when a NaN is involved. - __ j(parity_even, &unordered, not_taken); - // Return a result of -1, 0, or 1, based on EFLAGS. 
- __ mov(eax, 0); // equal - __ mov(ecx, Immediate(Smi::FromInt(1))); - __ cmov(above, eax, Operand(ecx)); - __ mov(ecx, Immediate(Smi::FromInt(-1))); - __ cmov(below, eax, Operand(ecx)); - __ ret(0); - } else { - FloatingPointHelper::CheckFloatOperands( - masm, &non_number_comparison, ebx); - FloatingPointHelper::LoadFloatOperand(masm, eax); - FloatingPointHelper::LoadFloatOperand(masm, edx); - __ FCmp(); - - // Don't base result on EFLAGS when a NaN is involved. - __ j(parity_even, &unordered, not_taken); - - Label below_label, above_label; - // Return a result of -1, 0, or 1, based on EFLAGS. - __ j(below, &below_label, not_taken); - __ j(above, &above_label, not_taken); - - __ xor_(eax, Operand(eax)); - __ ret(0); - - __ bind(&below_label); - __ mov(eax, Immediate(Smi::FromInt(-1))); - __ ret(0); - - __ bind(&above_label); - __ mov(eax, Immediate(Smi::FromInt(1))); - __ ret(0); - } - - // If one of the numbers was NaN, then the result is always false. - // The cc is never not-equal. - __ bind(&unordered); - ASSERT(cc_ != not_equal); - if (cc_ == less || cc_ == less_equal) { - __ mov(eax, Immediate(Smi::FromInt(1))); - } else { - __ mov(eax, Immediate(Smi::FromInt(-1))); - } - __ ret(0); - - // The number comparison code did not provide a valid result. - __ bind(&non_number_comparison); - } - - // Fast negative check for symbol-to-symbol equality. - Label check_for_strings; - if (cc_ == equal) { - BranchIfNonSymbol(masm, &check_for_strings, eax, ecx); - BranchIfNonSymbol(masm, &check_for_strings, edx, ecx); - - // We've already checked for object identity, so if both operands - // are symbols they aren't equal. Register eax already holds a - // non-zero value, which indicates not equal, so just return. - __ ret(0); - } - - __ bind(&check_for_strings); - - __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, - &check_unequal_objects); - - // Inline comparison of ascii strings. - StringCompareStub::GenerateCompareFlatAsciiStrings(masm, - edx, - eax, - ecx, - ebx, - edi); -#ifdef DEBUG - __ Abort("Unexpected fall-through from string comparison"); -#endif - - __ bind(&check_unequal_objects); - if (cc_ == equal && !strict_) { - // Non-strict equality. Objects are unequal if - // they are both JSObjects and not undetectable, - // and their pointers are different. - Label not_both_objects; - Label return_unequal; - // At most one is a smi, so we can test for smi by adding the two. - // A smi plus a heap object has the low bit set, a heap object plus - // a heap object has the low bit clear. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagMask == 1); - __ lea(ecx, Operand(eax, edx, times_1, 0)); - __ test(ecx, Immediate(kSmiTagMask)); - __ j(not_zero, ¬_both_objects); - __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx); - __ j(below, ¬_both_objects); - __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx); - __ j(below, ¬_both_objects); - // We do not bail out after this point. Both are JSObjects, and - // they are equal if and only if both are undetectable. - // The and of the undetectable flags is 1 if and only if they are equal. - __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), - 1 << Map::kIsUndetectable); - __ j(zero, &return_unequal); - __ test_b(FieldOperand(ebx, Map::kBitFieldOffset), - 1 << Map::kIsUndetectable); - __ j(zero, &return_unequal); - // The objects are both undetectable, so they both compare as the value - // undefined, and are equal. 
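The "at most one is a smi" test above relies only on the low tag bit and can be written out as a small illustrative helper:

#include <cstdint>

// With kSmiTag == 0 and kHeapObjectTag == 1, a smi word has its low bit
// clear and a heap object word has it set, so the sum of the two words has
// its low bit set exactly when one operand (and only one) is a smi.
static inline bool ExactlyOneIsSmi(uint32_t a, uint32_t b) {
  return ((a + b) & 1) != 0;  // kSmiTagMask == 1
}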
- __ Set(eax, Immediate(EQUAL)); - __ bind(&return_unequal); - // Return non-equal by returning the non-zero object pointer in eax, - // or return equal if we fell through to here. - __ ret(0); // rax, rdx were pushed - __ bind(¬_both_objects); - } - - // Push arguments below the return address. - __ pop(ecx); - __ push(edx); - __ push(eax); - - // Figure out which native to call and setup the arguments. - Builtins::JavaScript builtin; - if (cc_ == equal) { - builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; - } else { - builtin = Builtins::COMPARE; - __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); - } - - // Restore return address on the stack. - __ push(ecx); - - // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) - // tagged as a small integer. - __ InvokeBuiltin(builtin, JUMP_FUNCTION); -} - - -void CompareStub::BranchIfNonSymbol(MacroAssembler* masm, - Label* label, - Register object, - Register scratch) { - __ test(object, Immediate(kSmiTagMask)); - __ j(zero, label); - __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset)); - __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); - __ and_(scratch, kIsSymbolMask | kIsNotStringMask); - __ cmp(scratch, kSymbolTag | kStringTag); - __ j(not_equal, label); -} - - -void StackCheckStub::Generate(MacroAssembler* masm) { - // Because builtins always remove the receiver from the stack, we - // have to fake one to avoid underflowing the stack. The receiver - // must be inserted below the return address on the stack so we - // temporarily store that in a register. - __ pop(eax); - __ push(Immediate(Smi::FromInt(0))); - __ push(eax); - - // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kStackGuard, 1, 1); -} - - -void CallFunctionStub::Generate(MacroAssembler* masm) { - Label slow; - - // If the receiver might be a value (string, number or boolean) check for this - // and box it if it is. - if (ReceiverMightBeValue()) { - // Get the receiver from the stack. - // +1 ~ return address - Label receiver_is_value, receiver_is_js_object; - __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize)); - - // Check if receiver is a smi (which is a number value). - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &receiver_is_value, not_taken); - - // Check if the receiver is a valid JS object. - __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi); - __ j(above_equal, &receiver_is_js_object); - - // Call the runtime to box the value. - __ bind(&receiver_is_value); - __ EnterInternalFrame(); - __ push(eax); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ LeaveInternalFrame(); - __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax); - - __ bind(&receiver_is_js_object); - } - - // Get the function to call from the stack. - // +2 ~ receiver, return address - __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize)); - - // Check that the function really is a JavaScript function. - __ test(edi, Immediate(kSmiTagMask)); - __ j(zero, &slow, not_taken); - // Goto slow case if we do not have a function. - __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); - __ j(not_equal, &slow, not_taken); - - // Fast-case: Just invoke the function. - ParameterCount actual(argc_); - __ InvokeFunction(edi, actual, JUMP_FUNCTION); - - // Slow-case: Non-function called. - __ bind(&slow); - // CALL_NON_FUNCTION expects the non-function callee as receiver (instead - // of the original receiver from the call site). 
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
- __ Set(eax, Immediate(argc_));
- __ Set(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // eax holds the exception.
-
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop the sp to the top of the handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ mov(esp, Operand::StaticVariable(handler_address));
-
- // Restore next handler and frame pointer, discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(Operand::StaticVariable(handler_address));
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
- __ pop(ebp);
- __ pop(edx); // Remove state.
-
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of
- // a JS entry frame.
- __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
- Label skip;
- __ cmp(ebp, 0);
- __ j(equal, &skip, not_taken);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ bind(&skip);
-
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ ret(0);
-}
-
-
-// If true, a Handle passed by value is passed and returned by
-// using the location_ field directly. If false, it is passed and
-// returned as a pointer to a handle.
-#ifdef USING_BSD_ABI
-static const bool kPassHandlesDirectly = true;
-#else
-static const bool kPassHandlesDirectly = false;
-#endif
-
-
-void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
- Label empty_handle;
- Label prologue;
- Label promote_scheduled_exception;
- __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
- STATIC_ASSERT(kArgc == 4);
- if (kPassHandlesDirectly) {
- // When handles are passed directly we don't have to allocate extra
- // space for and pass an out parameter.
- __ mov(Operand(esp, 0 * kPointerSize), ebx); // name.
- __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer.
- } else {
- // The function expects three arguments to be passed but we allocate
- // four to get space for the output cell. The argument slots are filled
- // as follows:
- //
- // 3: output cell
- // 2: arguments pointer
- // 1: name
- // 0: pointer to the output cell
- //
- // Note that this is one more "argument" than the function expects
- // so the out cell will have to be popped explicitly after returning
- // from the function.
- __ mov(Operand(esp, 1 * kPointerSize), ebx); // name.
- __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer.
- __ mov(ebx, esp);
- __ add(Operand(ebx), Immediate(3 * kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), ebx); // output
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell.
- }
- // Call the api function!
- __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
- // Check if the function scheduled an exception.
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address();
- __ cmp(Operand::StaticVariable(scheduled_exception_address),
- Immediate(Factory::the_hole_value()));
- __ j(not_equal, &promote_scheduled_exception, not_taken);
- if (!kPassHandlesDirectly) {
- // The returned value is a pointer to the handle holding the result.
- // Dereference this to get to the location. - __ mov(eax, Operand(eax, 0)); - } - // Check if the result handle holds 0. - __ test(eax, Operand(eax)); - __ j(zero, &empty_handle, not_taken); - // It was non-zero. Dereference to get the result value. - __ mov(eax, Operand(eax, 0)); - __ bind(&prologue); - __ LeaveExitFrame(ExitFrame::MODE_NORMAL); - __ ret(0); - __ bind(&promote_scheduled_exception); - __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1); - __ bind(&empty_handle); - // It was zero; the result is undefined. - __ mov(eax, Factory::undefined_value()); - __ jmp(&prologue); -} - - -void CEntryStub::GenerateCore(MacroAssembler* masm, - Label* throw_normal_exception, - Label* throw_termination_exception, - Label* throw_out_of_memory_exception, - bool do_gc, - bool always_allocate_scope, - int /* alignment_skew */) { - // eax: result parameter for PerformGC, if any - // ebx: pointer to C function (C callee-saved) - // ebp: frame pointer (restored after C call) - // esp: stack pointer (restored after C call) - // edi: number of arguments including receiver (C callee-saved) - // esi: pointer to the first argument (C callee-saved) - - // Result returned in eax, or eax+edx if result_size_ is 2. - - // Check stack alignment. - if (FLAG_debug_code) { - __ CheckStackAlignment(); - } - - if (do_gc) { - // Pass failure code returned from last attempt as first argument to - // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the - // stack alignment is known to be correct. This function takes one argument - // which is passed on the stack, and we know that the stack has been - // prepared to pass at least one argument. - __ mov(Operand(esp, 0 * kPointerSize), eax); // Result. - __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY); - } - - ExternalReference scope_depth = - ExternalReference::heap_always_allocate_scope_depth(); - if (always_allocate_scope) { - __ inc(Operand::StaticVariable(scope_depth)); - } - - // Call C function. - __ mov(Operand(esp, 0 * kPointerSize), edi); // argc. - __ mov(Operand(esp, 1 * kPointerSize), esi); // argv. - __ call(Operand(ebx)); - // Result is in eax or edx:eax - do not destroy these registers! - - if (always_allocate_scope) { - __ dec(Operand::StaticVariable(scope_depth)); - } - - // Make sure we're not trying to return 'the hole' from the runtime - // call as this may lead to crashes in the IC code later. - if (FLAG_debug_code) { - Label okay; - __ cmp(eax, Factory::the_hole_value()); - __ j(not_equal, &okay); - __ int3(); - __ bind(&okay); - } - - // Check for failure result. - Label failure_returned; - STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); - __ lea(ecx, Operand(eax, 1)); - // Lower 2 bits of ecx are 0 iff eax has failure tag. - __ test(ecx, Immediate(kFailureTagMask)); - __ j(zero, &failure_returned, not_taken); - - // Exit the JavaScript to C++ exit frame. - __ LeaveExitFrame(mode_); - __ ret(0); - - // Handling of failure. - __ bind(&failure_returned); - - Label retry; - // If the returned exception is RETRY_AFTER_GC continue at retry label - STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); - __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); - __ j(zero, &retry, taken); - - // Special handling of out of memory exceptions. - __ cmp(eax, reinterpret_cast(Failure::OutOfMemoryException())); - __ j(equal, throw_out_of_memory_exception); - - // Retrieve the pending exception and clear the variable. 
- ExternalReference pending_exception_address(Top::k_pending_exception_address); - __ mov(eax, Operand::StaticVariable(pending_exception_address)); - __ mov(edx, - Operand::StaticVariable(ExternalReference::the_hole_value_location())); - __ mov(Operand::StaticVariable(pending_exception_address), edx); - - // Special handling of termination exceptions which are uncatchable - // by javascript code. - __ cmp(eax, Factory::termination_exception()); - __ j(equal, throw_termination_exception); - - // Handle normal exception. - __ jmp(throw_normal_exception); - - // Retry. - __ bind(&retry); -} - - -void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, - UncatchableExceptionType type) { - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop sp to the top stack handler. - ExternalReference handler_address(Top::k_handler_address); - __ mov(esp, Operand::StaticVariable(handler_address)); - - // Unwind the handlers until the ENTRY handler is found. - Label loop, done; - __ bind(&loop); - // Load the type of the current stack handler. - const int kStateOffset = StackHandlerConstants::kStateOffset; - __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY)); - __ j(equal, &done); - // Fetch the next handler in the list. - const int kNextOffset = StackHandlerConstants::kNextOffset; - __ mov(esp, Operand(esp, kNextOffset)); - __ jmp(&loop); - __ bind(&done); - - // Set the top handler address to next handler past the current ENTRY handler. - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(Operand::StaticVariable(handler_address)); - - if (type == OUT_OF_MEMORY) { - // Set external caught exception to false. - ExternalReference external_caught(Top::k_external_caught_exception_address); - __ mov(eax, false); - __ mov(Operand::StaticVariable(external_caught), eax); - - // Set pending exception and eax to out of memory exception. - ExternalReference pending_exception(Top::k_pending_exception_address); - __ mov(eax, reinterpret_cast(Failure::OutOfMemoryException())); - __ mov(Operand::StaticVariable(pending_exception), eax); - } - - // Clear the context pointer. - __ xor_(esi, Operand(esi)); - - // Restore fp from handler and discard handler state. - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize); - __ pop(ebp); - __ pop(edx); // State. - - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ ret(0); -} - - -void CEntryStub::Generate(MacroAssembler* masm) { - // eax: number of arguments including receiver - // ebx: pointer to C function (C callee-saved) - // ebp: frame pointer (restored after C call) - // esp: stack pointer (restored after C call) - // esi: current context (C callee-saved) - // edi: JS function of the caller (C callee-saved) - - // NOTE: Invocations of builtins may return failure objects instead - // of a proper result. The builtin entry handles this by performing - // a garbage collection and retrying the builtin (twice). - - // Enter the exit frame that transitions from JavaScript to C++. 
- __ EnterExitFrame(mode_); - - // eax: result parameter for PerformGC, if any (setup below) - // ebx: pointer to builtin function (C callee-saved) - // ebp: frame pointer (restored after C call) - // esp: stack pointer (restored after C call) - // edi: number of arguments including receiver (C callee-saved) - // esi: argv pointer (C callee-saved) - - Label throw_normal_exception; - Label throw_termination_exception; - Label throw_out_of_memory_exception; - - // Call into the runtime system. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - false, - false); - - // Do space-specific GC and retry runtime call. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - true, - false); - - // Do full GC and retry runtime call one final time. - Failure* failure = Failure::InternalError(); - __ mov(eax, Immediate(reinterpret_cast(failure))); - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - true, - true); - - __ bind(&throw_out_of_memory_exception); - GenerateThrowUncatchable(masm, OUT_OF_MEMORY); - - __ bind(&throw_termination_exception); - GenerateThrowUncatchable(masm, TERMINATION); - - __ bind(&throw_normal_exception); - GenerateThrowTOS(masm); -} - - -void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { - Label invoke, exit; -#ifdef ENABLE_LOGGING_AND_PROFILING - Label not_outermost_js, not_outermost_js_2; -#endif - - // Setup frame. - __ push(ebp); - __ mov(ebp, Operand(esp)); - - // Push marker in two places. - int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; - __ push(Immediate(Smi::FromInt(marker))); // context slot - __ push(Immediate(Smi::FromInt(marker))); // function slot - // Save callee-saved registers (C calling conventions). - __ push(edi); - __ push(esi); - __ push(ebx); - - // Save copies of the top frame descriptor on the stack. - ExternalReference c_entry_fp(Top::k_c_entry_fp_address); - __ push(Operand::StaticVariable(c_entry_fp)); - -#ifdef ENABLE_LOGGING_AND_PROFILING - // If this is the outermost JS call, set js_entry_sp value. - ExternalReference js_entry_sp(Top::k_js_entry_sp_address); - __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0)); - __ j(not_equal, ¬_outermost_js); - __ mov(Operand::StaticVariable(js_entry_sp), ebp); - __ bind(¬_outermost_js); -#endif - - // Call a faked try-block that does the invoke. - __ call(&invoke); - - // Caught exception: Store result (exception) in the pending - // exception field in the JSEnv and return a failure sentinel. - ExternalReference pending_exception(Top::k_pending_exception_address); - __ mov(Operand::StaticVariable(pending_exception), eax); - __ mov(eax, reinterpret_cast(Failure::Exception())); - __ jmp(&exit); - - // Invoke: Link this frame into the handler chain. - __ bind(&invoke); - __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); - - // Clear any pending exceptions. - __ mov(edx, - Operand::StaticVariable(ExternalReference::the_hole_value_location())); - __ mov(Operand::StaticVariable(pending_exception), edx); - - // Fake a receiver (NULL). - __ push(Immediate(0)); // receiver - - // Invoke the function by calling through JS entry trampoline - // builtin and pop the faked function when we return. Notice that we - // cannot store a reference to the trampoline code directly in this - // stub, because the builtin stubs may not have been generated yet. 
- if (is_construct) { - ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline); - __ mov(edx, Immediate(construct_entry)); - } else { - ExternalReference entry(Builtins::JSEntryTrampoline); - __ mov(edx, Immediate(entry)); - } - __ mov(edx, Operand(edx, 0)); // deref address - __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); - __ call(Operand(edx)); - - // Unlink this frame from the handler chain. - __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address))); - // Pop next_sp. - __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize)); - -#ifdef ENABLE_LOGGING_AND_PROFILING - // If current EBP value is the same as js_entry_sp value, it means that - // the current function is the outermost. - __ cmp(ebp, Operand::StaticVariable(js_entry_sp)); - __ j(not_equal, ¬_outermost_js_2); - __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0)); - __ bind(¬_outermost_js_2); -#endif - - // Restore the top frame descriptor from the stack. - __ bind(&exit); - __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address))); - - // Restore callee-saved registers (C calling conventions). - __ pop(ebx); - __ pop(esi); - __ pop(edi); - __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers - - // Restore frame pointer and return. - __ pop(ebp); - __ ret(0); -} - - -void InstanceofStub::Generate(MacroAssembler* masm) { - // Get the object - go slow case if it's a smi. - Label slow; - __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &slow, not_taken); - - // Check that the left hand is a JS object. - __ IsObjectJSObjectType(eax, eax, edx, &slow); - - // Get the prototype of the function. - __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address - // edx is function, eax is map. - - // Look up the function and the map in the instanceof cache. - Label miss; - ExternalReference roots_address = ExternalReference::roots_address(); - __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); - __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address)); - __ j(not_equal, &miss); - __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex)); - __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address)); - __ j(not_equal, &miss); - __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); - __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address)); - __ ret(2 * kPointerSize); - - __ bind(&miss); - __ TryGetFunctionPrototype(edx, ebx, ecx, &slow); - - // Check that the function prototype is a JS object. - __ test(ebx, Immediate(kSmiTagMask)); - __ j(zero, &slow, not_taken); - __ IsObjectJSObjectType(ebx, ecx, ecx, &slow); - - // Register mapping: - // eax is object map. - // edx is function. - // ebx is function prototype. - __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex)); - __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); - __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); - __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx); - - __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset)); - - // Loop through the prototype chain looking for the function prototype. 
- Label loop, is_instance, is_not_instance; - __ bind(&loop); - __ cmp(ecx, Operand(ebx)); - __ j(equal, &is_instance); - __ cmp(Operand(ecx), Immediate(Factory::null_value())); - __ j(equal, &is_not_instance); - __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset)); - __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset)); - __ jmp(&loop); - - __ bind(&is_instance); - __ Set(eax, Immediate(0)); - __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); - __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); - __ ret(2 * kPointerSize); - - __ bind(&is_not_instance); - __ Set(eax, Immediate(Smi::FromInt(1))); - __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); - __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); - __ ret(2 * kPointerSize); - - // Slow-case: Go through the JavaScript implementation. - __ bind(&slow); - __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); -} - - -int CompareStub::MinorKey() { - // Encode the three parameters in a unique 16 bit value. To avoid duplicate - // stubs the never NaN NaN condition is only taken into account if the - // condition is equals. - ASSERT(static_cast(cc_) < (1 << 12)); - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - return ConditionField::encode(static_cast(cc_)) - | RegisterField::encode(false) // lhs_ and rhs_ are not used - | StrictField::encode(strict_) - | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false) - | IncludeNumberCompareField::encode(include_number_compare_); -} - - -// Unfortunately you have to run without snapshots to see most of these -// names in the profile since most compare stubs end up in the snapshot. -const char* CompareStub::GetName() { - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); - if (name_ == NULL) return "OOM"; - - const char* cc_name; - switch (cc_) { - case less: cc_name = "LT"; break; - case greater: cc_name = "GT"; break; - case less_equal: cc_name = "LE"; break; - case greater_equal: cc_name = "GE"; break; - case equal: cc_name = "EQ"; break; - case not_equal: cc_name = "NE"; break; - default: cc_name = "UnknownCondition"; break; - } - - const char* strict_name = ""; - if (strict_ && (cc_ == equal || cc_ == not_equal)) { - strict_name = "_STRICT"; - } - - const char* never_nan_nan_name = ""; - if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) { - never_nan_nan_name = "_NO_NAN"; - } - - const char* include_number_compare_name = ""; - if (!include_number_compare_) { - include_number_compare_name = "_NO_NUMBER"; - } - - OS::SNPrintF(Vector(name_, kMaxNameLength), - "CompareStub_%s%s%s%s", - cc_name, - strict_name, - never_nan_nan_name, - include_number_compare_name); - return name_; -} - - -// ------------------------------------------------------------------------- -// StringCharCodeAtGenerator - -void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { - Label flat_string; - Label ascii_string; - Label got_char_code; - - // If the receiver is a smi trigger the non-string case. - STATIC_ASSERT(kSmiTag == 0); - __ test(object_, Immediate(kSmiTagMask)); - __ j(zero, receiver_not_string_); - - // Fetch the instance type of the receiver into result register. - __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); - __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); - // If the receiver is not a string trigger the non-string case. 
- __ test(result_, Immediate(kIsNotStringMask)); - __ j(not_zero, receiver_not_string_); - - // If the index is non-smi trigger the non-smi case. - STATIC_ASSERT(kSmiTag == 0); - __ test(index_, Immediate(kSmiTagMask)); - __ j(not_zero, &index_not_smi_); - - // Put smi-tagged index into scratch register. - __ mov(scratch_, index_); - __ bind(&got_smi_index_); - - // Check for index out of range. - __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset)); - __ j(above_equal, index_out_of_range_); - - // We need special handling for non-flat strings. - STATIC_ASSERT(kSeqStringTag == 0); - __ test(result_, Immediate(kStringRepresentationMask)); - __ j(zero, &flat_string); - - // Handle non-flat strings. - __ test(result_, Immediate(kIsConsStringMask)); - __ j(zero, &call_runtime_); - - // ConsString. - // Check whether the right hand side is the empty string (i.e. if - // this is really a flat string in a cons string). If that is not - // the case we would rather go to the runtime system now to flatten - // the string. - __ cmp(FieldOperand(object_, ConsString::kSecondOffset), - Immediate(Factory::empty_string())); - __ j(not_equal, &call_runtime_); - // Get the first of the two strings and load its instance type. - __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset)); - __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); - __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); - // If the first cons component is also non-flat, then go to runtime. - STATIC_ASSERT(kSeqStringTag == 0); - __ test(result_, Immediate(kStringRepresentationMask)); - __ j(not_zero, &call_runtime_); - - // Check for 1-byte or 2-byte string. - __ bind(&flat_string); - STATIC_ASSERT(kAsciiStringTag != 0); - __ test(result_, Immediate(kStringEncodingMask)); - __ j(not_zero, &ascii_string); - - // 2-byte string. - // Load the 2-byte character code into the result register. - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); - __ movzx_w(result_, FieldOperand(object_, - scratch_, times_1, // Scratch is smi-tagged. - SeqTwoByteString::kHeaderSize)); - __ jmp(&got_char_code); - - // ASCII string. - // Load the byte into the result register. - __ bind(&ascii_string); - __ SmiUntag(scratch_); - __ movzx_b(result_, FieldOperand(object_, - scratch_, times_1, - SeqAsciiString::kHeaderSize)); - __ bind(&got_char_code); - __ SmiTag(result_); - __ bind(&exit_); -} - - -void StringCharCodeAtGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - __ Abort("Unexpected fallthrough to CharCodeAt slow case"); - - // Index is not a smi. - __ bind(&index_not_smi_); - // If index is a heap number, try converting it to an integer. - __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true); - call_helper.BeforeCall(masm); - __ push(object_); - __ push(index_); - __ push(index_); // Consumed by runtime conversion function. - if (index_flags_ == STRING_INDEX_IS_NUMBER) { - __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); - } else { - ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); - // NumberToSmi discards numbers that are not exact integers. - __ CallRuntime(Runtime::kNumberToSmi, 1); - } - if (!scratch_.is(eax)) { - // Save the conversion result before the pop instructions below - // have a chance to overwrite it. - __ mov(scratch_, eax); - } - __ pop(index_); - __ pop(object_); - // Reload the instance type. 
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); - __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); - call_helper.AfterCall(masm); - // If index is still not a smi, it must be out of range. - STATIC_ASSERT(kSmiTag == 0); - __ test(scratch_, Immediate(kSmiTagMask)); - __ j(not_zero, index_out_of_range_); - // Otherwise, return to the fast path. - __ jmp(&got_smi_index_); - - // Call runtime. We get here when the receiver is a string and the - // index is a number, but the code of getting the actual character - // is too complex (e.g., when the string needs to be flattened). - __ bind(&call_runtime_); - call_helper.BeforeCall(masm); - __ push(object_); - __ push(index_); - __ CallRuntime(Runtime::kStringCharCodeAt, 2); - if (!result_.is(eax)) { - __ mov(result_, eax); - } - call_helper.AfterCall(masm); - __ jmp(&exit_); - - __ Abort("Unexpected fallthrough from CharCodeAt slow case"); -} - - -// ------------------------------------------------------------------------- -// StringCharFromCodeGenerator - -void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { - // Fast case of Heap::LookupSingleCharacterStringFromCode. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiShiftSize == 0); - ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); - __ test(code_, - Immediate(kSmiTagMask | - ((~String::kMaxAsciiCharCode) << kSmiTagSize))); - __ j(not_zero, &slow_case_, not_taken); - - __ Set(result_, Immediate(Factory::single_character_string_cache())); - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiShiftSize == 0); - // At this point code register contains smi tagged ascii char code. - __ mov(result_, FieldOperand(result_, - code_, times_half_pointer_size, - FixedArray::kHeaderSize)); - __ cmp(result_, Factory::undefined_value()); - __ j(equal, &slow_case_, not_taken); - __ bind(&exit_); -} - - -void StringCharFromCodeGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - __ Abort("Unexpected fallthrough to CharFromCode slow case"); - - __ bind(&slow_case_); - call_helper.BeforeCall(masm); - __ push(code_); - __ CallRuntime(Runtime::kCharFromCode, 1); - if (!result_.is(eax)) { - __ mov(result_, eax); - } - call_helper.AfterCall(masm); - __ jmp(&exit_); - - __ Abort("Unexpected fallthrough from CharFromCode slow case"); -} - - -// ------------------------------------------------------------------------- -// StringCharAtGenerator - -void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { - char_code_at_generator_.GenerateFast(masm); - char_from_code_generator_.GenerateFast(masm); -} - - -void StringCharAtGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - char_code_at_generator_.GenerateSlow(masm, call_helper); - char_from_code_generator_.GenerateSlow(masm, call_helper); -} - - -void StringAddStub::Generate(MacroAssembler* masm) { - Label string_add_runtime; - - // Load the two arguments. - __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument. - __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument. - - // Make sure that both arguments are strings if not known in advance. - if (string_check_) { - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &string_add_runtime); - __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx); - __ j(above_equal, &string_add_runtime); - - // First argument is a a string, test second. 
- __ test(edx, Immediate(kSmiTagMask)); - __ j(zero, &string_add_runtime); - __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx); - __ j(above_equal, &string_add_runtime); - } - - // Both arguments are strings. - // eax: first string - // edx: second string - // Check if either of the strings are empty. In that case return the other. - Label second_not_zero_length, both_not_zero_length; - __ mov(ecx, FieldOperand(edx, String::kLengthOffset)); - STATIC_ASSERT(kSmiTag == 0); - __ test(ecx, Operand(ecx)); - __ j(not_zero, &second_not_zero_length); - // Second string is empty, result is first string which is already in eax. - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - __ bind(&second_not_zero_length); - __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); - STATIC_ASSERT(kSmiTag == 0); - __ test(ebx, Operand(ebx)); - __ j(not_zero, &both_not_zero_length); - // First string is empty, result is second string which is in edx. - __ mov(eax, edx); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - - // Both strings are non-empty. - // eax: first string - // ebx: length of first string as a smi - // ecx: length of second string as a smi - // edx: second string - // Look at the length of the result of adding the two strings. - Label string_add_flat_result, longer_than_two; - __ bind(&both_not_zero_length); - __ add(ebx, Operand(ecx)); - STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength); - // Handle exceptionally long strings in the runtime system. - __ j(overflow, &string_add_runtime); - // Use the runtime system when adding two one character strings, as it - // contains optimizations for this specific case using the symbol table. - __ cmp(Operand(ebx), Immediate(Smi::FromInt(2))); - __ j(not_equal, &longer_than_two); - - // Check that both strings are non-external ascii strings. - __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, - &string_add_runtime); - - // Get the two characters forming the sub string. - __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize)); - __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize)); - - // Try to lookup two character string in symbol table. If it is not found - // just allocate a new one. - Label make_two_character_string, make_flat_ascii_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( - masm, ebx, ecx, eax, edx, edi, &make_two_character_string); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - - __ bind(&make_two_character_string); - __ Set(ebx, Immediate(Smi::FromInt(2))); - __ jmp(&make_flat_ascii_string); - - __ bind(&longer_than_two); - // Check if resulting string will be flat. - __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength))); - __ j(below, &string_add_flat_result); - - // If result is not supposed to be flat allocate a cons string object. If both - // strings are ascii the result is an ascii cons string. - Label non_ascii, allocated, ascii_data; - __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset)); - __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); - __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset)); - __ and_(ecx, Operand(edi)); - STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); - __ test(ecx, Immediate(kAsciiStringTag)); - __ j(zero, &non_ascii); - __ bind(&ascii_data); - // Allocate an acsii cons string. 
- __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime); - __ bind(&allocated); - // Fill the fields of the cons string. - if (FLAG_debug_code) __ AbortIfNotSmi(ebx); - __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx); - __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset), - Immediate(String::kEmptyHashField)); - __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax); - __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx); - __ mov(eax, ecx); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - __ bind(&non_ascii); - // At least one of the strings is two-byte. Check whether it happens - // to contain only ascii characters. - // ecx: first instance type AND second instance type. - // edi: second instance type. - __ test(ecx, Immediate(kAsciiDataHintMask)); - __ j(not_zero, &ascii_data); - __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); - __ xor_(edi, Operand(ecx)); - STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); - __ and_(edi, kAsciiStringTag | kAsciiDataHintTag); - __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag); - __ j(equal, &ascii_data); - // Allocate a two byte cons string. - __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime); - __ jmp(&allocated); - - // Handle creating a flat result. First check that both strings are not - // external strings. - // eax: first string - // ebx: length of resulting flat string as a smi - // edx: second string - __ bind(&string_add_flat_result); - __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); - __ and_(ecx, kStringRepresentationMask); - __ cmp(ecx, kExternalStringTag); - __ j(equal, &string_add_runtime); - __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); - __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); - __ and_(ecx, kStringRepresentationMask); - __ cmp(ecx, kExternalStringTag); - __ j(equal, &string_add_runtime); - // Now check if both strings are ascii strings. - // eax: first string - // ebx: length of resulting flat string as a smi - // edx: second string - Label non_ascii_string_add_flat_result; - STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); - __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); - __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); - __ j(zero, &non_ascii_string_add_flat_result); - __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); - __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); - __ j(zero, &string_add_runtime); - - __ bind(&make_flat_ascii_string); - // Both strings are ascii strings. As they are short they are both flat. - // ebx: length of resulting flat string as a smi - __ SmiUntag(ebx); - __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime); - // eax: result string - __ mov(ecx, eax); - // Locate first character of result. - __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // Load first argument and locate first character. 
- __ mov(edx, Operand(esp, 2 * kPointerSize)); - __ mov(edi, FieldOperand(edx, String::kLengthOffset)); - __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // eax: result string - // ecx: first character of result - // edx: first char of first argument - // edi: length of first argument - StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true); - // Load second argument and locate first character. - __ mov(edx, Operand(esp, 1 * kPointerSize)); - __ mov(edi, FieldOperand(edx, String::kLengthOffset)); - __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // eax: result string - // ecx: next character of result - // edx: first char of second argument - // edi: length of second argument - StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - - // Handle creating a flat two byte result. - // eax: first string - known to be two byte - // ebx: length of resulting flat string as a smi - // edx: second string - __ bind(&non_ascii_string_add_flat_result); - __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); - __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); - __ j(not_zero, &string_add_runtime); - // Both strings are two byte strings. As they are short they are both - // flat. - __ SmiUntag(ebx); - __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime); - // eax: result string - __ mov(ecx, eax); - // Locate first character of result. - __ add(Operand(ecx), - Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // Load first argument and locate first character. - __ mov(edx, Operand(esp, 2 * kPointerSize)); - __ mov(edi, FieldOperand(edx, String::kLengthOffset)); - __ SmiUntag(edi); - __ add(Operand(edx), - Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // eax: result string - // ecx: first character of result - // edx: first char of first argument - // edi: length of first argument - StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false); - // Load second argument and locate first character. - __ mov(edx, Operand(esp, 1 * kPointerSize)); - __ mov(edi, FieldOperand(edx, String::kLengthOffset)); - __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // eax: result string - // ecx: next character of result - // edx: first char of second argument - // edi: length of second argument - StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - - // Just jump to runtime to add the two strings. - __ bind(&string_add_runtime); - __ TailCallRuntime(Runtime::kStringAdd, 2, 1); -} - - -void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - bool ascii) { - Label loop; - __ bind(&loop); - // This loop just copies one character at a time, as it is only used for very - // short strings. 
- if (ascii) { - __ mov_b(scratch, Operand(src, 0)); - __ mov_b(Operand(dest, 0), scratch); - __ add(Operand(src), Immediate(1)); - __ add(Operand(dest), Immediate(1)); - } else { - __ mov_w(scratch, Operand(src, 0)); - __ mov_w(Operand(dest, 0), scratch); - __ add(Operand(src), Immediate(2)); - __ add(Operand(dest), Immediate(2)); - } - __ sub(Operand(count), Immediate(1)); - __ j(not_zero, &loop); -} - - -void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - bool ascii) { - // Copy characters using rep movs of doublewords. - // The destination is aligned on a 4 byte boundary because we are - // copying to the beginning of a newly allocated string. - ASSERT(dest.is(edi)); // rep movs destination - ASSERT(src.is(esi)); // rep movs source - ASSERT(count.is(ecx)); // rep movs count - ASSERT(!scratch.is(dest)); - ASSERT(!scratch.is(src)); - ASSERT(!scratch.is(count)); - - // Nothing to do for zero characters. - Label done; - __ test(count, Operand(count)); - __ j(zero, &done); - - // Make count the number of bytes to copy. - if (!ascii) { - __ shl(count, 1); - } - - // Don't enter the rep movs if there are less than 4 bytes to copy. - Label last_bytes; - __ test(count, Immediate(~3)); - __ j(zero, &last_bytes); - - // Copy from edi to esi using rep movs instruction. - __ mov(scratch, count); - __ sar(count, 2); // Number of doublewords to copy. - __ cld(); - __ rep_movs(); - - // Find number of bytes left. - __ mov(count, scratch); - __ and_(count, 3); - - // Check if there are more bytes to copy. - __ bind(&last_bytes); - __ test(count, Operand(count)); - __ j(zero, &done); - - // Copy remaining characters. - Label loop; - __ bind(&loop); - __ mov_b(scratch, Operand(src, 0)); - __ mov_b(Operand(dest, 0), scratch); - __ add(Operand(src), Immediate(1)); - __ add(Operand(dest), Immediate(1)); - __ sub(Operand(count), Immediate(1)); - __ j(not_zero, &loop); - - __ bind(&done); -} - - -void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, - Register c1, - Register c2, - Register scratch1, - Register scratch2, - Register scratch3, - Label* not_found) { - // Register scratch3 is the general scratch register in this function. - Register scratch = scratch3; - - // Make sure that both characters are not digits as such strings has a - // different hash algorithm. Don't try to look for these in the symbol table. - Label not_array_index; - __ mov(scratch, c1); - __ sub(Operand(scratch), Immediate(static_cast('0'))); - __ cmp(Operand(scratch), Immediate(static_cast('9' - '0'))); - __ j(above, ¬_array_index); - __ mov(scratch, c2); - __ sub(Operand(scratch), Immediate(static_cast('0'))); - __ cmp(Operand(scratch), Immediate(static_cast('9' - '0'))); - __ j(below_equal, not_found); - - __ bind(¬_array_index); - // Calculate the two character string hash. - Register hash = scratch1; - GenerateHashInit(masm, hash, c1, scratch); - GenerateHashAddCharacter(masm, hash, c2, scratch); - GenerateHashGetHash(masm, hash, scratch); - - // Collect the two characters in a register. - Register chars = c1; - __ shl(c2, kBitsPerByte); - __ or_(chars, Operand(c2)); - - // chars: two character string, char 1 in byte 0 and char 2 in byte 1. - // hash: hash of two character string. - - // Load the symbol table. 
- Register symbol_table = c2; - ExternalReference roots_address = ExternalReference::roots_address(); - __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex)); - __ mov(symbol_table, - Operand::StaticArray(scratch, times_pointer_size, roots_address)); - - // Calculate capacity mask from the symbol table capacity. - Register mask = scratch2; - __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset)); - __ SmiUntag(mask); - __ sub(Operand(mask), Immediate(1)); - - // Registers - // chars: two character string, char 1 in byte 0 and char 2 in byte 1. - // hash: hash of two character string - // symbol_table: symbol table - // mask: capacity mask - // scratch: - - - // Perform a number of probes in the symbol table. - static const int kProbes = 4; - Label found_in_symbol_table; - Label next_probe[kProbes], next_probe_pop_mask[kProbes]; - for (int i = 0; i < kProbes; i++) { - // Calculate entry in symbol table. - __ mov(scratch, hash); - if (i > 0) { - __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i))); - } - __ and_(scratch, Operand(mask)); - - // Load the entry from the symbol table. - Register candidate = scratch; // Scratch register contains candidate. - STATIC_ASSERT(SymbolTable::kEntrySize == 1); - __ mov(candidate, - FieldOperand(symbol_table, - scratch, - times_pointer_size, - SymbolTable::kElementsStartOffset)); - - // If entry is undefined no string with this hash can be found. - __ cmp(candidate, Factory::undefined_value()); - __ j(equal, not_found); - - // If length is not 2 the string is not a candidate. - __ cmp(FieldOperand(candidate, String::kLengthOffset), - Immediate(Smi::FromInt(2))); - __ j(not_equal, &next_probe[i]); - - // As we are out of registers save the mask on the stack and use that - // register as a temporary. - __ push(mask); - Register temp = mask; - - // Check that the candidate is a non-external ascii string. - __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset)); - __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); - __ JumpIfInstanceTypeIsNotSequentialAscii( - temp, temp, &next_probe_pop_mask[i]); - - // Check if the two characters match. - __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize)); - __ and_(temp, 0x0000ffff); - __ cmp(chars, Operand(temp)); - __ j(equal, &found_in_symbol_table); - __ bind(&next_probe_pop_mask[i]); - __ pop(mask); - __ bind(&next_probe[i]); - } - - // No matching 2 character string found by probing. - __ jmp(not_found); - - // Scratch register contains result when we fall through to here. - Register result = scratch; - __ bind(&found_in_symbol_table); - __ pop(mask); // Pop saved mask from the stack. 
- if (!result.is(eax)) { - __ mov(eax, result); - } -} - - -void StringHelper::GenerateHashInit(MacroAssembler* masm, - Register hash, - Register character, - Register scratch) { - // hash = character + (character << 10); - __ mov(hash, character); - __ shl(hash, 10); - __ add(hash, Operand(character)); - // hash ^= hash >> 6; - __ mov(scratch, hash); - __ sar(scratch, 6); - __ xor_(hash, Operand(scratch)); -} - - -void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, - Register hash, - Register character, - Register scratch) { - // hash += character; - __ add(hash, Operand(character)); - // hash += hash << 10; - __ mov(scratch, hash); - __ shl(scratch, 10); - __ add(hash, Operand(scratch)); - // hash ^= hash >> 6; - __ mov(scratch, hash); - __ sar(scratch, 6); - __ xor_(hash, Operand(scratch)); -} - - -void StringHelper::GenerateHashGetHash(MacroAssembler* masm, - Register hash, - Register scratch) { - // hash += hash << 3; - __ mov(scratch, hash); - __ shl(scratch, 3); - __ add(hash, Operand(scratch)); - // hash ^= hash >> 11; - __ mov(scratch, hash); - __ sar(scratch, 11); - __ xor_(hash, Operand(scratch)); - // hash += hash << 15; - __ mov(scratch, hash); - __ shl(scratch, 15); - __ add(hash, Operand(scratch)); - - // if (hash == 0) hash = 27; - Label hash_not_zero; - __ test(hash, Operand(hash)); - __ j(not_zero, &hash_not_zero); - __ mov(hash, Immediate(27)); - __ bind(&hash_not_zero); -} - - -void SubStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - // Stack frame on entry. - // esp[0]: return address - // esp[4]: to - // esp[8]: from - // esp[12]: string - - // Make sure first argument is a string. - __ mov(eax, Operand(esp, 3 * kPointerSize)); - STATIC_ASSERT(kSmiTag == 0); - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &runtime); - Condition is_string = masm->IsObjectStringType(eax, ebx, ebx); - __ j(NegateCondition(is_string), &runtime); - - // eax: string - // ebx: instance type - - // Calculate length of sub string using the smi values. - Label result_longer_than_two; - __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index. - __ test(ecx, Immediate(kSmiTagMask)); - __ j(not_zero, &runtime); - __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index. - __ test(edx, Immediate(kSmiTagMask)); - __ j(not_zero, &runtime); - __ sub(ecx, Operand(edx)); - __ cmp(ecx, FieldOperand(eax, String::kLengthOffset)); - Label return_eax; - __ j(equal, &return_eax); - // Special handling of sub-strings of length 1 and 2. One character strings - // are handled in the runtime system (looked up in the single character - // cache). Two character strings are looked for in the symbol cache. - __ SmiUntag(ecx); // Result length is no longer smi. - __ cmp(ecx, 2); - __ j(greater, &result_longer_than_two); - __ j(less, &runtime); - - // Sub string of length 2 requested. - // eax: string - // ebx: instance type - // ecx: sub string length (value is 2) - // edx: from index (smi) - __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime); - - // Get the two characters forming the sub string. - __ SmiUntag(edx); // From index is no longer smi. - __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize)); - __ movzx_b(ecx, - FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1)); - - // Try to lookup two character string in symbol table. 
- Label make_two_character_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( - masm, ebx, ecx, eax, edx, edi, &make_two_character_string); - __ ret(3 * kPointerSize); - - __ bind(&make_two_character_string); - // Setup registers for allocating the two character string. - __ mov(eax, Operand(esp, 3 * kPointerSize)); - __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); - __ Set(ecx, Immediate(2)); - - __ bind(&result_longer_than_two); - // eax: string - // ebx: instance type - // ecx: result string length - // Check for flat ascii string - Label non_ascii_flat; - __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat); - - // Allocate the result. - __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime); - - // eax: result string - // ecx: result string length - __ mov(edx, esi); // esi used by following code. - // Locate first character of result. - __ mov(edi, eax); - __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // Load string argument and locate character of sub string start. - __ mov(esi, Operand(esp, 3 * kPointerSize)); - __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from - __ SmiUntag(ebx); - __ add(esi, Operand(ebx)); - - // eax: result string - // ecx: result length - // edx: original value of esi - // edi: first character of result - // esi: character of sub string start - StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true); - __ mov(esi, edx); // Restore esi. - __ IncrementCounter(&Counters::sub_string_native, 1); - __ ret(3 * kPointerSize); - - __ bind(&non_ascii_flat); - // eax: string - // ebx: instance type & kStringRepresentationMask | kStringEncodingMask - // ecx: result string length - // Check for flat two byte string - __ cmp(ebx, kSeqStringTag | kTwoByteStringTag); - __ j(not_equal, &runtime); - - // Allocate the result. - __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime); - - // eax: result string - // ecx: result string length - __ mov(edx, esi); // esi used by following code. - // Locate first character of result. - __ mov(edi, eax); - __ add(Operand(edi), - Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // Load string argument and locate character of sub string start. - __ mov(esi, Operand(esp, 3 * kPointerSize)); - __ add(Operand(esi), - Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from - // As from is a smi it is 2 times the value which matches the size of a two - // byte character. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(esi, Operand(ebx)); - - // eax: result string - // ecx: result length - // edx: original value of esi - // edi: first character of result - // esi: character of sub string start - StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false); - __ mov(esi, edx); // Restore esi. - - __ bind(&return_eax); - __ IncrementCounter(&Counters::sub_string_native, 1); - __ ret(3 * kPointerSize); - - // Just jump to runtime to create the sub string. 
- __ bind(&runtime); - __ TailCallRuntime(Runtime::kSubString, 3, 1); -} - - -void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, - Register left, - Register right, - Register scratch1, - Register scratch2, - Register scratch3) { - Label result_not_equal; - Label result_greater; - Label compare_lengths; - - __ IncrementCounter(&Counters::string_compare_native, 1); - - // Find minimum length. - Label left_shorter; - __ mov(scratch1, FieldOperand(left, String::kLengthOffset)); - __ mov(scratch3, scratch1); - __ sub(scratch3, FieldOperand(right, String::kLengthOffset)); - - Register length_delta = scratch3; - - __ j(less_equal, &left_shorter); - // Right string is shorter. Change scratch1 to be length of right string. - __ sub(scratch1, Operand(length_delta)); - __ bind(&left_shorter); - - Register min_length = scratch1; - - // If either length is zero, just compare lengths. - __ test(min_length, Operand(min_length)); - __ j(zero, &compare_lengths); - - // Change index to run from -min_length to -1 by adding min_length - // to string start. This means that loop ends when index reaches zero, - // which doesn't need an additional compare. - __ SmiUntag(min_length); - __ lea(left, - FieldOperand(left, - min_length, times_1, - SeqAsciiString::kHeaderSize)); - __ lea(right, - FieldOperand(right, - min_length, times_1, - SeqAsciiString::kHeaderSize)); - __ neg(min_length); - - Register index = min_length; // index = -min_length; - - { - // Compare loop. - Label loop; - __ bind(&loop); - // Compare characters. - __ mov_b(scratch2, Operand(left, index, times_1, 0)); - __ cmpb(scratch2, Operand(right, index, times_1, 0)); - __ j(not_equal, &result_not_equal); - __ add(Operand(index), Immediate(1)); - __ j(not_zero, &loop); - } - - // Compare lengths - strings up to min-length are equal. - __ bind(&compare_lengths); - __ test(length_delta, Operand(length_delta)); - __ j(not_zero, &result_not_equal); - - // Result is EQUAL. - STATIC_ASSERT(EQUAL == 0); - STATIC_ASSERT(kSmiTag == 0); - __ Set(eax, Immediate(Smi::FromInt(EQUAL))); - __ ret(0); - - __ bind(&result_not_equal); - __ j(greater, &result_greater); - - // Result is LESS. - __ Set(eax, Immediate(Smi::FromInt(LESS))); - __ ret(0); - - // Result is GREATER. - __ bind(&result_greater); - __ Set(eax, Immediate(Smi::FromInt(GREATER))); - __ ret(0); -} - - -void StringCompareStub::Generate(MacroAssembler* masm) { - Label runtime; - - // Stack frame on entry. - // esp[0]: return address - // esp[4]: right string - // esp[8]: left string - - __ mov(edx, Operand(esp, 2 * kPointerSize)); // left - __ mov(eax, Operand(esp, 1 * kPointerSize)); // right - - Label not_same; - __ cmp(edx, Operand(eax)); - __ j(not_equal, ¬_same); - STATIC_ASSERT(EQUAL == 0); - STATIC_ASSERT(kSmiTag == 0); - __ Set(eax, Immediate(Smi::FromInt(EQUAL))); - __ IncrementCounter(&Counters::string_compare_native, 1); - __ ret(2 * kPointerSize); - - __ bind(¬_same); - - // Check that both objects are sequential ascii strings. - __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime); - - // Compare flat ascii strings. - // Drop arguments from the stack. - __ pop(ecx); - __ add(Operand(esp), Immediate(2 * kPointerSize)); - __ push(ecx); - GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi); - - // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) - // tagged as a small integer. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kStringCompare, 2, 1); -} - #undef __ #define __ masm. 
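Note on the string hashing used in the hunk above: GenerateHashInit, GenerateHashAddCharacter and GenerateHashGetHash build an add/shift/xor hash one character at a time in registers, and GenerateTwoCharacterSymbolTableProbe uses that hash to probe the symbol table for two-character strings before allocating a new one. Below is a minimal C++ sketch of the same recipe, transcribed from the comments in the assembly above. The helper names and the use of 32-bit unsigned arithmetic are illustrative assumptions; the stubs themselves operate on raw register values and the runtime's own hasher may differ in detail.

  #include <stdint.h>

  // Start the hash from the first character (hash = c + (c << 10); hash ^= hash >> 6).
  static uint32_t HashInit(uint32_t character) {
    uint32_t hash = character + (character << 10);
    hash ^= hash >> 6;
    return hash;
  }

  // Mix in one more character (hash += c; hash += hash << 10; hash ^= hash >> 6).
  static uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
    hash += character;
    hash += hash << 10;
    hash ^= hash >> 6;
    return hash;
  }

  // Finalize (hash += hash << 3; hash ^= hash >> 11; hash += hash << 15),
  // and map a zero result to the non-zero sentinel 27, as the stub does.
  static uint32_t HashGetHash(uint32_t hash) {
    hash += hash << 3;
    hash ^= hash >> 11;
    hash += hash << 15;
    if (hash == 0) hash = 27;
    return hash;
  }

  // Example: the hash of a two-character string (c1, c2), as computed before
  // the symbol table probe.
  // uint32_t h = HashGetHash(HashAddCharacter(HashInit(c1), c2));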
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h index ce1bcf6..d4cd8c6 100644 --- a/src/ia32/codegen-ia32.h +++ b/src/ia32/codegen-ia32.h @@ -803,327 +803,6 @@ class CodeGenerator: public AstVisitor { }; -// Compute a transcendental math function natively, or call the -// TranscendentalCache runtime function. -class TranscendentalCacheStub: public CodeStub { - public: - explicit TranscendentalCacheStub(TranscendentalCache::Type type) - : type_(type) {} - void Generate(MacroAssembler* masm); - private: - TranscendentalCache::Type type_; - Major MajorKey() { return TranscendentalCache; } - int MinorKey() { return type_; } - Runtime::FunctionId RuntimeFunction(); - void GenerateOperation(MacroAssembler* masm); -}; - - -class ToBooleanStub: public CodeStub { - public: - ToBooleanStub() { } - - void Generate(MacroAssembler* masm); - - private: - Major MajorKey() { return ToBoolean; } - int MinorKey() { return 0; } -}; - - -// Flag that indicates how to generate code for the stub GenericBinaryOpStub. -enum GenericBinaryFlags { - NO_GENERIC_BINARY_FLAGS = 0, - NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub. -}; - - -class GenericBinaryOpStub: public CodeStub { - public: - GenericBinaryOpStub(Token::Value op, - OverwriteMode mode, - GenericBinaryFlags flags, - TypeInfo operands_type) - : op_(op), - mode_(mode), - flags_(flags), - args_in_registers_(false), - args_reversed_(false), - static_operands_type_(operands_type), - runtime_operands_type_(BinaryOpIC::DEFAULT), - name_(NULL) { - if (static_operands_type_.IsSmi()) { - mode_ = NO_OVERWRITE; - } - use_sse3_ = CpuFeatures::IsSupported(SSE3); - ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); - } - - GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type) - : op_(OpBits::decode(key)), - mode_(ModeBits::decode(key)), - flags_(FlagBits::decode(key)), - args_in_registers_(ArgsInRegistersBits::decode(key)), - args_reversed_(ArgsReversedBits::decode(key)), - use_sse3_(SSE3Bits::decode(key)), - static_operands_type_(TypeInfo::ExpandedRepresentation( - StaticTypeInfoBits::decode(key))), - runtime_operands_type_(runtime_operands_type), - name_(NULL) { - } - - // Generate code to call the stub with the supplied arguments. This will add - // code at the call site to prepare arguments either in registers or on the - // stack together with the actual call. - void GenerateCall(MacroAssembler* masm, Register left, Register right); - void GenerateCall(MacroAssembler* masm, Register left, Smi* right); - void GenerateCall(MacroAssembler* masm, Smi* left, Register right); - - Result GenerateCall(MacroAssembler* masm, - VirtualFrame* frame, - Result* left, - Result* right); - - private: - Token::Value op_; - OverwriteMode mode_; - GenericBinaryFlags flags_; - bool args_in_registers_; // Arguments passed in registers not on the stack. - bool args_reversed_; // Left and right argument are swapped. - bool use_sse3_; - - // Number type information of operands, determined by code generator. - TypeInfo static_operands_type_; - - // Operand type information determined at runtime. 
- BinaryOpIC::TypeInfo runtime_operands_type_; - - char* name_; - - const char* GetName(); - -#ifdef DEBUG - void Print() { - PrintF("GenericBinaryOpStub %d (op %s), " - "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n", - MinorKey(), - Token::String(op_), - static_cast(mode_), - static_cast(flags_), - static_cast(args_in_registers_), - static_cast(args_reversed_), - static_operands_type_.ToString()); - } -#endif - - // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM. - class ModeBits: public BitField {}; - class OpBits: public BitField {}; - class SSE3Bits: public BitField {}; - class ArgsInRegistersBits: public BitField {}; - class ArgsReversedBits: public BitField {}; - class FlagBits: public BitField {}; - class StaticTypeInfoBits: public BitField {}; - class RuntimeTypeInfoBits: public BitField {}; - - Major MajorKey() { return GenericBinaryOp; } - int MinorKey() { - // Encode the parameters in a unique 18 bit value. - return OpBits::encode(op_) - | ModeBits::encode(mode_) - | FlagBits::encode(flags_) - | SSE3Bits::encode(use_sse3_) - | ArgsInRegistersBits::encode(args_in_registers_) - | ArgsReversedBits::encode(args_reversed_) - | StaticTypeInfoBits::encode( - static_operands_type_.ThreeBitRepresentation()) - | RuntimeTypeInfoBits::encode(runtime_operands_type_); - } - - void Generate(MacroAssembler* masm); - void GenerateSmiCode(MacroAssembler* masm, Label* slow); - void GenerateLoadArguments(MacroAssembler* masm); - void GenerateReturn(MacroAssembler* masm); - void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure); - void GenerateRegisterArgsPush(MacroAssembler* masm); - void GenerateTypeTransition(MacroAssembler* masm); - - bool ArgsInRegistersSupported() { - return op_ == Token::ADD || op_ == Token::SUB - || op_ == Token::MUL || op_ == Token::DIV; - } - bool IsOperationCommutative() { - return (op_ == Token::ADD) || (op_ == Token::MUL); - } - - void SetArgsInRegisters() { args_in_registers_ = true; } - void SetArgsReversed() { args_reversed_ = true; } - bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; } - bool HasArgsInRegisters() { return args_in_registers_; } - bool HasArgsReversed() { return args_reversed_; } - - bool ShouldGenerateSmiCode() { - return HasSmiCodeInStub() && - runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && - runtime_operands_type_ != BinaryOpIC::STRINGS; - } - - bool ShouldGenerateFPCode() { - return runtime_operands_type_ != BinaryOpIC::STRINGS; - } - - virtual int GetCodeKind() { return Code::BINARY_OP_IC; } - - virtual InlineCacheState GetICState() { - return BinaryOpIC::ToState(runtime_operands_type_); - } -}; - - -class StringHelper : public AllStatic { - public: - // Generate code for copying characters using a simple loop. This should only - // be used in places where the number of characters is small and the - // additional setup and checking in GenerateCopyCharactersREP adds too much - // overhead. Copying of overlapping regions is not supported. - static void GenerateCopyCharacters(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - bool ascii); - - // Generate code for copying characters using the rep movs instruction. - // Copies ecx characters from esi to edi. Copying of overlapping regions is - // not supported. - static void GenerateCopyCharactersREP(MacroAssembler* masm, - Register dest, // Must be edi. - Register src, // Must be esi. - Register count, // Must be ecx. - Register scratch, // Neither of above. 
- bool ascii); - - // Probe the symbol table for a two character string. If the string is - // not found by probing a jump to the label not_found is performed. This jump - // does not guarantee that the string is not in the symbol table. If the - // string is found the code falls through with the string in register eax. - static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, - Register c1, - Register c2, - Register scratch1, - Register scratch2, - Register scratch3, - Label* not_found); - - // Generate string hash. - static void GenerateHashInit(MacroAssembler* masm, - Register hash, - Register character, - Register scratch); - static void GenerateHashAddCharacter(MacroAssembler* masm, - Register hash, - Register character, - Register scratch); - static void GenerateHashGetHash(MacroAssembler* masm, - Register hash, - Register scratch); - - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); -}; - - -// Flag that indicates how to generate code for the stub StringAddStub. -enum StringAddFlags { - NO_STRING_ADD_FLAGS = 0, - NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. -}; - - -class StringAddStub: public CodeStub { - public: - explicit StringAddStub(StringAddFlags flags) { - string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); - } - - private: - Major MajorKey() { return StringAdd; } - int MinorKey() { return string_check_ ? 0 : 1; } - - void Generate(MacroAssembler* masm); - - // Should the stub check whether arguments are strings? - bool string_check_; -}; - - -class SubStringStub: public CodeStub { - public: - SubStringStub() {} - - private: - Major MajorKey() { return SubString; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); -}; - - -class StringCompareStub: public CodeStub { - public: - explicit StringCompareStub() { - } - - // Compare two flat ascii strings and returns result in eax after popping two - // arguments from the stack. - static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, - Register left, - Register right, - Register scratch1, - Register scratch2, - Register scratch3); - - private: - Major MajorKey() { return StringCompare; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); -}; - - -class NumberToStringStub: public CodeStub { - public: - NumberToStringStub() { } - - // Generate code to do a lookup in the number string cache. If the number in - // the register object is found in the cache the generated code falls through - // with the result in the result register. The object and the result register - // can be the same. If the number is not found in the cache the code jumps to - // the label not_found with only the content of register object unchanged. 
- static void GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - bool object_is_smi, - Label* not_found); - - private: - Major MajorKey() { return NumberToString; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "NumberToStringStub"; } - -#ifdef DEBUG - void Print() { - PrintF("NumberToStringStub\n"); - } -#endif -}; - - } } // namespace v8::internal #endif // V8_IA32_CODEGEN_IA32_H_ diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc index c00f626..707c07e 100644 --- a/src/ia32/full-codegen-ia32.cc +++ b/src/ia32/full-codegen-ia32.cc @@ -29,6 +29,7 @@ #if defined(V8_TARGET_ARCH_IA32) +#include "code-stubs-ia32.h" #include "codegen-inl.h" #include "compiler.h" #include "debug.h" diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc new file mode 100644 index 0000000..304e65c --- /dev/null +++ b/src/x64/code-stubs-x64.cc @@ -0,0 +1,4016 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#if defined(V8_TARGET_ARCH_X64) + +#include "bootstrapper.h" +#include "code-stubs-x64.h" +#include "codegen-inl.h" +#include "regexp-macro-assembler.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm) +void FastNewClosureStub::Generate(MacroAssembler* masm) { + // Create a new closure from the given function info in new + // space. Set the context to the current context in rsi. + Label gc; + __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT); + + // Get the function info from the stack. + __ movq(rdx, Operand(rsp, 1 * kPointerSize)); + + // Compute the function map in the current global context and set that + // as the map of the allocated object. 
+ __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset)); + __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); + __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx); + + // Initialize the rest of the function. We don't have to update the + // write barrier because the allocated object is in new space. + __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex); + __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex); + __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx); + __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx); + __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx); + __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx); + __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi); + __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx); + + // Initialize the code pointer in the function to be the one + // found in the shared function info object. + __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset)); + __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize)); + __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx); + + + // Return and remove the on-stack parameter. + __ ret(1 * kPointerSize); + + // Create a new closure through the slower runtime call. + __ bind(&gc); + __ pop(rcx); // Temporarily remove return address. + __ pop(rdx); + __ push(rsi); + __ push(rdx); + __ push(rcx); // Restore return address. + __ TailCallRuntime(Runtime::kNewClosure, 2, 1); +} + + +void FastNewContextStub::Generate(MacroAssembler* masm) { + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, + rax, rbx, rcx, &gc, TAG_OBJECT); + + // Get the function from the stack. + __ movq(rcx, Operand(rsp, 1 * kPointerSize)); + + // Setup the object header. + __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex); + __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); + __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length)); + + // Setup the fixed slots. + __ xor_(rbx, rbx); // Set to NULL. + __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx); + __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax); + __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx); + __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx); + + // Copy the global object from the surrounding context. + __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx); + + // Initialize the rest of the slots to undefined. + __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex); + for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { + __ movq(Operand(rax, Context::SlotOffset(i)), rbx); + } + + // Return and remove the on-stack parameter. + __ movq(rsi, rax); + __ ret(1 * kPointerSize); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kNewContext, 1, 1); +} + + +void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [rsp + kPointerSize]: constant elements. + // [rsp + (2 * kPointerSize)]: literal index. + // [rsp + (3 * kPointerSize)]: literals array. + + // All sizes here are multiples of kPointerSize. + int elements_size = (length_ > 0) ? 
FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
+ // Load boilerplate object into rcx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ movq(rcx, Operand(rsp, 3 * kPointerSize));
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ movq(rcx,
+ FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case);
+
+ if (FLAG_debug_code) {
+ const char* message;
+ Heap::RootListIndex expected_map_index;
+ if (mode_ == CLONE_ELEMENTS) {
+ message = "Expected (writable) fixed array";
+ expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else {
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ message = "Expected copy-on-write fixed array";
+ expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+ }
+ __ push(rcx);
+ __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ expected_map_index);
+ __ Assert(equal, message);
+ __ pop(rcx);
+ }
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rax, i), rbx);
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+ __ lea(rdx, Operand(rax, JSArray::kSize));
+ __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
+
+ // Copy the elements array.
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rdx, i), rbx);
+ }
+ }
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
+
+
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result, true_result, not_string;
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+ // 'null' => false.
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, &false_result);
+
+ // Get the map and type of the heap object.
+ // We don't use CmpObjectType because we manipulate the type field.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+
+ // Undetectable => false.
+ __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
+ __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, &false_result);
+
+ // JavaScript object => true.
+ __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ j(above_equal, &true_result);
+
+ // String value => false iff empty.
+ __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
+ __ j(above_equal, &not_string);
+ __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
+ __ SmiTest(rdx);
+ __ j(zero, &false_result);
+ __ jmp(&true_result);
+
+ __ bind(&not_string);
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &true_result);
+ // HeapNumber => false iff +0, -0, or NaN.
+ // These three cases set the zero flag when compared to zero using ucomisd.
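+ // (ucomisd sets ZF for equal operands and also for unordered (NaN) operands,
+ // and -0.0 compares equal to +0.0, so all three cases take the branch below.)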
+ __ xorpd(xmm0, xmm0); + __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset)); + __ j(zero, &false_result); + // Fall through to |true_result|. + + // Return 1/0 for true/false in rax. + __ bind(&true_result); + __ movq(rax, Immediate(1)); + __ ret(1 * kPointerSize); + __ bind(&false_result); + __ xor_(rax, rax); + __ ret(1 * kPointerSize); +} + + +const char* GenericBinaryOpStub::GetName() { + if (name_ != NULL) return name_; + const int kMaxNameLength = 100; + name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); + if (name_ == NULL) return "OOM"; + const char* op_name = Token::Name(op_); + const char* overwrite_name; + switch (mode_) { + case NO_OVERWRITE: overwrite_name = "Alloc"; break; + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; + default: overwrite_name = "UnknownOverwrite"; break; + } + + OS::SNPrintF(Vector(name_, kMaxNameLength), + "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s", + op_name, + overwrite_name, + (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", + args_in_registers_ ? "RegArgs" : "StackArgs", + args_reversed_ ? "_R" : "", + static_operands_type_.ToString(), + BinaryOpIC::GetName(runtime_operands_type_)); + return name_; +} + + +void GenericBinaryOpStub::GenerateCall( + MacroAssembler* masm, + Register left, + Register right) { + if (!ArgsInRegistersSupported()) { + // Pass arguments on the stack. + __ push(left); + __ push(right); + } else { + // The calling convention with registers is left in rdx and right in rax. + Register left_arg = rdx; + Register right_arg = rax; + if (!(left.is(left_arg) && right.is(right_arg))) { + if (left.is(right_arg) && right.is(left_arg)) { + if (IsOperationCommutative()) { + SetArgsReversed(); + } else { + __ xchg(left, right); + } + } else if (left.is(left_arg)) { + __ movq(right_arg, right); + } else if (right.is(right_arg)) { + __ movq(left_arg, left); + } else if (left.is(right_arg)) { + if (IsOperationCommutative()) { + __ movq(left_arg, right); + SetArgsReversed(); + } else { + // Order of moves important to avoid destroying left argument. + __ movq(left_arg, left); + __ movq(right_arg, right); + } + } else if (right.is(left_arg)) { + if (IsOperationCommutative()) { + __ movq(right_arg, left); + SetArgsReversed(); + } else { + // Order of moves important to avoid destroying right argument. + __ movq(right_arg, right); + __ movq(left_arg, left); + } + } else { + // Order of moves is not important. + __ movq(left_arg, left); + __ movq(right_arg, right); + } + } + + // Update flags to indicate that arguments are in registers. + SetArgsInRegisters(); + __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); + } + + // Call the stub. + __ CallStub(this); +} + + +void GenericBinaryOpStub::GenerateCall( + MacroAssembler* masm, + Register left, + Smi* right) { + if (!ArgsInRegistersSupported()) { + // Pass arguments on the stack. + __ push(left); + __ Push(right); + } else { + // The calling convention with registers is left in rdx and right in rax. + Register left_arg = rdx; + Register right_arg = rax; + if (left.is(left_arg)) { + __ Move(right_arg, right); + } else if (left.is(right_arg) && IsOperationCommutative()) { + __ Move(left_arg, right); + SetArgsReversed(); + } else { + // For non-commutative operations, left and right_arg might be + // the same register. Therefore, the order of the moves is + // important here in order to not overwrite left before moving + // it to left_arg. 
+ __ movq(left_arg, left);
+ __ Move(right_arg, right);
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Smi* left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ Push(left);
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (right.is(right_arg)) {
+ __ Move(left_arg, left);
+ } else if (right.is(left_arg) && IsOperationCommutative()) {
+ __ Move(right_arg, left);
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, right and left_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite right before moving
+ // it to right_arg.
+ __ movq(right_arg, right);
+ __ Move(left_arg, left);
+ }
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+ // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
+ // If the operands are not both numbers, jump to not_numbers.
+ // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
+ // NumberOperands assumes both are smis or heap numbers.
+ static void LoadSSE2SmiOperands(MacroAssembler* masm);
+ static void LoadSSE2NumberOperands(MacroAssembler* masm);
+ static void LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers);
+
+ // Takes the operands in rdx and rax and loads them as integers in rax
+ // and rcx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ Label* operand_conversion_failure,
+ Register heap_number_map);
+ // As above, but we know the operands to be numbers. In that case,
+ // conversion can't fail.
+ static void LoadNumbersAsIntegers(MacroAssembler* masm);
+};
+
+
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+ // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
+ // dividend in rax and rdx free for the division. Use rax, rbx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = rdx;
+ Register right = rax;
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ left = rax;
+ right = rbx;
+ if (HasArgsInRegisters()) {
+ __ movq(rbx, rax);
+ __ movq(rax, rdx);
+ }
+ }
+ if (!HasArgsInRegisters()) {
+ __ movq(right, Operand(rsp, 1 * kPointerSize));
+ __ movq(left, Operand(rsp, 2 * kPointerSize));
+ }
+
+ Label not_smis;
+ // 2. Smi check both operands.
+ if (static_operands_type_.IsSmi()) {
+ // Skip smi check if we know that both arguments are smis.
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+ if (op_ == Token::BIT_OR) {
+ // Handle OR here, since we do extra smi-checking in the or code below.
+ __ SmiOr(right, right, left);
+ GenerateReturn(masm);
+ return;
+ }
+ } else {
+ if (op_ != Token::BIT_OR) {
+ // Skip the check for OR as it is better combined with the
+ // actual operation.
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ __ JumpIfNotBothSmi(left, right, &not_smis);
+ }
+ }
+
+ // 3. Operands are both smis (except for OR), perform the operation leaving
+ // the result in rax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
+ switch (op_) {
+ case Token::ADD: {
+ ASSERT(right.is(rax));
+ __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
+ break;
+ }
+
+ case Token::SUB: {
+ __ SmiSub(left, left, right, &use_fp_on_smis);
+ __ movq(rax, left);
+ break;
+ }
+
+ case Token::MUL:
+ ASSERT(right.is(rax));
+ __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
+ break;
+
+ case Token::DIV:
+ ASSERT(left.is(rax));
+ __ SmiDiv(left, left, right, &use_fp_on_smis);
+ break;
+
+ case Token::MOD:
+ ASSERT(left.is(rax));
+ __ SmiMod(left, left, right, slow);
+ break;
+
+ case Token::BIT_OR:
+ ASSERT(right.is(rax));
+ __ movq(rcx, right); // Save the right operand.
+ __ SmiOr(right, right, left); // BIT_OR is commutative.
+ __ testb(right, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smis);
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(rax));
+ __ SmiAnd(right, right, left); // BIT_AND is commutative.
+ break;
+
+ case Token::BIT_XOR:
+ ASSERT(right.is(rax));
+ __ SmiXor(right, right, left); // BIT_XOR is commutative.
+ break;
+
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ switch (op_) {
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(left, left, right);
+ break;
+ case Token::SHR:
+ __ SmiShiftLogicalRight(left, left, right, slow);
+ break;
+ case Token::SHL:
+ __ SmiShiftLeft(left, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ movq(rax, left);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // 4. Emit return of result in rax.
+ GenerateReturn(masm);
+
+ // 5. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ ASSERT(use_fp_on_smis.is_linked());
+ __ bind(&use_fp_on_smis);
+ if (op_ == Token::DIV) {
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ }
+ // left is rdx, right is rax.
+ __ AllocateHeapNumber(rbx, rcx, slow);
+ FloatingPointHelper::LoadSSE2SmiOperands(masm);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rbx);
+ GenerateReturn(masm);
+ }
+ default:
+ break;
+ }
+
+ // 6. Non-smi operands, fall out to the non-smi code with the operands in
+ // rdx and rax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+
+ switch (op_) {
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in rax, rbx at this point.
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ break;
+
+ case Token::BIT_OR:
+ // Right operand is saved in rcx and rax was destroyed by the smi
+ // operation.
+ __ movq(rax, rcx);
+ break;
+
+ default:
+ break;
+ }
+}
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (ShouldGenerateSmiCode()) {
+ GenerateSmiCode(masm, &call_runtime);
+ } else if (op_ != Token::MOD) {
+ if (!HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+ }
+ // Floating point case.
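+ // The FP path below loads both operands into xmm0/xmm1 as doubles, performs
+ // the operation with SSE2, and stores the result into a heap number, reusing
+ // an operand's heap number when the overwrite mode allows it.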
+ if (ShouldGenerateFPCode()) {
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-smi argument occurs
+ // (and only if smi code is generated). This is the right moment to
+ // patch to HEAP_NUMBERS state. The transition is attempted only for
+ // the four basic operations. The stub stays in the DEFAULT state
+ // forever for all other operations (also if smi code is skipped).
+ GenerateTypeTransition(masm);
+ break;
+ }
+
+ Label not_floats;
+ // rax: y
+ // rdx: x
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadSSE2NumberOperands(masm);
+ } else {
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
+ }
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
+ switch (mode) {
+ case OVERWRITE_LEFT:
+ __ JumpIfNotSmi(rdx, &skip_allocation);
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rdx, rbx);
+ __ bind(&skip_allocation);
+ __ movq(rax, rdx);
+ break;
+ case OVERWRITE_RIGHT:
+ // If the argument in rax is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rax, rbx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ __ bind(&not_floats);
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ !HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-number argument
+ // occurs (and only if smi code is skipped from the stub, otherwise
+ // the patching has already been done earlier in this case branch).
+ // A perfect moment to try patching to STRINGS for ADD operation.
+ if (op_ == Token::ADD) {
+ GenerateTypeTransition(masm);
+ }
+ }
+ break;
+ }
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label skip_allocation, non_smi_shr_result;
+ Register heap_number_map = r9;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadNumbersAsIntegers(masm);
+ } else {
+ FloatingPointHelper::LoadAsIntegers(masm,
+ &call_runtime,
+ heap_number_map);
+ }
+ switch (op_) {
+ case Token::BIT_OR: __ orl(rax, rcx); break;
+ case Token::BIT_AND: __ andl(rax, rcx); break;
+ case Token::BIT_XOR: __ xorl(rax, rcx); break;
+ case Token::SAR: __ sarl_cl(rax); break;
+ case Token::SHL: __ shll_cl(rax); break;
+ case Token::SHR: {
+ __ shrl_cl(rax);
+ // Check if result is negative. This can only happen for a shift
+ // by zero.
+ __ testl(rax, rax);
+ __ j(negative, &non_smi_shr_result);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ STATIC_ASSERT(kSmiValueSize == 32);
+ // Tag smi result and return.
+ __ Integer32ToSmi(rax, rax);
+ GenerateReturn(masm);
+
+ // All bit-ops except SHR return a signed int32 that can be
+ // returned immediately as a smi.
+ // We might need to allocate a HeapNumber if we shift a negative
+ // number right by zero (i.e., convert to UInt32).
+ if (op_ == Token::SHR) {
+ ASSERT(non_smi_shr_result.is_linked());
+ __ bind(&non_smi_shr_result);
+ // Allocate a heap number if needed.
+ __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate heap number in new space.
+ // Not using AllocateHeapNumber macro in order to reuse
+ // already loaded heap_number_map.
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ rax,
+ rcx,
+ no_reg,
+ &call_runtime,
+ TAG_OBJECT);
+ // Set the map.
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+ heap_number_map);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ __ cvtqsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ }
+
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+ }
+
+ // If all else fails, use the runtime system to get the correct
+ // result. If arguments were passed in registers, now place them on the
+ // stack in the correct order below the return address.
+ __ bind(&call_runtime);
+
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ switch (op_) {
+ case Token::ADD: {
+ // Registers containing left and right operands respectively.
+ Register lhs, rhs;
+
+ if (HasArgsReversed()) {
+ lhs = rax;
+ rhs = rdx;
+ } else {
+ lhs = rdx;
+ rhs = rax;
+ }
+
+ // Test for string arguments before calling runtime.
+ Label not_strings, both_strings, not_string1, string1, string1_smi2;
+
+ // If this stub has already generated FP-specific code then the arguments
+ // are already in rdx and rax.
+ if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+
+ Condition is_smi;
+ is_smi = masm->CheckSmi(lhs);
+ __ j(is_smi, &not_string1);
+ __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
+ __ j(above_equal, &not_string1);
+
+ // First argument is a string, test second.
+ is_smi = masm->CheckSmi(rhs);
+ __ j(is_smi, &string1_smi2);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
+ __ j(above_equal, &string1);
+
+ // First and second argument are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to lookup the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, rhs, rbx, rcx, r8, true, &string1);
+
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+ __ TailCallStub(&string_add_stub);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ is_smi = masm->CheckSmi(rhs);
+ __ j(is_smi, &not_strings);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
+ __ j(above_equal, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ }
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+ ASSERT(!HasArgsInRegisters());
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+ // If arguments are not passed in registers remove them from the stack before
+ // returning.
+ if (!HasArgsInRegisters()) {
+ __ ret(2 * kPointerSize); // Remove both operands
+ } else {
+ __ ret(0);
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ ASSERT(HasArgsInRegisters());
+ __ pop(rcx);
+ if (HasArgsReversed()) {
+ __ push(rax);
+ __ push(rdx);
+ } else {
+ __ push(rdx);
+ __ push(rax);
+ }
+ __ push(rcx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ // Ensure the operands are on the stack.
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ // Left and right arguments are already on stack.
+ __ pop(rcx); // Save the return address.
+
+ // Push this stub's key.
+ __ Push(Smi::FromInt(MinorKey()));
+
+ // Although the operation and the type info are encoded into the key,
+ // the encoding is opaque, so push them too.
+ __ Push(Smi::FromInt(op_));
+
+ __ Push(Smi::FromInt(runtime_operands_type_));
+
+ __ push(rcx); // The return address.
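+ // The stack now holds, from top to bottom: the return address, the runtime
+ // type info, the operation token, the stub's minor key and the two operands,
+ // which is the argument layout expected by the BinaryOp_Patch IC utility.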
+
+ // Perform patching to an appropriate fast case and return the result.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ GenericBinaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Input on stack:
+ // rsp[8]: argument (should be number).
+ // rsp[0]: return address.
+ Label runtime_call;
+ Label runtime_call_clear_stack;
+ Label input_not_smi;
+ Label loaded;
+ // Test that rax is a number.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ JumpIfNotSmi(rax, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the bits of the double into rbx.
+ __ SmiToInteger32(rax, rax);
+ __ subq(rsp, Immediate(kPointerSize));
+ __ cvtlsi2sd(xmm1, rax);
+ __ movsd(Operand(rsp, 0), xmm1);
+ __ movq(rbx, xmm1);
+ __ movq(rdx, xmm1);
+ __ fld_d(Operand(rsp, 0));
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ Move(rbx, Factory::heap_number_map());
+ __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // bits into rbx.
+ __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rdx, rbx);
+ __ bind(&loaded);
+ // ST[0] == double value
+ // rbx = bits of double value.
+ // rdx = also bits of double value.
+ // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
+ // h = h0 = bits ^ (bits >> 32);
+ // h ^= h >> 16;
+ // h ^= h >> 8;
+ // h = h & (cacheSize - 1);
+ // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
+ __ sar(rdx, Immediate(32));
+ __ xorl(rdx, rbx);
+ __ movl(rcx, rdx);
+ __ movl(rax, rdx);
+ __ movl(rdi, rdx);
+ __ sarl(rdx, Immediate(8));
+ __ sarl(rcx, Immediate(16));
+ __ sarl(rax, Immediate(24));
+ __ xorl(rcx, rdx);
+ __ xorl(rax, rdi);
+ __ xorl(rcx, rax);
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
+
+ // ST[0] == double value.
+ // rbx = bits of double value.
+ // rcx = TranscendentalCache::hash(double value).
+ __ movq(rax, ExternalReference::transcendental_cache_array_address());
+ // rax points to cache array.
+ __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // rax points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ testq(rax, rax);
+ __ j(zero, &runtime_call_clear_stack);
+#ifdef DEBUG
+ // Check that the layout of cache elements match expectations.
+ { // NOLINT - doesn't like a single brace on a line.
+ TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ // Two uint_32's and a pointer per element.
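+ // The expected layout is essentially struct Element { uint32_t in[2];
+ // Object* output; }, i.e. two 4-byte inputs followed by an 8-byte pointer.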
+ CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
+ CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
+ CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
+ CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
+ }
+#endif
+ // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
+ __ addl(rcx, rcx);
+ __ lea(rcx, Operand(rax, rcx, times_8, 0));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ Label cache_miss;
+ __ cmpq(rbx, Operand(rcx, 0));
+ __ j(not_equal, &cache_miss);
+ // Cache hit!
+ __ movq(rax, Operand(rcx, 2 * kIntSize));
+ __ fstp(0); // Clear FPU stack.
+ __ ret(kPointerSize);
+
+ __ bind(&cache_miss);
+ // Update cache with new value.
+ Label nan_result;
+ GenerateOperation(masm, &nan_result);
+ __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
+ __ movq(Operand(rcx, 0), rbx);
+ __ movq(Operand(rcx, 2 * kIntSize), rax);
+ __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ ret(kPointerSize);
+
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+
+ __ bind(&nan_result);
+ __ fstp(0); // Remove argument from FPU stack.
+ __ LoadRoot(rax, Heap::kNanValueRootIndex);
+ __ movq(Operand(rcx, 0), rbx);
+ __ movq(Operand(rcx, 2 * kIntSize), rax);
+ __ ret(kPointerSize);
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
+ Label* on_nan_result) {
+ // Registers:
+ // rbx: Bits of input double. Must be preserved.
+ // rcx: Pointer to cache entry. Must be preserved.
+ // st(0): Input double
+ Label done;
+ ASSERT(type_ == TranscendentalCache::SIN ||
+ type_ == TranscendentalCache::COS);
+ // More transcendental types can be added later.
+
+ // Both fsin and fcos require arguments in the range +/-2^63 and
+ // return NaN for infinities and NaN. They can share all code except
+ // the actual fsin/fcos operation.
+ Label in_range;
+ // If argument is outside the range -2^63..2^63, fsin/cos doesn't
+ // work. We must reduce it to the appropriate range.
+ __ movq(rdi, rbx);
+ // Move exponent and sign bits to low bits.
+ __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
+ // Remove sign bit.
+ __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
+ int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
+ __ cmpl(rdi, Immediate(supported_exponent_limit));
+ __ j(below, &in_range);
+ // Check for infinity and NaN. Both return NaN for sin.
+ __ cmpl(rdi, Immediate(0x7ff));
+ __ j(equal, on_nan_result);
+
+ // Use fpmod to restrict argument to the range +/-2*PI.
+ __ fldpi();
+ __ fadd(0);
+ __ fld(1);
+ // FPU Stack: input, 2*pi, input.
+ {
+ Label no_exceptions;
+ __ fwait();
+ __ fnstsw_ax();
+ // Clear if Illegal Operand or Zero Division exceptions are set.
+ __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
+ __ j(zero, &no_exceptions);
+ __ fnclex();
+ __ bind(&no_exceptions);
+ }
+
+ // Compute st(0) % st(1)
+ {
+ Label partial_remainder_loop;
+ __ bind(&partial_remainder_loop);
+ __ fprem1();
+ __ fwait();
+ __ fnstsw_ax();
+ __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
+ // If C2 is set, computation only has partial result. Loop to + // continue computation. + __ j(not_zero, &partial_remainder_loop); + } + // FPU Stack: input, 2*pi, input % 2*pi + __ fstp(2); + // FPU Stack: input % 2*pi, 2*pi, + __ fstp(0); + // FPU Stack: input % 2*pi + __ bind(&in_range); + switch (type_) { + case TranscendentalCache::SIN: + __ fsin(); + break; + case TranscendentalCache::COS: + __ fcos(); + break; + default: + UNREACHABLE(); + } + __ bind(&done); +} + + +// Get the integer part of a heap number. +// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx. +void IntegerConvert(MacroAssembler* masm, + Register result, + Register source) { + // Result may be rcx. If result and source are the same register, source will + // be overwritten. + ASSERT(!result.is(rdi) && !result.is(rbx)); + // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use + // cvttsd2si (32-bit version) directly. + Register double_exponent = rbx; + Register double_value = rdi; + Label done, exponent_63_plus; + // Get double and extract exponent. + __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset)); + // Clear result preemptively, in case we need to return zero. + __ xorl(result, result); + __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there. + // Double to remove sign bit, shift exponent down to least significant bits. + // and subtract bias to get the unshifted, unbiased exponent. + __ lea(double_exponent, Operand(double_value, double_value, times_1, 0)); + __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits)); + __ subl(double_exponent, Immediate(HeapNumber::kExponentBias)); + // Check whether the exponent is too big for a 63 bit unsigned integer. + __ cmpl(double_exponent, Immediate(63)); + __ j(above_equal, &exponent_63_plus); + // Handle exponent range 0..62. + __ cvttsd2siq(result, xmm0); + __ jmp(&done); + + __ bind(&exponent_63_plus); + // Exponent negative or 63+. + __ cmpl(double_exponent, Immediate(83)); + // If exponent negative or above 83, number contains no significant bits in + // the range 0..2^31, so result is zero, and rcx already holds zero. + __ j(above, &done); + + // Exponent in rage 63..83. + // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely + // the least significant exponent-52 bits. + + // Negate low bits of mantissa if value is negative. + __ addq(double_value, double_value); // Move sign bit to carry. + __ sbbl(result, result); // And convert carry to -1 in result register. + // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0. + __ addl(double_value, result); + // Do xor in opposite directions depending on where we want the result + // (depending on whether result is rcx or not). + + if (result.is(rcx)) { + __ xorl(double_value, result); + // Left shift mantissa by (exponent - mantissabits - 1) to save the + // bits that have positional values below 2^32 (the extra -1 comes from the + // doubling done above to move the sign bit into the carry flag). + __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1)); + __ shll_cl(double_value); + __ movl(result, double_value); + } else { + // As the then-branch, but move double-value to result before shifting. + __ xorl(result, double_value); + __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1)); + __ shll_cl(result); + } + + __ bind(&done); +} + + +// Input: rdx, rax are the left and right objects of a bit op. 
+// Output: rax, rcx are left and right integers for a bit op. +void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) { + // Check float operands. + Label done; + Label rax_is_smi; + Label rax_is_object; + Label rdx_is_object; + + __ JumpIfNotSmi(rdx, &rdx_is_object); + __ SmiToInteger32(rdx, rdx); + __ JumpIfSmi(rax, &rax_is_smi); + + __ bind(&rax_is_object); + IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx. + __ jmp(&done); + + __ bind(&rdx_is_object); + IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx. + __ JumpIfNotSmi(rax, &rax_is_object); + __ bind(&rax_is_smi); + __ SmiToInteger32(rcx, rax); + + __ bind(&done); + __ movl(rax, rdx); +} + + +// Input: rdx, rax are the left and right objects of a bit op. +// Output: rax, rcx are left and right integers for a bit op. +void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, + Label* conversion_failure, + Register heap_number_map) { + // Check float operands. + Label arg1_is_object, check_undefined_arg1; + Label arg2_is_object, check_undefined_arg2; + Label load_arg2, done; + + __ JumpIfNotSmi(rdx, &arg1_is_object); + __ SmiToInteger32(rdx, rdx); + __ jmp(&load_arg2); + + // If the argument is undefined it converts to zero (ECMA-262, section 9.5). + __ bind(&check_undefined_arg1); + __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); + __ j(not_equal, conversion_failure); + __ movl(rdx, Immediate(0)); + __ jmp(&load_arg2); + + __ bind(&arg1_is_object); + __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); + __ j(not_equal, &check_undefined_arg1); + // Get the untagged integer version of the edx heap number in rcx. + IntegerConvert(masm, rdx, rdx); + + // Here rdx has the untagged integer, rax has a Smi or a heap number. + __ bind(&load_arg2); + // Test if arg2 is a Smi. + __ JumpIfNotSmi(rax, &arg2_is_object); + __ SmiToInteger32(rax, rax); + __ movl(rcx, rax); + __ jmp(&done); + + // If the argument is undefined it converts to zero (ECMA-262, section 9.5). + __ bind(&check_undefined_arg2); + __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); + __ j(not_equal, conversion_failure); + __ movl(rcx, Immediate(0)); + __ jmp(&done); + + __ bind(&arg2_is_object); + __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); + __ j(not_equal, &check_undefined_arg2); + // Get the untagged integer version of the rax heap number in rcx. + IntegerConvert(masm, rcx, rax); + __ bind(&done); + __ movl(rax, rdx); +} + + +void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { + __ SmiToInteger32(kScratchRegister, rdx); + __ cvtlsi2sd(xmm0, kScratchRegister); + __ SmiToInteger32(kScratchRegister, rax); + __ cvtlsi2sd(xmm1, kScratchRegister); +} + + +void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) { + Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done; + // Load operand in rdx into xmm0. + __ JumpIfSmi(rdx, &load_smi_rdx); + __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); + // Load operand in rax into xmm1. 
+ __ JumpIfSmi(rax, &load_smi_rax); + __ bind(&load_nonsmi_rax); + __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); + __ jmp(&done); + + __ bind(&load_smi_rdx); + __ SmiToInteger32(kScratchRegister, rdx); + __ cvtlsi2sd(xmm0, kScratchRegister); + __ JumpIfNotSmi(rax, &load_nonsmi_rax); + + __ bind(&load_smi_rax); + __ SmiToInteger32(kScratchRegister, rax); + __ cvtlsi2sd(xmm1, kScratchRegister); + + __ bind(&done); +} + + +void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm, + Label* not_numbers) { + Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done; + // Load operand in rdx into xmm0, or branch to not_numbers. + __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex); + __ JumpIfSmi(rdx, &load_smi_rdx); + __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx); + __ j(not_equal, not_numbers); // Argument in rdx is not a number. + __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); + // Load operand in rax into xmm1, or branch to not_numbers. + __ JumpIfSmi(rax, &load_smi_rax); + + __ bind(&load_nonsmi_rax); + __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx); + __ j(not_equal, not_numbers); + __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); + __ jmp(&done); + + __ bind(&load_smi_rdx); + __ SmiToInteger32(kScratchRegister, rdx); + __ cvtlsi2sd(xmm0, kScratchRegister); + __ JumpIfNotSmi(rax, &load_nonsmi_rax); + + __ bind(&load_smi_rax); + __ SmiToInteger32(kScratchRegister, rax); + __ cvtlsi2sd(xmm1, kScratchRegister); + __ bind(&done); +} + + +void GenericUnaryOpStub::Generate(MacroAssembler* masm) { + Label slow, done; + + if (op_ == Token::SUB) { + // Check whether the value is a smi. + Label try_float; + __ JumpIfNotSmi(rax, &try_float); + + if (negative_zero_ == kIgnoreNegativeZero) { + __ SmiCompare(rax, Smi::FromInt(0)); + __ j(equal, &done); + } + + // Enter runtime system if the value of the smi is zero + // to make sure that we switch between 0 and -0. + // Also enter it if the value of the smi is Smi::kMinValue. + __ SmiNeg(rax, rax, &done); + + // Either zero or Smi::kMinValue, neither of which become a smi when + // negated. + if (negative_zero_ == kStrictNegativeZero) { + __ SmiCompare(rax, Smi::FromInt(0)); + __ j(not_equal, &slow); + __ Move(rax, Factory::minus_zero_value()); + __ jmp(&done); + } else { + __ jmp(&slow); + } + + // Try floating point case. + __ bind(&try_float); + __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); + __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex); + __ j(not_equal, &slow); + // Operand is a float, negate its value by flipping sign bit. + __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset)); + __ movq(kScratchRegister, Immediate(0x01)); + __ shl(kScratchRegister, Immediate(63)); + __ xor_(rdx, kScratchRegister); // Flip sign. + // rdx is value to store. + if (overwrite_ == UNARY_OVERWRITE) { + __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx); + } else { + __ AllocateHeapNumber(rcx, rbx, &slow); + // rcx: allocated 'empty' number + __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx); + __ movq(rax, rcx); + } + } else if (op_ == Token::BIT_NOT) { + // Check if the operand is a heap number. + __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); + __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex); + __ j(not_equal, &slow); + + // Convert the heap number in rax to an untagged integer in rcx. + IntegerConvert(masm, rax, rax); + + // Do the bitwise operation and smi tag the result. 
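+ // With 32-bit smi values on x64, the 32-bit result of the NOT always fits
+ // in a smi, so no heap number needs to be allocated for the result.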
+ __ notl(rax); + __ Integer32ToSmi(rax, rax); + } + + // Return from the stub. + __ bind(&done); + __ StubReturn(1); + + // Handle the slow case by jumping to the JavaScript builtin. + __ bind(&slow); + __ pop(rcx); // pop return address + __ push(rax); + __ push(rcx); // push return address + switch (op_) { + case Token::SUB: + __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); + break; + case Token::BIT_NOT: + __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); + break; + default: + UNREACHABLE(); + } +} + + +void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { + // The key is in rdx and the parameter count is in rax. + + // The displacement is used for skipping the frame pointer on the + // stack. It is the offset of the last parameter (if any) relative + // to the frame pointer. + static const int kDisplacement = 1 * kPointerSize; + + // Check that the key is a smi. + Label slow; + __ JumpIfNotSmi(rdx, &slow); + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor; + __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); + __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset), + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + __ j(equal, &adaptor); + + // Check index against formal parameters count limit passed in + // through register rax. Use unsigned comparison to get negative + // check for free. + __ cmpq(rdx, rax); + __ j(above_equal, &slow); + + // Read the argument from the stack and return it. + SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2); + __ lea(rbx, Operand(rbp, index.reg, index.scale, 0)); + index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); + __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); + __ Ret(); + + // Arguments adaptor case: Check index against actual arguments + // limit found in the arguments adaptor frame. Use unsigned + // comparison to get negative check for free. + __ bind(&adaptor); + __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ cmpq(rdx, rcx); + __ j(above_equal, &slow); + + // Read the argument from the stack and return it. + index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2); + __ lea(rbx, Operand(rbx, index.reg, index.scale, 0)); + index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); + __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); + __ Ret(); + + // Slow-case: Handle non-smi or out-of-bounds access to arguments + // by calling the runtime system. + __ bind(&slow); + __ pop(rbx); // Return address. + __ push(rdx); + __ push(rbx); + __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); +} + + +void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { + // rsp[0] : return address + // rsp[8] : number of parameters + // rsp[16] : receiver displacement + // rsp[24] : function + + // The displacement is used for skipping the return address and the + // frame pointer on the stack. It is the offset of the last + // parameter (if any) relative to the frame pointer. + static const int kDisplacement = 2 * kPointerSize; + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor_frame, try_allocate, runtime; + __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); + __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset), + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + __ j(equal, &adaptor_frame); + + // Get the length from the frame. 
+ __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize)); + __ jmp(&try_allocate); + + // Patch the arguments.length and the parameters pointer. + __ bind(&adaptor_frame); + __ SmiToInteger32(rcx, + Operand(rdx, + ArgumentsAdaptorFrameConstants::kLengthOffset)); + // Space on stack must already hold a smi. + __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx); + // Do not clobber the length index for the indexing operation since + // it is used compute the size for allocation later. + __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement)); + __ movq(Operand(rsp, 2 * kPointerSize), rdx); + + // Try the new space allocation. Start out with computing the size of + // the arguments object and the elements array. + Label add_arguments_object; + __ bind(&try_allocate); + __ testl(rcx, rcx); + __ j(zero, &add_arguments_object); + __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize)); + __ bind(&add_arguments_object); + __ addl(rcx, Immediate(Heap::kArgumentsObjectSize)); + + // Do the allocation of both objects in one go. + __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT); + + // Get the arguments boilerplate from the current (global) context. + int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); + __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset)); + __ movq(rdi, Operand(rdi, offset)); + + // Copy the JS object part. + STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize); + __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize)); + __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize)); + __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize)); + __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister); + __ movq(FieldOperand(rax, 1 * kPointerSize), rdx); + __ movq(FieldOperand(rax, 2 * kPointerSize), rbx); + + // Setup the callee in-object property. + ASSERT(Heap::arguments_callee_index == 0); + __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize)); + __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister); + + // Get the length (smi tagged) and set that as an in-object property too. + ASSERT(Heap::arguments_length_index == 1); + __ movq(rcx, Operand(rsp, 1 * kPointerSize)); + __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx); + + // If there are no actual arguments, we're done. + Label done; + __ SmiTest(rcx); + __ j(zero, &done); + + // Get the parameters pointer from the stack and untag the length. + __ movq(rdx, Operand(rsp, 2 * kPointerSize)); + + // Setup the elements pointer in the allocated arguments object and + // initialize the header in the elements fixed array. + __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize)); + __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi); + __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); + __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister); + __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); + __ SmiToInteger32(rcx, rcx); // Untag length for the loop below. + + // Copy the fixed array slots. + Label loop; + __ bind(&loop); + __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver. + __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister); + __ addq(rdi, Immediate(kPointerSize)); + __ subq(rdx, Immediate(kPointerSize)); + __ decl(rcx); + __ j(not_zero, &loop); + + // Return and remove the on-stack parameters. 
+ __ bind(&done); + __ ret(3 * kPointerSize); + + // Do the runtime call to allocate the arguments object. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); +} + + +void RegExpExecStub::Generate(MacroAssembler* masm) { + // Just jump directly to runtime if native RegExp is not selected at compile + // time or if regexp entry in generated code is turned off runtime switch or + // at compilation. +#ifdef V8_INTERPRETED_REGEXP + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); +#else // V8_INTERPRETED_REGEXP + if (!FLAG_regexp_entry_native) { + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + return; + } + + // Stack frame on entry. + // esp[0]: return address + // esp[8]: last_match_info (expected JSArray) + // esp[16]: previous index + // esp[24]: subject string + // esp[32]: JSRegExp object + + static const int kLastMatchInfoOffset = 1 * kPointerSize; + static const int kPreviousIndexOffset = 2 * kPointerSize; + static const int kSubjectOffset = 3 * kPointerSize; + static const int kJSRegExpOffset = 4 * kPointerSize; + + Label runtime; + + // Ensure that a RegExp stack is allocated. + ExternalReference address_of_regexp_stack_memory_address = + ExternalReference::address_of_regexp_stack_memory_address(); + ExternalReference address_of_regexp_stack_memory_size = + ExternalReference::address_of_regexp_stack_memory_size(); + __ movq(kScratchRegister, address_of_regexp_stack_memory_size); + __ movq(kScratchRegister, Operand(kScratchRegister, 0)); + __ testq(kScratchRegister, kScratchRegister); + __ j(zero, &runtime); + + + // Check that the first argument is a JSRegExp object. + __ movq(rax, Operand(rsp, kJSRegExpOffset)); + __ JumpIfSmi(rax, &runtime); + __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister); + __ j(not_equal, &runtime); + // Check that the RegExp has been compiled (data contains a fixed array). + __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset)); + if (FLAG_debug_code) { + Condition is_smi = masm->CheckSmi(rcx); + __ Check(NegateCondition(is_smi), + "Unexpected type for RegExp data, FixedArray expected"); + __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister); + __ Check(equal, "Unexpected type for RegExp data, FixedArray expected"); + } + + // rcx: RegExp data (FixedArray) + // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. + __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset)); + __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP)); + __ j(not_equal, &runtime); + + // rcx: RegExp data (FixedArray) + // Check that the number of captures fit in the static offsets vector buffer. + __ SmiToInteger32(rdx, + FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset)); + // Calculate number of capture registers (number_of_captures + 1) * 2. + __ leal(rdx, Operand(rdx, rdx, times_1, 2)); + // Check that the static offsets vector buffer is large enough. + __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize)); + __ j(above, &runtime); + + // rcx: RegExp data (FixedArray) + // rdx: Number of capture registers + // Check that the second argument is a string. + __ movq(rax, Operand(rsp, kSubjectOffset)); + __ JumpIfSmi(rax, &runtime); + Condition is_string = masm->IsObjectStringType(rax, rbx, rbx); + __ j(NegateCondition(is_string), &runtime); + + // rax: Subject string. + // rcx: RegExp data (FixedArray). + // rdx: Number of capture registers. + // Check that the third argument is a positive smi less than the string + // length. A negative value will be greater (unsigned comparison). 
+ __ movq(rbx, Operand(rsp, kPreviousIndexOffset)); + __ JumpIfNotSmi(rbx, &runtime); + __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset)); + __ j(above_equal, &runtime); + + // rcx: RegExp data (FixedArray) + // rdx: Number of capture registers + // Check that the fourth object is a JSArray object. + __ movq(rax, Operand(rsp, kLastMatchInfoOffset)); + __ JumpIfSmi(rax, &runtime); + __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister); + __ j(not_equal, &runtime); + // Check that the JSArray is in fast case. + __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset)); + __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset)); + __ Cmp(rax, Factory::fixed_array_map()); + __ j(not_equal, &runtime); + // Check that the last match info has space for the capture registers and the + // additional information. Ensure no overflow in add. + STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset); + __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset)); + __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead)); + __ cmpl(rdx, rax); + __ j(greater, &runtime); + + // rcx: RegExp data (FixedArray) + // Check the representation and encoding of the subject string. + Label seq_ascii_string, seq_two_byte_string, check_code; + __ movq(rax, Operand(rsp, kSubjectOffset)); + __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); + __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset)); + // First check for flat two byte string. + __ andb(rbx, Immediate( + kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask)); + STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0); + __ j(zero, &seq_two_byte_string); + // Any other flat string must be a flat ascii string. + __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask)); + __ j(zero, &seq_ascii_string); + + // Check for flat cons string. + // A flat cons string is a cons string where the second part is the empty + // string. In that case the subject string is just the first part of the cons + // string. Also in this case the first part of the cons string is known to be + // a sequential string or an external string. + STATIC_ASSERT(kExternalStringTag !=0); + STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); + __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag)); + __ j(not_zero, &runtime); + // String is a cons string. + __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset)); + __ Cmp(rdx, Factory::empty_string()); + __ j(not_equal, &runtime); + __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset)); + __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); + // String is a cons string with empty second part. + // rax: first part of cons string. + // rbx: map of first part of cons string. + // Is first part a flat two byte string? + __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset), + Immediate(kStringRepresentationMask | kStringEncodingMask)); + STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0); + __ j(zero, &seq_two_byte_string); + // Any other flat string must be ascii. + __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset), + Immediate(kStringRepresentationMask)); + __ j(not_zero, &runtime); + + __ bind(&seq_ascii_string); + // rax: subject string (sequential ascii) + // rcx: RegExp data (FixedArray) + __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset)); + __ Set(rdi, 1); // Type is ascii. 
+ __ jmp(&check_code); + + __ bind(&seq_two_byte_string); + // rax: subject string (flat two-byte) + // rcx: RegExp data (FixedArray) + __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset)); + __ Set(rdi, 0); // Type is two byte. + + __ bind(&check_code); + // Check that the irregexp code has been generated for the actual string + // encoding. If it has, the field contains a code object otherwise it contains + // the hole. + __ CmpObjectType(r11, CODE_TYPE, kScratchRegister); + __ j(not_equal, &runtime); + + // rax: subject string + // rdi: encoding of subject string (1 if ascii, 0 if two_byte); + // r11: code + // Load used arguments before starting to push arguments for call to native + // RegExp code to avoid handling changing stack height. + __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset)); + + // rax: subject string + // rbx: previous index + // rdi: encoding of subject string (1 if ascii 0 if two_byte); + // r11: code + // All checks done. Now push arguments for native regexp code. + __ IncrementCounter(&Counters::regexp_entry_native, 1); + + // rsi is caller save on Windows and used to pass parameter on Linux. + __ push(rsi); + + static const int kRegExpExecuteArguments = 7; + __ PrepareCallCFunction(kRegExpExecuteArguments); + int argument_slots_on_stack = + masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments); + + // Argument 7: Indicate that this is a direct call from JavaScript. + __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize), + Immediate(1)); + + // Argument 6: Start (high end) of backtracking stack memory area. + __ movq(kScratchRegister, address_of_regexp_stack_memory_address); + __ movq(r9, Operand(kScratchRegister, 0)); + __ movq(kScratchRegister, address_of_regexp_stack_memory_size); + __ addq(r9, Operand(kScratchRegister, 0)); + // Argument 6 passed in r9 on Linux and on the stack on Windows. +#ifdef _WIN64 + __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9); +#endif + + // Argument 5: static offsets vector buffer. + __ movq(r8, ExternalReference::address_of_static_offsets_vector()); + // Argument 5 passed in r8 on Linux and on the stack on Windows. +#ifdef _WIN64 + __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8); +#endif + + // First four arguments are passed in registers on both Linux and Windows. +#ifdef _WIN64 + Register arg4 = r9; + Register arg3 = r8; + Register arg2 = rdx; + Register arg1 = rcx; +#else + Register arg4 = rcx; + Register arg3 = rdx; + Register arg2 = rsi; + Register arg1 = rdi; +#endif + + // Keep track on aliasing between argX defined above and the registers used. + // rax: subject string + // rbx: previous index + // rdi: encoding of subject string (1 if ascii 0 if two_byte); + // r11: code + + // Argument 4: End of string data + // Argument 3: Start of string data + Label setup_two_byte, setup_rest; + __ testb(rdi, rdi); + __ j(zero, &setup_two_byte); + __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset)); + __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize)); + __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize)); + __ jmp(&setup_rest); + __ bind(&setup_two_byte); + __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset)); + __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize)); + __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize)); + + __ bind(&setup_rest); + // Argument 2: Previous index. 
+ __ movq(arg2, rbx); + + // Argument 1: Subject string. + __ movq(arg1, rax); + + // Locate the code entry and call it. + __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ CallCFunction(r11, kRegExpExecuteArguments); + + // rsi is caller save, as it is used to pass parameter. + __ pop(rsi); + + // Check the result. + Label success; + __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS)); + __ j(equal, &success); + Label failure; + __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE)); + __ j(equal, &failure); + __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION)); + // If not exception it can only be retry. Handle that in the runtime system. + __ j(not_equal, &runtime); + // Result must now be exception. If there is no pending exception already a + // stack overflow (on the backtrack stack) was detected in RegExp code but + // haven't created the exception yet. Handle that in the runtime system. + // TODO(592): Rerunning the RegExp to get the stack overflow exception. + ExternalReference pending_exception_address(Top::k_pending_exception_address); + __ movq(kScratchRegister, pending_exception_address); + __ Cmp(kScratchRegister, Factory::the_hole_value()); + __ j(equal, &runtime); + __ bind(&failure); + // For failure and exception return null. + __ Move(rax, Factory::null_value()); + __ ret(4 * kPointerSize); + + // Load RegExp data. + __ bind(&success); + __ movq(rax, Operand(rsp, kJSRegExpOffset)); + __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset)); + __ SmiToInteger32(rax, + FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset)); + // Calculate number of capture registers (number_of_captures + 1) * 2. + __ leal(rdx, Operand(rax, rax, times_1, 2)); + + // rdx: Number of capture registers + // Load last_match_info which is still known to be a fast case JSArray. + __ movq(rax, Operand(rsp, kLastMatchInfoOffset)); + __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset)); + + // rbx: last_match_info backing store (FixedArray) + // rdx: number of capture registers + // Store the capture count. + __ Integer32ToSmi(kScratchRegister, rdx); + __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset), + kScratchRegister); + // Store last subject and last input. + __ movq(rax, Operand(rsp, kSubjectOffset)); + __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax); + __ movq(rcx, rbx); + __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi); + __ movq(rax, Operand(rsp, kSubjectOffset)); + __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax); + __ movq(rcx, rbx); + __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi); + + // Get the static offsets vector filled by the native regexp code. + __ movq(rcx, ExternalReference::address_of_static_offsets_vector()); + + // rbx: last_match_info backing store (FixedArray) + // rcx: offsets vector + // rdx: number of capture registers + Label next_capture, done; + // Capture register counter starts from number of capture registers and + // counts down until wraping after zero. + __ bind(&next_capture); + __ subq(rdx, Immediate(1)); + __ j(negative, &done); + // Read the value from the static offsets vector buffer and make it a smi. + __ movl(rdi, Operand(rcx, rdx, times_int_size, 0)); + __ Integer32ToSmi(rdi, rdi, &runtime); + // Store the smi value in the last match info. + __ movq(FieldOperand(rbx, + rdx, + times_pointer_size, + RegExpImpl::kFirstCaptureOffset), + rdi); + __ jmp(&next_capture); + __ bind(&done); + + // Return last match info. 
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset)); + __ ret(4 * kPointerSize); + + // Do the runtime call to execute the regexp. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); +#endif // V8_INTERPRETED_REGEXP +} + + +void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, + Register object, + Register result, + Register scratch1, + Register scratch2, + bool object_is_smi, + Label* not_found) { + // Use of registers. Register result is used as a temporary. + Register number_string_cache = result; + Register mask = scratch1; + Register scratch = scratch2; + + // Load the number string cache. + __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); + + // Make the hash mask from the length of the number string cache. It + // contains two elements (number and string) for each cache entry. + __ SmiToInteger32( + mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); + __ shrl(mask, Immediate(1)); + __ subq(mask, Immediate(1)); // Make mask. + + // Calculate the entry in the number string cache. The hash value in the + // number string cache for smis is just the smi value, and the hash for + // doubles is the xor of the upper and lower words. See + // Heap::GetNumberStringCache. + Label is_smi; + Label load_result_from_cache; + if (!object_is_smi) { + __ JumpIfSmi(object, &is_smi); + __ CheckMap(object, Factory::heap_number_map(), not_found, true); + + STATIC_ASSERT(8 == kDoubleSize); + __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); + __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset)); + GenerateConvertHashCodeToIndex(masm, scratch, mask); + + Register index = scratch; + Register probe = mask; + __ movq(probe, + FieldOperand(number_string_cache, + index, + times_1, + FixedArray::kHeaderSize)); + __ JumpIfSmi(probe, not_found); + ASSERT(CpuFeatures::IsSupported(SSE2)); + CpuFeatures::Scope fscope(SSE2); + __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); + __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset)); + __ ucomisd(xmm0, xmm1); + __ j(parity_even, not_found); // Bail out if NaN is involved. + __ j(not_equal, not_found); // The cache did not contain this value. + __ jmp(&load_result_from_cache); + } + + __ bind(&is_smi); + __ SmiToInteger32(scratch, object); + GenerateConvertHashCodeToIndex(masm, scratch, mask); + + Register index = scratch; + // Check if the entry is the smi we are looking for. + __ cmpq(object, + FieldOperand(number_string_cache, + index, + times_1, + FixedArray::kHeaderSize)); + __ j(not_equal, not_found); + + // Get the result from the cache. + __ bind(&load_result_from_cache); + __ movq(result, + FieldOperand(number_string_cache, + index, + times_1, + FixedArray::kHeaderSize + kPointerSize)); + __ IncrementCounter(&Counters::number_to_string_native, 1); +} + + +void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm, + Register hash, + Register mask) { + __ and_(hash, mask); + // Each entry in string cache consists of two pointer sized fields, + // but times_twice_pointer_size (multiplication by 16) scale factor + // is not supported by addrmode on x64 platform. + // So we have to premultiply entry index before lookup. + __ shl(hash, Immediate(kPointerSizeLog2 + 1)); +} + + +void NumberToStringStub::Generate(MacroAssembler* masm) { + Label runtime; + + __ movq(rbx, Operand(rsp, kPointerSize)); + + // Generate code to lookup number in the number string cache. 
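+ // The cache is probed with a hash of the number: for a smi the untagged value
+ // itself, for a heap number the xor of the two 32-bit halves of the double.
+ // The hash is masked with (cache length / 2 - 1) and scaled by 16 bytes, since
+ // each cache entry is a (number, string) pair of pointers.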
+ GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
+ __ ret(1 * kPointerSize);
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
+
+
+static int NegativeComparisonResult(Condition cc) {
+ ASSERT(cc != equal);
+ ASSERT((cc == less) || (cc == less_equal)
+ || (cc == greater) || (cc == greater_equal));
+ return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+}
+
+
+void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ Label check_unequal_objects, done;
+ // The compare stub returns a positive, negative, or zero 64-bit integer
+ // value in rax, corresponding to the result of comparing the two inputs.
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Two identical objects are equal unless they are both NaN or undefined.
+ {
+ Label not_identical;
+ __ cmpq(rax, rdx);
+ __ j(not_equal, &not_identical);
+
+ if (cc_ != equal) {
+ // Check for undefined. undefined OP undefined is false even though
+ // undefined == undefined.
+ Label check_for_nan;
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &check_for_nan);
+ __ Set(rax, NegativeComparisonResult(cc_));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // Note: if cc_ != equal, never_nan_nan_ is not used.
+ // We cannot set rax to EQUAL until just before return because
+ // rax must be unchanged on jump to not_identical.
+
+ if (never_nan_nan_ && (cc_ == equal)) {
+ __ Set(rax, EQUAL);
+ __ ret(0);
+ } else {
+ Label heap_number;
+ // If it's not a heap number, then return equal for (in)equality operator.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(equal, &heap_number);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, &not_identical);
+ }
+ __ Set(rax, EQUAL);
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return equal if it's not NaN.
+ // For NaN, return 1 for every condition except greater and
+ // greater-equal. Return -1 for them, so the comparison yields
+ // false for all conditions except not-equal.
+ __ Set(rax, EQUAL);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm0);
+ __ setcc(parity_even, rax);
+ // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
+ if (cc_ == greater_equal || cc_ == greater) {
+ __ neg(rax);
+ }
+ __ ret(0);
+ }
+
+ __ bind(&not_identical);
+ }
+
+ if (cc_ == equal) { // Both strict and non-strict.
+ Label slow; // Fallthrough label.
+
+ // If we're doing a strict equality comparison, we don't have to do
+ // type conversion, so we generate code to do fast comparison for objects
+ // and oddballs. Non-smi numbers and strings still go through the usual
+ // slow-case code.
+ if (strict_) {
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ {
+ Label not_smis;
+ __ SelectNonSmi(rbx, rax, rdx, &not_smis);
+
+ // Check if the non-smi operand is a heap number.
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+ // Return non-equal. ebx (the lower half of rbx) is not zero.
+ __ movq(rax, rbx);
+ __ ret(0);
+
+ __ bind(&not_smis);
+ }
+
+ // If either operand is a JSObject or an oddball value, then they are not
+ // equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+
+ // If the first object is a JS object, we have done pointer comparison.
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label first_non_object;
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &first_non_object);
+ // Return non-zero (eax (not rax) is not zero)
+ Label return_not_equal;
+ STATIC_ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ // Fall through to the general case.
+ }
+ __ bind(&slow);
+ }
+
+ // Generate the number comparison code.
+ if (include_number_compare_) {
+ Label non_number_comparison;
+ Label unordered;
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+ __ xorl(rax, rax);
+ __ xorl(rcx, rcx);
+ __ ucomisd(xmm0, xmm1);
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ setcc(above, rax);
+ __ setcc(below, rcx);
+ __ subq(rax, rcx);
+ __ ret(0);
+
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc_ != not_equal);
+ if (cc_ == less || cc_ == less_equal) {
+ __ Set(rax, 1);
+ } else {
+ __ Set(rax, -1);
+ }
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+ }
+
+ // Fast negative check for symbol-to-symbol equality.
+ Label check_for_strings;
+ if (cc_ == equal) {
+ BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
+ BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
+
+ // We've already checked for object identity, so if both operands
+ // are symbols they aren't equal. Register eax (not rax) already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(0);
+ }
+
+ __ bind(&check_for_strings);
+
+ __ JumpIfNotBothSequentialAsciiStrings(
+ rdx, rax, rcx, rbx, &check_unequal_objects);
+
+ // Inline comparison of ascii strings.
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ rdx,
+ rax,
+ rcx,
+ rbx,
+ rdi,
+ r8);
+
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from string comparison");
+#endif
+
+ __ bind(&check_unequal_objects);
+ if (cc_ == equal && !strict_) {
+ // Not strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects, return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
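+ // E.g. with kSmiTag == 0 a smi has its low bit clear while a heap object has
+ // it set, so smi + heap object gives an odd sum and heap object + heap object
+ // an even one; the single testb below therefore covers both operands at once.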
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+ __ lea(rcx, Operand(rax, rdx, times_1, 0));
+ __ testb(rcx, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_both_objects);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ j(below, &not_both_objects);
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &not_both_objects);
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Set(rax, EQUAL);
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax,
+ // or return equal if we fell through to here.
+ __ ret(0);
+ __ bind(&not_both_objects);
+ }
+
+ // Push arguments below the return address to prepare jump to builtin.
+ __ pop(rcx);
+ __ push(rdx);
+ __ push(rax);
+
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript builtin;
+ if (cc_ == equal) {
+ builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ builtin = Builtins::COMPARE;
+ __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
+ }
+
+ // Restore return address on the stack.
+ __ push(rcx);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(scratch,
+ FieldOperand(scratch, Map::kInstanceTypeOffset));
+ // Ensure that no non-strings have the symbol bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ testb(scratch, Immediate(kIsSymbolMask));
+ __ j(zero, label);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Because builtins always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack. The receiver
+ // must be inserted below the return address on the stack so we
+ // temporarily store that in a register.
+ __ pop(rax);
+ __ Push(Smi::FromInt(0));
+ __ push(rax);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // If the receiver might be a value (string, number or boolean) check for this
+ // and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // +1 ~ return address
+ Label receiver_is_value, receiver_is_js_object;
+ __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ JumpIfSmi(rax, &receiver_is_value);
+
+ // Check if the receiver is a valid JS object.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
+ __ j(above_equal, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
+
+ __ bind(&receiver_is_js_object);
+ }
+
+ // Get the function to call from the stack.
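+ // E.g. for a call with argc_ == 2 the stack is: rsp[0] return address, rsp[8]
+ // and rsp[16] the two arguments, rsp[24] the receiver and rsp[32] the
+ // function, which is why the function is loaded from (argc_ + 2) * kPointerSize
+ // below.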
+ // +2 ~ receiver, return address
+ __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
+
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(rdi, &slow);
+ // Go to the slow case if we do not have a function.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc_);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
+ __ Set(rax, argc_);
+ __ Set(rbx, 0);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ // Check that the stack contains the next handler, frame pointer, state and
+ // return address, in that order.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
+ StackHandlerConstants::kStateOffset);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
+ StackHandlerConstants::kPCOffset);
+
+ ExternalReference handler_address(Top::k_handler_address);
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+ // get next in chain
+ __ pop(rcx);
+ __ movq(Operand(kScratchRegister, 0), rcx);
+ __ pop(rbp); // pop frame pointer
+ __ pop(rdx); // remove state
+
+ // Before returning we restore the context from the frame pointer if not NULL.
+ // The frame pointer is NULL in the exception handler of a JS entry frame.
+ __ xor_(rsi, rsi); // tentatively set context pointer to NULL
+ Label skip;
+ __ cmpq(rbp, Immediate(0));
+ __ j(equal, &skip);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ bind(&skip);
+ __ ret(0);
+}
+
+
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ Label empty_result;
+ Label prologue;
+ Label promote_scheduled_exception;
+ __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, 0);
+ ASSERT_EQ(kArgc, 4);
+#ifdef _WIN64
+ // All the parameters should be set up by a caller.
+#else
+ // Set 1st parameter register with property name.
+ __ movq(rsi, rdx);
+ // Second parameter register rdi should be set with pointer to AccessorInfo
+ // by a caller.
+#endif
+ // Call the api function!
+ __ movq(rax,
+ reinterpret_cast<int64_t>(fun()->address()),
+ RelocInfo::RUNTIME_ENTRY);
+ __ call(rax);
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address();
+ __ movq(rsi, scheduled_exception_address);
+ __ Cmp(Operand(rsi, 0), Factory::the_hole_value());
+ __ j(not_equal, &promote_scheduled_exception);
+#ifdef _WIN64
+ // rax keeps a pointer to v8::Handle, unpack it.
+ __ movq(rax, Operand(rax, 0));
+#endif
+ // Check if the result handle holds 0.
+ __ testq(rax, rax);
+ __ j(zero, &empty_result);
+ // It was non-zero. Dereference to get the result value.
+ __ movq(rax, Operand(rax, 0));
+ __ bind(&prologue);
+ __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
+ __ ret(0);
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ bind(&empty_result);
+ // It was zero; the result is undefined.
+ __ Move(rax, Factory::undefined_value()); + __ jmp(&prologue); +} + + +void CEntryStub::GenerateCore(MacroAssembler* masm, + Label* throw_normal_exception, + Label* throw_termination_exception, + Label* throw_out_of_memory_exception, + bool do_gc, + bool always_allocate_scope, + int /* alignment_skew */) { + // rax: result parameter for PerformGC, if any. + // rbx: pointer to C function (C callee-saved). + // rbp: frame pointer (restored after C call). + // rsp: stack pointer (restored after C call). + // r14: number of arguments including receiver (C callee-saved). + // r12: pointer to the first argument (C callee-saved). + // This pointer is reused in LeaveExitFrame(), so it is stored in a + // callee-saved register. + + // Simple results returned in rax (both AMD64 and Win64 calling conventions). + // Complex results must be written to address passed as first argument. + // AMD64 calling convention: a struct of two pointers in rax+rdx + + // Check stack alignment. + if (FLAG_debug_code) { + __ CheckStackAlignment(); + } + + if (do_gc) { + // Pass failure code returned from last attempt as first argument to + // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the + // stack is known to be aligned. This function takes one argument which is + // passed in register. +#ifdef _WIN64 + __ movq(rcx, rax); +#else // _WIN64 + __ movq(rdi, rax); +#endif + __ movq(kScratchRegister, + FUNCTION_ADDR(Runtime::PerformGC), + RelocInfo::RUNTIME_ENTRY); + __ call(kScratchRegister); + } + + ExternalReference scope_depth = + ExternalReference::heap_always_allocate_scope_depth(); + if (always_allocate_scope) { + __ movq(kScratchRegister, scope_depth); + __ incl(Operand(kScratchRegister, 0)); + } + + // Call C function. +#ifdef _WIN64 + // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9 + // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots. + __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc. + __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv. + if (result_size_ < 2) { + // Pass a pointer to the Arguments object as the first argument. + // Return result in single register (rax). + __ lea(rcx, Operand(rsp, 4 * kPointerSize)); + } else { + ASSERT_EQ(2, result_size_); + // Pass a pointer to the result location as the first argument. + __ lea(rcx, Operand(rsp, 6 * kPointerSize)); + // Pass a pointer to the Arguments object as the second argument. + __ lea(rdx, Operand(rsp, 4 * kPointerSize)); + } + +#else // _WIN64 + // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9. + __ movq(rdi, r14); // argc. + __ movq(rsi, r12); // argv. +#endif + __ call(rbx); + // Result is in rax - do not destroy this register! + + if (always_allocate_scope) { + __ movq(kScratchRegister, scope_depth); + __ decl(Operand(kScratchRegister, 0)); + } + + // Check for failure result. + Label failure_returned; + STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); +#ifdef _WIN64 + // If return value is on the stack, pop it to registers. + if (result_size_ > 1) { + ASSERT_EQ(2, result_size_); + // Read result values stored on stack. Result is stored + // above the four argument mirror slots and the two + // Arguments object slots. + __ movq(rax, Operand(rsp, 6 * kPointerSize)); + __ movq(rdx, Operand(rsp, 7 * kPointerSize)); + } +#endif + __ lea(rcx, Operand(rax, 1)); + // Lower 2 bits of rcx are 0 iff rax has failure tag. + __ testl(rcx, Immediate(kFailureTagMask)); + __ j(zero, &failure_returned); + + // Exit the JavaScript to C++ exit frame. 
+ __ LeaveExitFrame(mode_, result_size_); + __ ret(0); + + // Handling of failure. + __ bind(&failure_returned); + + Label retry; + // If the returned exception is RETRY_AFTER_GC continue at retry label + STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); + __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); + __ j(zero, &retry); + + // Special handling of out of memory exceptions. + __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE); + __ cmpq(rax, kScratchRegister); + __ j(equal, throw_out_of_memory_exception); + + // Retrieve the pending exception and clear the variable. + ExternalReference pending_exception_address(Top::k_pending_exception_address); + __ movq(kScratchRegister, pending_exception_address); + __ movq(rax, Operand(kScratchRegister, 0)); + __ movq(rdx, ExternalReference::the_hole_value_location()); + __ movq(rdx, Operand(rdx, 0)); + __ movq(Operand(kScratchRegister, 0), rdx); + + // Special handling of termination exceptions which are uncatchable + // by javascript code. + __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex); + __ j(equal, throw_termination_exception); + + // Handle normal exception. + __ jmp(throw_normal_exception); + + // Retry. + __ bind(&retry); +} + + +void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, + UncatchableExceptionType type) { + // Fetch top stack handler. + ExternalReference handler_address(Top::k_handler_address); + __ movq(kScratchRegister, handler_address); + __ movq(rsp, Operand(kScratchRegister, 0)); + + // Unwind the handlers until the ENTRY handler is found. + Label loop, done; + __ bind(&loop); + // Load the type of the current stack handler. + const int kStateOffset = StackHandlerConstants::kStateOffset; + __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY)); + __ j(equal, &done); + // Fetch the next handler in the list. + const int kNextOffset = StackHandlerConstants::kNextOffset; + __ movq(rsp, Operand(rsp, kNextOffset)); + __ jmp(&loop); + __ bind(&done); + + // Set the top handler address to next handler past the current ENTRY handler. + __ movq(kScratchRegister, handler_address); + __ pop(Operand(kScratchRegister, 0)); + + if (type == OUT_OF_MEMORY) { + // Set external caught exception to false. + ExternalReference external_caught(Top::k_external_caught_exception_address); + __ movq(rax, Immediate(false)); + __ store_rax(external_caught); + + // Set pending exception and rax to out of memory exception. + ExternalReference pending_exception(Top::k_pending_exception_address); + __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE); + __ store_rax(pending_exception); + } + + // Clear the context pointer. + __ xor_(rsi, rsi); + + // Restore registers from handler. + STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize == + StackHandlerConstants::kFPOffset); + __ pop(rbp); // FP + STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize == + StackHandlerConstants::kStateOffset); + __ pop(rdx); // State + + STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize == + StackHandlerConstants::kPCOffset); + __ ret(0); +} + + +void CEntryStub::Generate(MacroAssembler* masm) { + // rax: number of arguments including receiver + // rbx: pointer to C function (C callee-saved) + // rbp: frame pointer of calling JS frame (restored after C call) + // rsp: stack pointer (restored after C call) + // rsi: current context (restored) + + // NOTE: Invocations of builtins may return failure objects + // instead of a proper result. 
+ // The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame(mode_, result_size_);
+
+ // rax: Holds the context at this point, but should not be used.
+ // On entry to code generated by GenerateCore, it must hold
+ // a failure result if the collect_garbage argument to GenerateCore
+ // is true. This failure result can be the result of code
+ // generated by a previous call to GenerateCore. The value
+ // of rax is then passed to Runtime::PerformGC.
+ // rbx: pointer to builtin function (C callee-saved).
+ // rbp: frame pointer of exit frame (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // r14: number of arguments including receiver (C callee-saved).
+ // r12: argv pointer (C callee-saved).
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ movq(rax, failure, RelocInfo::NONE);
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Label not_outermost_js, not_outermost_js_2;
+#endif
+
+ // Setup frame.
+ __ push(rbp);
+ __ movq(rbp, rsp);
+
+ // Push the stack frame type marker twice.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ // Scratch register is neither callee-save, nor an argument register on any
+ // platform. It's free to use at this point.
+ // Cannot use smi-register for loading yet.
+ __ movq(kScratchRegister,
+ reinterpret_cast<int64_t>(Smi::FromInt(marker)),
+ RelocInfo::NONE);
+ __ push(kScratchRegister); // context slot
+ __ push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/Win64 calling conventions).
+ __ push(r12);
+ __ push(r13);
+ __ push(r14);
+ __ push(r15);
+#ifdef _WIN64
+ __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+#endif
+ __ push(rbx);
+ // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
+ // callee save as well.
+
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ __ load_rax(c_entry_fp);
+ __ push(rax);
+
+ // Set up the roots and smi constant registers.
+ // Needs to be done before any further smi loads.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ movq(kRootRegister, roots_address);
+ __ InitializeSmiConstantRegister();
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ __ load_rax(js_entry_sp);
+ __ testq(rax, rax);
+ __ j(not_zero, &not_outermost_js);
+ __ movq(rax, rbp);
+ __ store_rax(js_entry_sp);
+ __ bind(&not_outermost_js);
+#endif
+
+ // Call a faked try-block that does the invoke.
+ __ call(&invoke);
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ store_rax(pending_exception);
+ __ movq(rax, Failure::Exception(), RelocInfo::NONE);
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+
+ // Clear any pending exceptions.
+ __ load_rax(ExternalReference::the_hole_value_location());
+ __ store_rax(pending_exception);
+
+ // Fake a receiver (NULL).
+ __ push(Immediate(0)); // receiver
+
+ // Invoke the function by calling through JS entry trampoline
+ // builtin and pop the faked function when we return. We load the address
+ // from an external reference instead of inlining the call target address
+ // directly in the code, because the builtin stubs may not have been
+ // generated yet at the time this code is generated.
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ load_rax(construct_entry);
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ load_rax(entry);
+ }
+ __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
+ __ call(kScratchRegister);
+
+ // Unlink this frame from the handler chain.
+ __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+ __ pop(Operand(kScratchRegister, 0));
+ // Pop next_sp.
+ __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If the current RBP value is the same as the js_entry_sp value, it means
+ // that the current function is the outermost.
+ __ movq(kScratchRegister, js_entry_sp);
+ __ cmpq(rbp, Operand(kScratchRegister, 0));
+ __ j(not_equal, &not_outermost_js_2);
+ __ movq(Operand(kScratchRegister, 0), Immediate(0));
+ __ bind(&not_outermost_js_2);
+#endif
+
+ // Restore the top frame descriptor from the stack.
+ __ bind(&exit);
+ __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
+ __ pop(Operand(kScratchRegister, 0));
+
+ // Restore callee-saved registers (X64 conventions).
+ __ pop(rbx);
+#ifdef _WIN64
+ // Callee save in Win64 ABI, arguments/volatile in AMD64 ABI.
+ __ pop(rsi);
+ __ pop(rdi);
+#endif
+ __ pop(r15);
+ __ pop(r14);
+ __ pop(r13);
+ __ pop(r12);
+ __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(rbp);
+ __ ret(0);
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Implements "value instanceof function" operator.
+ // Expected input state:
+ // rsp[0] : return address
+ // rsp[1] : function pointer
+ // rsp[2] : value
+ // Returns a bitwise zero to indicate that the value
+ // is an instance of the function and anything else to
+ // indicate that the value is not an instance.
+
+ // Get the object - go to the slow case if it's a smi.
+ Label slow;
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ JumpIfSmi(rax, &slow);
+
+ // Check that the left hand is a JS object. Leave its map in rax.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+ __ j(below, &slow);
+ __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
+
+ // Get the prototype of the function.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ // rdx is function, rax is map.
+
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ j(not_equal, &miss);
+ __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+ __ j(not_equal, &miss);
+ __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&miss);
+ __ TryGetFunctionPrototype(rdx, rbx, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(rbx, &slow);
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ j(below, &slow);
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
+
+ // Register mapping:
+ // rax is object map.
+ // rdx is function.
+ // rbx is function prototype.
+ __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+
+ __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
+ Label loop, is_instance, is_not_instance;
+ __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
+ __ bind(&loop);
+ __ cmpq(rcx, rbx);
+ __ j(equal, &is_instance);
+ __ cmpq(rcx, kScratchRegister);
+ // The code at is_not_instance assumes that kScratchRegister contains a
+ // non-zero GCable value (the null object in this case).
+ __ j(equal, &is_not_instance);
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ xorl(rax, rax);
+ // Store bitwise zero in the cache. This is a Smi in GC terms.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&is_not_instance);
+ // We have to store a non-zero value in the cache.
+ __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+}
+
+
+int CompareStub::MinorKey() {
+ // Encode the three parameters in a unique 16 bit value. To avoid duplicate
+ // stubs the never NaN NaN condition is only taken into account if the
+ // condition is equals.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
+ | IncludeNumberCompareField::encode(include_number_compare_);
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+
+ const char* cc_name;
+ switch (cc_) {
+ case less: cc_name = "LT"; break;
+ case greater: cc_name = "GT"; break;
+ case less_equal: cc_name = "LE"; break;
+ case greater_equal: cc_name = "GE"; break;
+ case equal: cc_name = "EQ"; break;
+ case not_equal: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+
+ const char* strict_name = "";
+ if (strict_ && (cc_ == equal || cc_ == not_equal)) {
+ strict_name = "_STRICT";
+ }
+
+ const char* never_nan_nan_name = "";
+ if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
+ never_nan_nan_name = "_NO_NAN";
+ }
+
+ const char* include_number_compare_name = "";
+ if (!include_number_compare_) {
+ include_number_compare_name = "_NO_NUMBER";
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "CompareStub_%s%s%s%s",
+ cc_name,
+ strict_name,
+ never_nan_nan_name,
+ include_number_compare_name);
+ return name_;
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ // If the receiver is a smi trigger the non-string case.
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ testb(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ movq(scratch_, index_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range_);
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result_, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
+
+ // Handle non-flat strings.
+ __ testb(result_, Immediate(kIsConsStringMask));
+ __ j(zero, &call_runtime_);
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
+ __ j(not_equal, &call_runtime_);
+ // Get the first of the two strings and load its instance type.
+ __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result_, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &call_runtime_);
+
+ // Check for 1-byte or 2-byte string.
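+ // kAsciiStringTag is the non-zero encoding bit in the instance type, so the
+ // kStringEncodingMask test below yields not_zero for ascii strings and zero
+ // for two-byte strings.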
+ __ bind(&flat_string); + STATIC_ASSERT(kAsciiStringTag != 0); + __ testb(result_, Immediate(kStringEncodingMask)); + __ j(not_zero, &ascii_string); + + // 2-byte string. + // Load the 2-byte character code into the result register. + __ SmiToInteger32(scratch_, scratch_); + __ movzxwl(result_, FieldOperand(object_, + scratch_, times_2, + SeqTwoByteString::kHeaderSize)); + __ jmp(&got_char_code); + + // ASCII string. + // Load the byte into the result register. + __ bind(&ascii_string); + __ SmiToInteger32(scratch_, scratch_); + __ movzxbl(result_, FieldOperand(object_, + scratch_, times_1, + SeqAsciiString::kHeaderSize)); + __ bind(&got_char_code); + __ Integer32ToSmi(result_, result_); + __ bind(&exit_); +} + + +void StringCharCodeAtGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + __ Abort("Unexpected fallthrough to CharCodeAt slow case"); + + // Index is not a smi. + __ bind(&index_not_smi_); + // If index is a heap number, try converting it to an integer. + __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true); + call_helper.BeforeCall(masm); + __ push(object_); + __ push(index_); + __ push(index_); // Consumed by runtime conversion function. + if (index_flags_ == STRING_INDEX_IS_NUMBER) { + __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); + } else { + ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); + // NumberToSmi discards numbers that are not exact integers. + __ CallRuntime(Runtime::kNumberToSmi, 1); + } + if (!scratch_.is(rax)) { + // Save the conversion result before the pop instructions below + // have a chance to overwrite it. + __ movq(scratch_, rax); + } + __ pop(index_); + __ pop(object_); + // Reload the instance type. + __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset)); + __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); + call_helper.AfterCall(masm); + // If index is still not a smi, it must be out of range. + __ JumpIfNotSmi(scratch_, index_out_of_range_); + // Otherwise, return to the fast path. + __ jmp(&got_smi_index_); + + // Call runtime. We get here when the receiver is a string and the + // index is a number, but the code of getting the actual character + // is too complex (e.g., when the string needs to be flattened). + __ bind(&call_runtime_); + call_helper.BeforeCall(masm); + __ push(object_); + __ push(index_); + __ CallRuntime(Runtime::kStringCharCodeAt, 2); + if (!result_.is(rax)) { + __ movq(result_, rax); + } + call_helper.AfterCall(masm); + __ jmp(&exit_); + + __ Abort("Unexpected fallthrough from CharCodeAt slow case"); +} + + +// ------------------------------------------------------------------------- +// StringCharFromCodeGenerator + +void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { + // Fast case of Heap::LookupSingleCharacterStringFromCode. 
+ __ JumpIfNotSmi(code_, &slow_case_); + __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode)); + __ j(above, &slow_case_); + + __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); + SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2); + __ movq(result_, FieldOperand(result_, index.reg, index.scale, + FixedArray::kHeaderSize)); + __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); + __ j(equal, &slow_case_); + __ bind(&exit_); +} + + +void StringCharFromCodeGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + __ Abort("Unexpected fallthrough to CharFromCode slow case"); + + __ bind(&slow_case_); + call_helper.BeforeCall(masm); + __ push(code_); + __ CallRuntime(Runtime::kCharFromCode, 1); + if (!result_.is(rax)) { + __ movq(result_, rax); + } + call_helper.AfterCall(masm); + __ jmp(&exit_); + + __ Abort("Unexpected fallthrough from CharFromCode slow case"); +} + + +// ------------------------------------------------------------------------- +// StringCharAtGenerator + +void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { + char_code_at_generator_.GenerateFast(masm); + char_from_code_generator_.GenerateFast(masm); +} + + +void StringCharAtGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + char_code_at_generator_.GenerateSlow(masm, call_helper); + char_from_code_generator_.GenerateSlow(masm, call_helper); +} + + +void StringAddStub::Generate(MacroAssembler* masm) { + Label string_add_runtime; + + // Load the two arguments. + __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument. + __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument. + + // Make sure that both arguments are strings if not known in advance. + if (string_check_) { + Condition is_smi; + is_smi = masm->CheckSmi(rax); + __ j(is_smi, &string_add_runtime); + __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8); + __ j(above_equal, &string_add_runtime); + + // First argument is a a string, test second. + is_smi = masm->CheckSmi(rdx); + __ j(is_smi, &string_add_runtime); + __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9); + __ j(above_equal, &string_add_runtime); + } + + // Both arguments are strings. + // rax: first string + // rdx: second string + // Check if either of the strings are empty. In that case return the other. + Label second_not_zero_length, both_not_zero_length; + __ movq(rcx, FieldOperand(rdx, String::kLengthOffset)); + __ SmiTest(rcx); + __ j(not_zero, &second_not_zero_length); + // Second string is empty, result is first string which is already in rax. + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + __ bind(&second_not_zero_length); + __ movq(rbx, FieldOperand(rax, String::kLengthOffset)); + __ SmiTest(rbx); + __ j(not_zero, &both_not_zero_length); + // First string is empty, result is second string which is in rdx. + __ movq(rax, rdx); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + + // Both strings are non-empty. + // rax: first string + // rbx: length of first string + // rcx: length of second string + // rdx: second string + // r8: map of first string if string check was performed above + // r9: map of second string if string check was performed above + Label string_add_flat_result, longer_than_two; + __ bind(&both_not_zero_length); + + // If arguments where known to be strings, maps are not loaded to r8 and r9 + // by the code above. 
+ if (!string_check_) { + __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset)); + __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); + } + // Get the instance types of the two strings as they will be needed soon. + __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset)); + __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset)); + + // Look at the length of the result of adding the two strings. + STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); + __ SmiAdd(rbx, rbx, rcx, NULL); + // Use the runtime system when adding two one character strings, as it + // contains optimizations for this specific case using the symbol table. + __ SmiCompare(rbx, Smi::FromInt(2)); + __ j(not_equal, &longer_than_two); + + // Check that both strings are non-external ascii strings. + __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx, + &string_add_runtime); + + // Get the two characters forming the sub string. + __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize)); + __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize)); + + // Try to lookup two character string in symbol table. If it is not found + // just allocate a new one. + Label make_two_character_string, make_flat_ascii_string; + StringHelper::GenerateTwoCharacterSymbolTableProbe( + masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + + __ bind(&make_two_character_string); + __ Set(rbx, 2); + __ jmp(&make_flat_ascii_string); + + __ bind(&longer_than_two); + // Check if resulting string will be flat. + __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength)); + __ j(below, &string_add_flat_result); + // Handle exceptionally long strings in the runtime system. + STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); + __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength)); + __ j(above, &string_add_runtime); + + // If result is not supposed to be flat, allocate a cons string object. If + // both strings are ascii the result is an ascii cons string. + // rax: first string + // rbx: length of resulting flat string + // rdx: second string + // r8: instance type of first string + // r9: instance type of second string + Label non_ascii, allocated, ascii_data; + __ movl(rcx, r8); + __ and_(rcx, r9); + STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); + __ testl(rcx, Immediate(kAsciiStringTag)); + __ j(zero, &non_ascii); + __ bind(&ascii_data); + // Allocate an acsii cons string. + __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime); + __ bind(&allocated); + // Fill the fields of the cons string. + __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx); + __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset), + Immediate(String::kEmptyHashField)); + __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax); + __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx); + __ movq(rax, rcx); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + __ bind(&non_ascii); + // At least one of the strings is two-byte. Check whether it happens + // to contain only ascii characters. + // rcx: first instance type AND second instance type. + // r8: first instance type. + // r9: second instance type. 
+ __ testb(rcx, Immediate(kAsciiDataHintMask)); + __ j(not_zero, &ascii_data); + __ xor_(r8, r9); + STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); + __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag)); + __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag)); + __ j(equal, &ascii_data); + // Allocate a two byte cons string. + __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime); + __ jmp(&allocated); + + // Handle creating a flat result. First check that both strings are not + // external strings. + // rax: first string + // rbx: length of resulting flat string as smi + // rdx: second string + // r8: instance type of first string + // r9: instance type of first string + __ bind(&string_add_flat_result); + __ SmiToInteger32(rbx, rbx); + __ movl(rcx, r8); + __ and_(rcx, Immediate(kStringRepresentationMask)); + __ cmpl(rcx, Immediate(kExternalStringTag)); + __ j(equal, &string_add_runtime); + __ movl(rcx, r9); + __ and_(rcx, Immediate(kStringRepresentationMask)); + __ cmpl(rcx, Immediate(kExternalStringTag)); + __ j(equal, &string_add_runtime); + // Now check if both strings are ascii strings. + // rax: first string + // rbx: length of resulting flat string + // rdx: second string + // r8: instance type of first string + // r9: instance type of second string + Label non_ascii_string_add_flat_result; + STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); + __ testl(r8, Immediate(kAsciiStringTag)); + __ j(zero, &non_ascii_string_add_flat_result); + __ testl(r9, Immediate(kAsciiStringTag)); + __ j(zero, &string_add_runtime); + + __ bind(&make_flat_ascii_string); + // Both strings are ascii strings. As they are short they are both flat. + __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime); + // rcx: result string + __ movq(rbx, rcx); + // Locate first character of result. + __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // Locate first character of first argument + __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset)); + __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // rax: first char of first argument + // rbx: result string + // rcx: first character of result + // rdx: second string + // rdi: length of first argument + StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true); + // Locate first character of second argument. + __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset)); + __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // rbx: result string + // rcx: next character of result + // rdx: first char of second argument + // rdi: length of second argument + StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true); + __ movq(rax, rbx); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + + // Handle creating a flat two byte result. + // rax: first string - known to be two byte + // rbx: length of resulting flat string + // rdx: second string + // r8: instance type of first string + // r9: instance type of first string + __ bind(&non_ascii_string_add_flat_result); + __ and_(r9, Immediate(kAsciiStringTag)); + __ j(not_zero, &string_add_runtime); + // Both strings are two byte strings. As they are short they are both + // flat. + __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime); + // rcx: result string + __ movq(rbx, rcx); + // Locate first character of result. 
+ __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // Locate first character of first argument. + __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset)); + __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // rax: first char of first argument + // rbx: result string + // rcx: first character of result + // rdx: second argument + // rdi: length of first argument + StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false); + // Locate first character of second argument. + __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset)); + __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // rbx: result string + // rcx: next character of result + // rdx: first char of second argument + // rdi: length of second argument + StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false); + __ movq(rax, rbx); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + + // Just jump to runtime to add the two strings. + __ bind(&string_add_runtime); + __ TailCallRuntime(Runtime::kStringAdd, 2, 1); +} + + +void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + bool ascii) { + Label loop; + __ bind(&loop); + // This loop just copies one character at a time, as it is only used for very + // short strings. + if (ascii) { + __ movb(kScratchRegister, Operand(src, 0)); + __ movb(Operand(dest, 0), kScratchRegister); + __ incq(src); + __ incq(dest); + } else { + __ movzxwl(kScratchRegister, Operand(src, 0)); + __ movw(Operand(dest, 0), kScratchRegister); + __ addq(src, Immediate(2)); + __ addq(dest, Immediate(2)); + } + __ decl(count); + __ j(not_zero, &loop); +} + + +void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, + Register dest, + Register src, + Register count, + bool ascii) { + // Copy characters using rep movs of doublewords. Align destination on 4 byte + // boundary before starting rep movs. Copy remaining characters after running + // rep movs. + // Count is positive int32, dest and src are character pointers. + ASSERT(dest.is(rdi)); // rep movs destination + ASSERT(src.is(rsi)); // rep movs source + ASSERT(count.is(rcx)); // rep movs count + + // Nothing to do for zero characters. + Label done; + __ testl(count, count); + __ j(zero, &done); + + // Make count the number of bytes to copy. + if (!ascii) { + STATIC_ASSERT(2 == sizeof(uc16)); + __ addl(count, count); + } + + // Don't enter the rep movs if there are less than 4 bytes to copy. + Label last_bytes; + __ testl(count, Immediate(~7)); + __ j(zero, &last_bytes); + + // Copy from edi to esi using rep movs instruction. + __ movl(kScratchRegister, count); + __ shr(count, Immediate(3)); // Number of doublewords to copy. + __ repmovsq(); + + // Find number of bytes left. + __ movl(count, kScratchRegister); + __ and_(count, Immediate(7)); + + // Check if there are more bytes to copy. + __ bind(&last_bytes); + __ testl(count, count); + __ j(zero, &done); + + // Copy remaining characters. 
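The byte-wise tail loop that follows finishes the copy. As a reference, the overall strategy GenerateCopyCharactersREP emits (bulk 8-byte moves, as rep movsq would do, then a 0-7 byte tail) corresponds roughly to this C++ sketch; it is not part of the patch and the helper name and types are illustrative only:

#include <cstdint>
#include <cstring>

// Hedged sketch of the copy strategy: bulk quadword moves followed by a
// byte-at-a-time tail. Two-byte strings copy 2 * count bytes.
static void CopyCharsSketch(uint8_t* dest, const uint8_t* src,
                            int count, bool ascii) {
  int bytes = ascii ? count : count * 2;
  if (bytes == 0) return;                // nothing to do
  int bulk = bytes & ~7;                 // the part rep movsq would cover
  std::memcpy(dest, src, bulk);          // quadword part
  for (int i = bulk; i < bytes; i++) {   // 0-7 trailing bytes
    dest[i] = src[i];
  }
}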
+  Label loop;
+  __ bind(&loop);
+  __ movb(kScratchRegister, Operand(src, 0));
+  __ movb(Operand(dest, 0), kScratchRegister);
+  __ incq(src);
+  __ incq(dest);
+  __ decl(count);
+  __ j(not_zero, &loop);
+
+  __ bind(&done);
+}
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                        Register c1,
+                                                        Register c2,
+                                                        Register scratch1,
+                                                        Register scratch2,
+                                                        Register scratch3,
+                                                        Register scratch4,
+                                                        Label* not_found) {
+  // Register scratch3 is the general scratch register in this function.
+  Register scratch = scratch3;
+
+  // Make sure that both characters are not digits as such strings have a
+  // different hash algorithm. Don't try to look for these in the symbol table.
+  Label not_array_index;
+  __ leal(scratch, Operand(c1, -'0'));
+  __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
+  __ j(above, &not_array_index);
+  __ leal(scratch, Operand(c2, -'0'));
+  __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
+  __ j(below_equal, not_found);
+
+  __ bind(&not_array_index);
+  // Calculate the two character string hash.
+  Register hash = scratch1;
+  GenerateHashInit(masm, hash, c1, scratch);
+  GenerateHashAddCharacter(masm, hash, c2, scratch);
+  GenerateHashGetHash(masm, hash, scratch);
+
+  // Collect the two characters in a register.
+  Register chars = c1;
+  __ shl(c2, Immediate(kBitsPerByte));
+  __ orl(chars, c2);
+
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string.
+
+  // Load the symbol table.
+  Register symbol_table = c2;
+  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+  // Calculate capacity mask from the symbol table capacity.
+  Register mask = scratch2;
+  __ SmiToInteger32(mask,
+                    FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
+  __ decl(mask);
+
+  Register undefined = scratch4;
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  // Registers
+  // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:         hash of two character string (32-bit int)
+  // symbol_table: symbol table
+  // mask:         capacity mask (32-bit int)
+  // undefined:    undefined value
+  // scratch:      -
+
+  // Perform a number of probes in the symbol table.
+  static const int kProbes = 4;
+  Label found_in_symbol_table;
+  Label next_probe[kProbes];
+  for (int i = 0; i < kProbes; i++) {
+    // Calculate entry in symbol table.
+    __ movl(scratch, hash);
+    if (i > 0) {
+      __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
+    }
+    __ andl(scratch, mask);
+
+    // Load the entry from the symbol table.
+    Register candidate = scratch;  // Scratch register contains candidate.
+    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+    __ movq(candidate,
+            FieldOperand(symbol_table,
+                         scratch,
+                         times_pointer_size,
+                         SymbolTable::kElementsStartOffset));
+
+    // If entry is undefined no string with this hash can be found.
+    __ cmpq(candidate, undefined);
+    __ j(equal, not_found);
+
+    // If length is not 2 the string is not a candidate.
+    __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
+                  Smi::FromInt(2));
+    __ j(not_equal, &next_probe[i]);
+
+    // We use kScratchRegister as a temporary register in assumption that
+    // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
+    Register temp = kScratchRegister;
+
+    // Check that the candidate is a non-external ascii string.
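The remaining instructions of this probe check the candidate's type and compare its two characters. As a reference only, the probing pattern (hash plus a per-probe offset, masked by capacity - 1, giving up at the first undefined slot) corresponds roughly to the C++ sketch below; the table layout, sentinel and probe_offset() are stand-ins for the SymbolTable internals, and the offset formula is an assumption:

#include <cstdint>

static uint32_t probe_offset(int n) { return (n * n + n) >> 1; }  // assumed

// Hedged sketch of the probe loop; kProbes == 4 as in the stub.
int FindTwoCharSymbol(const uint16_t* table, uint32_t capacity_mask,
                      uint32_t hash, uint16_t chars) {
  const uint16_t kEmpty = 0;  // stand-in for the undefined sentinel
  for (int i = 0; i < 4; i++) {
    uint32_t index = (hash + probe_offset(i)) & capacity_mask;
    if (table[index] == kEmpty) return -1;  // no string with this hash
    if (table[index] == chars) return static_cast<int>(index);  // found
  }
  return -1;  // not found by probing; the string may still exist elsewhere
}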
+ __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset)); + __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); + __ JumpIfInstanceTypeIsNotSequentialAscii( + temp, temp, &next_probe[i]); + + // Check if the two characters match. + __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize)); + __ andl(temp, Immediate(0x0000ffff)); + __ cmpl(chars, temp); + __ j(equal, &found_in_symbol_table); + __ bind(&next_probe[i]); + } + + // No matching 2 character string found by probing. + __ jmp(not_found); + + // Scratch register contains result when we fall through to here. + Register result = scratch; + __ bind(&found_in_symbol_table); + if (!result.is(rax)) { + __ movq(rax, result); + } +} + + +void StringHelper::GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character, + Register scratch) { + // hash = character + (character << 10); + __ movl(hash, character); + __ shll(hash, Immediate(10)); + __ addl(hash, character); + // hash ^= hash >> 6; + __ movl(scratch, hash); + __ sarl(scratch, Immediate(6)); + __ xorl(hash, scratch); +} + + +void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character, + Register scratch) { + // hash += character; + __ addl(hash, character); + // hash += hash << 10; + __ movl(scratch, hash); + __ shll(scratch, Immediate(10)); + __ addl(hash, scratch); + // hash ^= hash >> 6; + __ movl(scratch, hash); + __ sarl(scratch, Immediate(6)); + __ xorl(hash, scratch); +} + + +void StringHelper::GenerateHashGetHash(MacroAssembler* masm, + Register hash, + Register scratch) { + // hash += hash << 3; + __ leal(hash, Operand(hash, hash, times_8, 0)); + // hash ^= hash >> 11; + __ movl(scratch, hash); + __ sarl(scratch, Immediate(11)); + __ xorl(hash, scratch); + // hash += hash << 15; + __ movl(scratch, hash); + __ shll(scratch, Immediate(15)); + __ addl(hash, scratch); + + // if (hash == 0) hash = 27; + Label hash_not_zero; + __ j(not_zero, &hash_not_zero); + __ movl(hash, Immediate(27)); + __ bind(&hash_not_zero); +} + +void SubStringStub::Generate(MacroAssembler* masm) { + Label runtime; + + // Stack frame on entry. + // rsp[0]: return address + // rsp[8]: to + // rsp[16]: from + // rsp[24]: string + + const int kToOffset = 1 * kPointerSize; + const int kFromOffset = kToOffset + kPointerSize; + const int kStringOffset = kFromOffset + kPointerSize; + const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset; + + // Make sure first argument is a string. + __ movq(rax, Operand(rsp, kStringOffset)); + STATIC_ASSERT(kSmiTag == 0); + __ testl(rax, Immediate(kSmiTagMask)); + __ j(zero, &runtime); + Condition is_string = masm->IsObjectStringType(rax, rbx, rbx); + __ j(NegateCondition(is_string), &runtime); + + // rax: string + // rbx: instance type + // Calculate length of sub string using the smi values. + Label result_longer_than_two; + __ movq(rcx, Operand(rsp, kToOffset)); + __ movq(rdx, Operand(rsp, kFromOffset)); + __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime); + + __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen. + __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx); + Label return_rax; + __ j(equal, &return_rax); + // Special handling of sub-strings of length 1 and 2. One character strings + // are handled in the runtime system (looked up in the single character + // cache). Two character strings are looked for in the symbol cache. 
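The GenerateHashInit, GenerateHashAddCharacter and GenerateHashGetHash helpers used for that symbol-cache lookup appear earlier in this file. As a reference, the hash they compute corresponds roughly to this C++ sketch (following the comments in the generated code, with shifts on unsigned 32-bit values); it is illustrative, not part of the patch:

#include <cstdint>

uint32_t HashInit(uint32_t c) {
  uint32_t hash = c + (c << 10);
  return hash ^ (hash >> 6);
}

uint32_t HashAddCharacter(uint32_t hash, uint32_t c) {
  hash += c;
  hash += hash << 10;
  return hash ^ (hash >> 6);
}

uint32_t HashGetHash(uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash == 0 ? 27 : hash;  // the stub substitutes 27 for a zero hash
}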
+ __ SmiToInteger32(rcx, rcx); + __ cmpl(rcx, Immediate(2)); + __ j(greater, &result_longer_than_two); + __ j(less, &runtime); + + // Sub string of length 2 requested. + // rax: string + // rbx: instance type + // rcx: sub string length (value is 2) + // rdx: from index (smi) + __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime); + + // Get the two characters forming the sub string. + __ SmiToInteger32(rdx, rdx); // From index is no longer smi. + __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize)); + __ movzxbq(rcx, + FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1)); + + // Try to lookup two character string in symbol table. + Label make_two_character_string; + StringHelper::GenerateTwoCharacterSymbolTableProbe( + masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string); + __ ret(3 * kPointerSize); + + __ bind(&make_two_character_string); + // Setup registers for allocating the two character string. + __ movq(rax, Operand(rsp, kStringOffset)); + __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); + __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset)); + __ Set(rcx, 2); + + __ bind(&result_longer_than_two); + + // rax: string + // rbx: instance type + // rcx: result string length + // Check for flat ascii string + Label non_ascii_flat; + __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat); + + // Allocate the result. + __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime); + + // rax: result string + // rcx: result string length + __ movq(rdx, rsi); // esi used by following code. + // Locate first character of result. + __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize)); + // Load string argument and locate character of sub string start. + __ movq(rsi, Operand(rsp, kStringOffset)); + __ movq(rbx, Operand(rsp, kFromOffset)); + { + SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1); + __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale, + SeqAsciiString::kHeaderSize - kHeapObjectTag)); + } + + // rax: result string + // rcx: result length + // rdx: original value of rsi + // rdi: first character of result + // rsi: character of sub string start + StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true); + __ movq(rsi, rdx); // Restore rsi. + __ IncrementCounter(&Counters::sub_string_native, 1); + __ ret(kArgumentsSize); + + __ bind(&non_ascii_flat); + // rax: string + // rbx: instance type & kStringRepresentationMask | kStringEncodingMask + // rcx: result string length + // Check for sequential two byte string + __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag)); + __ j(not_equal, &runtime); + + // Allocate the result. + __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime); + + // rax: result string + // rcx: result string length + __ movq(rdx, rsi); // esi used by following code. + // Locate first character of result. + __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize)); + // Load string argument and locate character of sub string start. 
+ __ movq(rsi, Operand(rsp, kStringOffset)); + __ movq(rbx, Operand(rsp, kFromOffset)); + { + SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2); + __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale, + SeqAsciiString::kHeaderSize - kHeapObjectTag)); + } + + // rax: result string + // rcx: result length + // rdx: original value of rsi + // rdi: first character of result + // rsi: character of sub string start + StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false); + __ movq(rsi, rdx); // Restore esi. + + __ bind(&return_rax); + __ IncrementCounter(&Counters::sub_string_native, 1); + __ ret(kArgumentsSize); + + // Just jump to runtime to create the sub string. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kSubString, 3, 1); +} + + +void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4) { + // Ensure that you can always subtract a string length from a non-negative + // number (e.g. another length). + STATIC_ASSERT(String::kMaxLength < 0x7fffffff); + + // Find minimum length and length difference. + __ movq(scratch1, FieldOperand(left, String::kLengthOffset)); + __ movq(scratch4, scratch1); + __ SmiSub(scratch4, + scratch4, + FieldOperand(right, String::kLengthOffset), + NULL); + // Register scratch4 now holds left.length - right.length. + const Register length_difference = scratch4; + Label left_shorter; + __ j(less, &left_shorter); + // The right string isn't longer that the left one. + // Get the right string's length by subtracting the (non-negative) difference + // from the left string's length. + __ SmiSub(scratch1, scratch1, length_difference, NULL); + __ bind(&left_shorter); + // Register scratch1 now holds Min(left.length, right.length). + const Register min_length = scratch1; + + Label compare_lengths; + // If min-length is zero, go directly to comparing lengths. + __ SmiTest(min_length); + __ j(zero, &compare_lengths); + + __ SmiToInteger32(min_length, min_length); + + // Registers scratch2 and scratch3 are free. + Label result_not_equal; + Label loop; + { + // Check characters 0 .. min_length - 1 in a loop. + // Use scratch3 as loop index, min_length as limit and scratch2 + // for computation. + const Register index = scratch3; + __ movl(index, Immediate(0)); // Index into strings. + __ bind(&loop); + // Compare characters. + // TODO(lrn): Could we load more than one character at a time? + __ movb(scratch2, FieldOperand(left, + index, + times_1, + SeqAsciiString::kHeaderSize)); + // Increment index and use -1 modifier on next load to give + // the previous load extra time to complete. + __ addl(index, Immediate(1)); + __ cmpb(scratch2, FieldOperand(right, + index, + times_1, + SeqAsciiString::kHeaderSize - 1)); + __ j(not_equal, &result_not_equal); + __ cmpl(index, min_length); + __ j(not_equal, &loop); + } + // Completed loop without finding different characters. + // Compare lengths (precomputed). + __ bind(&compare_lengths); + __ SmiTest(length_difference); + __ j(not_zero, &result_not_equal); + + // Result is EQUAL. + __ Move(rax, Smi::FromInt(EQUAL)); + __ ret(0); + + Label result_greater; + __ bind(&result_not_equal); + // Unequal comparison of left to right, either character or length. + __ j(greater, &result_greater); + + // Result is LESS. + __ Move(rax, Smi::FromInt(LESS)); + __ ret(0); + + // Result is GREATER. 
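The result selection that follows completes the comparison. As a reference, the whole of GenerateCompareFlatAsciiStrings corresponds roughly to this C++ sketch (compare the first min(left_len, right_len) characters, then fall back to the length difference); names are illustrative:

// Hedged sketch: returns -1 (LESS), 0 (EQUAL) or 1 (GREATER).
int CompareFlatAsciiSketch(const unsigned char* left, int left_len,
                           const unsigned char* right, int right_len) {
  int min_len = left_len < right_len ? left_len : right_len;
  for (int i = 0; i < min_len; i++) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
  }
  // Equal up to min_len: the shorter string compares as smaller.
  if (left_len == right_len) return 0;
  return left_len < right_len ? -1 : 1;
}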
+  __ bind(&result_greater);
+  __ Move(rax, Smi::FromInt(GREATER));
+  __ ret(0);
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  // Stack frame on entry.
+  //  rsp[0]: return address
+  //  rsp[8]: right string
+  //  rsp[16]: left string
+
+  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // left
+  __ movq(rax, Operand(rsp, 1 * kPointerSize));  // right
+
+  // Check for identity.
+  Label not_same;
+  __ cmpq(rdx, rax);
+  __ j(not_equal, &not_same);
+  __ Move(rax, Smi::FromInt(EQUAL));
+  __ IncrementCounter(&Counters::string_compare_native, 1);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&not_same);
+
+  // Check that both are sequential ASCII strings.
+  __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
+
+  // Inline comparison of ascii strings.
+  __ IncrementCounter(&Counters::string_compare_native, 1);
+  // Drop arguments from the stack
+  __ pop(rcx);
+  __ addq(rsp, Immediate(2 * kPointerSize));
+  __ push(rcx);
+  GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
+
+  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
new file mode 100644
index 0000000..a83643b
--- /dev/null
+++ b/src/x64/code-stubs-x64.h
@@ -0,0 +1,392 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_CODE_STUBS_X64_H_
+#define V8_X64_CODE_STUBS_X64_H_
+
+#include "codegen-inl.h"
+#include "ast.h"
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub { + public: + explicit TranscendentalCacheStub(TranscendentalCache::Type type) + : type_(type) {} + void Generate(MacroAssembler* masm); + private: + TranscendentalCache::Type type_; + Major MajorKey() { return TranscendentalCache; } + int MinorKey() { return type_; } + Runtime::FunctionId RuntimeFunction(); + void GenerateOperation(MacroAssembler* masm, Label* on_nan_result); +}; + + +class ToBooleanStub: public CodeStub { + public: + ToBooleanStub() { } + + void Generate(MacroAssembler* masm); + + private: + Major MajorKey() { return ToBoolean; } + int MinorKey() { return 0; } +}; + + +// Flag that indicates how to generate code for the stub GenericBinaryOpStub. +enum GenericBinaryFlags { + NO_GENERIC_BINARY_FLAGS = 0, + NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub. +}; + + +class GenericBinaryOpStub: public CodeStub { + public: + GenericBinaryOpStub(Token::Value op, + OverwriteMode mode, + GenericBinaryFlags flags, + TypeInfo operands_type = TypeInfo::Unknown()) + : op_(op), + mode_(mode), + flags_(flags), + args_in_registers_(false), + args_reversed_(false), + static_operands_type_(operands_type), + runtime_operands_type_(BinaryOpIC::DEFAULT), + name_(NULL) { + ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); + } + + GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) + : op_(OpBits::decode(key)), + mode_(ModeBits::decode(key)), + flags_(FlagBits::decode(key)), + args_in_registers_(ArgsInRegistersBits::decode(key)), + args_reversed_(ArgsReversedBits::decode(key)), + static_operands_type_(TypeInfo::ExpandedRepresentation( + StaticTypeInfoBits::decode(key))), + runtime_operands_type_(type_info), + name_(NULL) { + } + + // Generate code to call the stub with the supplied arguments. This will add + // code at the call site to prepare arguments either in registers or on the + // stack together with the actual call. + void GenerateCall(MacroAssembler* masm, Register left, Register right); + void GenerateCall(MacroAssembler* masm, Register left, Smi* right); + void GenerateCall(MacroAssembler* masm, Smi* left, Register right); + + Result GenerateCall(MacroAssembler* masm, + VirtualFrame* frame, + Result* left, + Result* right); + + private: + Token::Value op_; + OverwriteMode mode_; + GenericBinaryFlags flags_; + bool args_in_registers_; // Arguments passed in registers not on the stack. + bool args_reversed_; // Left and right argument are swapped. + + // Number type information of operands, determined by code generator. + TypeInfo static_operands_type_; + + // Operand type information determined at runtime. + BinaryOpIC::TypeInfo runtime_operands_type_; + + char* name_; + + const char* GetName(); + +#ifdef DEBUG + void Print() { + PrintF("GenericBinaryOpStub %d (op %s), " + "(mode %d, flags %d, registers %d, reversed %d, only_numbers %s)\n", + MinorKey(), + Token::String(op_), + static_cast(mode_), + static_cast(flags_), + static_cast(args_in_registers_), + static_cast(args_reversed_), + static_operands_type_.ToString()); + } +#endif + + // Minor key encoding in 17 bits TTNNNFRAOOOOOOOMM. + class ModeBits: public BitField {}; + class OpBits: public BitField {}; + class ArgsInRegistersBits: public BitField {}; + class ArgsReversedBits: public BitField {}; + class FlagBits: public BitField {}; + class StaticTypeInfoBits: public BitField {}; + class RuntimeTypeInfoBits: public BitField {}; + + Major MajorKey() { return GenericBinaryOp; } + int MinorKey() { + // Encode the parameters in a unique 18 bit value. 
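The return statement that follows ORs the encoded fields together into the minor key. As a reference, a V8-style BitField encoder works roughly like the sketch below; the field positions and widths shown are illustrative, not the stub's actual layout:

#include <cstdint>

// Hedged sketch of bit-field packing: each field owns a shift and a width
// inside one integer key.
template <class T, int shift, int size>
struct BitFieldSketch {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key >> shift) & ((1u << size) - 1));
  }
};

typedef BitFieldSketch<int, 0, 2> ModeField;     // e.g. overwrite mode
typedef BitFieldSketch<int, 2, 7> OpField;       // e.g. token value
typedef BitFieldSketch<bool, 9, 1> InRegsField;  // e.g. args-in-registers flag

uint32_t MakeMinorKey(int mode, int op, bool in_regs) {
  return ModeField::encode(mode) | OpField::encode(op) |
         InRegsField::encode(in_regs);
}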
+ return OpBits::encode(op_) + | ModeBits::encode(mode_) + | FlagBits::encode(flags_) + | ArgsInRegistersBits::encode(args_in_registers_) + | ArgsReversedBits::encode(args_reversed_) + | StaticTypeInfoBits::encode( + static_operands_type_.ThreeBitRepresentation()) + | RuntimeTypeInfoBits::encode(runtime_operands_type_); + } + + void Generate(MacroAssembler* masm); + void GenerateSmiCode(MacroAssembler* masm, Label* slow); + void GenerateLoadArguments(MacroAssembler* masm); + void GenerateReturn(MacroAssembler* masm); + void GenerateRegisterArgsPush(MacroAssembler* masm); + void GenerateTypeTransition(MacroAssembler* masm); + + bool ArgsInRegistersSupported() { + return (op_ == Token::ADD) || (op_ == Token::SUB) + || (op_ == Token::MUL) || (op_ == Token::DIV); + } + bool IsOperationCommutative() { + return (op_ == Token::ADD) || (op_ == Token::MUL); + } + + void SetArgsInRegisters() { args_in_registers_ = true; } + void SetArgsReversed() { args_reversed_ = true; } + bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; } + bool HasArgsInRegisters() { return args_in_registers_; } + bool HasArgsReversed() { return args_reversed_; } + + bool ShouldGenerateSmiCode() { + return HasSmiCodeInStub() && + runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && + runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + bool ShouldGenerateFPCode() { + return runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + virtual int GetCodeKind() { return Code::BINARY_OP_IC; } + + virtual InlineCacheState GetICState() { + return BinaryOpIC::ToState(runtime_operands_type_); + } +}; + +class StringHelper : public AllStatic { + public: + // Generate code for copying characters using a simple loop. This should only + // be used in places where the number of characters is small and the + // additional setup and checking in GenerateCopyCharactersREP adds too much + // overhead. Copying of overlapping regions is not supported. + static void GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + bool ascii); + + // Generate code for copying characters using the rep movs instruction. + // Copies rcx characters from rsi to rdi. Copying of overlapping regions is + // not supported. + static void GenerateCopyCharactersREP(MacroAssembler* masm, + Register dest, // Must be rdi. + Register src, // Must be rsi. + Register count, // Must be rcx. + bool ascii); + + + // Probe the symbol table for a two character string. If the string is + // not found by probing a jump to the label not_found is performed. This jump + // does not guarantee that the string is not in the symbol table. If the + // string is found the code falls through with the string in register rax. + static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + Register c1, + Register c2, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* not_found); + + // Generate string hash. + static void GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character, + Register scratch); + static void GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character, + Register scratch); + static void GenerateHashGetHash(MacroAssembler* masm, + Register hash, + Register scratch); + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); +}; + + +// Flag that indicates how to generate code for the stub StringAddStub. +enum StringAddFlags { + NO_STRING_ADD_FLAGS = 0, + NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. 
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+  explicit StringAddStub(StringAddFlags flags) {
+    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+  }
+
+ private:
+  Major MajorKey() { return StringAdd; }
+  int MinorKey() { return string_check_ ? 0 : 1; }
+
+  void Generate(MacroAssembler* masm);
+
+  // Should the stub check whether arguments are strings?
+  bool string_check_;
+};
+
+
+class SubStringStub: public CodeStub {
+ public:
+  SubStringStub() {}
+
+ private:
+  Major MajorKey() { return SubString; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public CodeStub {
+ public:
+  explicit StringCompareStub() {}
+
+  // Compare two flat ascii strings and return the result in rax after popping
+  // two arguments from the stack.
+  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+                                              Register left,
+                                              Register right,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3,
+                                              Register scratch4);
+
+ private:
+  Major MajorKey() { return StringCompare; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
+class NumberToStringStub: public CodeStub {
+ public:
+  NumberToStringStub() { }
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the register object is found in the cache the generated code falls through
+  // with the result in the result register. The object and the result register
+  // can be the same. If the number is not found in the cache the code jumps to
+  // the label not_found with only the content of register object unchanged.
+  static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+                                              Register object,
+                                              Register result,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              bool object_is_smi,
+                                              Label* not_found);
+
+ private:
+  static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
+                                             Register hash,
+                                             Register mask);
+
+  Major MajorKey() { return NumberToString; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("NumberToStringStub\n");
+  }
+#endif
+};
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+  RecordWriteStub(Register object, Register addr, Register scratch)
+      : object_(object), addr_(addr), scratch_(scratch) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register object_;
+  Register addr_;
+  Register scratch_;
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+           object_.code(), addr_.code(), scratch_.code());
+  }
+#endif
+
+  // Minor key encoding in 12 bits. 4 bits for each of the three
+  // registers (object, address and scratch) OOOOAAAASSSS.
+  class ScratchBits : public BitField<uint32_t, 0, 4> {};
+  class AddressBits : public BitField<uint32_t, 4, 4> {};
+  class ObjectBits : public BitField<uint32_t, 8, 4> {};
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    // Encode the registers.
+ return ObjectBits::encode(object_.code()) | + AddressBits::encode(addr_.code()) | + ScratchBits::encode(scratch_.code()); + } +}; + + +} } // namespace v8::internal + +#endif // V8_X64_CODE_STUBS_X64_H_ diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc index b5a0d4b..5cb5b10 100644 --- a/src/x64/codegen-x64.cc +++ b/src/x64/codegen-x64.cc @@ -30,6 +30,7 @@ #if defined(V8_TARGET_ARCH_X64) #include "bootstrapper.h" +#include "code-stubs-x64.h" #include "codegen-inl.h" #include "compiler.h" #include "debug.h" @@ -807,55 +808,6 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) { } -class FloatingPointHelper : public AllStatic { - public: - // Load the operands from rdx and rax into xmm0 and xmm1, as doubles. - // If the operands are not both numbers, jump to not_numbers. - // Leaves rdx and rax unchanged. SmiOperands assumes both are smis. - // NumberOperands assumes both are smis or heap numbers. - static void LoadSSE2SmiOperands(MacroAssembler* masm); - static void LoadSSE2NumberOperands(MacroAssembler* masm); - static void LoadSSE2UnknownOperands(MacroAssembler* masm, - Label* not_numbers); - - // Takes the operands in rdx and rax and loads them as integers in rax - // and rcx. - static void LoadAsIntegers(MacroAssembler* masm, - Label* operand_conversion_failure, - Register heap_number_map); - // As above, but we know the operands to be numbers. In that case, - // conversion can't fail. - static void LoadNumbersAsIntegers(MacroAssembler* masm); -}; - - -const char* GenericBinaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); - if (name_ == NULL) return "OOM"; - const char* op_name = Token::Name(op_); - const char* overwrite_name; - switch (mode_) { - case NO_OVERWRITE: overwrite_name = "Alloc"; break; - case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; - case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; - default: overwrite_name = "UnknownOverwrite"; break; - } - - OS::SNPrintF(Vector(name_, kMaxNameLength), - "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s", - op_name, - overwrite_name, - (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", - args_in_registers_ ? "RegArgs" : "StackArgs", - args_reversed_ ? "_R" : "", - static_operands_type_.ToString(), - BinaryOpIC::GetName(runtime_operands_type_)); - return name_; -} - - // Call the specialized stub for a binary operation. class DeferredInlineBinaryOperation: public DeferredCode { public: @@ -8819,350 +8771,6 @@ void Reference::SetValue(InitState init_state) { } -void FastNewClosureStub::Generate(MacroAssembler* masm) { - // Create a new closure from the given function info in new - // space. Set the context to the current context in rsi. - Label gc; - __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT); - - // Get the function info from the stack. - __ movq(rdx, Operand(rsp, 1 * kPointerSize)); - - // Compute the function map in the current global context and set that - // as the map of the allocated object. - __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset)); - __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); - __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx); - - // Initialize the rest of the function. We don't have to update the - // write barrier because the allocated object is in new space. 
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex); - __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex); - __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx); - __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx); - __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx); - __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx); - __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi); - __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx); - - // Initialize the code pointer in the function to be the one - // found in the shared function info object. - __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset)); - __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize)); - __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx); - - - // Return and remove the on-stack parameter. - __ ret(1 * kPointerSize); - - // Create a new closure through the slower runtime call. - __ bind(&gc); - __ pop(rcx); // Temporarily remove return address. - __ pop(rdx); - __ push(rsi); - __ push(rdx); - __ push(rcx); // Restore return address. - __ TailCallRuntime(Runtime::kNewClosure, 2, 1); -} - - -void FastNewContextStub::Generate(MacroAssembler* masm) { - // Try to allocate the context in new space. - Label gc; - int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, - rax, rbx, rcx, &gc, TAG_OBJECT); - - // Get the function from the stack. - __ movq(rcx, Operand(rsp, 1 * kPointerSize)); - - // Setup the object header. - __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex); - __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); - __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length)); - - // Setup the fixed slots. - __ xor_(rbx, rbx); // Set to NULL. - __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx); - __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax); - __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx); - __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx); - - // Copy the global object from the surrounding context. - __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx); - - // Initialize the rest of the slots to undefined. - __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex); - for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { - __ movq(Operand(rax, Context::SlotOffset(i)), rbx); - } - - // Return and remove the on-stack parameter. - __ movq(rsi, rax); - __ ret(1 * kPointerSize); - - // Need to collect. Call into runtime system. - __ bind(&gc); - __ TailCallRuntime(Runtime::kNewContext, 1, 1); -} - - -void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { - // Stack layout on entry: - // - // [rsp + kPointerSize]: constant elements. - // [rsp + (2 * kPointerSize)]: literal index. - // [rsp + (3 * kPointerSize)]: literals array. - - // All sizes here are multiples of kPointerSize. - int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0; - int size = JSArray::kSize + elements_size; - - // Load boilerplate object into rcx and check if we need to create a - // boilerplate. 
- Label slow_case; - __ movq(rcx, Operand(rsp, 3 * kPointerSize)); - __ movq(rax, Operand(rsp, 2 * kPointerSize)); - SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2); - __ movq(rcx, - FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize)); - __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex); - __ j(equal, &slow_case); - - if (FLAG_debug_code) { - const char* message; - Heap::RootListIndex expected_map_index; - if (mode_ == CLONE_ELEMENTS) { - message = "Expected (writable) fixed array"; - expected_map_index = Heap::kFixedArrayMapRootIndex; - } else { - ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS); - message = "Expected copy-on-write fixed array"; - expected_map_index = Heap::kFixedCOWArrayMapRootIndex; - } - __ push(rcx); - __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset)); - __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset), - expected_map_index); - __ Assert(equal, message); - __ pop(rcx); - } - - // Allocate both the JS array and the elements array in one big - // allocation. This avoids multiple limit checks. - __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT); - - // Copy the JS array part. - for (int i = 0; i < JSArray::kSize; i += kPointerSize) { - if ((i != JSArray::kElementsOffset) || (length_ == 0)) { - __ movq(rbx, FieldOperand(rcx, i)); - __ movq(FieldOperand(rax, i), rbx); - } - } - - if (length_ > 0) { - // Get hold of the elements array of the boilerplate and setup the - // elements pointer in the resulting object. - __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset)); - __ lea(rdx, Operand(rax, JSArray::kSize)); - __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx); - - // Copy the elements array. - for (int i = 0; i < elements_size; i += kPointerSize) { - __ movq(rbx, FieldOperand(rcx, i)); - __ movq(FieldOperand(rdx, i), rbx); - } - } - - // Return and remove the on-stack parameters. - __ ret(3 * kPointerSize); - - __ bind(&slow_case); - __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); -} - - -void ToBooleanStub::Generate(MacroAssembler* masm) { - Label false_result, true_result, not_string; - __ movq(rax, Operand(rsp, 1 * kPointerSize)); - - // 'null' => false. - __ CompareRoot(rax, Heap::kNullValueRootIndex); - __ j(equal, &false_result); - - // Get the map and type of the heap object. - // We don't use CmpObjectType because we manipulate the type field. - __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); - __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset)); - - // Undetectable => false. - __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset)); - __ and_(rbx, Immediate(1 << Map::kIsUndetectable)); - __ j(not_zero, &false_result); - - // JavaScript object => true. - __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE)); - __ j(above_equal, &true_result); - - // String value => false iff empty. - __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE)); - __ j(above_equal, ¬_string); - __ movq(rdx, FieldOperand(rax, String::kLengthOffset)); - __ SmiTest(rdx); - __ j(zero, &false_result); - __ jmp(&true_result); - - __ bind(¬_string); - __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex); - __ j(not_equal, &true_result); - // HeapNumber => false iff +0, -0, or NaN. - // These three cases set the zero flag when compared to zero using ucomisd. - __ xorpd(xmm0, xmm0); - __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset)); - __ j(zero, &false_result); - // Fall through to |true_result|. - - // Return 1/0 for true/false in rax. 
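This stub, removed here and moved into the new code-stubs files, encodes JavaScript's ToBoolean rules. As a reference only, the decision procedure it implements corresponds roughly to the sketch below, where SketchValue and its fields are stand-ins rather than V8 types:

#include <string>

// Stand-in value type for the sketch; not V8's object model.
struct SketchValue {
  enum Kind { kNull, kUndetectable, kJSObject, kString, kHeapNumber, kOther };
  Kind kind;
  std::string str;  // used when kind == kString
  double number;    // used when kind == kHeapNumber
};

// Rough decision order used by the stub: null and undetectable objects are
// false, JS objects are true, strings are true iff non-empty, heap numbers
// are false iff +0, -0 or NaN; everything else falls through as true.
bool ToBooleanSketch(const SketchValue& v) {
  switch (v.kind) {
    case SketchValue::kNull:         return false;
    case SketchValue::kUndetectable: return false;
    case SketchValue::kJSObject:     return true;
    case SketchValue::kString:       return !v.str.empty();
    case SketchValue::kHeapNumber:   return !(v.number == 0.0 || v.number != v.number);
    default:                         return true;
  }
}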
- __ bind(&true_result); - __ movq(rax, Immediate(1)); - __ ret(1 * kPointerSize); - __ bind(&false_result); - __ xor_(rax, rax); - __ ret(1 * kPointerSize); -} - - -void GenericBinaryOpStub::GenerateCall( - MacroAssembler* masm, - Register left, - Register right) { - if (!ArgsInRegistersSupported()) { - // Pass arguments on the stack. - __ push(left); - __ push(right); - } else { - // The calling convention with registers is left in rdx and right in rax. - Register left_arg = rdx; - Register right_arg = rax; - if (!(left.is(left_arg) && right.is(right_arg))) { - if (left.is(right_arg) && right.is(left_arg)) { - if (IsOperationCommutative()) { - SetArgsReversed(); - } else { - __ xchg(left, right); - } - } else if (left.is(left_arg)) { - __ movq(right_arg, right); - } else if (right.is(right_arg)) { - __ movq(left_arg, left); - } else if (left.is(right_arg)) { - if (IsOperationCommutative()) { - __ movq(left_arg, right); - SetArgsReversed(); - } else { - // Order of moves important to avoid destroying left argument. - __ movq(left_arg, left); - __ movq(right_arg, right); - } - } else if (right.is(left_arg)) { - if (IsOperationCommutative()) { - __ movq(right_arg, left); - SetArgsReversed(); - } else { - // Order of moves important to avoid destroying right argument. - __ movq(right_arg, right); - __ movq(left_arg, left); - } - } else { - // Order of moves is not important. - __ movq(left_arg, left); - __ movq(right_arg, right); - } - } - - // Update flags to indicate that arguments are in registers. - SetArgsInRegisters(); - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); - } - - // Call the stub. - __ CallStub(this); -} - - -void GenericBinaryOpStub::GenerateCall( - MacroAssembler* masm, - Register left, - Smi* right) { - if (!ArgsInRegistersSupported()) { - // Pass arguments on the stack. - __ push(left); - __ Push(right); - } else { - // The calling convention with registers is left in rdx and right in rax. - Register left_arg = rdx; - Register right_arg = rax; - if (left.is(left_arg)) { - __ Move(right_arg, right); - } else if (left.is(right_arg) && IsOperationCommutative()) { - __ Move(left_arg, right); - SetArgsReversed(); - } else { - // For non-commutative operations, left and right_arg might be - // the same register. Therefore, the order of the moves is - // important here in order to not overwrite left before moving - // it to left_arg. - __ movq(left_arg, left); - __ Move(right_arg, right); - } - - // Update flags to indicate that arguments are in registers. - SetArgsInRegisters(); - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); - } - - // Call the stub. - __ CallStub(this); -} - - -void GenericBinaryOpStub::GenerateCall( - MacroAssembler* masm, - Smi* left, - Register right) { - if (!ArgsInRegistersSupported()) { - // Pass arguments on the stack. - __ Push(left); - __ push(right); - } else { - // The calling convention with registers is left in rdx and right in rax. - Register left_arg = rdx; - Register right_arg = rax; - if (right.is(right_arg)) { - __ Move(left_arg, left); - } else if (right.is(left_arg) && IsOperationCommutative()) { - __ Move(right_arg, left); - SetArgsReversed(); - } else { - // For non-commutative operations, right and left_arg might be - // the same register. Therefore, the order of the moves is - // important here in order to not overwrite right before moving - // it to right_arg. - __ movq(right_arg, right); - __ Move(left_arg, left); - } - // Update flags to indicate that arguments are in registers. 
- SetArgsInRegisters(); - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); - } - - // Call the stub. - __ CallStub(this); -} - - Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm, VirtualFrame* frame, Result* left, @@ -9177,3585 +8785,6 @@ Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm, } } - -void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { - // 1. Move arguments into rdx, rax except for DIV and MOD, which need the - // dividend in rax and rdx free for the division. Use rax, rbx for those. - Comment load_comment(masm, "-- Load arguments"); - Register left = rdx; - Register right = rax; - if (op_ == Token::DIV || op_ == Token::MOD) { - left = rax; - right = rbx; - if (HasArgsInRegisters()) { - __ movq(rbx, rax); - __ movq(rax, rdx); - } - } - if (!HasArgsInRegisters()) { - __ movq(right, Operand(rsp, 1 * kPointerSize)); - __ movq(left, Operand(rsp, 2 * kPointerSize)); - } - - Label not_smis; - // 2. Smi check both operands. - if (static_operands_type_.IsSmi()) { - // Skip smi check if we know that both arguments are smis. - if (FLAG_debug_code) { - __ AbortIfNotSmi(left); - __ AbortIfNotSmi(right); - } - if (op_ == Token::BIT_OR) { - // Handle OR here, since we do extra smi-checking in the or code below. - __ SmiOr(right, right, left); - GenerateReturn(masm); - return; - } - } else { - if (op_ != Token::BIT_OR) { - // Skip the check for OR as it is better combined with the - // actual operation. - Comment smi_check_comment(masm, "-- Smi check arguments"); - __ JumpIfNotBothSmi(left, right, ¬_smis); - } - } - - // 3. Operands are both smis (except for OR), perform the operation leaving - // the result in rax and check the result if necessary. - Comment perform_smi(masm, "-- Perform smi operation"); - Label use_fp_on_smis; - switch (op_) { - case Token::ADD: { - ASSERT(right.is(rax)); - __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. - break; - } - - case Token::SUB: { - __ SmiSub(left, left, right, &use_fp_on_smis); - __ movq(rax, left); - break; - } - - case Token::MUL: - ASSERT(right.is(rax)); - __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. - break; - - case Token::DIV: - ASSERT(left.is(rax)); - __ SmiDiv(left, left, right, &use_fp_on_smis); - break; - - case Token::MOD: - ASSERT(left.is(rax)); - __ SmiMod(left, left, right, slow); - break; - - case Token::BIT_OR: - ASSERT(right.is(rax)); - __ movq(rcx, right); // Save the right operand. - __ SmiOr(right, right, left); // BIT_OR is commutative. - __ testb(right, Immediate(kSmiTagMask)); - __ j(not_zero, ¬_smis); - break; - - case Token::BIT_AND: - ASSERT(right.is(rax)); - __ SmiAnd(right, right, left); // BIT_AND is commutative. - break; - - case Token::BIT_XOR: - ASSERT(right.is(rax)); - __ SmiXor(right, right, left); // BIT_XOR is commutative. - break; - - case Token::SHL: - case Token::SHR: - case Token::SAR: - switch (op_) { - case Token::SAR: - __ SmiShiftArithmeticRight(left, left, right); - break; - case Token::SHR: - __ SmiShiftLogicalRight(left, left, right, slow); - break; - case Token::SHL: - __ SmiShiftLeft(left, left, right); - break; - default: - UNREACHABLE(); - } - __ movq(rax, left); - break; - - default: - UNREACHABLE(); - break; - } - - // 4. Emit return of result in rax. - GenerateReturn(masm); - - // 5. For some operations emit inline code to perform floating point - // operations on known smis (e.g., if the result of the operation - // overflowed the smi range). 
- switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: { - ASSERT(use_fp_on_smis.is_linked()); - __ bind(&use_fp_on_smis); - if (op_ == Token::DIV) { - __ movq(rdx, rax); - __ movq(rax, rbx); - } - // left is rdx, right is rax. - __ AllocateHeapNumber(rbx, rcx, slow); - FloatingPointHelper::LoadSSE2SmiOperands(masm); - switch (op_) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0); - __ movq(rax, rbx); - GenerateReturn(masm); - } - default: - break; - } - - // 6. Non-smi operands, fall out to the non-smi code with the operands in - // rdx and rax. - Comment done_comment(masm, "-- Enter non-smi code"); - __ bind(¬_smis); - - switch (op_) { - case Token::DIV: - case Token::MOD: - // Operands are in rax, rbx at this point. - __ movq(rdx, rax); - __ movq(rax, rbx); - break; - - case Token::BIT_OR: - // Right operand is saved in rcx and rax was destroyed by the smi - // operation. - __ movq(rax, rcx); - break; - - default: - break; - } -} - - -void GenericBinaryOpStub::Generate(MacroAssembler* masm) { - Label call_runtime; - - if (ShouldGenerateSmiCode()) { - GenerateSmiCode(masm, &call_runtime); - } else if (op_ != Token::MOD) { - if (!HasArgsInRegisters()) { - GenerateLoadArguments(masm); - } - } - // Floating point case. - if (ShouldGenerateFPCode()) { - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: { - if (runtime_operands_type_ == BinaryOpIC::DEFAULT && - HasSmiCodeInStub()) { - // Execution reaches this point when the first non-smi argument occurs - // (and only if smi code is generated). This is the right moment to - // patch to HEAP_NUMBERS state. The transition is attempted only for - // the four basic operations. The stub stays in the DEFAULT state - // forever for all other operations (also if smi code is skipped). - GenerateTypeTransition(masm); - break; - } - - Label not_floats; - // rax: y - // rdx: x - if (static_operands_type_.IsNumber()) { - if (FLAG_debug_code) { - // Assert at runtime that inputs are only numbers. - __ AbortIfNotNumber(rdx); - __ AbortIfNotNumber(rax); - } - FloatingPointHelper::LoadSSE2NumberOperands(masm); - } else { - FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime); - } - - switch (op_) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - // Allocate a heap number, if needed. - Label skip_allocation; - OverwriteMode mode = mode_; - if (HasArgsReversed()) { - if (mode == OVERWRITE_RIGHT) { - mode = OVERWRITE_LEFT; - } else if (mode == OVERWRITE_LEFT) { - mode = OVERWRITE_RIGHT; - } - } - switch (mode) { - case OVERWRITE_LEFT: - __ JumpIfNotSmi(rdx, &skip_allocation); - __ AllocateHeapNumber(rbx, rcx, &call_runtime); - __ movq(rdx, rbx); - __ bind(&skip_allocation); - __ movq(rax, rdx); - break; - case OVERWRITE_RIGHT: - // If the argument in rax is already an object, we skip the - // allocation of a heap number. - __ JumpIfNotSmi(rax, &skip_allocation); - // Fall through! - case NO_OVERWRITE: - // Allocate a heap number for the result. Keep rax and rdx intact - // for the possible runtime call. 
- __ AllocateHeapNumber(rbx, rcx, &call_runtime); - __ movq(rax, rbx); - __ bind(&skip_allocation); - break; - default: UNREACHABLE(); - } - __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); - GenerateReturn(masm); - __ bind(¬_floats); - if (runtime_operands_type_ == BinaryOpIC::DEFAULT && - !HasSmiCodeInStub()) { - // Execution reaches this point when the first non-number argument - // occurs (and only if smi code is skipped from the stub, otherwise - // the patching has already been done earlier in this case branch). - // A perfect moment to try patching to STRINGS for ADD operation. - if (op_ == Token::ADD) { - GenerateTypeTransition(masm); - } - } - break; - } - case Token::MOD: { - // For MOD we go directly to runtime in the non-smi case. - break; - } - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: { - Label skip_allocation, non_smi_shr_result; - Register heap_number_map = r9; - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - if (static_operands_type_.IsNumber()) { - if (FLAG_debug_code) { - // Assert at runtime that inputs are only numbers. - __ AbortIfNotNumber(rdx); - __ AbortIfNotNumber(rax); - } - FloatingPointHelper::LoadNumbersAsIntegers(masm); - } else { - FloatingPointHelper::LoadAsIntegers(masm, - &call_runtime, - heap_number_map); - } - switch (op_) { - case Token::BIT_OR: __ orl(rax, rcx); break; - case Token::BIT_AND: __ andl(rax, rcx); break; - case Token::BIT_XOR: __ xorl(rax, rcx); break; - case Token::SAR: __ sarl_cl(rax); break; - case Token::SHL: __ shll_cl(rax); break; - case Token::SHR: { - __ shrl_cl(rax); - // Check if result is negative. This can only happen for a shift - // by zero. - __ testl(rax, rax); - __ j(negative, &non_smi_shr_result); - break; - } - default: UNREACHABLE(); - } - - STATIC_ASSERT(kSmiValueSize == 32); - // Tag smi result and return. - __ Integer32ToSmi(rax, rax); - GenerateReturn(masm); - - // All bit-ops except SHR return a signed int32 that can be - // returned immediately as a smi. - // We might need to allocate a HeapNumber if we shift a negative - // number right by zero (i.e., convert to UInt32). - if (op_ == Token::SHR) { - ASSERT(non_smi_shr_result.is_linked()); - __ bind(&non_smi_shr_result); - // Allocate a heap number if needed. - __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). - switch (mode_) { - case OVERWRITE_LEFT: - case OVERWRITE_RIGHT: - // If the operand was an object, we skip the - // allocation of a heap number. - __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ? - 1 * kPointerSize : 2 * kPointerSize)); - __ JumpIfNotSmi(rax, &skip_allocation); - // Fall through! - case NO_OVERWRITE: - // Allocate heap number in new space. - // Not using AllocateHeapNumber macro in order to reuse - // already loaded heap_number_map. - __ AllocateInNewSpace(HeapNumber::kSize, - rax, - rcx, - no_reg, - &call_runtime, - TAG_OBJECT); - // Set the map. - if (FLAG_debug_code) { - __ AbortIfNotRootValue(heap_number_map, - Heap::kHeapNumberMapRootIndex, - "HeapNumberMap register clobbered."); - } - __ movq(FieldOperand(rax, HeapObject::kMapOffset), - heap_number_map); - __ bind(&skip_allocation); - break; - default: UNREACHABLE(); - } - // Store the result in the HeapNumber and return. 
- __ cvtqsi2sd(xmm0, rbx); - __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); - GenerateReturn(masm); - } - - break; - } - default: UNREACHABLE(); break; - } - } - - // If all else fails, use the runtime system to get the correct - // result. If arguments was passed in registers now place them on the - // stack in the correct order below the return address. - __ bind(&call_runtime); - - if (HasArgsInRegisters()) { - GenerateRegisterArgsPush(masm); - } - - switch (op_) { - case Token::ADD: { - // Registers containing left and right operands respectively. - Register lhs, rhs; - - if (HasArgsReversed()) { - lhs = rax; - rhs = rdx; - } else { - lhs = rdx; - rhs = rax; - } - - // Test for string arguments before calling runtime. - Label not_strings, both_strings, not_string1, string1, string1_smi2; - - // If this stub has already generated FP-specific code then the arguments - // are already in rdx and rax. - if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) { - GenerateLoadArguments(masm); - } - - Condition is_smi; - is_smi = masm->CheckSmi(lhs); - __ j(is_smi, ¬_string1); - __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8); - __ j(above_equal, ¬_string1); - - // First argument is a a string, test second. - is_smi = masm->CheckSmi(rhs); - __ j(is_smi, &string1_smi2); - __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9); - __ j(above_equal, &string1); - - // First and second argument are strings. - StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); - __ TailCallStub(&string_add_stub); - - __ bind(&string1_smi2); - // First argument is a string, second is a smi. Try to lookup the number - // string for the smi in the number string cache. - NumberToStringStub::GenerateLookupNumberStringCache( - masm, rhs, rbx, rcx, r8, true, &string1); - - // Replace second argument on stack and tailcall string add stub to make - // the result. - __ movq(Operand(rsp, 1 * kPointerSize), rbx); - __ TailCallStub(&string_add_stub); - - // Only first argument is a string. - __ bind(&string1); - __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION); - - // First argument was not a string, test second. - __ bind(¬_string1); - is_smi = masm->CheckSmi(rhs); - __ j(is_smi, ¬_strings); - __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs); - __ j(above_equal, ¬_strings); - - // Only second argument is a string. - __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION); - - __ bind(¬_strings); - // Neither argument is a string. 
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); - break; - } - case Token::SUB: - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); - break; - case Token::MUL: - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); - break; - case Token::DIV: - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); - break; - case Token::MOD: - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); - break; - case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); - break; - case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); - break; - case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); - break; - case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); - break; - case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); - break; - case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); - break; - default: - UNREACHABLE(); - } -} - - -void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { - ASSERT(!HasArgsInRegisters()); - __ movq(rax, Operand(rsp, 1 * kPointerSize)); - __ movq(rdx, Operand(rsp, 2 * kPointerSize)); -} - - -void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { - // If arguments are not passed in registers remove them from the stack before - // returning. - if (!HasArgsInRegisters()) { - __ ret(2 * kPointerSize); // Remove both operands - } else { - __ ret(0); - } -} - - -void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { - ASSERT(HasArgsInRegisters()); - __ pop(rcx); - if (HasArgsReversed()) { - __ push(rax); - __ push(rdx); - } else { - __ push(rdx); - __ push(rax); - } - __ push(rcx); -} - - -void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { - Label get_result; - - // Ensure the operands are on the stack. - if (HasArgsInRegisters()) { - GenerateRegisterArgsPush(masm); - } - - // Left and right arguments are already on stack. - __ pop(rcx); // Save the return address. - - // Push this stub's key. - __ Push(Smi::FromInt(MinorKey())); - - // Although the operation and the type info are encoded into the key, - // the encoding is opaque, so push them too. - __ Push(Smi::FromInt(op_)); - - __ Push(Smi::FromInt(runtime_operands_type_)); - - __ push(rcx); // The return address. - - // Perform patching to an appropriate fast case and return the result. - __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), - 5, - 1); -} - - -Handle GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { - GenericBinaryOpStub stub(key, type_info); - return stub.GetCode(); -} - - -void TranscendentalCacheStub::Generate(MacroAssembler* masm) { - // Input on stack: - // rsp[8]: argument (should be number). - // rsp[0]: return address. - Label runtime_call; - Label runtime_call_clear_stack; - Label input_not_smi; - Label loaded; - // Test that rax is a number. - __ movq(rax, Operand(rsp, kPointerSize)); - __ JumpIfNotSmi(rax, &input_not_smi); - // Input is a smi. Untag and load it onto the FPU stack. - // Then load the bits of the double into rbx. - __ SmiToInteger32(rax, rax); - __ subq(rsp, Immediate(kPointerSize)); - __ cvtlsi2sd(xmm1, rax); - __ movsd(Operand(rsp, 0), xmm1); - __ movq(rbx, xmm1); - __ movq(rdx, xmm1); - __ fld_d(Operand(rsp, 0)); - __ addq(rsp, Immediate(kPointerSize)); - __ jmp(&loaded); - - __ bind(&input_not_smi); - // Check if input is a HeapNumber. - __ Move(rbx, Factory::heap_number_map()); - __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); - __ j(not_equal, &runtime_call); - // Input is a HeapNumber. 
Push it on the FPU stack and load its - // bits into rbx. - __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset)); - __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset)); - __ movq(rdx, rbx); - __ bind(&loaded); - // ST[0] == double value - // rbx = bits of double value. - // rdx = also bits of double value. - // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic): - // h = h0 = bits ^ (bits >> 32); - // h ^= h >> 16; - // h ^= h >> 8; - // h = h & (cacheSize - 1); - // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1) - __ sar(rdx, Immediate(32)); - __ xorl(rdx, rbx); - __ movl(rcx, rdx); - __ movl(rax, rdx); - __ movl(rdi, rdx); - __ sarl(rdx, Immediate(8)); - __ sarl(rcx, Immediate(16)); - __ sarl(rax, Immediate(24)); - __ xorl(rcx, rdx); - __ xorl(rax, rdi); - __ xorl(rcx, rax); - ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); - __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1)); - - // ST[0] == double value. - // rbx = bits of double value. - // rcx = TranscendentalCache::hash(double value). - __ movq(rax, ExternalReference::transcendental_cache_array_address()); - // rax points to cache array. - __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0]))); - // rax points to the cache for the type type_. - // If NULL, the cache hasn't been initialized yet, so go through runtime. - __ testq(rax, rax); - __ j(zero, &runtime_call_clear_stack); -#ifdef DEBUG - // Check that the layout of cache elements match expectations. - { // NOLINT - doesn't like a single brace on a line. - TranscendentalCache::Element test_elem[2]; - char* elem_start = reinterpret_cast(&test_elem[0]); - char* elem2_start = reinterpret_cast(&test_elem[1]); - char* elem_in0 = reinterpret_cast(&(test_elem[0].in[0])); - char* elem_in1 = reinterpret_cast(&(test_elem[0].in[1])); - char* elem_out = reinterpret_cast(&(test_elem[0].output)); - // Two uint_32's and a pointer per element. - CHECK_EQ(16, static_cast(elem2_start - elem_start)); - CHECK_EQ(0, static_cast(elem_in0 - elem_start)); - CHECK_EQ(kIntSize, static_cast(elem_in1 - elem_start)); - CHECK_EQ(2 * kIntSize, static_cast(elem_out - elem_start)); - } -#endif - // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16]. - __ addl(rcx, rcx); - __ lea(rcx, Operand(rax, rcx, times_8, 0)); - // Check if cache matches: Double value is stored in uint32_t[2] array. - Label cache_miss; - __ cmpq(rbx, Operand(rcx, 0)); - __ j(not_equal, &cache_miss); - // Cache hit! - __ movq(rax, Operand(rcx, 2 * kIntSize)); - __ fstp(0); // Clear FPU stack. - __ ret(kPointerSize); - - __ bind(&cache_miss); - // Update cache with new value. - Label nan_result; - GenerateOperation(masm, &nan_result); - __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack); - __ movq(Operand(rcx, 0), rbx); - __ movq(Operand(rcx, 2 * kIntSize), rax); - __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset)); - __ ret(kPointerSize); - - __ bind(&runtime_call_clear_stack); - __ fstp(0); - __ bind(&runtime_call); - __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); - - __ bind(&nan_result); - __ fstp(0); // Remove argument from FPU stack. - __ LoadRoot(rax, Heap::kNanValueRootIndex); - __ movq(Operand(rcx, 0), rbx); - __ movq(Operand(rcx, 2 * kIntSize), rax); - __ ret(kPointerSize); -} - - -Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { - switch (type_) { - // Add more cases when necessary. 
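The cache index computation described in the comments above folds the 64 bits of the input double into a small table index. A standalone C++ equivalent (the cache size of 512 is an assumption standing in for TranscendentalCache::kCacheSize; the code only relies on it being a power of two, per the ASSERT):

    #include <cstdint>
    #include <cstring>

    constexpr uint32_t kCacheSize = 512;  // assumed value; must be a power of two

    uint32_t TranscendentalCacheIndex(double input) {
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof(bits));  // raw IEEE-754 bits of the input
      uint32_t h = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
      h ^= h >> 16;                              // same mixing as the commented formula
      h ^= h >> 8;
      return h & (kCacheSize - 1);               // power-of-two mask
    }

Each cache element is 16 bytes (the two uint32 halves of the input plus a result pointer), which is why the stub doubles the index and then scales it by 8 to form the entry address.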
- case TranscendentalCache::SIN: return Runtime::kMath_sin; - case TranscendentalCache::COS: return Runtime::kMath_cos; - default: - UNIMPLEMENTED(); - return Runtime::kAbort; - } -} - - -void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm, - Label* on_nan_result) { - // Registers: - // rbx: Bits of input double. Must be preserved. - // rcx: Pointer to cache entry. Must be preserved. - // st(0): Input double - Label done; - ASSERT(type_ == TranscendentalCache::SIN || - type_ == TranscendentalCache::COS); - // More transcendental types can be added later. - - // Both fsin and fcos require arguments in the range +/-2^63 and - // return NaN for infinities and NaN. They can share all code except - // the actual fsin/fcos operation. - Label in_range; - // If argument is outside the range -2^63..2^63, fsin/cos doesn't - // work. We must reduce it to the appropriate range. - __ movq(rdi, rbx); - // Move exponent and sign bits to low bits. - __ shr(rdi, Immediate(HeapNumber::kMantissaBits)); - // Remove sign bit. - __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1)); - int supported_exponent_limit = (63 + HeapNumber::kExponentBias); - __ cmpl(rdi, Immediate(supported_exponent_limit)); - __ j(below, &in_range); - // Check for infinity and NaN. Both return NaN for sin. - __ cmpl(rdi, Immediate(0x7ff)); - __ j(equal, on_nan_result); - - // Use fpmod to restrict argument to the range +/-2*PI. - __ fldpi(); - __ fadd(0); - __ fld(1); - // FPU Stack: input, 2*pi, input. - { - Label no_exceptions; - __ fwait(); - __ fnstsw_ax(); - // Clear if Illegal Operand or Zero Division exceptions are set. - __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word. - __ j(zero, &no_exceptions); - __ fnclex(); - __ bind(&no_exceptions); - } - - // Compute st(0) % st(1) - { - Label partial_remainder_loop; - __ bind(&partial_remainder_loop); - __ fprem1(); - __ fwait(); - __ fnstsw_ax(); - __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word. - // If C2 is set, computation only has partial result. Loop to - // continue computation. - __ j(not_zero, &partial_remainder_loop); - } - // FPU Stack: input, 2*pi, input % 2*pi - __ fstp(2); - // FPU Stack: input % 2*pi, 2*pi, - __ fstp(0); - // FPU Stack: input % 2*pi - __ bind(&in_range); - switch (type_) { - case TranscendentalCache::SIN: - __ fsin(); - break; - case TranscendentalCache::COS: - __ fcos(); - break; - default: - UNREACHABLE(); - } - __ bind(&done); -} - - -// Get the integer part of a heap number. -// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx. -void IntegerConvert(MacroAssembler* masm, - Register result, - Register source) { - // Result may be rcx. If result and source are the same register, source will - // be overwritten. - ASSERT(!result.is(rdi) && !result.is(rbx)); - // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use - // cvttsd2si (32-bit version) directly. - Register double_exponent = rbx; - Register double_value = rdi; - Label done, exponent_63_plus; - // Get double and extract exponent. - __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset)); - // Clear result preemptively, in case we need to return zero. - __ xorl(result, result); - __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there. - // Double to remove sign bit, shift exponent down to least significant bits. - // and subtract bias to get the unshifted, unbiased exponent. 
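GenerateOperation above decides between using fsin/fcos directly, reducing the argument modulo 2*pi with fprem1, or producing NaN, purely from the biased exponent of the input. A sketch of that classification (standalone C++; 52, 1023 and 0x7ff are the standard IEEE-754 double constants behind HeapNumber::kMantissaBits, kExponentBias and the all-ones exponent):

    #include <cstdint>
    #include <cstring>

    enum class FsinFcosPath { kDirect, kReduceModulo2Pi, kNaNResult };

    FsinFcosPath ClassifyArgument(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      uint32_t biased_exponent = static_cast<uint32_t>(bits >> 52) & 0x7ff;
      const uint32_t kSupportedLimit = 63 + 1023;  // |x| < 2^63: fsin/fcos work directly
      if (biased_exponent < kSupportedLimit) return FsinFcosPath::kDirect;
      if (biased_exponent == 0x7ff) return FsinFcosPath::kNaNResult;  // infinity or NaN
      return FsinFcosPath::kReduceModulo2Pi;       // fprem1 against 2*pi, then fsin/fcos
    }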
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0)); - __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits)); - __ subl(double_exponent, Immediate(HeapNumber::kExponentBias)); - // Check whether the exponent is too big for a 63 bit unsigned integer. - __ cmpl(double_exponent, Immediate(63)); - __ j(above_equal, &exponent_63_plus); - // Handle exponent range 0..62. - __ cvttsd2siq(result, xmm0); - __ jmp(&done); - - __ bind(&exponent_63_plus); - // Exponent negative or 63+. - __ cmpl(double_exponent, Immediate(83)); - // If exponent negative or above 83, number contains no significant bits in - // the range 0..2^31, so result is zero, and rcx already holds zero. - __ j(above, &done); - - // Exponent in rage 63..83. - // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely - // the least significant exponent-52 bits. - - // Negate low bits of mantissa if value is negative. - __ addq(double_value, double_value); // Move sign bit to carry. - __ sbbl(result, result); // And convert carry to -1 in result register. - // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0. - __ addl(double_value, result); - // Do xor in opposite directions depending on where we want the result - // (depending on whether result is rcx or not). - - if (result.is(rcx)) { - __ xorl(double_value, result); - // Left shift mantissa by (exponent - mantissabits - 1) to save the - // bits that have positional values below 2^32 (the extra -1 comes from the - // doubling done above to move the sign bit into the carry flag). - __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1)); - __ shll_cl(double_value); - __ movl(result, double_value); - } else { - // As the then-branch, but move double-value to result before shifting. - __ xorl(result, double_value); - __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1)); - __ shll_cl(result); - } - - __ bind(&done); -} - - -// Input: rdx, rax are the left and right objects of a bit op. -// Output: rax, rcx are left and right integers for a bit op. -void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) { - // Check float operands. - Label done; - Label rax_is_smi; - Label rax_is_object; - Label rdx_is_object; - - __ JumpIfNotSmi(rdx, &rdx_is_object); - __ SmiToInteger32(rdx, rdx); - __ JumpIfSmi(rax, &rax_is_smi); - - __ bind(&rax_is_object); - IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx. - __ jmp(&done); - - __ bind(&rdx_is_object); - IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx. - __ JumpIfNotSmi(rax, &rax_is_object); - __ bind(&rax_is_smi); - __ SmiToInteger32(rcx, rax); - - __ bind(&done); - __ movl(rax, rdx); -} - - -// Input: rdx, rax are the left and right objects of a bit op. -// Output: rax, rcx are left and right integers for a bit op. -void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, - Label* conversion_failure, - Register heap_number_map) { - // Check float operands. - Label arg1_is_object, check_undefined_arg1; - Label arg2_is_object, check_undefined_arg2; - Label load_arg2, done; - - __ JumpIfNotSmi(rdx, &arg1_is_object); - __ SmiToInteger32(rdx, rdx); - __ jmp(&load_arg2); - - // If the argument is undefined it converts to zero (ECMA-262, section 9.5). 
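IntegerConvert above extracts the low 32 bits of the integer part of a double by splitting on the unbiased exponent. A standalone C++ rendering of the same case split (a sketch of the arithmetic, not the stub's exact register usage):

    #include <cstdint>
    #include <cstring>

    // Returns the low 32 bits of the integer part of |value|.
    uint32_t IntegerConvert(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;  // unbiased
      if (exponent < 0) return 0;        // |value| < 1, the integer part is zero
      if (exponent < 63) {
        // Same as cvttsd2siq: the whole integer part fits in 64 bits.
        int64_t truncated = static_cast<int64_t>(value);
        return static_cast<uint32_t>(truncated);
      }
      if (exponent > 83) return 0;       // no significant bits below 2^32 remain
      // Exponent 63..83: the bits below 2^32 come straight from the mantissa.
      uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
      uint32_t low = static_cast<uint32_t>(mantissa << (exponent - 52));
      if (bits >> 63) low = ~low + 1;    // two's-complement negate for negative inputs
      return low;
    }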
- __ bind(&check_undefined_arg1); - __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); - __ j(not_equal, conversion_failure); - __ movl(rdx, Immediate(0)); - __ jmp(&load_arg2); - - __ bind(&arg1_is_object); - __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); - __ j(not_equal, &check_undefined_arg1); - // Get the untagged integer version of the edx heap number in rcx. - IntegerConvert(masm, rdx, rdx); - - // Here rdx has the untagged integer, rax has a Smi or a heap number. - __ bind(&load_arg2); - // Test if arg2 is a Smi. - __ JumpIfNotSmi(rax, &arg2_is_object); - __ SmiToInteger32(rax, rax); - __ movl(rcx, rax); - __ jmp(&done); - - // If the argument is undefined it converts to zero (ECMA-262, section 9.5). - __ bind(&check_undefined_arg2); - __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); - __ j(not_equal, conversion_failure); - __ movl(rcx, Immediate(0)); - __ jmp(&done); - - __ bind(&arg2_is_object); - __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); - __ j(not_equal, &check_undefined_arg2); - // Get the untagged integer version of the rax heap number in rcx. - IntegerConvert(masm, rcx, rax); - __ bind(&done); - __ movl(rax, rdx); -} - - -void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { - __ SmiToInteger32(kScratchRegister, rdx); - __ cvtlsi2sd(xmm0, kScratchRegister); - __ SmiToInteger32(kScratchRegister, rax); - __ cvtlsi2sd(xmm1, kScratchRegister); -} - - -void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) { - Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done; - // Load operand in rdx into xmm0. - __ JumpIfSmi(rdx, &load_smi_rdx); - __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); - // Load operand in rax into xmm1. - __ JumpIfSmi(rax, &load_smi_rax); - __ bind(&load_nonsmi_rax); - __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); - __ jmp(&done); - - __ bind(&load_smi_rdx); - __ SmiToInteger32(kScratchRegister, rdx); - __ cvtlsi2sd(xmm0, kScratchRegister); - __ JumpIfNotSmi(rax, &load_nonsmi_rax); - - __ bind(&load_smi_rax); - __ SmiToInteger32(kScratchRegister, rax); - __ cvtlsi2sd(xmm1, kScratchRegister); - - __ bind(&done); -} - - -void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm, - Label* not_numbers) { - Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done; - // Load operand in rdx into xmm0, or branch to not_numbers. - __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex); - __ JumpIfSmi(rdx, &load_smi_rdx); - __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx); - __ j(not_equal, not_numbers); // Argument in rdx is not a number. - __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); - // Load operand in rax into xmm1, or branch to not_numbers. - __ JumpIfSmi(rax, &load_smi_rax); - - __ bind(&load_nonsmi_rax); - __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx); - __ j(not_equal, not_numbers); - __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); - __ jmp(&done); - - __ bind(&load_smi_rdx); - __ SmiToInteger32(kScratchRegister, rdx); - __ cvtlsi2sd(xmm0, kScratchRegister); - __ JumpIfNotSmi(rax, &load_nonsmi_rax); - - __ bind(&load_smi_rax); - __ SmiToInteger32(kScratchRegister, rax); - __ cvtlsi2sd(xmm1, kScratchRegister); - __ bind(&done); -} - - -void GenericUnaryOpStub::Generate(MacroAssembler* masm) { - Label slow, done; - - if (op_ == Token::SUB) { - // Check whether the value is a smi. 
- Label try_float; - __ JumpIfNotSmi(rax, &try_float); - - if (negative_zero_ == kIgnoreNegativeZero) { - __ SmiCompare(rax, Smi::FromInt(0)); - __ j(equal, &done); - } - - // Enter runtime system if the value of the smi is zero - // to make sure that we switch between 0 and -0. - // Also enter it if the value of the smi is Smi::kMinValue. - __ SmiNeg(rax, rax, &done); - - // Either zero or Smi::kMinValue, neither of which become a smi when - // negated. - if (negative_zero_ == kStrictNegativeZero) { - __ SmiCompare(rax, Smi::FromInt(0)); - __ j(not_equal, &slow); - __ Move(rax, Factory::minus_zero_value()); - __ jmp(&done); - } else { - __ jmp(&slow); - } - - // Try floating point case. - __ bind(&try_float); - __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); - __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex); - __ j(not_equal, &slow); - // Operand is a float, negate its value by flipping sign bit. - __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset)); - __ movq(kScratchRegister, Immediate(0x01)); - __ shl(kScratchRegister, Immediate(63)); - __ xor_(rdx, kScratchRegister); // Flip sign. - // rdx is value to store. - if (overwrite_ == UNARY_OVERWRITE) { - __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx); - } else { - __ AllocateHeapNumber(rcx, rbx, &slow); - // rcx: allocated 'empty' number - __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx); - __ movq(rax, rcx); - } - } else if (op_ == Token::BIT_NOT) { - // Check if the operand is a heap number. - __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); - __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex); - __ j(not_equal, &slow); - - // Convert the heap number in rax to an untagged integer in rcx. - IntegerConvert(masm, rax, rax); - - // Do the bitwise operation and smi tag the result. - __ notl(rax); - __ Integer32ToSmi(rax, rax); - } - - // Return from the stub. - __ bind(&done); - __ StubReturn(1); - - // Handle the slow case by jumping to the JavaScript builtin. - __ bind(&slow); - __ pop(rcx); // pop return address - __ push(rax); - __ push(rcx); // push return address - switch (op_) { - case Token::SUB: - __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); - break; - case Token::BIT_NOT: - __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); - break; - default: - UNREACHABLE(); - } -} - - -void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { - // The key is in rdx and the parameter count is in rax. - - // The displacement is used for skipping the frame pointer on the - // stack. It is the offset of the last parameter (if any) relative - // to the frame pointer. - static const int kDisplacement = 1 * kPointerSize; - - // Check that the key is a smi. - Label slow; - __ JumpIfNotSmi(rdx, &slow); - - // Check if the calling frame is an arguments adaptor frame. - Label adaptor; - __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset), - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - __ j(equal, &adaptor); - - // Check index against formal parameters count limit passed in - // through register rax. Use unsigned comparison to get negative - // check for free. - __ cmpq(rdx, rax); - __ j(above_equal, &slow); - - // Read the argument from the stack and return it. 
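Two details of the Token::SUB path above are easy to miss: a smi negation must bail out for 0 (to produce -0) and for the minimum smi value (whose negation is not a smi), and a heap number is negated simply by flipping the IEEE-754 sign bit. A standalone sketch (32-bit smi values assumed, per the kSmiValueSize == 32 assert earlier in this file):

    #include <cstdint>
    #include <cstring>

    bool SmiNegationStaysSmi(int32_t smi_value) {
      // 0 negates to -0 and INT32_MIN overflows; both take the heap-number
      // or runtime path instead of SmiNeg.
      return smi_value != 0 && smi_value != INT32_MIN;
    }

    double NegateByFlippingSignBit(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bits ^= uint64_t{1} << 63;         // the xor with (0x01 << 63) above
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result;
    }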
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2); - __ lea(rbx, Operand(rbp, index.reg, index.scale, 0)); - index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); - __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); - __ Ret(); - - // Arguments adaptor case: Check index against actual arguments - // limit found in the arguments adaptor frame. Use unsigned - // comparison to get negative check for free. - __ bind(&adaptor); - __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ cmpq(rdx, rcx); - __ j(above_equal, &slow); - - // Read the argument from the stack and return it. - index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2); - __ lea(rbx, Operand(rbx, index.reg, index.scale, 0)); - index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); - __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); - __ Ret(); - - // Slow-case: Handle non-smi or out-of-bounds access to arguments - // by calling the runtime system. - __ bind(&slow); - __ pop(rbx); // Return address. - __ push(rdx); - __ push(rbx); - __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); -} - - -void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { - // rsp[0] : return address - // rsp[8] : number of parameters - // rsp[16] : receiver displacement - // rsp[24] : function - - // The displacement is used for skipping the return address and the - // frame pointer on the stack. It is the offset of the last - // parameter (if any) relative to the frame pointer. - static const int kDisplacement = 2 * kPointerSize; - - // Check if the calling frame is an arguments adaptor frame. - Label adaptor_frame, try_allocate, runtime; - __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset), - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - __ j(equal, &adaptor_frame); - - // Get the length from the frame. - __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize)); - __ jmp(&try_allocate); - - // Patch the arguments.length and the parameters pointer. - __ bind(&adaptor_frame); - __ SmiToInteger32(rcx, - Operand(rdx, - ArgumentsAdaptorFrameConstants::kLengthOffset)); - // Space on stack must already hold a smi. - __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx); - // Do not clobber the length index for the indexing operation since - // it is used compute the size for allocation later. - __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement)); - __ movq(Operand(rsp, 2 * kPointerSize), rdx); - - // Try the new space allocation. Start out with computing the size of - // the arguments object and the elements array. - Label add_arguments_object; - __ bind(&try_allocate); - __ testl(rcx, rcx); - __ j(zero, &add_arguments_object); - __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize)); - __ bind(&add_arguments_object); - __ addl(rcx, Immediate(Heap::kArgumentsObjectSize)); - - // Do the allocation of both objects in one go. - __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT); - - // Get the arguments boilerplate from the current (global) context. - int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); - __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset)); - __ movq(rdi, Operand(rdi, offset)); - - // Copy the JS object part. 
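Both bounds checks in GenerateReadElement above rely on the trick called out in the comments: comparing the key unsigned against the length rejects negative keys without a separate test. In plain C++ terms:

    #include <cstdint>

    bool ArgumentIndexInBounds(int32_t key, uint32_t argument_count) {
      // A negative key reinterprets as a huge unsigned value, so the single
      // unsigned comparison covers both the negative and the too-large case.
      return static_cast<uint32_t>(key) < argument_count;
    }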
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize); - __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize)); - __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize)); - __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize)); - __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister); - __ movq(FieldOperand(rax, 1 * kPointerSize), rdx); - __ movq(FieldOperand(rax, 2 * kPointerSize), rbx); - - // Setup the callee in-object property. - ASSERT(Heap::arguments_callee_index == 0); - __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize)); - __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister); - - // Get the length (smi tagged) and set that as an in-object property too. - ASSERT(Heap::arguments_length_index == 1); - __ movq(rcx, Operand(rsp, 1 * kPointerSize)); - __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx); - - // If there are no actual arguments, we're done. - Label done; - __ SmiTest(rcx); - __ j(zero, &done); - - // Get the parameters pointer from the stack and untag the length. - __ movq(rdx, Operand(rsp, 2 * kPointerSize)); - - // Setup the elements pointer in the allocated arguments object and - // initialize the header in the elements fixed array. - __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize)); - __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi); - __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); - __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister); - __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); - __ SmiToInteger32(rcx, rcx); // Untag length for the loop below. - - // Copy the fixed array slots. - Label loop; - __ bind(&loop); - __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver. - __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister); - __ addq(rdi, Immediate(kPointerSize)); - __ subq(rdx, Immediate(kPointerSize)); - __ decl(rcx); - __ j(not_zero, &loop); - - // Return and remove the on-stack parameters. - __ bind(&done); - __ ret(3 * kPointerSize); - - // Do the runtime call to allocate the arguments object. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); -} - - -void RegExpExecStub::Generate(MacroAssembler* masm) { - // Just jump directly to runtime if native RegExp is not selected at compile - // time or if regexp entry in generated code is turned off runtime switch or - // at compilation. -#ifdef V8_INTERPRETED_REGEXP - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); -#else // V8_INTERPRETED_REGEXP - if (!FLAG_regexp_entry_native) { - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); - return; - } - - // Stack frame on entry. - // esp[0]: return address - // esp[8]: last_match_info (expected JSArray) - // esp[16]: previous index - // esp[24]: subject string - // esp[32]: JSRegExp object - - static const int kLastMatchInfoOffset = 1 * kPointerSize; - static const int kPreviousIndexOffset = 2 * kPointerSize; - static const int kSubjectOffset = 3 * kPointerSize; - static const int kJSRegExpOffset = 4 * kPointerSize; - - Label runtime; - - // Ensure that a RegExp stack is allocated. 
- ExternalReference address_of_regexp_stack_memory_address = - ExternalReference::address_of_regexp_stack_memory_address(); - ExternalReference address_of_regexp_stack_memory_size = - ExternalReference::address_of_regexp_stack_memory_size(); - __ movq(kScratchRegister, address_of_regexp_stack_memory_size); - __ movq(kScratchRegister, Operand(kScratchRegister, 0)); - __ testq(kScratchRegister, kScratchRegister); - __ j(zero, &runtime); - - - // Check that the first argument is a JSRegExp object. - __ movq(rax, Operand(rsp, kJSRegExpOffset)); - __ JumpIfSmi(rax, &runtime); - __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister); - __ j(not_equal, &runtime); - // Check that the RegExp has been compiled (data contains a fixed array). - __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset)); - if (FLAG_debug_code) { - Condition is_smi = masm->CheckSmi(rcx); - __ Check(NegateCondition(is_smi), - "Unexpected type for RegExp data, FixedArray expected"); - __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister); - __ Check(equal, "Unexpected type for RegExp data, FixedArray expected"); - } - - // rcx: RegExp data (FixedArray) - // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. - __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset)); - __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP)); - __ j(not_equal, &runtime); - - // rcx: RegExp data (FixedArray) - // Check that the number of captures fit in the static offsets vector buffer. - __ SmiToInteger32(rdx, - FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset)); - // Calculate number of capture registers (number_of_captures + 1) * 2. - __ leal(rdx, Operand(rdx, rdx, times_1, 2)); - // Check that the static offsets vector buffer is large enough. - __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize)); - __ j(above, &runtime); - - // rcx: RegExp data (FixedArray) - // rdx: Number of capture registers - // Check that the second argument is a string. - __ movq(rax, Operand(rsp, kSubjectOffset)); - __ JumpIfSmi(rax, &runtime); - Condition is_string = masm->IsObjectStringType(rax, rbx, rbx); - __ j(NegateCondition(is_string), &runtime); - - // rax: Subject string. - // rcx: RegExp data (FixedArray). - // rdx: Number of capture registers. - // Check that the third argument is a positive smi less than the string - // length. A negative value will be greater (unsigned comparison). - __ movq(rbx, Operand(rsp, kPreviousIndexOffset)); - __ JumpIfNotSmi(rbx, &runtime); - __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset)); - __ j(above_equal, &runtime); - - // rcx: RegExp data (FixedArray) - // rdx: Number of capture registers - // Check that the fourth object is a JSArray object. - __ movq(rax, Operand(rsp, kLastMatchInfoOffset)); - __ JumpIfSmi(rax, &runtime); - __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister); - __ j(not_equal, &runtime); - // Check that the JSArray is in fast case. - __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset)); - __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset)); - __ Cmp(rax, Factory::fixed_array_map()); - __ j(not_equal, &runtime); - // Check that the last match info has space for the capture registers and the - // additional information. Ensure no overflow in add. 
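The lea in the capture-count check above computes (number_of_captures + 1) * 2 registers in a single instruction and then verifies they fit in the static offsets vector. A sketch of that check (the vector size of 50 is an assumption standing in for OffsetsVector::kStaticOffsetsVectorSize):

    constexpr int kStaticOffsetsVectorSize = 50;  // assumed value

    bool CapturesFitInStaticVector(int number_of_captures) {
      // leal(rdx, Operand(rdx, rdx, times_1, 2)) computes rdx * 2 + 2.
      int capture_registers = (number_of_captures + 1) * 2;
      return capture_registers <= kStaticOffsetsVectorSize;
    }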
- STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset); - __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset)); - __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead)); - __ cmpl(rdx, rax); - __ j(greater, &runtime); - - // rcx: RegExp data (FixedArray) - // Check the representation and encoding of the subject string. - Label seq_ascii_string, seq_two_byte_string, check_code; - __ movq(rax, Operand(rsp, kSubjectOffset)); - __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); - __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset)); - // First check for flat two byte string. - __ andb(rbx, Immediate( - kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask)); - STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0); - __ j(zero, &seq_two_byte_string); - // Any other flat string must be a flat ascii string. - __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask)); - __ j(zero, &seq_ascii_string); - - // Check for flat cons string. - // A flat cons string is a cons string where the second part is the empty - // string. In that case the subject string is just the first part of the cons - // string. Also in this case the first part of the cons string is known to be - // a sequential string or an external string. - STATIC_ASSERT(kExternalStringTag !=0); - STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); - __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag)); - __ j(not_zero, &runtime); - // String is a cons string. - __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset)); - __ Cmp(rdx, Factory::empty_string()); - __ j(not_equal, &runtime); - __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset)); - __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); - // String is a cons string with empty second part. - // rax: first part of cons string. - // rbx: map of first part of cons string. - // Is first part a flat two byte string? - __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset), - Immediate(kStringRepresentationMask | kStringEncodingMask)); - STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0); - __ j(zero, &seq_two_byte_string); - // Any other flat string must be ascii. - __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset), - Immediate(kStringRepresentationMask)); - __ j(not_zero, &runtime); - - __ bind(&seq_ascii_string); - // rax: subject string (sequential ascii) - // rcx: RegExp data (FixedArray) - __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset)); - __ Set(rdi, 1); // Type is ascii. - __ jmp(&check_code); - - __ bind(&seq_two_byte_string); - // rax: subject string (flat two-byte) - // rcx: RegExp data (FixedArray) - __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset)); - __ Set(rdi, 0); // Type is two byte. - - __ bind(&check_code); - // Check that the irregexp code has been generated for the actual string - // encoding. If it has, the field contains a code object otherwise it contains - // the hole. - __ CmpObjectType(r11, CODE_TYPE, kScratchRegister); - __ j(not_equal, &runtime); - - // rax: subject string - // rdi: encoding of subject string (1 if ascii, 0 if two_byte); - // r11: code - // Load used arguments before starting to push arguments for call to native - // RegExp code to avoid handling changing stack height. - __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset)); - - // rax: subject string - // rbx: previous index - // rdi: encoding of subject string (1 if ascii 0 if two_byte); - // r11: code - // All checks done. 
Now push arguments for native regexp code. - __ IncrementCounter(&Counters::regexp_entry_native, 1); - - // rsi is caller save on Windows and used to pass parameter on Linux. - __ push(rsi); - - static const int kRegExpExecuteArguments = 7; - __ PrepareCallCFunction(kRegExpExecuteArguments); - int argument_slots_on_stack = - masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments); - - // Argument 7: Indicate that this is a direct call from JavaScript. - __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize), - Immediate(1)); - - // Argument 6: Start (high end) of backtracking stack memory area. - __ movq(kScratchRegister, address_of_regexp_stack_memory_address); - __ movq(r9, Operand(kScratchRegister, 0)); - __ movq(kScratchRegister, address_of_regexp_stack_memory_size); - __ addq(r9, Operand(kScratchRegister, 0)); - // Argument 6 passed in r9 on Linux and on the stack on Windows. -#ifdef _WIN64 - __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9); -#endif - - // Argument 5: static offsets vector buffer. - __ movq(r8, ExternalReference::address_of_static_offsets_vector()); - // Argument 5 passed in r8 on Linux and on the stack on Windows. -#ifdef _WIN64 - __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8); -#endif - - // First four arguments are passed in registers on both Linux and Windows. -#ifdef _WIN64 - Register arg4 = r9; - Register arg3 = r8; - Register arg2 = rdx; - Register arg1 = rcx; -#else - Register arg4 = rcx; - Register arg3 = rdx; - Register arg2 = rsi; - Register arg1 = rdi; -#endif - - // Keep track on aliasing between argX defined above and the registers used. - // rax: subject string - // rbx: previous index - // rdi: encoding of subject string (1 if ascii 0 if two_byte); - // r11: code - - // Argument 4: End of string data - // Argument 3: Start of string data - Label setup_two_byte, setup_rest; - __ testb(rdi, rdi); - __ j(zero, &setup_two_byte); - __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset)); - __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize)); - __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize)); - __ jmp(&setup_rest); - __ bind(&setup_two_byte); - __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset)); - __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize)); - __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize)); - - __ bind(&setup_rest); - // Argument 2: Previous index. - __ movq(arg2, rbx); - - // Argument 1: Subject string. - __ movq(arg1, rax); - - // Locate the code entry and call it. - __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ CallCFunction(r11, kRegExpExecuteArguments); - - // rsi is caller save, as it is used to pass parameter. - __ pop(rsi); - - // Check the result. - Label success; - __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS)); - __ j(equal, &success); - Label failure; - __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE)); - __ j(equal, &failure); - __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION)); - // If not exception it can only be retry. Handle that in the runtime system. - __ j(not_equal, &runtime); - // Result must now be exception. If there is no pending exception already a - // stack overflow (on the backtrack stack) was detected in RegExp code but - // haven't created the exception yet. Handle that in the runtime system. - // TODO(592): Rerunning the RegExp to get the stack overflow exception. 
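Arguments 3 and 4 of the native call above are the start and end of the character data to scan; both depend on the encoding flag in rdi, which selects a one- or two-byte character size. A simplified sketch of that address arithmetic (the flat string payload is abstracted into a plain pointer here, hiding the SeqAsciiString/SeqTwoByteString header offsets):

    #include <cstddef>
    #include <cstdint>

    struct FlatString {
      const uint8_t* data;   // first character of the sequential payload
      size_t length;         // length in characters
      bool is_ascii;         // one byte per character if true, two otherwise
    };

    void RegExpScanRange(const FlatString& subject, size_t previous_index,
                         const void** start, const void** end) {
      size_t char_size = subject.is_ascii ? 1 : 2;          // times_1 vs times_2
      *start = subject.data + previous_index * char_size;   // argument 3
      *end = subject.data + subject.length * char_size;     // argument 4
    }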
- ExternalReference pending_exception_address(Top::k_pending_exception_address); - __ movq(kScratchRegister, pending_exception_address); - __ Cmp(kScratchRegister, Factory::the_hole_value()); - __ j(equal, &runtime); - __ bind(&failure); - // For failure and exception return null. - __ Move(rax, Factory::null_value()); - __ ret(4 * kPointerSize); - - // Load RegExp data. - __ bind(&success); - __ movq(rax, Operand(rsp, kJSRegExpOffset)); - __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset)); - __ SmiToInteger32(rax, - FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset)); - // Calculate number of capture registers (number_of_captures + 1) * 2. - __ leal(rdx, Operand(rax, rax, times_1, 2)); - - // rdx: Number of capture registers - // Load last_match_info which is still known to be a fast case JSArray. - __ movq(rax, Operand(rsp, kLastMatchInfoOffset)); - __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset)); - - // rbx: last_match_info backing store (FixedArray) - // rdx: number of capture registers - // Store the capture count. - __ Integer32ToSmi(kScratchRegister, rdx); - __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset), - kScratchRegister); - // Store last subject and last input. - __ movq(rax, Operand(rsp, kSubjectOffset)); - __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax); - __ movq(rcx, rbx); - __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi); - __ movq(rax, Operand(rsp, kSubjectOffset)); - __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax); - __ movq(rcx, rbx); - __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi); - - // Get the static offsets vector filled by the native regexp code. - __ movq(rcx, ExternalReference::address_of_static_offsets_vector()); - - // rbx: last_match_info backing store (FixedArray) - // rcx: offsets vector - // rdx: number of capture registers - Label next_capture, done; - // Capture register counter starts from number of capture registers and - // counts down until wraping after zero. - __ bind(&next_capture); - __ subq(rdx, Immediate(1)); - __ j(negative, &done); - // Read the value from the static offsets vector buffer and make it a smi. - __ movl(rdi, Operand(rcx, rdx, times_int_size, 0)); - __ Integer32ToSmi(rdi, rdi, &runtime); - // Store the smi value in the last match info. - __ movq(FieldOperand(rbx, - rdx, - times_pointer_size, - RegExpImpl::kFirstCaptureOffset), - rdi); - __ jmp(&next_capture); - __ bind(&done); - - // Return last match info. - __ movq(rax, Operand(rsp, kLastMatchInfoOffset)); - __ ret(4 * kPointerSize); - - // Do the runtime call to execute the regexp. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); -#endif // V8_INTERPRETED_REGEXP -} - - -void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - bool object_is_smi, - Label* not_found) { - // Use of registers. Register result is used as a temporary. - Register number_string_cache = result; - Register mask = scratch1; - Register scratch = scratch2; - - // Load the number string cache. - __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); - - // Make the hash mask from the length of the number string cache. It - // contains two elements (number and string) for each cache entry. - __ SmiToInteger32( - mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); - __ shrl(mask, Immediate(1)); - __ subq(mask, Immediate(1)); // Make mask. 
- - // Calculate the entry in the number string cache. The hash value in the - // number string cache for smis is just the smi value, and the hash for - // doubles is the xor of the upper and lower words. See - // Heap::GetNumberStringCache. - Label is_smi; - Label load_result_from_cache; - if (!object_is_smi) { - __ JumpIfSmi(object, &is_smi); - __ CheckMap(object, Factory::heap_number_map(), not_found, true); - - STATIC_ASSERT(8 == kDoubleSize); - __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); - __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset)); - GenerateConvertHashCodeToIndex(masm, scratch, mask); - - Register index = scratch; - Register probe = mask; - __ movq(probe, - FieldOperand(number_string_cache, - index, - times_1, - FixedArray::kHeaderSize)); - __ JumpIfSmi(probe, not_found); - ASSERT(CpuFeatures::IsSupported(SSE2)); - CpuFeatures::Scope fscope(SSE2); - __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); - __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset)); - __ ucomisd(xmm0, xmm1); - __ j(parity_even, not_found); // Bail out if NaN is involved. - __ j(not_equal, not_found); // The cache did not contain this value. - __ jmp(&load_result_from_cache); - } - - __ bind(&is_smi); - __ SmiToInteger32(scratch, object); - GenerateConvertHashCodeToIndex(masm, scratch, mask); - - Register index = scratch; - // Check if the entry is the smi we are looking for. - __ cmpq(object, - FieldOperand(number_string_cache, - index, - times_1, - FixedArray::kHeaderSize)); - __ j(not_equal, not_found); - - // Get the result from the cache. - __ bind(&load_result_from_cache); - __ movq(result, - FieldOperand(number_string_cache, - index, - times_1, - FixedArray::kHeaderSize + kPointerSize)); - __ IncrementCounter(&Counters::number_to_string_native, 1); -} - - -void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm, - Register hash, - Register mask) { - __ and_(hash, mask); - // Each entry in string cache consists of two pointer sized fields, - // but times_twice_pointer_size (multiplication by 16) scale factor - // is not supported by addrmode on x64 platform. - // So we have to premultiply entry index before lookup. - __ shl(hash, Immediate(kPointerSizeLog2 + 1)); -} - - -void NumberToStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - __ movq(rbx, Operand(rsp, kPointerSize)); - - // Generate code to lookup number in the number string cache. - GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime); - __ ret(1 * kPointerSize); - - __ bind(&runtime); - // Handle number to string in the runtime system if not found in the cache. - __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); -} - - -static int NegativeComparisonResult(Condition cc) { - ASSERT(cc != equal); - ASSERT((cc == less) || (cc == less_equal) - || (cc == greater) || (cc == greater_equal)); - return (cc == greater || cc == greater_equal) ? LESS : GREATER; -} - - -void CompareStub::Generate(MacroAssembler* masm) { - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - - Label check_unequal_objects, done; - // The compare stub returns a positive, negative, or zero 64-bit integer - // value in rax, corresponding to result of comparing the two inputs. - // NOTICE! This code is only reached after a smi-fast-case check, so - // it is certain that at least one operand isn't a smi. - - // Two identical objects are equal unless they are both NaN or undefined. 
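GenerateLookupNumberStringCache and GenerateConvertHashCodeToIndex above compute the probe position as follows: the mask is half the backing FixedArray length minus one (each entry is a number/string pair), smis hash to their own value, doubles hash to the xor of their two 32-bit halves, and the entry index is doubled because each entry occupies two pointer-sized slots. A standalone sketch:

    #include <cstdint>
    #include <cstring>

    // Returns the FixedArray slot index of the probed entry (the matching
    // string, if any, lives in the following slot).
    uint32_t NumberStringCacheSlot(bool is_smi, int32_t smi_value, double value,
                                   uint32_t cache_array_length) {
      uint32_t mask = cache_array_length / 2 - 1;  // shrl by 1, then subtract 1
      uint32_t hash;
      if (is_smi) {
        hash = static_cast<uint32_t>(smi_value);
      } else {
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        hash = static_cast<uint32_t>(bits >> 32) ^ static_cast<uint32_t>(bits);
      }
      // Two slots per entry; the stub pre-multiplies the index because x64
      // addressing has no times_16 scale factor.
      return (hash & mask) * 2;
    }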
- { - Label not_identical; - __ cmpq(rax, rdx); - __ j(not_equal, ¬_identical); - - if (cc_ != equal) { - // Check for undefined. undefined OP undefined is false even though - // undefined == undefined. - Label check_for_nan; - __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); - __ j(not_equal, &check_for_nan); - __ Set(rax, NegativeComparisonResult(cc_)); - __ ret(0); - __ bind(&check_for_nan); - } - - // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), - // so we do the second best thing - test it ourselves. - // Note: if cc_ != equal, never_nan_nan_ is not used. - // We cannot set rax to EQUAL until just before return because - // rax must be unchanged on jump to not_identical. - - if (never_nan_nan_ && (cc_ == equal)) { - __ Set(rax, EQUAL); - __ ret(0); - } else { - Label heap_number; - // If it's not a heap number, then return equal for (in)equality operator. - __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), - Factory::heap_number_map()); - __ j(equal, &heap_number); - if (cc_ != equal) { - // Call runtime on identical JSObjects. Otherwise return equal. - __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx); - __ j(above_equal, ¬_identical); - } - __ Set(rax, EQUAL); - __ ret(0); - - __ bind(&heap_number); - // It is a heap number, so return equal if it's not NaN. - // For NaN, return 1 for every condition except greater and - // greater-equal. Return -1 for them, so the comparison yields - // false for all conditions except not-equal. - __ Set(rax, EQUAL); - __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); - __ ucomisd(xmm0, xmm0); - __ setcc(parity_even, rax); - // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs. - if (cc_ == greater_equal || cc_ == greater) { - __ neg(rax); - } - __ ret(0); - } - - __ bind(¬_identical); - } - - if (cc_ == equal) { // Both strict and non-strict. - Label slow; // Fallthrough label. - - // If we're doing a strict equality comparison, we don't have to do - // type conversion, so we generate code to do fast comparison for objects - // and oddballs. Non-smi numbers and strings still go through the usual - // slow-case code. - if (strict_) { - // If either is a Smi (we know that not both are), then they can only - // be equal if the other is a HeapNumber. If so, use the slow case. - { - Label not_smis; - __ SelectNonSmi(rbx, rax, rdx, ¬_smis); - - // Check if the non-smi operand is a heap number. - __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), - Factory::heap_number_map()); - // If heap number, handle it in the slow case. - __ j(equal, &slow); - // Return non-equal. ebx (the lower half of rbx) is not zero. - __ movq(rax, rbx); - __ ret(0); - - __ bind(¬_smis); - } - - // If either operand is a JSObject or an oddball value, then they are not - // equal since their pointers are different - // There is no test for undetectability in strict equality. - - // If the first object is a JS object, we have done pointer comparison. - STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); - Label first_non_object; - __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx); - __ j(below, &first_non_object); - // Return non-zero (eax (not rax) is not zero) - Label return_not_equal; - STATIC_ASSERT(kHeapObjectTag != 0); - __ bind(&return_not_equal); - __ ret(0); - - __ bind(&first_non_object); - // Check for oddballs: true, false, null, undefined. 
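The NaN handling in the identical-operands path above is worth spelling out: for x OP x with x a heap number, every relational operator and == must come out false when x is NaN, so the stub returns EQUAL for ordinary numbers and a deliberately wrong-signed value for NaN. In C++ terms:

    #include <cmath>

    // Result of comparing a heap number against itself: 0 (EQUAL) unless it
    // is NaN, in which case a value is returned that makes the pending
    // condition check fail (LESS for > and >=, GREATER otherwise).
    int CompareIdenticalHeapNumbers(double value, bool cc_is_greater_or_greater_equal) {
      if (!std::isnan(value)) return 0;                    // EQUAL
      return cc_is_greater_or_greater_equal ? -1 : 1;      // the neg(rax) above
    }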
- __ CmpInstanceType(rcx, ODDBALL_TYPE); - __ j(equal, &return_not_equal); - - __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx); - __ j(above_equal, &return_not_equal); - - // Check for oddballs: true, false, null, undefined. - __ CmpInstanceType(rcx, ODDBALL_TYPE); - __ j(equal, &return_not_equal); - - // Fall through to the general case. - } - __ bind(&slow); - } - - // Generate the number comparison code. - if (include_number_compare_) { - Label non_number_comparison; - Label unordered; - FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison); - __ xorl(rax, rax); - __ xorl(rcx, rcx); - __ ucomisd(xmm0, xmm1); - - // Don't base result on EFLAGS when a NaN is involved. - __ j(parity_even, &unordered); - // Return a result of -1, 0, or 1, based on EFLAGS. - __ setcc(above, rax); - __ setcc(below, rcx); - __ subq(rax, rcx); - __ ret(0); - - // If one of the numbers was NaN, then the result is always false. - // The cc is never not-equal. - __ bind(&unordered); - ASSERT(cc_ != not_equal); - if (cc_ == less || cc_ == less_equal) { - __ Set(rax, 1); - } else { - __ Set(rax, -1); - } - __ ret(0); - - // The number comparison code did not provide a valid result. - __ bind(&non_number_comparison); - } - - // Fast negative check for symbol-to-symbol equality. - Label check_for_strings; - if (cc_ == equal) { - BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister); - BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister); - - // We've already checked for object identity, so if both operands - // are symbols they aren't equal. Register eax (not rax) already holds a - // non-zero value, which indicates not equal, so just return. - __ ret(0); - } - - __ bind(&check_for_strings); - - __ JumpIfNotBothSequentialAsciiStrings( - rdx, rax, rcx, rbx, &check_unequal_objects); - - // Inline comparison of ascii strings. - StringCompareStub::GenerateCompareFlatAsciiStrings(masm, - rdx, - rax, - rcx, - rbx, - rdi, - r8); - -#ifdef DEBUG - __ Abort("Unexpected fall-through from string comparison"); -#endif - - __ bind(&check_unequal_objects); - if (cc_ == equal && !strict_) { - // Not strict equality. Objects are unequal if - // they are both JSObjects and not undetectable, - // and their pointers are different. - Label not_both_objects, return_unequal; - // At most one is a smi, so we can test for smi by adding the two. - // A smi plus a heap object has the low bit set, a heap object plus - // a heap object has the low bit clear. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagMask == 1); - __ lea(rcx, Operand(rax, rdx, times_1, 0)); - __ testb(rcx, Immediate(kSmiTagMask)); - __ j(not_zero, ¬_both_objects); - __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx); - __ j(below, ¬_both_objects); - __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx); - __ j(below, ¬_both_objects); - __ testb(FieldOperand(rbx, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - __ j(zero, &return_unequal); - __ testb(FieldOperand(rcx, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); - __ j(zero, &return_unequal); - // The objects are both undetectable, so they both compare as the value - // undefined, and are equal. - __ Set(rax, EQUAL); - __ bind(&return_unequal); - // Return non-equal by returning the non-zero object pointer in eax, - // or return equal if we fell through to here. - __ ret(0); - __ bind(¬_both_objects); - } - - // Push arguments below the return address to prepare jump to builtin. 
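The non-strict object comparison above uses the pointer-tagging trick spelled out in its comments: with kSmiTag == 0 and kSmiTagMask == 1, adding two tagged words that are not both smis leaves the low bit clear only when both are heap objects. As a one-liner:

    #include <cstdint>

    // Precondition (already established by the earlier smi fast case): the
    // two tagged words are not both smis.
    bool BothAreHeapObjects(uintptr_t lhs_tagged, uintptr_t rhs_tagged) {
      return ((lhs_tagged + rhs_tagged) & 1) == 0;  // smi tag 0 + heap tag 1 sets the bit
    }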
- __ pop(rcx); - __ push(rdx); - __ push(rax); - - // Figure out which native to call and setup the arguments. - Builtins::JavaScript builtin; - if (cc_ == equal) { - builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; - } else { - builtin = Builtins::COMPARE; - __ Push(Smi::FromInt(NegativeComparisonResult(cc_))); - } - - // Restore return address on the stack. - __ push(rcx); - - // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) - // tagged as a small integer. - __ InvokeBuiltin(builtin, JUMP_FUNCTION); -} - - -void CompareStub::BranchIfNonSymbol(MacroAssembler* masm, - Label* label, - Register object, - Register scratch) { - __ JumpIfSmi(object, label); - __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset)); - __ movzxbq(scratch, - FieldOperand(scratch, Map::kInstanceTypeOffset)); - // Ensure that no non-strings have the symbol bit set. - STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); - STATIC_ASSERT(kSymbolTag != 0); - __ testb(scratch, Immediate(kIsSymbolMask)); - __ j(zero, label); -} - - -void StackCheckStub::Generate(MacroAssembler* masm) { - // Because builtins always remove the receiver from the stack, we - // have to fake one to avoid underflowing the stack. The receiver - // must be inserted below the return address on the stack so we - // temporarily store that in a register. - __ pop(rax); - __ Push(Smi::FromInt(0)); - __ push(rax); - - // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kStackGuard, 1, 1); -} - - -void CallFunctionStub::Generate(MacroAssembler* masm) { - Label slow; - - // If the receiver might be a value (string, number or boolean) check for this - // and box it if it is. - if (ReceiverMightBeValue()) { - // Get the receiver from the stack. - // +1 ~ return address - Label receiver_is_value, receiver_is_js_object; - __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize)); - - // Check if receiver is a smi (which is a number value). - __ JumpIfSmi(rax, &receiver_is_value); - - // Check if the receiver is a valid JS object. - __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi); - __ j(above_equal, &receiver_is_js_object); - - // Call the runtime to box the value. - __ bind(&receiver_is_value); - __ EnterInternalFrame(); - __ push(rax); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ LeaveInternalFrame(); - __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax); - - __ bind(&receiver_is_js_object); - } - - // Get the function to call from the stack. - // +2 ~ receiver, return address - __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize)); - - // Check that the function really is a JavaScript function. - __ JumpIfSmi(rdi, &slow); - // Goto slow case if we do not have a function. - __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); - __ j(not_equal, &slow); - - // Fast-case: Just invoke the function. - ParameterCount actual(argc_); - __ InvokeFunction(rdi, actual, JUMP_FUNCTION); - - // Slow-case: Non-function called. - __ bind(&slow); - // CALL_NON_FUNCTION expects the non-function callee as receiver (instead - // of the original receiver from the call site). 
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi); - __ Set(rax, argc_); - __ Set(rbx, 0); - __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION); - Handle adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)); - __ Jump(adaptor, RelocInfo::CODE_TARGET); -} - - -void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { - // Check that stack should contain next handler, frame pointer, state and - // return address in that order. - STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize == - StackHandlerConstants::kStateOffset); - STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize == - StackHandlerConstants::kPCOffset); - - ExternalReference handler_address(Top::k_handler_address); - __ movq(kScratchRegister, handler_address); - __ movq(rsp, Operand(kScratchRegister, 0)); - // get next in chain - __ pop(rcx); - __ movq(Operand(kScratchRegister, 0), rcx); - __ pop(rbp); // pop frame pointer - __ pop(rdx); // remove state - - // Before returning we restore the context from the frame pointer if not NULL. - // The frame pointer is NULL in the exception handler of a JS entry frame. - __ xor_(rsi, rsi); // tentatively set context pointer to NULL - Label skip; - __ cmpq(rbp, Immediate(0)); - __ j(equal, &skip); - __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); - __ bind(&skip); - __ ret(0); -} - - -void ApiGetterEntryStub::Generate(MacroAssembler* masm) { - Label empty_result; - Label prologue; - Label promote_scheduled_exception; - __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, 0); - ASSERT_EQ(kArgc, 4); -#ifdef _WIN64 - // All the parameters should be set up by a caller. -#else - // Set 1st parameter register with property name. - __ movq(rsi, rdx); - // Second parameter register rdi should be set with pointer to AccessorInfo - // by a caller. -#endif - // Call the api function! - __ movq(rax, - reinterpret_cast(fun()->address()), - RelocInfo::RUNTIME_ENTRY); - __ call(rax); - // Check if the function scheduled an exception. - ExternalReference scheduled_exception_address = - ExternalReference::scheduled_exception_address(); - __ movq(rsi, scheduled_exception_address); - __ Cmp(Operand(rsi, 0), Factory::the_hole_value()); - __ j(not_equal, &promote_scheduled_exception); -#ifdef _WIN64 - // rax keeps a pointer to v8::Handle, unpack it. - __ movq(rax, Operand(rax, 0)); -#endif - // Check if the result handle holds 0. - __ testq(rax, rax); - __ j(zero, &empty_result); - // It was non-zero. Dereference to get the result value. - __ movq(rax, Operand(rax, 0)); - __ bind(&prologue); - __ LeaveExitFrame(ExitFrame::MODE_NORMAL); - __ ret(0); - __ bind(&promote_scheduled_exception); - __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1); - __ bind(&empty_result); - // It was zero; the result is undefined. - __ Move(rax, Factory::undefined_value()); - __ jmp(&prologue); -} - - -void CEntryStub::GenerateCore(MacroAssembler* masm, - Label* throw_normal_exception, - Label* throw_termination_exception, - Label* throw_out_of_memory_exception, - bool do_gc, - bool always_allocate_scope, - int /* alignment_skew */) { - // rax: result parameter for PerformGC, if any. - // rbx: pointer to C function (C callee-saved). - // rbp: frame pointer (restored after C call). - // rsp: stack pointer (restored after C call). - // r14: number of arguments including receiver (C callee-saved). - // r12: pointer to the first argument (C callee-saved). - // This pointer is reused in LeaveExitFrame(), so it is stored in a - // callee-saved register. 
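// In C++ terms the call emitted by this stub amounts roughly to the sketch
// below (the typedef and names are illustrative; only the argc/argv registers
// described above are taken from the code):
//
//   typedef Object* (*BuiltinFunction)(int argc, Object** argv);
//   Object* result = target(argc_in_r14, argv_in_r12);
//
// On AMD64 the pair is passed in the first two argument registers; under the
// Win64 ABI it is spilled to the stack and passed by pointer, as set up below.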
- - // Simple results returned in rax (both AMD64 and Win64 calling conventions). - // Complex results must be written to address passed as first argument. - // AMD64 calling convention: a struct of two pointers in rax+rdx - - // Check stack alignment. - if (FLAG_debug_code) { - __ CheckStackAlignment(); - } - - if (do_gc) { - // Pass failure code returned from last attempt as first argument to - // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the - // stack is known to be aligned. This function takes one argument which is - // passed in register. -#ifdef _WIN64 - __ movq(rcx, rax); -#else // _WIN64 - __ movq(rdi, rax); -#endif - __ movq(kScratchRegister, - FUNCTION_ADDR(Runtime::PerformGC), - RelocInfo::RUNTIME_ENTRY); - __ call(kScratchRegister); - } - - ExternalReference scope_depth = - ExternalReference::heap_always_allocate_scope_depth(); - if (always_allocate_scope) { - __ movq(kScratchRegister, scope_depth); - __ incl(Operand(kScratchRegister, 0)); - } - - // Call C function. -#ifdef _WIN64 - // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9 - // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots. - __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc. - __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv. - if (result_size_ < 2) { - // Pass a pointer to the Arguments object as the first argument. - // Return result in single register (rax). - __ lea(rcx, Operand(rsp, 4 * kPointerSize)); - } else { - ASSERT_EQ(2, result_size_); - // Pass a pointer to the result location as the first argument. - __ lea(rcx, Operand(rsp, 6 * kPointerSize)); - // Pass a pointer to the Arguments object as the second argument. - __ lea(rdx, Operand(rsp, 4 * kPointerSize)); - } - -#else // _WIN64 - // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9. - __ movq(rdi, r14); // argc. - __ movq(rsi, r12); // argv. -#endif - __ call(rbx); - // Result is in rax - do not destroy this register! - - if (always_allocate_scope) { - __ movq(kScratchRegister, scope_depth); - __ decl(Operand(kScratchRegister, 0)); - } - - // Check for failure result. - Label failure_returned; - STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); -#ifdef _WIN64 - // If return value is on the stack, pop it to registers. - if (result_size_ > 1) { - ASSERT_EQ(2, result_size_); - // Read result values stored on stack. Result is stored - // above the four argument mirror slots and the two - // Arguments object slots. - __ movq(rax, Operand(rsp, 6 * kPointerSize)); - __ movq(rdx, Operand(rsp, 7 * kPointerSize)); - } -#endif - __ lea(rcx, Operand(rax, 1)); - // Lower 2 bits of rcx are 0 iff rax has failure tag. - __ testl(rcx, Immediate(kFailureTagMask)); - __ j(zero, &failure_returned); - - // Exit the JavaScript to C++ exit frame. - __ LeaveExitFrame(mode_, result_size_); - __ ret(0); - - // Handling of failure. - __ bind(&failure_returned); - - Label retry; - // If the returned exception is RETRY_AFTER_GC continue at retry label - STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); - __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); - __ j(zero, &retry); - - // Special handling of out of memory exceptions. - __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE); - __ cmpq(rax, kScratchRegister); - __ j(equal, throw_out_of_memory_exception); - - // Retrieve the pending exception and clear the variable. 
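// The failure check above relies on the layout asserted by the STATIC_ASSERT:
// a failure object has both low tag bits set, so adding one clears them. A
// minimal C++ sketch of the same test (helper name is illustrative):
static inline bool HasFailureTag(intptr_t value) {
  // (value + 1) has its two low tag bits clear exactly when value carried
  // the failure tag.
  return ((value + 1) & kFailureTagMask) == 0;
}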
- ExternalReference pending_exception_address(Top::k_pending_exception_address); - __ movq(kScratchRegister, pending_exception_address); - __ movq(rax, Operand(kScratchRegister, 0)); - __ movq(rdx, ExternalReference::the_hole_value_location()); - __ movq(rdx, Operand(rdx, 0)); - __ movq(Operand(kScratchRegister, 0), rdx); - - // Special handling of termination exceptions which are uncatchable - // by javascript code. - __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex); - __ j(equal, throw_termination_exception); - - // Handle normal exception. - __ jmp(throw_normal_exception); - - // Retry. - __ bind(&retry); -} - - -void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, - UncatchableExceptionType type) { - // Fetch top stack handler. - ExternalReference handler_address(Top::k_handler_address); - __ movq(kScratchRegister, handler_address); - __ movq(rsp, Operand(kScratchRegister, 0)); - - // Unwind the handlers until the ENTRY handler is found. - Label loop, done; - __ bind(&loop); - // Load the type of the current stack handler. - const int kStateOffset = StackHandlerConstants::kStateOffset; - __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY)); - __ j(equal, &done); - // Fetch the next handler in the list. - const int kNextOffset = StackHandlerConstants::kNextOffset; - __ movq(rsp, Operand(rsp, kNextOffset)); - __ jmp(&loop); - __ bind(&done); - - // Set the top handler address to next handler past the current ENTRY handler. - __ movq(kScratchRegister, handler_address); - __ pop(Operand(kScratchRegister, 0)); - - if (type == OUT_OF_MEMORY) { - // Set external caught exception to false. - ExternalReference external_caught(Top::k_external_caught_exception_address); - __ movq(rax, Immediate(false)); - __ store_rax(external_caught); - - // Set pending exception and rax to out of memory exception. - ExternalReference pending_exception(Top::k_pending_exception_address); - __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE); - __ store_rax(pending_exception); - } - - // Clear the context pointer. - __ xor_(rsi, rsi); - - // Restore registers from handler. - STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize == - StackHandlerConstants::kFPOffset); - __ pop(rbp); // FP - STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize == - StackHandlerConstants::kStateOffset); - __ pop(rdx); // State - - STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize == - StackHandlerConstants::kPCOffset); - __ ret(0); -} - - -void CEntryStub::Generate(MacroAssembler* masm) { - // rax: number of arguments including receiver - // rbx: pointer to C function (C callee-saved) - // rbp: frame pointer of calling JS frame (restored after C call) - // rsp: stack pointer (restored after C call) - // rsi: current context (restored) - - // NOTE: Invocations of builtins may return failure objects - // instead of a proper result. The builtin entry handles - // this by performing a garbage collection and retrying the - // builtin once. - - // Enter the exit frame that transitions from JavaScript to C++. - __ EnterExitFrame(mode_, result_size_); - - // rax: Holds the context at this point, but should not be used. - // On entry to code generated by GenerateCore, it must hold - // a failure result if the collect_garbage argument to GenerateCore - // is true. This failure result can be the result of code - // generated by a previous call to GenerateCore. The value - // of rax is then passed to Runtime::PerformGC. 
- // rbx: pointer to builtin function (C callee-saved). - // rbp: frame pointer of exit frame (restored after C call). - // rsp: stack pointer (restored after C call). - // r14: number of arguments including receiver (C callee-saved). - // r12: argv pointer (C callee-saved). - - Label throw_normal_exception; - Label throw_termination_exception; - Label throw_out_of_memory_exception; - - // Call into the runtime system. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - false, - false); - - // Do space-specific GC and retry runtime call. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - true, - false); - - // Do full GC and retry runtime call one final time. - Failure* failure = Failure::InternalError(); - __ movq(rax, failure, RelocInfo::NONE); - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - true, - true); - - __ bind(&throw_out_of_memory_exception); - GenerateThrowUncatchable(masm, OUT_OF_MEMORY); - - __ bind(&throw_termination_exception); - GenerateThrowUncatchable(masm, TERMINATION); - - __ bind(&throw_normal_exception); - GenerateThrowTOS(masm); -} - - -void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { - Label invoke, exit; -#ifdef ENABLE_LOGGING_AND_PROFILING - Label not_outermost_js, not_outermost_js_2; -#endif - - // Setup frame. - __ push(rbp); - __ movq(rbp, rsp); - - // Push the stack frame type marker twice. - int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; - // Scratch register is neither callee-save, nor an argument register on any - // platform. It's free to use at this point. - // Cannot use smi-register for loading yet. - __ movq(kScratchRegister, - reinterpret_cast(Smi::FromInt(marker)), - RelocInfo::NONE); - __ push(kScratchRegister); // context slot - __ push(kScratchRegister); // function slot - // Save callee-saved registers (X64/Win64 calling conventions). - __ push(r12); - __ push(r13); - __ push(r14); - __ push(r15); -#ifdef _WIN64 - __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI. - __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI. -#endif - __ push(rbx); - // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low low 64 bits are - // callee save as well. - - // Save copies of the top frame descriptor on the stack. - ExternalReference c_entry_fp(Top::k_c_entry_fp_address); - __ load_rax(c_entry_fp); - __ push(rax); - - // Set up the roots and smi constant registers. - // Needs to be done before any further smi loads. - ExternalReference roots_address = ExternalReference::roots_address(); - __ movq(kRootRegister, roots_address); - __ InitializeSmiConstantRegister(); - -#ifdef ENABLE_LOGGING_AND_PROFILING - // If this is the outermost JS call, set js_entry_sp value. - ExternalReference js_entry_sp(Top::k_js_entry_sp_address); - __ load_rax(js_entry_sp); - __ testq(rax, rax); - __ j(not_zero, ¬_outermost_js); - __ movq(rax, rbp); - __ store_rax(js_entry_sp); - __ bind(¬_outermost_js); -#endif - - // Call a faked try-block that does the invoke. - __ call(&invoke); - - // Caught exception: Store result (exception) in the pending - // exception field in the JSEnv and return a failure sentinel. 
- ExternalReference pending_exception(Top::k_pending_exception_address); - __ store_rax(pending_exception); - __ movq(rax, Failure::Exception(), RelocInfo::NONE); - __ jmp(&exit); - - // Invoke: Link this frame into the handler chain. - __ bind(&invoke); - __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); - - // Clear any pending exceptions. - __ load_rax(ExternalReference::the_hole_value_location()); - __ store_rax(pending_exception); - - // Fake a receiver (NULL). - __ push(Immediate(0)); // receiver - - // Invoke the function by calling through JS entry trampoline - // builtin and pop the faked function when we return. We load the address - // from an external reference instead of inlining the call target address - // directly in the code, because the builtin stubs may not have been - // generated yet at the time this code is generated. - if (is_construct) { - ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline); - __ load_rax(construct_entry); - } else { - ExternalReference entry(Builtins::JSEntryTrampoline); - __ load_rax(entry); - } - __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize)); - __ call(kScratchRegister); - - // Unlink this frame from the handler chain. - __ movq(kScratchRegister, ExternalReference(Top::k_handler_address)); - __ pop(Operand(kScratchRegister, 0)); - // Pop next_sp. - __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize)); - -#ifdef ENABLE_LOGGING_AND_PROFILING - // If current EBP value is the same as js_entry_sp value, it means that - // the current function is the outermost. - __ movq(kScratchRegister, js_entry_sp); - __ cmpq(rbp, Operand(kScratchRegister, 0)); - __ j(not_equal, ¬_outermost_js_2); - __ movq(Operand(kScratchRegister, 0), Immediate(0)); - __ bind(¬_outermost_js_2); -#endif - - // Restore the top frame descriptor from the stack. - __ bind(&exit); - __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address)); - __ pop(Operand(kScratchRegister, 0)); - - // Restore callee-saved registers (X64 conventions). - __ pop(rbx); -#ifdef _WIN64 - // Callee save on in Win64 ABI, arguments/volatile in AMD64 ABI. - __ pop(rsi); - __ pop(rdi); -#endif - __ pop(r15); - __ pop(r14); - __ pop(r13); - __ pop(r12); - __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers - - // Restore frame pointer and return. - __ pop(rbp); - __ ret(0); -} - - -void InstanceofStub::Generate(MacroAssembler* masm) { - // Implements "value instanceof function" operator. - // Expected input state: - // rsp[0] : return address - // rsp[1] : function pointer - // rsp[2] : value - // Returns a bitwise zero to indicate that the value - // is and instance of the function and anything else to - // indicate that the value is not an instance. - - // Get the object - go slow case if it's a smi. - Label slow; - __ movq(rax, Operand(rsp, 2 * kPointerSize)); - __ JumpIfSmi(rax, &slow); - - // Check that the left hand is a JS object. Leave its map in rax. - __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax); - __ j(below, &slow); - __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE); - __ j(above, &slow); - - // Get the prototype of the function. - __ movq(rdx, Operand(rsp, 1 * kPointerSize)); - // rdx is function, rax is map. - - // Look up the function and the map in the instanceof cache. 
- Label miss; - __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); - __ j(not_equal, &miss); - __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex); - __ j(not_equal, &miss); - __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); - __ ret(2 * kPointerSize); - - __ bind(&miss); - __ TryGetFunctionPrototype(rdx, rbx, &slow); - - // Check that the function prototype is a JS object. - __ JumpIfSmi(rbx, &slow); - __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister); - __ j(below, &slow); - __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE); - __ j(above, &slow); - - // Register mapping: - // rax is object map. - // rdx is function. - // rbx is function prototype. - __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); - __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex); - - __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset)); - - // Loop through the prototype chain looking for the function prototype. - Label loop, is_instance, is_not_instance; - __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex); - __ bind(&loop); - __ cmpq(rcx, rbx); - __ j(equal, &is_instance); - __ cmpq(rcx, kScratchRegister); - // The code at is_not_instance assumes that kScratchRegister contains a - // non-zero GCable value (the null object in this case). - __ j(equal, &is_not_instance); - __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset)); - __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset)); - __ jmp(&loop); - - __ bind(&is_instance); - __ xorl(rax, rax); - // Store bitwise zero in the cache. This is a Smi in GC terms. - STATIC_ASSERT(kSmiTag == 0); - __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); - __ ret(2 * kPointerSize); - - __ bind(&is_not_instance); - // We have to store a non-zero value in the cache. - __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex); - __ ret(2 * kPointerSize); - - // Slow-case: Go through the JavaScript implementation. - __ bind(&slow); - __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); -} - - -int CompareStub::MinorKey() { - // Encode the three parameters in a unique 16 bit value. To avoid duplicate - // stubs the never NaN NaN condition is only taken into account if the - // condition is equals. - ASSERT(static_cast(cc_) < (1 << 12)); - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - return ConditionField::encode(static_cast(cc_)) - | RegisterField::encode(false) // lhs_ and rhs_ are not used - | StrictField::encode(strict_) - | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false) - | IncludeNumberCompareField::encode(include_number_compare_); -} - - -// Unfortunately you have to run without snapshots to see most of these -// names in the profile since most compare stubs end up in the snapshot. 
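// For example, a strict equality stub with never_nan_nan_ set and number
// comparison omitted is named "CompareStub_EQ_STRICT_NO_NAN_NO_NUMBER" by
// GetName() below.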
-const char* CompareStub::GetName() { - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); - if (name_ == NULL) return "OOM"; - - const char* cc_name; - switch (cc_) { - case less: cc_name = "LT"; break; - case greater: cc_name = "GT"; break; - case less_equal: cc_name = "LE"; break; - case greater_equal: cc_name = "GE"; break; - case equal: cc_name = "EQ"; break; - case not_equal: cc_name = "NE"; break; - default: cc_name = "UnknownCondition"; break; - } - - const char* strict_name = ""; - if (strict_ && (cc_ == equal || cc_ == not_equal)) { - strict_name = "_STRICT"; - } - - const char* never_nan_nan_name = ""; - if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) { - never_nan_nan_name = "_NO_NAN"; - } - - const char* include_number_compare_name = ""; - if (!include_number_compare_) { - include_number_compare_name = "_NO_NUMBER"; - } - - OS::SNPrintF(Vector(name_, kMaxNameLength), - "CompareStub_%s%s%s%s", - cc_name, - strict_name, - never_nan_nan_name, - include_number_compare_name); - return name_; -} - - -// ------------------------------------------------------------------------- -// StringCharCodeAtGenerator - -void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { - Label flat_string; - Label ascii_string; - Label got_char_code; - - // If the receiver is a smi trigger the non-string case. - __ JumpIfSmi(object_, receiver_not_string_); - - // Fetch the instance type of the receiver into result register. - __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset)); - __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); - // If the receiver is not a string trigger the non-string case. - __ testb(result_, Immediate(kIsNotStringMask)); - __ j(not_zero, receiver_not_string_); - - // If the index is non-smi trigger the non-smi case. - __ JumpIfNotSmi(index_, &index_not_smi_); - - // Put smi-tagged index into scratch register. - __ movq(scratch_, index_); - __ bind(&got_smi_index_); - - // Check for index out of range. - __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset)); - __ j(above_equal, index_out_of_range_); - - // We need special handling for non-flat strings. - STATIC_ASSERT(kSeqStringTag == 0); - __ testb(result_, Immediate(kStringRepresentationMask)); - __ j(zero, &flat_string); - - // Handle non-flat strings. - __ testb(result_, Immediate(kIsConsStringMask)); - __ j(zero, &call_runtime_); - - // ConsString. - // Check whether the right hand side is the empty string (i.e. if - // this is really a flat string in a cons string). If that is not - // the case we would rather go to the runtime system now to flatten - // the string. - __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset), - Heap::kEmptyStringRootIndex); - __ j(not_equal, &call_runtime_); - // Get the first of the two strings and load its instance type. - __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset)); - __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset)); - __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); - // If the first cons component is also non-flat, then go to runtime. - STATIC_ASSERT(kSeqStringTag == 0); - __ testb(result_, Immediate(kStringRepresentationMask)); - __ j(not_zero, &call_runtime_); - - // Check for 1-byte or 2-byte string. 
- __ bind(&flat_string); - STATIC_ASSERT(kAsciiStringTag != 0); - __ testb(result_, Immediate(kStringEncodingMask)); - __ j(not_zero, &ascii_string); - - // 2-byte string. - // Load the 2-byte character code into the result register. - __ SmiToInteger32(scratch_, scratch_); - __ movzxwl(result_, FieldOperand(object_, - scratch_, times_2, - SeqTwoByteString::kHeaderSize)); - __ jmp(&got_char_code); - - // ASCII string. - // Load the byte into the result register. - __ bind(&ascii_string); - __ SmiToInteger32(scratch_, scratch_); - __ movzxbl(result_, FieldOperand(object_, - scratch_, times_1, - SeqAsciiString::kHeaderSize)); - __ bind(&got_char_code); - __ Integer32ToSmi(result_, result_); - __ bind(&exit_); -} - - -void StringCharCodeAtGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - __ Abort("Unexpected fallthrough to CharCodeAt slow case"); - - // Index is not a smi. - __ bind(&index_not_smi_); - // If index is a heap number, try converting it to an integer. - __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true); - call_helper.BeforeCall(masm); - __ push(object_); - __ push(index_); - __ push(index_); // Consumed by runtime conversion function. - if (index_flags_ == STRING_INDEX_IS_NUMBER) { - __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); - } else { - ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); - // NumberToSmi discards numbers that are not exact integers. - __ CallRuntime(Runtime::kNumberToSmi, 1); - } - if (!scratch_.is(rax)) { - // Save the conversion result before the pop instructions below - // have a chance to overwrite it. - __ movq(scratch_, rax); - } - __ pop(index_); - __ pop(object_); - // Reload the instance type. - __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset)); - __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); - call_helper.AfterCall(masm); - // If index is still not a smi, it must be out of range. - __ JumpIfNotSmi(scratch_, index_out_of_range_); - // Otherwise, return to the fast path. - __ jmp(&got_smi_index_); - - // Call runtime. We get here when the receiver is a string and the - // index is a number, but the code of getting the actual character - // is too complex (e.g., when the string needs to be flattened). - __ bind(&call_runtime_); - call_helper.BeforeCall(masm); - __ push(object_); - __ push(index_); - __ CallRuntime(Runtime::kStringCharCodeAt, 2); - if (!result_.is(rax)) { - __ movq(result_, rax); - } - call_helper.AfterCall(masm); - __ jmp(&exit_); - - __ Abort("Unexpected fallthrough from CharCodeAt slow case"); -} - - -// ------------------------------------------------------------------------- -// StringCharFromCodeGenerator - -void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { - // Fast case of Heap::LookupSingleCharacterStringFromCode. 
- __ JumpIfNotSmi(code_, &slow_case_); - __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode)); - __ j(above, &slow_case_); - - __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); - SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2); - __ movq(result_, FieldOperand(result_, index.reg, index.scale, - FixedArray::kHeaderSize)); - __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); - __ j(equal, &slow_case_); - __ bind(&exit_); -} - - -void StringCharFromCodeGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - __ Abort("Unexpected fallthrough to CharFromCode slow case"); - - __ bind(&slow_case_); - call_helper.BeforeCall(masm); - __ push(code_); - __ CallRuntime(Runtime::kCharFromCode, 1); - if (!result_.is(rax)) { - __ movq(result_, rax); - } - call_helper.AfterCall(masm); - __ jmp(&exit_); - - __ Abort("Unexpected fallthrough from CharFromCode slow case"); -} - - -// ------------------------------------------------------------------------- -// StringCharAtGenerator - -void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { - char_code_at_generator_.GenerateFast(masm); - char_from_code_generator_.GenerateFast(masm); -} - - -void StringCharAtGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - char_code_at_generator_.GenerateSlow(masm, call_helper); - char_from_code_generator_.GenerateSlow(masm, call_helper); -} - - -void StringAddStub::Generate(MacroAssembler* masm) { - Label string_add_runtime; - - // Load the two arguments. - __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument. - __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument. - - // Make sure that both arguments are strings if not known in advance. - if (string_check_) { - Condition is_smi; - is_smi = masm->CheckSmi(rax); - __ j(is_smi, &string_add_runtime); - __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8); - __ j(above_equal, &string_add_runtime); - - // First argument is a a string, test second. - is_smi = masm->CheckSmi(rdx); - __ j(is_smi, &string_add_runtime); - __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9); - __ j(above_equal, &string_add_runtime); - } - - // Both arguments are strings. - // rax: first string - // rdx: second string - // Check if either of the strings are empty. In that case return the other. - Label second_not_zero_length, both_not_zero_length; - __ movq(rcx, FieldOperand(rdx, String::kLengthOffset)); - __ SmiTest(rcx); - __ j(not_zero, &second_not_zero_length); - // Second string is empty, result is first string which is already in rax. - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - __ bind(&second_not_zero_length); - __ movq(rbx, FieldOperand(rax, String::kLengthOffset)); - __ SmiTest(rbx); - __ j(not_zero, &both_not_zero_length); - // First string is empty, result is second string which is in rdx. - __ movq(rax, rdx); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - - // Both strings are non-empty. - // rax: first string - // rbx: length of first string - // rcx: length of second string - // rdx: second string - // r8: map of first string if string check was performed above - // r9: map of second string if string check was performed above - Label string_add_flat_result, longer_than_two; - __ bind(&both_not_zero_length); - - // If arguments where known to be strings, maps are not loaded to r8 and r9 - // by the code above. 
- if (!string_check_) { - __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset)); - __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); - } - // Get the instance types of the two strings as they will be needed soon. - __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset)); - __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset)); - - // Look at the length of the result of adding the two strings. - STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); - __ SmiAdd(rbx, rbx, rcx, NULL); - // Use the runtime system when adding two one character strings, as it - // contains optimizations for this specific case using the symbol table. - __ SmiCompare(rbx, Smi::FromInt(2)); - __ j(not_equal, &longer_than_two); - - // Check that both strings are non-external ascii strings. - __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx, - &string_add_runtime); - - // Get the two characters forming the sub string. - __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize)); - __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize)); - - // Try to lookup two character string in symbol table. If it is not found - // just allocate a new one. - Label make_two_character_string, make_flat_ascii_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( - masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - - __ bind(&make_two_character_string); - __ Set(rbx, 2); - __ jmp(&make_flat_ascii_string); - - __ bind(&longer_than_two); - // Check if resulting string will be flat. - __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength)); - __ j(below, &string_add_flat_result); - // Handle exceptionally long strings in the runtime system. - STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); - __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength)); - __ j(above, &string_add_runtime); - - // If result is not supposed to be flat, allocate a cons string object. If - // both strings are ascii the result is an ascii cons string. - // rax: first string - // rbx: length of resulting flat string - // rdx: second string - // r8: instance type of first string - // r9: instance type of second string - Label non_ascii, allocated, ascii_data; - __ movl(rcx, r8); - __ and_(rcx, r9); - STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); - __ testl(rcx, Immediate(kAsciiStringTag)); - __ j(zero, &non_ascii); - __ bind(&ascii_data); - // Allocate an acsii cons string. - __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime); - __ bind(&allocated); - // Fill the fields of the cons string. - __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx); - __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset), - Immediate(String::kEmptyHashField)); - __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax); - __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx); - __ movq(rax, rcx); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - __ bind(&non_ascii); - // At least one of the strings is two-byte. Check whether it happens - // to contain only ascii characters. - // rcx: first instance type AND second instance type. - // r8: first instance type. - // r9: second instance type. 
- __ testb(rcx, Immediate(kAsciiDataHintMask)); - __ j(not_zero, &ascii_data); - __ xor_(r8, r9); - STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); - __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag)); - __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag)); - __ j(equal, &ascii_data); - // Allocate a two byte cons string. - __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime); - __ jmp(&allocated); - - // Handle creating a flat result. First check that both strings are not - // external strings. - // rax: first string - // rbx: length of resulting flat string as smi - // rdx: second string - // r8: instance type of first string - // r9: instance type of first string - __ bind(&string_add_flat_result); - __ SmiToInteger32(rbx, rbx); - __ movl(rcx, r8); - __ and_(rcx, Immediate(kStringRepresentationMask)); - __ cmpl(rcx, Immediate(kExternalStringTag)); - __ j(equal, &string_add_runtime); - __ movl(rcx, r9); - __ and_(rcx, Immediate(kStringRepresentationMask)); - __ cmpl(rcx, Immediate(kExternalStringTag)); - __ j(equal, &string_add_runtime); - // Now check if both strings are ascii strings. - // rax: first string - // rbx: length of resulting flat string - // rdx: second string - // r8: instance type of first string - // r9: instance type of second string - Label non_ascii_string_add_flat_result; - STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); - __ testl(r8, Immediate(kAsciiStringTag)); - __ j(zero, &non_ascii_string_add_flat_result); - __ testl(r9, Immediate(kAsciiStringTag)); - __ j(zero, &string_add_runtime); - - __ bind(&make_flat_ascii_string); - // Both strings are ascii strings. As they are short they are both flat. - __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime); - // rcx: result string - __ movq(rbx, rcx); - // Locate first character of result. - __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // Locate first character of first argument - __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset)); - __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // rax: first char of first argument - // rbx: result string - // rcx: first character of result - // rdx: second string - // rdi: length of first argument - StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true); - // Locate first character of second argument. - __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset)); - __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // rbx: result string - // rcx: next character of result - // rdx: first char of second argument - // rdi: length of second argument - StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true); - __ movq(rax, rbx); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - - // Handle creating a flat two byte result. - // rax: first string - known to be two byte - // rbx: length of resulting flat string - // rdx: second string - // r8: instance type of first string - // r9: instance type of first string - __ bind(&non_ascii_string_add_flat_result); - __ and_(r9, Immediate(kAsciiStringTag)); - __ j(not_zero, &string_add_runtime); - // Both strings are two byte strings. As they are short they are both - // flat. - __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime); - // rcx: result string - __ movq(rbx, rcx); - // Locate first character of result. 
- __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // Locate first character of first argument. - __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset)); - __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // rax: first char of first argument - // rbx: result string - // rcx: first character of result - // rdx: second argument - // rdi: length of first argument - StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false); - // Locate first character of second argument. - __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset)); - __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // rbx: result string - // rcx: next character of result - // rdx: first char of second argument - // rdi: length of second argument - StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false); - __ movq(rax, rbx); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - - // Just jump to runtime to add the two strings. - __ bind(&string_add_runtime); - __ TailCallRuntime(Runtime::kStringAdd, 2, 1); -} - - -void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, - Register dest, - Register src, - Register count, - bool ascii) { - Label loop; - __ bind(&loop); - // This loop just copies one character at a time, as it is only used for very - // short strings. - if (ascii) { - __ movb(kScratchRegister, Operand(src, 0)); - __ movb(Operand(dest, 0), kScratchRegister); - __ incq(src); - __ incq(dest); - } else { - __ movzxwl(kScratchRegister, Operand(src, 0)); - __ movw(Operand(dest, 0), kScratchRegister); - __ addq(src, Immediate(2)); - __ addq(dest, Immediate(2)); - } - __ decl(count); - __ j(not_zero, &loop); -} - - -void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, - Register dest, - Register src, - Register count, - bool ascii) { - // Copy characters using rep movs of doublewords. Align destination on 4 byte - // boundary before starting rep movs. Copy remaining characters after running - // rep movs. - // Count is positive int32, dest and src are character pointers. - ASSERT(dest.is(rdi)); // rep movs destination - ASSERT(src.is(rsi)); // rep movs source - ASSERT(count.is(rcx)); // rep movs count - - // Nothing to do for zero characters. - Label done; - __ testl(count, count); - __ j(zero, &done); - - // Make count the number of bytes to copy. - if (!ascii) { - STATIC_ASSERT(2 == sizeof(uc16)); - __ addl(count, count); - } - - // Don't enter the rep movs if there are less than 4 bytes to copy. - Label last_bytes; - __ testl(count, Immediate(~7)); - __ j(zero, &last_bytes); - - // Copy from edi to esi using rep movs instruction. - __ movl(kScratchRegister, count); - __ shr(count, Immediate(3)); // Number of doublewords to copy. - __ repmovsq(); - - // Find number of bytes left. - __ movl(count, kScratchRegister); - __ and_(count, Immediate(7)); - - // Check if there are more bytes to copy. - __ bind(&last_bytes); - __ testl(count, count); - __ j(zero, &done); - - // Copy remaining characters. 
- Label loop; - __ bind(&loop); - __ movb(kScratchRegister, Operand(src, 0)); - __ movb(Operand(dest, 0), kScratchRegister); - __ incq(src); - __ incq(dest); - __ decl(count); - __ j(not_zero, &loop); - - __ bind(&done); -} - -void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, - Register c1, - Register c2, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Label* not_found) { - // Register scratch3 is the general scratch register in this function. - Register scratch = scratch3; - - // Make sure that both characters are not digits as such strings has a - // different hash algorithm. Don't try to look for these in the symbol table. - Label not_array_index; - __ leal(scratch, Operand(c1, -'0')); - __ cmpl(scratch, Immediate(static_cast('9' - '0'))); - __ j(above, ¬_array_index); - __ leal(scratch, Operand(c2, -'0')); - __ cmpl(scratch, Immediate(static_cast('9' - '0'))); - __ j(below_equal, not_found); - - __ bind(¬_array_index); - // Calculate the two character string hash. - Register hash = scratch1; - GenerateHashInit(masm, hash, c1, scratch); - GenerateHashAddCharacter(masm, hash, c2, scratch); - GenerateHashGetHash(masm, hash, scratch); - - // Collect the two characters in a register. - Register chars = c1; - __ shl(c2, Immediate(kBitsPerByte)); - __ orl(chars, c2); - - // chars: two character string, char 1 in byte 0 and char 2 in byte 1. - // hash: hash of two character string. - - // Load the symbol table. - Register symbol_table = c2; - __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex); - - // Calculate capacity mask from the symbol table capacity. - Register mask = scratch2; - __ SmiToInteger32(mask, - FieldOperand(symbol_table, SymbolTable::kCapacityOffset)); - __ decl(mask); - - Register undefined = scratch4; - __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); - - // Registers - // chars: two character string, char 1 in byte 0 and char 2 in byte 1. - // hash: hash of two character string (32-bit int) - // symbol_table: symbol table - // mask: capacity mask (32-bit int) - // undefined: undefined value - // scratch: - - - // Perform a number of probes in the symbol table. - static const int kProbes = 4; - Label found_in_symbol_table; - Label next_probe[kProbes]; - for (int i = 0; i < kProbes; i++) { - // Calculate entry in symbol table. - __ movl(scratch, hash); - if (i > 0) { - __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i))); - } - __ andl(scratch, mask); - - // Load the entry from the symble table. - Register candidate = scratch; // Scratch register contains candidate. - STATIC_ASSERT(SymbolTable::kEntrySize == 1); - __ movq(candidate, - FieldOperand(symbol_table, - scratch, - times_pointer_size, - SymbolTable::kElementsStartOffset)); - - // If entry is undefined no string with this hash can be found. - __ cmpq(candidate, undefined); - __ j(equal, not_found); - - // If length is not 2 the string is not a candidate. - __ SmiCompare(FieldOperand(candidate, String::kLengthOffset), - Smi::FromInt(2)); - __ j(not_equal, &next_probe[i]); - - // We use kScratchRegister as a temporary register in assumption that - // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly - Register temp = kScratchRegister; - - // Check that the candidate is a non-external ascii string. 
- __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset)); - __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); - __ JumpIfInstanceTypeIsNotSequentialAscii( - temp, temp, &next_probe[i]); - - // Check if the two characters match. - __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize)); - __ andl(temp, Immediate(0x0000ffff)); - __ cmpl(chars, temp); - __ j(equal, &found_in_symbol_table); - __ bind(&next_probe[i]); - } - - // No matching 2 character string found by probing. - __ jmp(not_found); - - // Scratch register contains result when we fall through to here. - Register result = scratch; - __ bind(&found_in_symbol_table); - if (!result.is(rax)) { - __ movq(rax, result); - } -} - - -void StringHelper::GenerateHashInit(MacroAssembler* masm, - Register hash, - Register character, - Register scratch) { - // hash = character + (character << 10); - __ movl(hash, character); - __ shll(hash, Immediate(10)); - __ addl(hash, character); - // hash ^= hash >> 6; - __ movl(scratch, hash); - __ sarl(scratch, Immediate(6)); - __ xorl(hash, scratch); -} - - -void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, - Register hash, - Register character, - Register scratch) { - // hash += character; - __ addl(hash, character); - // hash += hash << 10; - __ movl(scratch, hash); - __ shll(scratch, Immediate(10)); - __ addl(hash, scratch); - // hash ^= hash >> 6; - __ movl(scratch, hash); - __ sarl(scratch, Immediate(6)); - __ xorl(hash, scratch); -} - - -void StringHelper::GenerateHashGetHash(MacroAssembler* masm, - Register hash, - Register scratch) { - // hash += hash << 3; - __ leal(hash, Operand(hash, hash, times_8, 0)); - // hash ^= hash >> 11; - __ movl(scratch, hash); - __ sarl(scratch, Immediate(11)); - __ xorl(hash, scratch); - // hash += hash << 15; - __ movl(scratch, hash); - __ shll(scratch, Immediate(15)); - __ addl(hash, scratch); - - // if (hash == 0) hash = 27; - Label hash_not_zero; - __ j(not_zero, &hash_not_zero); - __ movl(hash, Immediate(27)); - __ bind(&hash_not_zero); -} - -void SubStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - // Stack frame on entry. - // rsp[0]: return address - // rsp[8]: to - // rsp[16]: from - // rsp[24]: string - - const int kToOffset = 1 * kPointerSize; - const int kFromOffset = kToOffset + kPointerSize; - const int kStringOffset = kFromOffset + kPointerSize; - const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset; - - // Make sure first argument is a string. - __ movq(rax, Operand(rsp, kStringOffset)); - STATIC_ASSERT(kSmiTag == 0); - __ testl(rax, Immediate(kSmiTagMask)); - __ j(zero, &runtime); - Condition is_string = masm->IsObjectStringType(rax, rbx, rbx); - __ j(NegateCondition(is_string), &runtime); - - // rax: string - // rbx: instance type - // Calculate length of sub string using the smi values. - Label result_longer_than_two; - __ movq(rcx, Operand(rsp, kToOffset)); - __ movq(rdx, Operand(rsp, kFromOffset)); - __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime); - - __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen. - __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx); - Label return_rax; - __ j(equal, &return_rax); - // Special handling of sub-strings of length 1 and 2. One character strings - // are handled in the runtime system (looked up in the single character - // cache). Two character strings are looked for in the symbol cache. 
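// For reference, the hash computed by GenerateHashInit, GenerateHashAddCharacter
// and GenerateHashGetHash above corresponds to the following C++ (a sketch that
// follows the comments embedded in the stubs; the generated code works on
// 32-bit registers with arithmetic shifts, so treat this as illustrative
// rather than guaranteed bit-exact):
static uint32_t TwoCharacterHash(uint32_t c1, uint32_t c2) {
  uint32_t hash = c1 + (c1 << 10);  // GenerateHashInit
  hash ^= hash >> 6;
  hash += c2;                       // GenerateHashAddCharacter
  hash += hash << 10;
  hash ^= hash >> 6;
  hash += hash << 3;                // GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash != 0 ? hash : 27;     // if (hash == 0) hash = 27;
}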
- __ SmiToInteger32(rcx, rcx); - __ cmpl(rcx, Immediate(2)); - __ j(greater, &result_longer_than_two); - __ j(less, &runtime); - - // Sub string of length 2 requested. - // rax: string - // rbx: instance type - // rcx: sub string length (value is 2) - // rdx: from index (smi) - __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime); - - // Get the two characters forming the sub string. - __ SmiToInteger32(rdx, rdx); // From index is no longer smi. - __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize)); - __ movzxbq(rcx, - FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1)); - - // Try to lookup two character string in symbol table. - Label make_two_character_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( - masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string); - __ ret(3 * kPointerSize); - - __ bind(&make_two_character_string); - // Setup registers for allocating the two character string. - __ movq(rax, Operand(rsp, kStringOffset)); - __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); - __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset)); - __ Set(rcx, 2); - - __ bind(&result_longer_than_two); - - // rax: string - // rbx: instance type - // rcx: result string length - // Check for flat ascii string - Label non_ascii_flat; - __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat); - - // Allocate the result. - __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime); - - // rax: result string - // rcx: result string length - __ movq(rdx, rsi); // esi used by following code. - // Locate first character of result. - __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize)); - // Load string argument and locate character of sub string start. - __ movq(rsi, Operand(rsp, kStringOffset)); - __ movq(rbx, Operand(rsp, kFromOffset)); - { - SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1); - __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale, - SeqAsciiString::kHeaderSize - kHeapObjectTag)); - } - - // rax: result string - // rcx: result length - // rdx: original value of rsi - // rdi: first character of result - // rsi: character of sub string start - StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true); - __ movq(rsi, rdx); // Restore rsi. - __ IncrementCounter(&Counters::sub_string_native, 1); - __ ret(kArgumentsSize); - - __ bind(&non_ascii_flat); - // rax: string - // rbx: instance type & kStringRepresentationMask | kStringEncodingMask - // rcx: result string length - // Check for sequential two byte string - __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag)); - __ j(not_equal, &runtime); - - // Allocate the result. - __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime); - - // rax: result string - // rcx: result string length - __ movq(rdx, rsi); // esi used by following code. - // Locate first character of result. - __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize)); - // Load string argument and locate character of sub string start. 
- __ movq(rsi, Operand(rsp, kStringOffset)); - __ movq(rbx, Operand(rsp, kFromOffset)); - { - SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2); - __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale, - SeqAsciiString::kHeaderSize - kHeapObjectTag)); - } - - // rax: result string - // rcx: result length - // rdx: original value of rsi - // rdi: first character of result - // rsi: character of sub string start - StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false); - __ movq(rsi, rdx); // Restore esi. - - __ bind(&return_rax); - __ IncrementCounter(&Counters::sub_string_native, 1); - __ ret(kArgumentsSize); - - // Just jump to runtime to create the sub string. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kSubString, 3, 1); -} - - -void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, - Register left, - Register right, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4) { - // Ensure that you can always subtract a string length from a non-negative - // number (e.g. another length). - STATIC_ASSERT(String::kMaxLength < 0x7fffffff); - - // Find minimum length and length difference. - __ movq(scratch1, FieldOperand(left, String::kLengthOffset)); - __ movq(scratch4, scratch1); - __ SmiSub(scratch4, - scratch4, - FieldOperand(right, String::kLengthOffset), - NULL); - // Register scratch4 now holds left.length - right.length. - const Register length_difference = scratch4; - Label left_shorter; - __ j(less, &left_shorter); - // The right string isn't longer that the left one. - // Get the right string's length by subtracting the (non-negative) difference - // from the left string's length. - __ SmiSub(scratch1, scratch1, length_difference, NULL); - __ bind(&left_shorter); - // Register scratch1 now holds Min(left.length, right.length). - const Register min_length = scratch1; - - Label compare_lengths; - // If min-length is zero, go directly to comparing lengths. - __ SmiTest(min_length); - __ j(zero, &compare_lengths); - - __ SmiToInteger32(min_length, min_length); - - // Registers scratch2 and scratch3 are free. - Label result_not_equal; - Label loop; - { - // Check characters 0 .. min_length - 1 in a loop. - // Use scratch3 as loop index, min_length as limit and scratch2 - // for computation. - const Register index = scratch3; - __ movl(index, Immediate(0)); // Index into strings. - __ bind(&loop); - // Compare characters. - // TODO(lrn): Could we load more than one character at a time? - __ movb(scratch2, FieldOperand(left, - index, - times_1, - SeqAsciiString::kHeaderSize)); - // Increment index and use -1 modifier on next load to give - // the previous load extra time to complete. - __ addl(index, Immediate(1)); - __ cmpb(scratch2, FieldOperand(right, - index, - times_1, - SeqAsciiString::kHeaderSize - 1)); - __ j(not_equal, &result_not_equal); - __ cmpl(index, min_length); - __ j(not_equal, &loop); - } - // Completed loop without finding different characters. - // Compare lengths (precomputed). - __ bind(&compare_lengths); - __ SmiTest(length_difference); - __ j(not_zero, &result_not_equal); - - // Result is EQUAL. - __ Move(rax, Smi::FromInt(EQUAL)); - __ ret(0); - - Label result_greater; - __ bind(&result_not_equal); - // Unequal comparison of left to right, either character or length. - __ j(greater, &result_greater); - - // Result is LESS. - __ Move(rax, Smi::FromInt(LESS)); - __ ret(0); - - // Result is GREATER. 
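// The comparison above is ordinary lexicographic ordering; in plain C++ it
// would read roughly as follows (a sketch; LESS, EQUAL and GREATER are the
// -1/0/1 results the stub returns as smis):
static int CompareFlatAsciiSketch(const unsigned char* left, int left_length,
                                  const unsigned char* right, int right_length) {
  int min_length = left_length < right_length ? left_length : right_length;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) {
      // The first differing character decides the result.
      return left[i] < right[i] ? LESS : GREATER;
    }
  }
  // All shared characters are equal; the shorter string orders first.
  if (left_length == right_length) return EQUAL;
  return left_length < right_length ? LESS : GREATER;
}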
- __ bind(&result_greater); - __ Move(rax, Smi::FromInt(GREATER)); - __ ret(0); -} - - -void StringCompareStub::Generate(MacroAssembler* masm) { - Label runtime; - - // Stack frame on entry. - // rsp[0]: return address - // rsp[8]: right string - // rsp[16]: left string - - __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left - __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right - - // Check for identity. - Label not_same; - __ cmpq(rdx, rax); - __ j(not_equal, ¬_same); - __ Move(rax, Smi::FromInt(EQUAL)); - __ IncrementCounter(&Counters::string_compare_native, 1); - __ ret(2 * kPointerSize); - - __ bind(¬_same); - - // Check that both are sequential ASCII strings. - __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime); - - // Inline comparison of ascii strings. - __ IncrementCounter(&Counters::string_compare_native, 1); - // Drop arguments from the stack - __ pop(rcx); - __ addq(rsp, Immediate(2 * kPointerSize)); - __ push(rcx); - GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8); - - // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) - // tagged as a small integer. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kStringCompare, 2, 1); -} - #undef __ #define __ masm. diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h index 31f229d..e536aec 100644 --- a/src/x64/codegen-x64.h +++ b/src/x64/codegen-x64.h @@ -752,357 +752,6 @@ class CodeGenerator: public AstVisitor { }; -// Compute a transcendental math function natively, or call the -// TranscendentalCache runtime function. -class TranscendentalCacheStub: public CodeStub { - public: - explicit TranscendentalCacheStub(TranscendentalCache::Type type) - : type_(type) {} - void Generate(MacroAssembler* masm); - private: - TranscendentalCache::Type type_; - Major MajorKey() { return TranscendentalCache; } - int MinorKey() { return type_; } - Runtime::FunctionId RuntimeFunction(); - void GenerateOperation(MacroAssembler* masm, Label* on_nan_result); -}; - - -class ToBooleanStub: public CodeStub { - public: - ToBooleanStub() { } - - void Generate(MacroAssembler* masm); - - private: - Major MajorKey() { return ToBoolean; } - int MinorKey() { return 0; } -}; - - -// Flag that indicates how to generate code for the stub GenericBinaryOpStub. -enum GenericBinaryFlags { - NO_GENERIC_BINARY_FLAGS = 0, - NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub. -}; - - -class GenericBinaryOpStub: public CodeStub { - public: - GenericBinaryOpStub(Token::Value op, - OverwriteMode mode, - GenericBinaryFlags flags, - TypeInfo operands_type = TypeInfo::Unknown()) - : op_(op), - mode_(mode), - flags_(flags), - args_in_registers_(false), - args_reversed_(false), - static_operands_type_(operands_type), - runtime_operands_type_(BinaryOpIC::DEFAULT), - name_(NULL) { - ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); - } - - GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) - : op_(OpBits::decode(key)), - mode_(ModeBits::decode(key)), - flags_(FlagBits::decode(key)), - args_in_registers_(ArgsInRegistersBits::decode(key)), - args_reversed_(ArgsReversedBits::decode(key)), - static_operands_type_(TypeInfo::ExpandedRepresentation( - StaticTypeInfoBits::decode(key))), - runtime_operands_type_(type_info), - name_(NULL) { - } - - // Generate code to call the stub with the supplied arguments. This will add - // code at the call site to prepare arguments either in registers or on the - // stack together with the actual call. 
- void GenerateCall(MacroAssembler* masm, Register left, Register right); - void GenerateCall(MacroAssembler* masm, Register left, Smi* right); - void GenerateCall(MacroAssembler* masm, Smi* left, Register right); - - Result GenerateCall(MacroAssembler* masm, - VirtualFrame* frame, - Result* left, - Result* right); - - private: - Token::Value op_; - OverwriteMode mode_; - GenericBinaryFlags flags_; - bool args_in_registers_; // Arguments passed in registers not on the stack. - bool args_reversed_; // Left and right argument are swapped. - - // Number type information of operands, determined by code generator. - TypeInfo static_operands_type_; - - // Operand type information determined at runtime. - BinaryOpIC::TypeInfo runtime_operands_type_; - - char* name_; - - const char* GetName(); - -#ifdef DEBUG - void Print() { - PrintF("GenericBinaryOpStub %d (op %s), " - "(mode %d, flags %d, registers %d, reversed %d, only_numbers %s)\n", - MinorKey(), - Token::String(op_), - static_cast(mode_), - static_cast(flags_), - static_cast(args_in_registers_), - static_cast(args_reversed_), - static_operands_type_.ToString()); - } -#endif - - // Minor key encoding in 17 bits TTNNNFRAOOOOOOOMM. - class ModeBits: public BitField {}; - class OpBits: public BitField {}; - class ArgsInRegistersBits: public BitField {}; - class ArgsReversedBits: public BitField {}; - class FlagBits: public BitField {}; - class StaticTypeInfoBits: public BitField {}; - class RuntimeTypeInfoBits: public BitField {}; - - Major MajorKey() { return GenericBinaryOp; } - int MinorKey() { - // Encode the parameters in a unique 18 bit value. - return OpBits::encode(op_) - | ModeBits::encode(mode_) - | FlagBits::encode(flags_) - | ArgsInRegistersBits::encode(args_in_registers_) - | ArgsReversedBits::encode(args_reversed_) - | StaticTypeInfoBits::encode( - static_operands_type_.ThreeBitRepresentation()) - | RuntimeTypeInfoBits::encode(runtime_operands_type_); - } - - void Generate(MacroAssembler* masm); - void GenerateSmiCode(MacroAssembler* masm, Label* slow); - void GenerateLoadArguments(MacroAssembler* masm); - void GenerateReturn(MacroAssembler* masm); - void GenerateRegisterArgsPush(MacroAssembler* masm); - void GenerateTypeTransition(MacroAssembler* masm); - - bool ArgsInRegistersSupported() { - return (op_ == Token::ADD) || (op_ == Token::SUB) - || (op_ == Token::MUL) || (op_ == Token::DIV); - } - bool IsOperationCommutative() { - return (op_ == Token::ADD) || (op_ == Token::MUL); - } - - void SetArgsInRegisters() { args_in_registers_ = true; } - void SetArgsReversed() { args_reversed_ = true; } - bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; } - bool HasArgsInRegisters() { return args_in_registers_; } - bool HasArgsReversed() { return args_reversed_; } - - bool ShouldGenerateSmiCode() { - return HasSmiCodeInStub() && - runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && - runtime_operands_type_ != BinaryOpIC::STRINGS; - } - - bool ShouldGenerateFPCode() { - return runtime_operands_type_ != BinaryOpIC::STRINGS; - } - - virtual int GetCodeKind() { return Code::BINARY_OP_IC; } - - virtual InlineCacheState GetICState() { - return BinaryOpIC::ToState(runtime_operands_type_); - } -}; - -class StringHelper : public AllStatic { - public: - // Generate code for copying characters using a simple loop. This should only - // be used in places where the number of characters is small and the - // additional setup and checking in GenerateCopyCharactersREP adds too much - // overhead. 
-  static void GenerateCopyCharacters(MacroAssembler* masm,
-                                     Register dest,
-                                     Register src,
-                                     Register count,
-                                     bool ascii);
-
-  // Generate code for copying characters using the rep movs instruction.
-  // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
-  // not supported.
-  static void GenerateCopyCharactersREP(MacroAssembler* masm,
-                                        Register dest,     // Must be rdi.
-                                        Register src,      // Must be rsi.
-                                        Register count,    // Must be rcx.
-                                        bool ascii);
-
-
-  // Probe the symbol table for a two character string. If the string is
-  // not found by probing a jump to the label not_found is performed. This jump
-  // does not guarantee that the string is not in the symbol table. If the
-  // string is found the code falls through with the string in register rax.
-  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
-                                                   Register c1,
-                                                   Register c2,
-                                                   Register scratch1,
-                                                   Register scratch2,
-                                                   Register scratch3,
-                                                   Register scratch4,
-                                                   Label* not_found);
-
-  // Generate string hash.
-  static void GenerateHashInit(MacroAssembler* masm,
-                               Register hash,
-                               Register character,
-                               Register scratch);
-  static void GenerateHashAddCharacter(MacroAssembler* masm,
-                                       Register hash,
-                                       Register character,
-                                       Register scratch);
-  static void GenerateHashGetHash(MacroAssembler* masm,
-                                  Register hash,
-                                  Register scratch);
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
-  NO_STRING_ADD_FLAGS = 0,
-  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
-};
-
-
-class StringAddStub: public CodeStub {
- public:
-  explicit StringAddStub(StringAddFlags flags) {
-    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
-  }
-
- private:
-  Major MajorKey() { return StringAdd; }
-  int MinorKey() { return string_check_ ? 0 : 1; }
-
-  void Generate(MacroAssembler* masm);
-
-  // Should the stub check whether arguments are strings?
-  bool string_check_;
-};
-
-
-class SubStringStub: public CodeStub {
- public:
-  SubStringStub() {}
-
- private:
-  Major MajorKey() { return SubString; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public CodeStub {
- public:
-  explicit StringCompareStub() {}
-
-  // Compare two flat ascii strings and returns result in rax after popping two
-  // arguments from the stack.
-  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                              Register left,
-                                              Register right,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              Register scratch3,
-                                              Register scratch4);
-
- private:
-  Major MajorKey() { return StringCompare; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-};
-
-
-class NumberToStringStub: public CodeStub {
- public:
-  NumberToStringStub() { }
-
-  // Generate code to do a lookup in the number string cache. If the number in
-  // the register object is found in the cache the generated code falls through
-  // with the result in the result register. The object and the result register
-  // can be the same. If the number is not found in the cache the code jumps to
-  // the label not_found with only the content of register object unchanged.
-  static void GenerateLookupNumberStringCache(MacroAssembler* masm,
-                                              Register object,
-                                              Register result,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              bool object_is_smi,
-                                              Label* not_found);
-
- private:
-  static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
-                                             Register hash,
-                                             Register mask);
-
-  Major MajorKey() { return NumberToString; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("NumberToStringStub\n");
-  }
-#endif
-};
-
-
-class RecordWriteStub : public CodeStub {
- public:
-  RecordWriteStub(Register object, Register addr, Register scratch)
-      : object_(object), addr_(addr), scratch_(scratch) { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Register object_;
-  Register addr_;
-  Register scratch_;
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
-           object_.code(), addr_.code(), scratch_.code());
-  }
-#endif
-
-  // Minor key encoding in 12 bits. 4 bits for each of the three
-  // registers (object, address and scratch) OOOOAAAASSSS.
-  class ScratchBits : public BitField<uint32_t, 0, 4> {};
-  class AddressBits : public BitField<uint32_t, 4, 4> {};
-  class ObjectBits : public BitField<uint32_t, 8, 4> {};
-
-  Major MajorKey() { return RecordWrite; }
-
-  int MinorKey() {
-    // Encode the registers.
-    return ObjectBits::encode(object_.code()) |
-           AddressBits::encode(addr_.code()) |
-           ScratchBits::encode(scratch_.code());
-  }
-};
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_X64_CODEGEN_X64_H_
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index b5f7fe4..94fd19d 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -29,6 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
+#include "code-stubs-x64.h"
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index e4ab232..ef83f28 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -31,6 +31,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "ic-inl.h"
+#include "code-stubs-x64.h"
 #include "codegen-inl.h"
 #include "stub-cache.h"
 #include "macro-assembler-x64.h"
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index ea92b1e..b355fb6 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -493,6 +493,8 @@
             '../../src/arm/assembler-arm.cc',
             '../../src/arm/assembler-arm.h',
             '../../src/arm/builtins-arm.cc',
+            '../../src/arm/code-stubs-arm.cc',
+            '../../src/arm/code-stubs-arm.h',
             '../../src/arm/codegen-arm.cc',
             '../../src/arm/codegen-arm.h',
             '../../src/arm/constants-arm.h',
@@ -539,6 +541,8 @@
             '../../src/ia32/assembler-ia32.cc',
             '../../src/ia32/assembler-ia32.h',
             '../../src/ia32/builtins-ia32.cc',
+            '../../src/ia32/code-stubs-ia32.cc',
+            '../../src/ia32/code-stubs-ia32.h',
             '../../src/ia32/codegen-ia32.cc',
             '../../src/ia32/codegen-ia32.h',
             '../../src/ia32/cpu-ia32.cc',
@@ -573,6 +577,8 @@
             '../../src/x64/assembler-x64.cc',
             '../../src/x64/assembler-x64.h',
             '../../src/x64/builtins-x64.cc',
+            '../../src/x64/code-stubs-x64.cc',
+            '../../src/x64/code-stubs-x64.h',
             '../../src/x64/codegen-x64.cc',
             '../../src/x64/codegen-x64.h',
             '../../src/x64/cpu-x64.cc',
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index 0ca6a9d..3ebc458 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -223,14 +223,12 @@
   9FA38BB31175B2D200C4CD55 /* data-flow.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9C1175B2D200C4CD55 /* data-flow.cc */; };
   9FA38BB41175B2D200C4CD55 /* diy-fp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9E1175B2D200C4CD55 /* diy-fp.cc */; };
   9FA38BB51175B2D200C4CD55 /* fast-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */; };
-  9FA38BB61175B2D200C4CD55 /* flow-graph.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA31175B2D200C4CD55 /* flow-graph.cc */; };
   9FA38BB71175B2D200C4CD55 /* full-codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA51175B2D200C4CD55 /* full-codegen.cc */; };
   9FA38BB81175B2D200C4CD55 /* liveedit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA91175B2D200C4CD55 /* liveedit.cc */; };
   9FA38BB91175B2D200C4CD55 /* type-info.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BAE1175B2D200C4CD55 /* type-info.cc */; };
   9FA38BBA1175B2D200C4CD55 /* data-flow.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9C1175B2D200C4CD55 /* data-flow.cc */; };
   9FA38BBB1175B2D200C4CD55 /* diy-fp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9E1175B2D200C4CD55 /* diy-fp.cc */; };
   9FA38BBC1175B2D200C4CD55 /* fast-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */; };
-  9FA38BBD1175B2D200C4CD55 /* flow-graph.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA31175B2D200C4CD55 /* flow-graph.cc */; };
   9FA38BBE1175B2D200C4CD55 /* full-codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA51175B2D200C4CD55 /* full-codegen.cc */; };
   9FA38BBF1175B2D200C4CD55 /* liveedit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA91175B2D200C4CD55 /* liveedit.cc */; };
   9FA38BC01175B2D200C4CD55 /* type-info.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BAE1175B2D200C4CD55 /* type-info.cc */; };
@@ -248,6 +246,8 @@
   C2BD4BE51201661F0046BF9F /* dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD5120165460046BF9F /* dtoa.cc */; };
   C2D1E9731212F2BC00187A52 /* objects-visiting.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2D1E9711212F27B00187A52 /* objects-visiting.cc */; };
   C2D1E9741212F2CF00187A52 /* objects-visiting.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2D1E9711212F27B00187A52 /* objects-visiting.cc */; };
+  C68081AD1225120B001EAFE4 /* code-stubs-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */; };
+  C68081B112251239001EAFE4 /* code-stubs-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = C68081B012251239001EAFE4 /* code-stubs-ia32.cc */; };
 /* End PBXBuildFile section */
 
 /* Begin PBXContainerItemProxy section */
@@ -596,8 +596,6 @@
   9FA38BA01175B2D200C4CD55 /* double.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = double.h; sourceTree = "<group>"; };
   9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "fast-dtoa.cc"; sourceTree = "<group>"; };
   9FA38BA21175B2D200C4CD55 /* fast-dtoa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "fast-dtoa.h"; sourceTree = "<group>"; };
-  9FA38BA31175B2D200C4CD55 /* flow-graph.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "flow-graph.cc"; sourceTree = "<group>"; };
-  9FA38BA41175B2D200C4CD55 /* flow-graph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "flow-graph.h"; sourceTree = "<group>"; };
   9FA38BA51175B2D200C4CD55 /* full-codegen.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "full-codegen.cc"; sourceTree = "<group>"; };
   9FA38BA61175B2D200C4CD55 /* full-codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "full-codegen.h"; sourceTree = "<group>"; };
   9FA38BA71175B2D200C4CD55 /* jump-target-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target-inl.h"; sourceTree = "<group>"; };
@@ -628,6 +626,10 @@
   C2BD4BDA120165A70046BF9F /* fixed-dtoa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "fixed-dtoa.h"; sourceTree = "<group>"; };
   C2D1E9711212F27B00187A52 /* objects-visiting.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "objects-visiting.cc"; sourceTree = "<group>"; };
   C2D1E9721212F27B00187A52 /* objects-visiting.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "objects-visiting.h"; sourceTree = "<group>"; };
+  C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "code-stubs-arm.cc"; path = "arm/code-stubs-arm.cc"; sourceTree = "<group>"; };
+  C68081AC1225120B001EAFE4 /* code-stubs-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "code-stubs-arm.h"; path = "arm/code-stubs-arm.h"; sourceTree = "<group>"; };
+  C68081B012251239001EAFE4 /* code-stubs-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "code-stubs-ia32.cc"; path = "ia32/code-stubs-ia32.cc"; sourceTree = "<group>"; };
+  C68081B412251257001EAFE4 /* code-stubs-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "code-stubs-ia32.h"; path = "ia32/code-stubs-ia32.h"; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
 /* Begin PBXFrameworksBuildPhase section */
@@ -716,6 +718,10 @@
   897FF0D70E719AB300D62E90 /* C++ */ = {
     isa = PBXGroup;
     children = (
+      C68081B412251257001EAFE4 /* code-stubs-ia32.h */,
+      C68081B012251239001EAFE4 /* code-stubs-ia32.cc */,
+      C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */,
+      C68081AC1225120B001EAFE4 /* code-stubs-arm.h */,
       897FF1750E719B8F00D62E90 /* SConscript */,
       897FF0F60E719B8F00D62E90 /* accessors.cc */,
       897FF0F70E719B8F00D62E90 /* accessors.h */,
@@ -816,8 +822,6 @@
       89471C7F0EB23EE400B6874B /* flag-definitions.h */,
       897FF1350E719B8F00D62E90 /* flags.cc */,
       897FF1360E719B8F00D62E90 /* flags.h */,
-      9FA38BA31175B2D200C4CD55 /* flow-graph.cc */,
-      9FA38BA41175B2D200C4CD55 /* flow-graph.h */,
       8981F5FE1010500F00D1520E /* frame-element.cc */,
       8981F5FF1010500F00D1520E /* frame-element.h */,
       897FF1370E719B8F00D62E90 /* frames-arm.cc */,
@@ -1298,7 +1302,6 @@
       89A88E040E71A65D0043BA31 /* factory.cc in Sources */,
       9FA38BBC1175B2D200C4CD55 /* fast-dtoa.cc in Sources */,
       89A88E050E71A65D0043BA31 /* flags.cc in Sources */,
-      9FA38BBD1175B2D200C4CD55 /* flow-graph.cc in Sources */,
       8981F6001010501900D1520E /* frame-element.cc in Sources */,
       89A88E060E71A6600043BA31 /* frames-ia32.cc in Sources */,
       89A88E070E71A6610043BA31 /* frames.cc in Sources */,
@@ -1369,6 +1372,7 @@
       58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */,
       9FA37336116DD9F000C4CD55 /* vm-state.cc in Sources */,
       89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */,
+      C68081B112251239001EAFE4 /* code-stubs-ia32.cc in Sources */,
     );
     runOnlyForDeploymentPostprocessing = 0;
   };
@@ -1422,7 +1426,6 @@
       89F23C570E78D5B2006B2466 /* factory.cc in Sources */,
       9FA38BB51175B2D200C4CD55 /* fast-dtoa.cc in Sources */,
       89F23C580E78D5B2006B2466 /* flags.cc in Sources */,
-      9FA38BB61175B2D200C4CD55 /* flow-graph.cc in Sources */,
       8981F6011010502800D1520E /* frame-element.cc in Sources */,
       89F23C9C0E78D5F1006B2466 /* frames-arm.cc in Sources */,
       89F23C5A0E78D5B2006B2466 /* frames.cc in Sources */,
@@ -1494,6 +1497,7 @@
       58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */,
       9FA37335116DD9F000C4CD55 /* vm-state.cc in Sources */,
       89F23C820E78D5B2006B2466 /* zone.cc in Sources */,
+      C68081AD1225120B001EAFE4 /* code-stubs-arm.cc in Sources */,
     );
     runOnlyForDeploymentPostprocessing = 0;
   };
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index 0d83830..4629b5d 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -305,6 +305,14 @@
 >
+
+
+
+
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index aa1e822..4848c9b 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -277,6 +277,14 @@
 >
+
+
+
+
diff --git a/tools/visual_studio/v8_base_x64.vcproj b/tools/visual_studio/v8_base_x64.vcproj
index 33c5394..f5cce21 100644
--- a/tools/visual_studio/v8_base_x64.vcproj
+++ b/tools/visual_studio/v8_base_x64.vcproj
@@ -277,6 +277,14 @@
 >
+
+
+
+
-- 
2.7.4
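
The stub declarations removed from codegen-x64.h above pack their construction parameters into a CodeStub minor key with BitField helpers and recover them again in the GenericBinaryOpStub(int key, ...) constructor. The following standalone sketch (not part of the patch) illustrates that encode/decode pattern; the BitField implementation and the field layout used here are simplified assumptions for illustration, not V8's actual definitions.

// Illustrative sketch only -- a simplified stand-in for v8::internal::BitField.
#include <cassert>
#include <cstdint>
#include <cstdio>

template <class T, int shift, int size>
class BitField {
 public:
  // A value fits in the field if it uses no more than 'size' low bits.
  static bool is_valid(T value) {
    return (static_cast<uint32_t>(value) & ~((1u << size) - 1u)) == 0;
  }

  // Shift the value into its position in the key.
  static uint32_t encode(T value) {
    assert(is_valid(value));
    return static_cast<uint32_t>(value) << shift;
  }

  // Extract the field back out of a packed key.
  static T decode(uint32_t key) {
    return static_cast<T>((key >> shift) & ((1u << size) - 1u));
  }
};

// Hypothetical layout: 2 mode bits, 7 op bits, 1 "args reversed" bit.
typedef BitField<int, 0, 2> ModeBits;
typedef BitField<int, 2, 7> OpBits;
typedef BitField<bool, 9, 1> ArgsReversedBits;

int main() {
  // MinorKey()-style packing: OR the encoded fields into one integer.
  uint32_t key = ModeBits::encode(1) |
                 OpBits::encode(42) |
                 ArgsReversedBits::encode(true);

  // Constructor-from-key style: pull each field back out.
  assert(ModeBits::decode(key) == 1);
  assert(OpBits::decode(key) == 42);
  assert(ArgsReversedBits::decode(key));

  std::printf("minor key = 0x%x\n", static_cast<unsigned>(key));
  return 0;
}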