1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #if defined(V8_TARGET_ARCH_MIPS)
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
35 #include "regexp-macro-assembler.h"
41 #define __ ACCESS_MASM(masm)
43 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
47 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
53 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
54 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
59 // Check if the operand is a heap number.
60 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
61 Register scratch1, Register scratch2,
62 Label* not_a_heap_number) {
63 __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
64 __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
65 __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
69 void ToNumberStub::Generate(MacroAssembler* masm) {
70 // The ToNumber stub takes one argument in a0.
71 Label check_heap_number, call_builtin;
72 __ JumpIfNotSmi(a0, &check_heap_number);
73 __ Ret(USE_DELAY_SLOT);
76 __ bind(&check_heap_number);
77 EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
78 __ Ret(USE_DELAY_SLOT);
81 __ bind(&call_builtin);
83 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
87 void FastNewClosureStub::Generate(MacroAssembler* masm) {
88 // Create a new closure from the given function info in new
89 // space. Set the context to the current context in cp.
92 // Pop the function info from the stack.
95 // Attempt to allocate new JSFunction in new space.
96 __ AllocateInNewSpace(JSFunction::kSize,
103 int map_index = (language_mode_ == CLASSIC_MODE)
104 ? Context::FUNCTION_MAP_INDEX
105 : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
107 // Compute the function map in the current global context and set that
108 // as the map of the allocated object.
109 __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
110 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
111 __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
112 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
114 // Initialize the rest of the function. We don't have to update the
115 // write barrier because the allocated object is in new space.
116 __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
117 __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
118 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
119 __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
120 __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
121 __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
122 __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
123 __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
124 __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
125 __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
127 // Initialize the code pointer in the function to be the one
128 // found in the shared function info object.
129 __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
130 __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
132 // Return result. The argument function info has been popped already.
133 __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
136 // Create a new closure through the slower runtime call.
138 __ LoadRoot(t0, Heap::kFalseValueRootIndex);
140 __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
144 void FastNewContextStub::Generate(MacroAssembler* masm) {
145 // Try to allocate the context in new space.
147 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
149 // Attempt to allocate the context in new space.
150 __ AllocateInNewSpace(FixedArray::SizeFor(length),
157 // Load the function from the stack.
158 __ lw(a3, MemOperand(sp, 0));
160 // Set up the object header.
161 __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
162 __ li(a2, Operand(Smi::FromInt(length)));
163 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
164 __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
166 // Set up the fixed slots, copy the global object from the previous context.
167 __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
168 __ li(a1, Operand(Smi::FromInt(0)));
169 __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
170 __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
171 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
172 __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
174 // Copy the qml global object from the surrounding context.
175 __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
176 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
179 // Initialize the rest of the slots to undefined.
180 __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
181 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
182 __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
185 // Remove the on-stack argument and return.
189 // Need to collect. Call into runtime system.
191 __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
195 void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
196 // Stack layout on entry:
199 // [sp + kPointerSize]: serialized scope info
201 // Try to allocate the context in new space.
203 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
204 __ AllocateInNewSpace(FixedArray::SizeFor(length),
205 v0, a1, a2, &gc, TAG_OBJECT);
207 // Load the function from the stack.
208 __ lw(a3, MemOperand(sp, 0));
210 // Load the serialized scope info from the stack.
211 __ lw(a1, MemOperand(sp, 1 * kPointerSize));
213 // Set up the object header.
214 __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
215 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
216 __ li(a2, Operand(Smi::FromInt(length)));
217 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
219 // If this block context is nested in the global context we get a smi
220 // sentinel instead of a function. The block context should get the
221 // canonical empty function of the global context as its closure which
222 // we still have to look up.
223 Label after_sentinel;
224 __ JumpIfNotSmi(a3, &after_sentinel);
225 if (FLAG_debug_code) {
226 const char* message = "Expected 0 as a Smi sentinel";
227 __ Assert(eq, message, a3, Operand(zero_reg));
229 __ lw(a3, GlobalObjectOperand());
230 __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
231 __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
232 __ bind(&after_sentinel);
234 // Set up the fixed slots, copy the global object from the previous context.
235 __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
236 __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
237 __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
238 __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
239 __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));
241 // Copy the qml global object from the surrounding context.
242 __ lw(a1, ContextOperand(cp, Context::QML_GLOBAL_INDEX));
243 __ sw(a1, ContextOperand(v0, Context::QML_GLOBAL_INDEX));
245 // Initialize the rest of the slots to the hole value.
246 __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
247 for (int i = 0; i < slots_; i++) {
248 __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
251 // Remove the on-stack argument and return.
255 // Need to collect. Call into runtime system.
257 __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
261 static void GenerateFastCloneShallowArrayCommon(
262 MacroAssembler* masm,
264 FastCloneShallowArrayStub::Mode mode,
266 // Registers on entry:
267 // a3: boilerplate literal array.
268 ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
270 // All sizes here are multiples of kPointerSize.
271 int elements_size = 0;
273 elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
274 ? FixedDoubleArray::SizeFor(length)
275 : FixedArray::SizeFor(length);
277 int size = JSArray::kSize + elements_size;
279 // Allocate both the JS array and the elements array in one big
280 // allocation. This avoids multiple limit checks.
281 __ AllocateInNewSpace(size,
288 // Copy the JS array part.
289 for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
290 if ((i != JSArray::kElementsOffset) || (length == 0)) {
291 __ lw(a1, FieldMemOperand(a3, i));
292 __ sw(a1, FieldMemOperand(v0, i));
297 // Get hold of the elements array of the boilerplate and set up the
298 // elements pointer in the resulting object.
299 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
300 __ Addu(a2, v0, Operand(JSArray::kSize));
301 __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
303 // Copy the elements array.
304 ASSERT((elements_size % kPointerSize) == 0);
305 __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
309 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
310 // Stack layout on entry:
312 // [sp]: constant elements.
313 // [sp + kPointerSize]: literal index.
314 // [sp + (2 * kPointerSize)]: literals array.
316 // Load the boilerplate object into a3 and check if we need to create a boilerplate.
319 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
320 __ lw(a0, MemOperand(sp, 1 * kPointerSize));
321 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
322 __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
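// The literal index in a0 is a smi (value << kSmiTagSize), so shifting it
// left by kPointerSizeLog2 - kSmiTagSize yields the byte offset
// index * kPointerSize into the literals array (on 32-bit MIPS: << 1).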
324 __ lw(a3, MemOperand(t0));
325 __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
326 __ Branch(&slow_case, eq, a3, Operand(t1));
328 FastCloneShallowArrayStub::Mode mode = mode_;
329 if (mode == CLONE_ANY_ELEMENTS) {
330 Label double_elements, check_fast_elements;
331 __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
332 __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
333 __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
334 __ Branch(&check_fast_elements, ne, v0, Operand(t1));
335 GenerateFastCloneShallowArrayCommon(masm, 0,
336 COPY_ON_WRITE_ELEMENTS, &slow_case);
337 // Return and remove the on-stack parameters.
340 __ bind(&check_fast_elements);
341 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
342 __ Branch(&double_elements, ne, v0, Operand(t1));
343 GenerateFastCloneShallowArrayCommon(masm, length_,
344 CLONE_ELEMENTS, &slow_case);
345 // Return and remove the on-stack parameters.
348 __ bind(&double_elements);
349 mode = CLONE_DOUBLE_ELEMENTS;
350 // Fall through to generate the code to handle double elements.
353 if (FLAG_debug_code) {
355 Heap::RootListIndex expected_map_index;
356 if (mode == CLONE_ELEMENTS) {
357 message = "Expected (writable) fixed array";
358 expected_map_index = Heap::kFixedArrayMapRootIndex;
359 } else if (mode == CLONE_DOUBLE_ELEMENTS) {
360 message = "Expected (writable) fixed double array";
361 expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
363 ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
364 message = "Expected copy-on-write fixed array";
365 expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
368 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
369 __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
370 __ LoadRoot(at, expected_map_index);
371 __ Assert(eq, message, a3, Operand(at));
375 GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
377 // Return and remove the on-stack parameters.
381 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
385 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
386 // Stack layout on entry:
388 // [sp]: object literal flags.
389 // [sp + kPointerSize]: constant properties.
390 // [sp + (2 * kPointerSize)]: literal index.
391 // [sp + (3 * kPointerSize)]: literals array.
393 // Load the boilerplate object into a3 and check if we need to create a boilerplate.
396 __ lw(a3, MemOperand(sp, 3 * kPointerSize));
397 __ lw(a0, MemOperand(sp, 2 * kPointerSize));
398 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
399 __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
401 __ lw(a3, MemOperand(a3));
402 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
403 __ Branch(&slow_case, eq, a3, Operand(t0));
405 // Check that the boilerplate contains only fast properties and we can
406 // statically determine the instance size.
407 int size = JSObject::kHeaderSize + length_ * kPointerSize;
408 __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
409 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
410 __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
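// The map's instance size field is stored in pointer-sized words, which is
// why the byte size is shifted right by kPointerSizeLog2 before the compare.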
412 // Allocate the JS object and copy header together with all in-object
413 // properties from the boilerplate.
414 __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
415 for (int i = 0; i < size; i += kPointerSize) {
416 __ lw(a1, FieldMemOperand(a3, i));
417 __ sw(a1, FieldMemOperand(v0, i));
420 // Return and remove the on-stack parameters.
424 __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
428 // Takes a Smi and converts it to an IEEE 64-bit floating point value in two
429 // registers. The format is 1 sign bit, 11 exponent bits (biased by 1023) and
430 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
431 // scratch register. Destroys the source register. No GC occurs during this
432 // stub, so you don't have to set up the frame.
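// As a quick illustration of the layout: the Smi value 5 is 1.25 * 2^2, so
// the sign bit is 0, the biased exponent is 2 + 1023 = 1025 (0x401) and the
// fraction is 0x4000000000000, giving a high word of 0x40140000 and a low
// word of 0x00000000 (the double 5.0 is 0x4014000000000000).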
433 class ConvertToDoubleStub : public CodeStub {
435 ConvertToDoubleStub(Register result_reg_1,
436 Register result_reg_2,
438 Register scratch_reg)
439 : result1_(result_reg_1),
440 result2_(result_reg_2),
442 zeros_(scratch_reg) { }
450 // Minor key encoding in 16 bits.
451 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
452 class OpBits: public BitField<Token::Value, 2, 14> {};
454 Major MajorKey() { return ConvertToDouble; }
456 // Encode the parameters in a unique 16 bit value.
457 return result1_.code() +
458 (result2_.code() << 4) +
459 (source_.code() << 8) +
460 (zeros_.code() << 12);
463 void Generate(MacroAssembler* masm);
467 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
468 #ifndef BIG_ENDIAN_FLOATING_POINT
469 Register exponent = result1_;
470 Register mantissa = result2_;
472 Register exponent = result2_;
473 Register mantissa = result1_;
476 // Convert from Smi to integer.
477 __ sra(source_, source_, kSmiTagSize);
478 // Move sign bit from source to destination. This works because the sign bit
479 // in the exponent word of the double has the same position and polarity as
480 // the 2's complement sign bit in a Smi.
481 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
482 __ And(exponent, source_, Operand(HeapNumber::kSignMask));
483 // Subtract from 0 if source was negative.
484 __ subu(at, zero_reg, source_);
485 __ Movn(source_, at, exponent);
487 // We have -1, 0 or 1, which we treat specially. Register source_ contains
488 // absolute value: it is either equal to 1 (special case of -1 and 1),
489 // greater than 1 (not a special case) or less than 1 (special case of 0).
490 __ Branch(&not_special, gt, source_, Operand(1));
492 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
493 const uint32_t exponent_word_for_1 =
494 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
495 // Safe to use 'at' as dest reg here.
496 __ Or(at, exponent, Operand(exponent_word_for_1));
497 __ Movn(exponent, at, source_); // Write exp when source not 0.
498 // 1, 0 and -1 all have 0 for the second word.
499 __ Ret(USE_DELAY_SLOT);
500 __ mov(mantissa, zero_reg);
502 __ bind(&not_special);
503 // Count leading zeros.
504 // Gets the wrong answer for 0, but we already checked for that case above.
505 __ Clz(zeros_, source_);
506 // Compute exponent and or it into the exponent register.
507 // We use mantissa as a scratch register here.
508 __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
509 __ subu(mantissa, mantissa, zeros_);
510 __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
511 __ Or(exponent, exponent, mantissa);
513 // Shift up the source chopping the top bit off.
514 __ Addu(zeros_, zeros_, Operand(1));
515 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
516 __ sllv(source_, source_, zeros_);
517 // Compute lower part of fraction (last 12 bits).
518 __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
519 // And the top (top 20 bits).
520 __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
522 __ Ret(USE_DELAY_SLOT);
523 __ or_(exponent, exponent, source_);
527 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
528 FloatingPointHelper::Destination destination,
531 if (CpuFeatures::IsSupported(FPU)) {
532 CpuFeatures::Scope scope(FPU);
533 __ sra(scratch1, a0, kSmiTagSize);
534 __ mtc1(scratch1, f14);
535 __ cvt_d_w(f14, f14);
536 __ sra(scratch1, a1, kSmiTagSize);
537 __ mtc1(scratch1, f12);
538 __ cvt_d_w(f12, f12);
539 if (destination == kCoreRegisters) {
540 __ Move(a2, a3, f14);
541 __ Move(a0, a1, f12);
544 ASSERT(destination == kCoreRegisters);
545 // Write Smi from a0 to a3 and a2 in double format.
546 __ mov(scratch1, a0);
547 ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
549 __ Call(stub1.GetCode());
550 // Write Smi from a1 to a1 and a0 in double format.
551 __ mov(scratch1, a1);
552 ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
553 __ Call(stub2.GetCode());
559 void FloatingPointHelper::LoadOperands(
560 MacroAssembler* masm,
561 FloatingPointHelper::Destination destination,
562 Register heap_number_map,
567 // Load right operand (a0) to f12 or a2/a3.
568 LoadNumber(masm, destination,
569 a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
571 // Load left operand (a1) to f14 or a0/a1.
572 LoadNumber(masm, destination,
573 a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
577 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
578 Destination destination,
583 Register heap_number_map,
587 if (FLAG_debug_code) {
588 __ AbortIfNotRootValue(heap_number_map,
589 Heap::kHeapNumberMapRootIndex,
590 "HeapNumberMap register clobbered.");
596 __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
598 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
600 // Handle loading a double from a heap number.
601 if (CpuFeatures::IsSupported(FPU) &&
602 destination == kFPURegisters) {
603 CpuFeatures::Scope scope(FPU);
604 // Load the double from tagged HeapNumber to double register.
606 // ARM uses a workaround here because of the unaligned HeapNumber
607 // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
608 // point in generating even more instructions.
609 __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
611 ASSERT(destination == kCoreRegisters);
612 // Load the double from heap number to dst1 and dst2 in double format.
613 __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
614 __ lw(dst2, FieldMemOperand(object,
615 HeapNumber::kValueOffset + kPointerSize));
619 // Handle loading a double from a smi.
621 if (CpuFeatures::IsSupported(FPU)) {
622 CpuFeatures::Scope scope(FPU);
623 // Convert smi to double using FPU instructions.
624 __ mtc1(scratch1, dst);
625 __ cvt_d_w(dst, dst);
626 if (destination == kCoreRegisters) {
627 // Load the converted smi to dst1 and dst2 in double format.
628 __ Move(dst1, dst2, dst);
631 ASSERT(destination == kCoreRegisters);
632 // Write smi to dst1 and dst2 double format.
633 __ mov(scratch1, object);
634 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
636 __ Call(stub.GetCode());
644 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
647 Register heap_number_map,
651 FPURegister double_scratch,
653 if (FLAG_debug_code) {
654 __ AbortIfNotRootValue(heap_number_map,
655 Heap::kHeapNumberMapRootIndex,
656 "HeapNumberMap register clobbered.");
659 Label not_in_int32_range;
661 __ UntagAndJumpIfSmi(dst, object, &done);
662 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
663 __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
664 __ ConvertToInt32(object,
669 &not_in_int32_range);
672 __ bind(&not_in_int32_range);
673 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
674 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
676 __ EmitOutOfInt32RangeTruncate(dst,
685 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
686 Register int_scratch,
687 Destination destination,
688 FPURegister double_dst,
692 FPURegister single_scratch) {
693 ASSERT(!int_scratch.is(scratch2));
694 ASSERT(!int_scratch.is(dst1));
695 ASSERT(!int_scratch.is(dst2));
699 if (CpuFeatures::IsSupported(FPU)) {
700 CpuFeatures::Scope scope(FPU);
701 __ mtc1(int_scratch, single_scratch);
702 __ cvt_d_w(double_dst, single_scratch);
703 if (destination == kCoreRegisters) {
704 __ Move(dst1, dst2, double_dst);
707 Label fewer_than_20_useful_bits;
710 // | s | exp | mantissa |
713 __ mov(dst2, int_scratch);
714 __ mov(dst1, int_scratch);
715 __ Branch(&done, eq, int_scratch, Operand(zero_reg));
717 // Preload the sign of the value.
718 __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
719 // Get the absolute value (as an unsigned integer).
721 __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
722 __ Subu(int_scratch, zero_reg, int_scratch);
725 // Get mantissa[51:20].
727 // Get the position of the first set bit.
728 __ Clz(dst1, int_scratch);
730 __ Subu(dst1, scratch2, dst1);
733 __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
734 __ Ins(dst2, scratch2,
735 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
737 // Clear the topmost set bit (the implicit leading 1 of the mantissa).
738 __ li(scratch2, Operand(1));
739 __ sllv(scratch2, scratch2, dst1);
741 __ Xor(scratch2, scratch2, at);
742 __ And(int_scratch, int_scratch, scratch2);
744 // Get the number of bits to set in the lower part of the mantissa.
745 __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
746 __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
747 // Set the higher 20 bits of the mantissa.
748 __ srlv(at, int_scratch, scratch2);
749 __ or_(dst2, dst2, at);
751 __ subu(scratch2, at, scratch2);
752 __ sllv(dst1, int_scratch, scratch2);
755 __ bind(&fewer_than_20_useful_bits);
756 __ li(at, HeapNumber::kMantissaBitsInTopWord);
757 __ subu(scratch2, at, dst1);
758 __ sllv(scratch2, int_scratch, scratch2);
759 __ Or(dst2, dst2, scratch2);
761 __ mov(dst1, zero_reg);
767 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
769 Destination destination,
770 DoubleRegister double_dst,
773 Register heap_number_map,
776 FPURegister single_scratch,
778 ASSERT(!scratch1.is(object) && !scratch2.is(object));
779 ASSERT(!scratch1.is(scratch2));
780 ASSERT(!heap_number_map.is(object) &&
781 !heap_number_map.is(scratch1) &&
782 !heap_number_map.is(scratch2));
784 Label done, obj_is_not_smi;
786 __ JumpIfNotSmi(object, &obj_is_not_smi);
787 __ SmiUntag(scratch1, object);
788 ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
789 scratch2, single_scratch);
792 __ bind(&obj_is_not_smi);
793 if (FLAG_debug_code) {
794 __ AbortIfNotRootValue(heap_number_map,
795 Heap::kHeapNumberMapRootIndex,
796 "HeapNumberMap register clobbered.");
798 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
801 if (CpuFeatures::IsSupported(FPU)) {
802 CpuFeatures::Scope scope(FPU);
803 // Load the double value.
804 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
806 Register except_flag = scratch2;
807 __ EmitFPUTruncate(kRoundToZero,
812 kCheckForInexactConversion);
814 // Jump to not_int32 if the operation did not succeed.
815 __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
817 if (destination == kCoreRegisters) {
818 __ Move(dst1, dst2, double_dst);
822 ASSERT(!scratch1.is(object) && !scratch2.is(object));
823 // Load the double value in the destination registers.
824 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
825 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
827 // Check for 0 and -0.
828 __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
829 __ Or(scratch1, scratch1, Operand(dst2));
830 __ Branch(&done, eq, scratch1, Operand(zero_reg));
832 // Check that the value can be exactly represented by a 32-bit integer.
833 // Jump to not_int32 if that's not the case.
834 DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
836 // dst1 and dst2 were trashed. Reload the double value.
837 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
838 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
845 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
848 Register heap_number_map,
852 DoubleRegister double_scratch,
854 ASSERT(!dst.is(object));
855 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
856 ASSERT(!scratch1.is(scratch2) &&
857 !scratch1.is(scratch3) &&
858 !scratch2.is(scratch3));
862 __ UntagAndJumpIfSmi(dst, object, &done);
864 if (FLAG_debug_code) {
865 __ AbortIfNotRootValue(heap_number_map,
866 Heap::kHeapNumberMapRootIndex,
867 "HeapNumberMap register clobbered.");
869 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
871 // Object is a heap number.
872 // Convert the floating point value to a 32-bit integer.
873 if (CpuFeatures::IsSupported(FPU)) {
874 CpuFeatures::Scope scope(FPU);
875 // Load the double value.
876 __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
878 FPURegister single_scratch = double_scratch.low();
879 Register except_flag = scratch2;
880 __ EmitFPUTruncate(kRoundToZero,
885 kCheckForInexactConversion);
887 // Jump to not_int32 if the operation did not succeed.
888 __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
889 // Get the result in the destination register.
890 __ mfc1(dst, single_scratch);
893 // Load the double value in the destination registers.
894 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
895 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
897 // Check for 0 and -0.
898 __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
899 __ Or(dst, scratch2, Operand(dst));
900 __ Branch(&done, eq, dst, Operand(zero_reg));
902 DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
904 // Registers state after DoubleIs32BitInteger.
905 // dst: mantissa[51:20].
908 // Shift back the higher bits of the mantissa.
909 __ srlv(dst, dst, scratch3);
910 // Set the implicit first bit.
912 __ subu(scratch3, at, scratch3);
913 __ sllv(scratch2, scratch2, scratch3);
914 __ Or(dst, dst, scratch2);
916 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
917 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
919 __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
920 __ Subu(dst, zero_reg, dst);
928 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
934 // Get exponent alone in scratch.
937 HeapNumber::kExponentShift,
938 HeapNumber::kExponentBits);
940 // Subtract the bias from the exponent.
941 __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
943 // src1: higher (exponent) part of the double value.
944 // src2: lower (mantissa) part of the double value.
945 // scratch: unbiased exponent.
947 // Fast cases. Check for obvious non 32-bit integer values.
948 // Negative exponent cannot yield 32-bit integers.
949 __ Branch(not_int32, lt, scratch, Operand(zero_reg));
950 // Exponent greater than 31 cannot yield 32-bit integers.
951 // Also, a positive value with an exponent equal to 31 is outside of the
952 // signed 32-bit integer range.
953 // Another way to put it is that if (exponent - signbit) > 30 then the
954 // number cannot be represented as an int32.
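// For example, 2^31 has unbiased exponent 31 and sign bit 0, so 31 - 0 > 30
// and it is rejected, while -2^31 has exponent 31 and sign bit 1, so
// 31 - 1 == 30 and it is (correctly) accepted as kMinInt.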
956 __ srl(at, src1, 31);
957 __ subu(tmp, scratch, at);
958 __ Branch(not_int32, gt, tmp, Operand(30));
959 // If bits [21:0] of the mantissa are not zero, the value is not an int32.
960 __ And(tmp, src2, 0x3fffff);
961 __ Branch(not_int32, ne, tmp, Operand(zero_reg));
963 // Otherwise the exponent needs to be big enough to shift all the non-zero
964 // bits out to the left. So the (30 - exponent) lowest bits of the 31 higher
965 // mantissa bits must be zero.
966 // Because bits [21:0] are already zero, we can instead check that the
967 // (32 - exponent) lowest bits of the 32 higher mantissa bits are zero.
969 // Get the 32 higher bits of the mantissa in dst.
972 HeapNumber::kMantissaBitsInTopWord,
973 32 - HeapNumber::kMantissaBitsInTopWord);
974 __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
975 __ or_(dst, dst, at);
977 // Create the mask and test the lower bits (of the higher bits).
979 __ subu(scratch, at, scratch);
981 __ sllv(src1, src2, scratch);
982 __ Subu(src1, src1, Operand(1));
983 __ And(src1, dst, src1);
984 __ Branch(not_int32, ne, src1, Operand(zero_reg));
988 void FloatingPointHelper::CallCCodeForDoubleOperation(
989 MacroAssembler* masm,
991 Register heap_number_result,
993 // Using core registers:
994 // a0: Left value (least significant part of mantissa).
995 // a1: Left value (sign, exponent, top of mantissa).
996 // a2: Right value (least significant part of mantissa).
997 // a3: Right value (sign, exponent, top of mantissa).
999 // Assert that heap_number_result is saved.
1000 // We currently always use s0 to pass it.
1001 ASSERT(heap_number_result.is(s0));
1003 // Push the current return address before the C call.
1005 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
1006 if (!IsMipsSoftFloatABI) {
1007 CpuFeatures::Scope scope(FPU);
1008 // We are not using MIPS FPU instructions here, and the parameters for the
1009 // runtime function call have been prepared in the a0-a3 registers, but the
1010 // function we are calling is compiled with the hard-float flag and expects
1011 // the hard-float ABI (parameters in f12/f14 registers). We need to copy the
1012 // parameters from the a0-a3 registers to the f12/f14 register pairs.
1013 __ Move(f12, a0, a1);
1014 __ Move(f14, a2, a3);
1017 AllowExternalCallThatCantCauseGC scope(masm);
1019 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
1021 // Store answer in the overwritable heap number.
1022 if (!IsMipsSoftFloatABI) {
1023 CpuFeatures::Scope scope(FPU);
1024 // Double returned in register f0.
1025 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
1027 // Double returned in registers v0 and v1.
1028 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
1029 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
1031 // Place heap_number_result in v0 and return to the pushed return address.
1033 __ Ret(USE_DELAY_SLOT);
1034 __ mov(v0, heap_number_result);
1038 bool WriteInt32ToHeapNumberStub::IsPregenerated() {
1039 // These variants are compiled ahead of time. See next method.
1040 if (the_int_.is(a1) &&
1041 the_heap_number_.is(v0) &&
1046 if (the_int_.is(a2) &&
1047 the_heap_number_.is(v0) &&
1052 // Other register combinations are generated as and when they are needed,
1053 // so it is unsafe to call them from stubs (we can't generate a stub while
1054 // we are generating a stub).
1059 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
1060 WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
1061 WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
1062 stub1.GetCode()->set_is_pregenerated(true);
1063 stub2.GetCode()->set_is_pregenerated(true);
1067 // See comment for class, this does NOT work for int32's that are in Smi range.
1068 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
1069 Label max_negative_int;
1070 // the_int_ has the answer which is a signed int32 but not a Smi.
1071 // We test for the special value that has a different exponent.
1072 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
1073 // Test sign, and save for later conditionals.
1074 __ And(sign_, the_int_, Operand(0x80000000u));
1075 __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
1077 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
1078 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
1079 uint32_t non_smi_exponent =
1080 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
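// With the usual constants (bias 1023, shift 20) this works out to
// (1023 + 30) << 20 == 0x41D00000; the sign and mantissa bits are still zero
// at this point.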
1081 __ li(scratch_, Operand(non_smi_exponent));
1082 // Set the sign bit in scratch_ if the value was negative.
1083 __ or_(scratch_, scratch_, sign_);
1084 // Subtract from 0 if the value was negative.
1085 __ subu(at, zero_reg, the_int_);
1086 __ Movn(the_int_, at, sign_);
1087 // We should be masking the implicit first digit of the mantissa away here,
1088 // but it just ends up combining harmlessly with the last digit of the
1089 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
1090 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
1091 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
1092 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1093 __ srl(at, the_int_, shift_distance);
1094 __ or_(scratch_, scratch_, at);
1095 __ sw(scratch_, FieldMemOperand(the_heap_number_,
1096 HeapNumber::kExponentOffset));
1097 __ sll(scratch_, the_int_, 32 - shift_distance);
1098 __ sw(scratch_, FieldMemOperand(the_heap_number_,
1099 HeapNumber::kMantissaOffset));
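// As a quick check of the math above: the smallest non-Smi positive int32,
// 0x40000000 (2^30), ends up with exponent word 0x41D00000 and mantissa
// word 0x00000000, i.e. the double 1073741824.0.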
1102 __ bind(&max_negative_int);
1103 // The max negative int32 is stored as a positive number in the mantissa of
1104 // a double because it uses a sign bit instead of using two's complement.
1105 // The actual mantissa bits stored are all 0 because the implicit most
1106 // significant 1 bit is not stored.
1107 non_smi_exponent += 1 << HeapNumber::kExponentShift;
1108 __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
1110 FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
1111 __ mov(scratch_, zero_reg);
1113 FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
1118 // Handle the case where the lhs and rhs are the same object.
1119 // Equality is almost reflexive (everything but NaN), so this is a test
1120 // for "identity and not NaN".
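// In JavaScript terms: NaN == NaN is false even though the operands are
// identical, whereas x < x is false for every x, so only the equality cases
// need the extra NaN test below.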
1121 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
1124 bool never_nan_nan) {
1125 Label not_identical;
1126 Label heap_number, return_equal;
1127 Register exp_mask_reg = t5;
1129 __ Branch(&not_identical, ne, a0, Operand(a1));
1131 // The two objects are identical. If we know that one of them isn't NaN then
1132 // we now know they test equal.
1133 if (cc != eq || !never_nan_nan) {
1134 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
1136 // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
1137 // so we do the second best thing - test it ourselves.
1138 // They are both equal and they are not both Smis so both of them are not
1139 // Smis. If it's not a heap number, then return equal.
1140 if (cc == less || cc == greater) {
1141 __ GetObjectType(a0, t4, t4);
1142 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
1144 __ GetObjectType(a0, t4, t4);
1145 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
1146 // Comparing JS objects with <=, >= is complicated.
1148 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
1149 // Normally here we fall through to return_equal, but undefined is
1150 // special: (undefined == undefined) == true, but
1151 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
1152 if (cc == less_equal || cc == greater_equal) {
1153 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
1154 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
1155 __ Branch(&return_equal, ne, a0, Operand(t2));
1157 // undefined <= undefined should fail.
1158 __ li(v0, Operand(GREATER));
1160 // undefined >= undefined should fail.
1161 __ li(v0, Operand(LESS));
1169 __ bind(&return_equal);
1172 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
1173 } else if (cc == greater) {
1174 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
1176 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
1180 if (cc != eq || !never_nan_nan) {
1181 // For less and greater we don't have to check for NaN since the result of
1182 // x < x is false regardless. For the others here is some code to check
1184 if (cc != lt && cc != gt) {
1185 __ bind(&heap_number);
1186 // It is a heap number, so return non-equal if it's NaN and equal if it's
1189 // The representation of NaN values has all exponent bits (52..62) set,
1190 // and not all mantissa bits (0..51) clear.
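// For example, 0x7FF0000000000000 is +Infinity (exponent bits all set,
// mantissa zero), while any value with exponent 0x7FF and a nonzero
// mantissa, e.g. 0x7FF8000000000000, is a NaN.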
1191 // Read top bits of double representation (second word of value).
1192 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1193 // Test that exponent bits are all set.
1194 __ And(t3, t2, Operand(exp_mask_reg));
1195 // If all bits not set (ne cond), then not a NaN, objects are equal.
1196 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
1198 // Shift out flag and all exponent bits, retaining only mantissa.
1199 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
1200 // Or with all low-bits of mantissa.
1201 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
1202 __ Or(v0, t3, Operand(t2));
1203 // For equal we already have the right value in v0: Return zero (equal)
1204 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
1205 // not (it's a NaN). For <= and >= we need to load v0 with the failing
1206 // value if it's a NaN.
1208 // All-zero means Infinity means equal.
1209 __ Ret(eq, v0, Operand(zero_reg));
1211 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
1213 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
1218 // No fall through here.
1221 __ bind(&not_identical);
1225 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
1228 Label* both_loaded_as_doubles,
1231 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1232 (lhs.is(a1) && rhs.is(a0)));
1235 __ JumpIfSmi(lhs, &lhs_is_smi);
1237 // Check whether the non-smi is a heap number.
1238 __ GetObjectType(lhs, t4, t4);
1240 // If lhs was not a number and rhs was a Smi then strict equality cannot
1241 // succeed. Return non-equal (lhs is already not zero).
1242 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
1245 // Smi compared non-strictly with a non-Smi non-heap-number. Call
1247 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1250 // Rhs is a smi, lhs is a number.
1251 // Convert smi rhs to double.
1252 if (CpuFeatures::IsSupported(FPU)) {
1253 CpuFeatures::Scope scope(FPU);
1254 __ sra(at, rhs, kSmiTagSize);
1256 __ cvt_d_w(f14, f14);
1257 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1259 // Load lhs to a double in a2, a3.
1260 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1261 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1263 // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
1265 ConvertToDoubleStub stub1(a1, a0, t6, t5);
1267 __ Call(stub1.GetCode());
1272 // We now have both loaded as doubles.
1273 __ jmp(both_loaded_as_doubles);
1275 __ bind(&lhs_is_smi);
1276 // Lhs is a Smi. Check whether the non-smi is a heap number.
1277 __ GetObjectType(rhs, t4, t4);
1279 // If lhs was not a number and rhs was a Smi then strict equality cannot
1280 // succeed. Return non-equal.
1281 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
1282 __ li(v0, Operand(1));
1284 // Smi compared non-strictly with a non-Smi non-heap-number. Call
1286 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1289 // Lhs is a smi, rhs is a number.
1290 // Convert smi lhs to double.
1291 if (CpuFeatures::IsSupported(FPU)) {
1292 CpuFeatures::Scope scope(FPU);
1293 __ sra(at, lhs, kSmiTagSize);
1295 __ cvt_d_w(f12, f12);
1296 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1298 // Convert lhs to a double format. t5 is scratch.
1300 ConvertToDoubleStub stub2(a3, a2, t6, t5);
1302 __ Call(stub2.GetCode());
1304 // Load rhs to a double in a1, a0.
1306 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1307 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1309 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1310 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1313 // Fall through to both_loaded_as_doubles.
1317 void EmitNanCheck(MacroAssembler* masm, Condition cc) {
1318 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
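// exp_first is true when the exponent word is stored first in memory, i.e.
// on big-endian targets; on little-endian MIPS the mantissa word comes first.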
1319 if (CpuFeatures::IsSupported(FPU)) {
1320 CpuFeatures::Scope scope(FPU);
1321 // Lhs and rhs are already loaded to f12 and f14 register pairs.
1322 __ Move(t0, t1, f14);
1323 __ Move(t2, t3, f12);
1325 // Lhs and rhs are already loaded to GP registers.
1326 __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1327 __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1328 __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1329 __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1331 Register rhs_exponent = exp_first ? t0 : t1;
1332 Register lhs_exponent = exp_first ? t2 : t3;
1333 Register rhs_mantissa = exp_first ? t1 : t0;
1334 Register lhs_mantissa = exp_first ? t3 : t2;
1335 Label one_is_nan, neither_is_nan;
1336 Label lhs_not_nan_exp_mask_is_loaded;
1338 Register exp_mask_reg = t4;
1339 __ li(exp_mask_reg, HeapNumber::kExponentMask);
1340 __ and_(t5, lhs_exponent, exp_mask_reg);
1341 __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
1343 __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1344 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1346 __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
1348 __ li(exp_mask_reg, HeapNumber::kExponentMask);
1349 __ bind(&lhs_not_nan_exp_mask_is_loaded);
1350 __ and_(t5, rhs_exponent, exp_mask_reg);
1352 __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
1354 __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1355 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1357 __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
1359 __ bind(&one_is_nan);
1360 // NaN comparisons always fail.
1361 // Load whatever we need in v0 to make the comparison fail.
1363 if (cc == lt || cc == le) {
1364 __ li(v0, Operand(GREATER));
1366 __ li(v0, Operand(LESS));
1370 __ bind(&neither_is_nan);
1374 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
1375 // f12 and f14 have the two doubles. Neither is a NaN.
1376 // Call a native function to do a comparison between two non-NaNs.
1377 // Call C routine that may not cause GC or other trouble.
1378 // We do the call and return manually because we need the argument slots to be freed.
1381 Label return_result_not_equal, return_result_equal;
1383 // Doubles are not equal unless they have the same bit pattern.
1384 // Exception: 0 and -0.
1385 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1386 if (CpuFeatures::IsSupported(FPU)) {
1387 CpuFeatures::Scope scope(FPU);
1388 // Lhs and rhs are already loaded to f12 and f14 register pairs.
1389 __ Move(t0, t1, f14);
1390 __ Move(t2, t3, f12);
1392 // Lhs and rhs are already loaded to GP registers.
1393 __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1394 __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1395 __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1396 __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1398 Register rhs_exponent = exp_first ? t0 : t1;
1399 Register lhs_exponent = exp_first ? t2 : t3;
1400 Register rhs_mantissa = exp_first ? t1 : t0;
1401 Register lhs_mantissa = exp_first ? t3 : t2;
1403 __ xor_(v0, rhs_mantissa, lhs_mantissa);
1404 __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
1406 __ subu(v0, rhs_exponent, lhs_exponent);
1407 __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
1409 __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
1410 __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
1411 __ or_(t4, rhs_exponent, lhs_exponent);
1412 __ or_(t4, t4, rhs_mantissa);
1414 __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
1416 __ bind(&return_result_equal);
1418 __ li(v0, Operand(EQUAL));
1422 __ bind(&return_result_not_equal);
1424 if (!CpuFeatures::IsSupported(FPU)) {
1426 __ PrepareCallCFunction(0, 2, t4);
1427 if (!IsMipsSoftFloatABI) {
1428 // We are not using MIPS FPU instructions here, and the parameters for the
1429 // runtime function call have been prepared in the a0-a3 registers, but the
1430 // function we are calling is compiled with the hard-float flag and expects
1431 // the hard-float ABI (parameters in f12/f14 registers). We need to copy the
1432 // parameters from the a0-a3 registers to the f12/f14 register pairs.
1433 __ Move(f12, a0, a1);
1434 __ Move(f14, a2, a3);
1437 AllowExternalCallThatCantCauseGC scope(masm);
1438 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
1440 __ pop(ra); // Because this function returns int, result is in v0.
1443 CpuFeatures::Scope scope(FPU);
1444 Label equal, less_than;
1445 __ BranchF(&equal, NULL, eq, f12, f14);
1446 __ BranchF(&less_than, NULL, lt, f12, f14);
1448 // Not equal, not less, not NaN, must be greater.
1450 __ li(v0, Operand(GREATER));
1454 __ li(v0, Operand(EQUAL));
1457 __ bind(&less_than);
1458 __ li(v0, Operand(LESS));
1464 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1467 // If either operand is a JS object or an oddball value, then they are
1468 // not equal since their pointers are different.
1469 // There is no test for undetectability in strict equality.
1470 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
1471 Label first_non_object;
1472 // Get the type of the first operand into a2 and compare it with
1473 // FIRST_SPEC_OBJECT_TYPE.
1474 __ GetObjectType(lhs, a2, a2);
1475 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1478 Label return_not_equal;
1479 __ bind(&return_not_equal);
1480 __ Ret(USE_DELAY_SLOT);
1481 __ li(v0, Operand(1));
1483 __ bind(&first_non_object);
1484 // Check for oddballs: true, false, null, undefined.
1485 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
1487 __ GetObjectType(rhs, a3, a3);
1488 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1490 // Check for oddballs: true, false, null, undefined.
1491 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
1493 // Now that we have the types we might as well check for symbol-symbol.
1494 // Ensure that no non-strings have the symbol bit set.
1495 STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
1496 STATIC_ASSERT(kSymbolTag != 0);
1497 __ And(t2, a2, Operand(a3));
1498 __ And(t0, t2, Operand(kIsSymbolMask));
1499 __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
1503 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1506 Label* both_loaded_as_doubles,
1507 Label* not_heap_numbers,
1509 __ GetObjectType(lhs, a3, a2);
1510 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
1511 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
1512 // If first was a heap number & second wasn't, go to slow case.
1513 __ Branch(slow, ne, a3, Operand(a2));
1515 // Both are heap numbers. Load them up then jump to the code we have
1517 if (CpuFeatures::IsSupported(FPU)) {
1518 CpuFeatures::Scope scope(FPU);
1519 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1520 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1522 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1523 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1525 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1526 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1528 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1529 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1532 __ jmp(both_loaded_as_doubles);
1536 // Fast negative check for symbol-to-symbol equality.
1537 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1540 Label* possible_strings,
1541 Label* not_both_strings) {
1542 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1543 (lhs.is(a1) && rhs.is(a0)));
1545 // a2 is object type of lhs.
1546 // Ensure that no non-strings have the symbol bit set.
1548 STATIC_ASSERT(kSymbolTag != 0);
1549 __ And(at, a2, Operand(kIsNotStringMask));
1550 __ Branch(&object_test, ne, at, Operand(zero_reg));
1551 __ And(at, a2, Operand(kIsSymbolMask));
1552 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1553 __ GetObjectType(rhs, a3, a3);
1554 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1555 __ And(at, a3, Operand(kIsSymbolMask));
1556 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1558 // Both are symbols. We already checked they weren't the same pointer
1559 // so they are not equal.
1560 __ Ret(USE_DELAY_SLOT);
1561 __ li(v0, Operand(1)); // Non-zero indicates not equal.
1563 __ bind(&object_test);
1564 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1565 __ GetObjectType(rhs, a2, a3);
1566 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1568 // If both objects are undetectable, they are equal. Otherwise, they
1569 // are not equal, since they are different objects and an object is not
1570 // equal to undefined.
1571 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1572 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1573 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1574 __ and_(a0, a2, a3);
1575 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1576 __ Ret(USE_DELAY_SLOT);
1577 __ xori(v0, a0, 1 << Map::kIsUndetectable);
1581 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1589 // Use of registers. Register result is used as a temporary.
1590 Register number_string_cache = result;
1591 Register mask = scratch3;
1593 // Load the number string cache.
1594 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1596 // Make the hash mask from the length of the number string cache. It
1597 // contains two elements (number and string) for each cache entry.
1598 __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
1599 // Divide length by two (length is a smi).
1600 __ sra(mask, mask, kSmiTagSize + 1);
1601 __ Addu(mask, mask, -1); // Make mask.
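// A smi n is stored as n << kSmiTagSize, so the shift above both untags the
// length and halves it; subtracting 1 then turns the entry count (which must
// be a power of two for this to work) into a bit mask for the hash.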
1603 // Calculate the entry in the number string cache. The hash value in the
1604 // number string cache for smis is just the smi value, and the hash for
1605 // doubles is the xor of the upper and lower words. See
1606 // Heap::GetNumberStringCache.
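// For example, the double 1.5 has the bit pattern 0x3FF8000000000000, so its
// hash before masking is 0x3FF80000 ^ 0x00000000 == 0x3FF80000.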
1607 Isolate* isolate = masm->isolate();
1609 Label load_result_from_cache;
1610 if (!object_is_smi) {
1611 __ JumpIfSmi(object, &is_smi);
1612 if (CpuFeatures::IsSupported(FPU)) {
1613 CpuFeatures::Scope scope(FPU);
1616 Heap::kHeapNumberMapRootIndex,
1620 STATIC_ASSERT(8 == kDoubleSize);
1623 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
1624 __ lw(scratch2, MemOperand(scratch1, kPointerSize));
1625 __ lw(scratch1, MemOperand(scratch1, 0));
1626 __ Xor(scratch1, scratch1, Operand(scratch2));
1627 __ And(scratch1, scratch1, Operand(mask));
1629 // Calculate address of entry in string cache: each entry consists
1630 // of two pointer sized fields.
1631 __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
1632 __ Addu(scratch1, number_string_cache, scratch1);
1634 Register probe = mask;
1636 FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1637 __ JumpIfSmi(probe, not_found);
1638 __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
1639 __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
1640 __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
1641 __ Branch(not_found);
1643 // Note that there is no cache check for the non-FPU case, even though
1644 // it seems there could be. May be a tiny optimization for non-FPU cores.
1646 __ Branch(not_found);
1651 Register scratch = scratch1;
1652 __ sra(scratch, object, 1); // Shift away the tag.
1653 __ And(scratch, mask, Operand(scratch));
1655 // Calculate address of entry in string cache: each entry consists
1656 // of two pointer sized fields.
1657 __ sll(scratch, scratch, kPointerSizeLog2 + 1);
1658 __ Addu(scratch, number_string_cache, scratch);
1660 // Check if the entry is the smi we are looking for.
1661 Register probe = mask;
1662 __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1663 __ Branch(not_found, ne, object, Operand(probe));
1665 // Get the result from the cache.
1666 __ bind(&load_result_from_cache);
1668 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1670 __ IncrementCounter(isolate->counters()->number_to_string_native(),
1677 void NumberToStringStub::Generate(MacroAssembler* masm) {
1680 __ lw(a1, MemOperand(sp, 0));
1682 // Generate code to lookup number in the number string cache.
1683 GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
1687 // Handle number to string in the runtime system if not found in the cache.
1688 __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
1692 // On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
1693 // On exit, v0 is 0, positive, or negative (smi) to indicate the result
1694 // of the comparison.
1695 void CompareStub::Generate(MacroAssembler* masm) {
1696 Label slow; // Call builtin.
1697 Label not_smis, both_loaded_as_doubles;
1700 if (include_smi_compare_) {
1701 Label not_two_smis, smi_done;
1703 __ JumpIfNotSmi(a2, &not_two_smis);
1706 __ Ret(USE_DELAY_SLOT);
1707 __ subu(v0, a1, a0);
1708 __ bind(&not_two_smis);
1709 } else if (FLAG_debug_code) {
1711 __ And(a2, a2, kSmiTagMask);
1712 __ Assert(ne, "CompareStub: unexpected smi operands.",
1713 a2, Operand(zero_reg));
1717 // NOTICE! This code is only reached after a smi-fast-case check, so
1718 // it is certain that at least one operand isn't a smi.
1720 // This is optimized for readability of the code and is not benchmarked for
1721 // speed or instruction count. The code is not ordered for speed or
1722 // anything like that.
1723 Label miss, user_compare;
1725 // No global compare if both operands are SMIs
1726 __ And(a2, a1, Operand(a0));
1727 __ JumpIfSmi(a2, &miss);
1730 // We need to check whether lhs and rhs are both objects; if not, we jump
1731 // out of the function. We keep the maps in t0 (lhs) and t1 (rhs) for
1732 // later use.
1733 __ GetObjectType(a0, t0, a3);
1734 __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
1736 __ GetObjectType(a1, t1, a3);
1737 __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
1739 // Check whether the UseUserObjectComparison flag is set on the map of lhs (t0).
1740 __ lbu(t0, FieldMemOperand(t0, Map::kBitField2Offset));
1741 __ And(t0, t0, Operand(1 << Map::kUseUserObjectComparison));
1742 __ Branch(&user_compare, eq, t0, Operand(1 << Map::kUseUserObjectComparison));
1745 // If the UseUserObjectComparison flag is _not_ set on the map of rhs (t1),
1746 // jump to the miss label.
1747 __ lbu(t1, FieldMemOperand(t1, Map::kBitField2Offset));
1748 __ And(t1, t1, Operand(1 << Map::kUseUserObjectComparison));
1749 __ Branch(&miss, ne, t1, Operand(1 << Map::kUseUserObjectComparison));
1751 // Invoke the runtime function here
1752 __ bind(&user_compare);
1754 __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
1756 // We exit here without doing anything
1760 // Handle the case where the objects are identical. Either returns the answer
1761 // or goes to slow. Only falls through if the objects were not identical.
1762 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1764 // If either is a Smi (we know that not both are), then they can only
1765 // be strictly equal if the other is a HeapNumber.
1766 STATIC_ASSERT(kSmiTag == 0);
1767 ASSERT_EQ(0, Smi::FromInt(0));
1768 __ And(t2, lhs_, Operand(rhs_));
1769 __ JumpIfNotSmi(t2, &not_smis, t0);
1770 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1771 // 1) Return the answer.
1772 // 2) Go to slow.
1773 // 3) Fall through to both_loaded_as_doubles.
1774 // 4) Jump to rhs_not_nan.
1775 // In cases 3 and 4 we have found out we were dealing with a number-number
1776 // comparison and the numbers have been loaded into f12 and f14 as doubles,
1777 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1778 EmitSmiNonsmiComparison(masm, lhs_, rhs_,
1779 &both_loaded_as_doubles, &slow, strict_);
1781 __ bind(&both_loaded_as_doubles);
1782 // f12, f14 are the double representations of the left hand side
1783 // and the right hand side if we have FPU. Otherwise a2, a3 represent
1784 // left hand side and a0, a1 represent right hand side.
1786 Isolate* isolate = masm->isolate();
1787 if (CpuFeatures::IsSupported(FPU)) {
1788 CpuFeatures::Scope scope(FPU);
1790 __ li(t0, Operand(LESS));
1791 __ li(t1, Operand(GREATER));
1792 __ li(t2, Operand(EQUAL));
1794 // Check if either rhs or lhs is NaN.
1795 __ BranchF(NULL, &nan, eq, f12, f14);
1797 // Check if LESS condition is satisfied. If true, move conditionally
1799 __ c(OLT, D, f12, f14);
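// The FPU compare above sets the FP condition flag; the conditional moves
// that follow then pick LESS, GREATER, or EQUAL into v0 without branching.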
1801 // Use the previous check to conditionally store into v0 the opposite
1802 // condition (GREATER). If rhs is equal to lhs, this is corrected by the next check.
1805 // Check if EQUAL condition is satisfied. If true, move conditionally
1807 __ c(EQ, D, f12, f14);
1813 // NaN comparisons always fail.
1814 // Load whatever we need in v0 to make the comparison fail.
1815 if (cc_ == lt || cc_ == le) {
1816 __ li(v0, Operand(GREATER));
1818 __ li(v0, Operand(LESS));
1822 // Checks for NaN in the doubles we have loaded. Can return the answer or
1823 // fall through if neither is a NaN. Also binds rhs_not_nan.
1824 EmitNanCheck(masm, cc_);
1826 // Compares two doubles that are not NaNs. Returns the answer.
1827 // Never falls through.
1828 EmitTwoNonNanDoubleComparison(masm, cc_);
1832 // At this point we know we are dealing with two different objects,
1833 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1835 // This returns non-equal for some object types, or falls through if it
1837 EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1840 Label check_for_symbols;
1841 Label flat_string_check;
1842 // Check for heap-number-heap-number comparison. Can jump to slow case,
1843 // or load both doubles and jump to the code that handles
1844 // that case. If the inputs are not doubles then it jumps to check_for_symbols.
1845 // In this case a2 will contain the type of lhs_.
1846 EmitCheckForTwoHeapNumbers(masm,
1849 &both_loaded_as_doubles,
1851 &flat_string_check);
1853 __ bind(&check_for_symbols);
1854 if (cc_ == eq && !strict_) {
1855 // Returns an answer for two symbols or two detectable objects.
1856 // Otherwise jumps to string case or not both strings case.
1857 // Assumes that a2 is the type of lhs_ on entry.
1858 EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1861 // Check for both being sequential ASCII strings, and inline if that is the case.
1863 __ bind(&flat_string_check);
1865 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
1867 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1869 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1876 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1884 // Never falls through to here.
1887 // Prepare for call to builtin. Push object pointers, a0 (lhs) first, a1 (rhs) second.
1889 __ Push(lhs_, rhs_);
1890 // Figure out which native to call and setup the arguments.
1891 Builtins::JavaScript native;
1893 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1895 native = Builtins::COMPARE;
1896 int ncr; // NaN compare result.
1897 if (cc_ == lt || cc_ == le) {
1900 ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
1903 __ li(a0, Operand(Smi::FromInt(ncr)));
1907 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1908 // tagged as a small integer.
1909 __ InvokeBuiltin(native, JUMP_FUNCTION);
1913 // The stub expects its argument in the tos_ register and returns its result in
1914 // it, too: zero for false, and a non-zero value for true.
1915 void ToBooleanStub::Generate(MacroAssembler* masm) {
1916 // This stub uses FPU instructions.
1917 CpuFeatures::Scope scope(FPU);
1920 const Register map = t5.is(tos_) ? t3 : t5;
1922 // undefined -> false.
1923 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
1925 // Boolean -> its value.
1926 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1927 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
1930 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
1932 if (types_.Contains(SMI)) {
1933 // Smis: 0 -> false, all other -> true
1934 __ And(at, tos_, kSmiTagMask);
1935 // tos_ contains the correct return value already
1936 __ Ret(eq, at, Operand(zero_reg));
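// Note: when at == 0, tos_ is a smi and already serves as the boolean
// result, since smi 0 is the only smi whose tagged bit pattern is zero.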
1937 } else if (types_.NeedsMap()) {
1938 // If we need a map later and have a Smi -> patch.
1939 __ JumpIfSmi(tos_, &patch);
1942 if (types_.NeedsMap()) {
1943 __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
1945 if (types_.CanBeUndetectable()) {
1946 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1947 __ And(at, at, Operand(1 << Map::kIsUndetectable));
1948 // Undetectable -> false.
1949 __ Movn(tos_, zero_reg, at);
1950 __ Ret(ne, at, Operand(zero_reg));
1954 if (types_.Contains(SPEC_OBJECT)) {
1955 // Spec object -> true.
1956 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1957 // tos_ contains the correct non-zero return value already.
1958 __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1961 if (types_.Contains(STRING)) {
1962 // String value -> false iff empty.
1963 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1965 __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
1966 __ Ret(USE_DELAY_SLOT); // the string length is OK as the return value
1967 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
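// Note: the smi-tagged string length loaded above is returned as the boolean
// value; it is zero (hence false) exactly for the empty string.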
1971 if (types_.Contains(HEAP_NUMBER)) {
1972 // Heap number -> false iff +0, -0, or NaN.
1973 Label not_heap_number;
1974 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1975 __ Branch(&not_heap_number, ne, map, Operand(at));
1976 Label zero_or_nan, number;
1977 __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1978 __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
1979 // "tos_" is a register, and contains a non zero value by default.
1980 // Hence we only need to overwrite "tos_" with zero to return false for
1981 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1982 __ bind(&zero_or_nan);
1983 __ mov(tos_, zero_reg);
1986 __ bind(&not_heap_number);
1990 GenerateTypeTransition(masm);
1994 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
1996 Heap::RootListIndex value,
1998 if (types_.Contains(type)) {
1999 // If we see an expected oddball, return its ToBoolean value tos_.
2000 __ LoadRoot(at, value);
2001 __ Subu(at, at, tos_); // This is a check for equality for the movz below.
2002 // The value of a root is never NULL, so we can avoid loading a non-null
2003 // value into tos_ when we want to return 'true'.
2005 __ Movz(tos_, zero_reg, at);
2007 __ Ret(eq, at, Operand(zero_reg));
2012 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
2014 __ li(a2, Operand(Smi::FromInt(tos_.code())));
2015 __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
2016 __ Push(a3, a2, a1);
2017 // Patch the caller to an appropriate specialized stub and return the
2018 // operation result to the caller of the stub.
2019 __ TailCallExternalReference(
2020 ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
2026 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
2027 // We don't allow a GC during a store buffer overflow so there is no need to
2028 // store the registers in any particular way, but we do have to store and restore them.
2030 __ MultiPush(kJSCallerSaved | ra.bit());
2031 if (save_doubles_ == kSaveFPRegs) {
2032 CpuFeatures::Scope scope(FPU);
2033 __ MultiPushFPU(kCallerSavedFPU);
2035 const int argument_count = 1;
2036 const int fp_argument_count = 0;
2037 const Register scratch = a1;
2039 AllowExternalCallThatCantCauseGC scope(masm);
2040 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
2041 __ li(a0, Operand(ExternalReference::isolate_address()));
2043 ExternalReference::store_buffer_overflow_function(masm->isolate()),
2045 if (save_doubles_ == kSaveFPRegs) {
2046 CpuFeatures::Scope scope(FPU);
2047 __ MultiPopFPU(kCallerSavedFPU);
2050 __ MultiPop(kJSCallerSaved | ra.bit());
2055 void UnaryOpStub::PrintName(StringStream* stream) {
2056 const char* op_name = Token::Name(op_);
2057 const char* overwrite_name = NULL; // Make g++ happy.
2059 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
2060 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
2062 stream->Add("UnaryOpStub_%s_%s_%s",
2065 UnaryOpIC::GetName(operand_type_));
2069 // TODO(svenpanne): Use virtual functions instead of switch.
2070 void UnaryOpStub::Generate(MacroAssembler* masm) {
2071 switch (operand_type_) {
2072 case UnaryOpIC::UNINITIALIZED:
2073 GenerateTypeTransition(masm);
2075 case UnaryOpIC::SMI:
2076 GenerateSmiStub(masm);
2078 case UnaryOpIC::HEAP_NUMBER:
2079 GenerateHeapNumberStub(masm);
2081 case UnaryOpIC::GENERIC:
2082 GenerateGenericStub(masm);
2088 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2089 // Argument is in a0 and v0 at this point, so we can overwrite a0.
2090 __ li(a2, Operand(Smi::FromInt(op_)));
2091 __ li(a1, Operand(Smi::FromInt(mode_)));
2092 __ li(a0, Operand(Smi::FromInt(operand_type_)));
2093 __ Push(v0, a2, a1, a0);
2095 __ TailCallExternalReference(
2096 ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
2100 // TODO(svenpanne): Use virtual functions instead of switch.
2101 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2104 GenerateSmiStubSub(masm);
2106 case Token::BIT_NOT:
2107 GenerateSmiStubBitNot(masm);
2115 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
2116 Label non_smi, slow;
2117 GenerateSmiCodeSub(masm, &non_smi, &slow);
2120 GenerateTypeTransition(masm);
2124 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
2126 GenerateSmiCodeBitNot(masm, &non_smi);
2128 GenerateTypeTransition(masm);
2132 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2135 __ JumpIfNotSmi(a0, non_smi);
2137 // The result of negating zero or the smallest negative smi is not a smi.
2138 __ And(t0, a0, ~0x80000000);
2139 __ Branch(slow, eq, t0, Operand(zero_reg));
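// Note: t0 is zero here only for the tagged values 0 (smi zero) and
// 0x80000000 (the smallest negative smi); negating either does not yield a
// valid smi, which is why both go to the slow path above.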
2141 // Return '0 - value'.
2142 __ Ret(USE_DELAY_SLOT);
2143 __ subu(v0, zero_reg, a0);
2147 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2149 __ JumpIfNotSmi(a0, non_smi);
2151 // Flip bits and revert inverted smi-tag.
2153 __ And(v0, v0, ~kSmiTagMask);
2158 // TODO(svenpanne): Use virtual functions instead of switch.
2159 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2162 GenerateHeapNumberStubSub(masm);
2164 case Token::BIT_NOT:
2165 GenerateHeapNumberStubBitNot(masm);
2173 void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
2174 Label non_smi, slow, call_builtin;
2175 GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
2177 GenerateHeapNumberCodeSub(masm, &slow);
2179 GenerateTypeTransition(masm);
2180 __ bind(&call_builtin);
2181 GenerateGenericCodeFallback(masm);
2185 void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
2186 Label non_smi, slow;
2187 GenerateSmiCodeBitNot(masm, &non_smi);
2189 GenerateHeapNumberCodeBitNot(masm, &slow);
2191 GenerateTypeTransition(masm);
2195 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
2197 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2198 // a0 is a heap number. Get a new heap number in a1.
2199 if (mode_ == UNARY_OVERWRITE) {
2200 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2201 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2202 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2204 Label slow_allocate_heapnumber, heapnumber_allocated;
2205 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
2206 __ jmp(&heapnumber_allocated);
2208 __ bind(&slow_allocate_heapnumber);
2210 FrameScope scope(masm, StackFrame::INTERNAL);
2212 __ CallRuntime(Runtime::kNumberAlloc, 0);
2217 __ bind(&heapnumber_allocated);
2218 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
2219 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2220 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
2221 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2222 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
2229 void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2230 MacroAssembler* masm,
2234 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2235 // Convert the heap number in a0 to an untagged integer in a1.
2236 __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
2238 // Do the bitwise operation and check if the result fits in a smi.
2241 __ Addu(a2, a1, Operand(0x40000000));
2242 __ Branch(&try_float, lt, a2, Operand(zero_reg));
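// Note: adding 0x40000000 overflows into the sign bit exactly when the
// untagged value lies outside the smi range [-2^30, 2^30 - 1], so a negative
// sum above means the result cannot be smi-tagged.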
2244 // Tag the result as a smi and we're done.
2248 // Try to store the result in a heap number.
2249 __ bind(&try_float);
2250 if (mode_ == UNARY_NO_OVERWRITE) {
2251 Label slow_allocate_heapnumber, heapnumber_allocated;
2252 // Allocate a new heap number without zapping v0, which we need if it fails.
2253 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
2254 __ jmp(&heapnumber_allocated);
2256 __ bind(&slow_allocate_heapnumber);
2258 FrameScope scope(masm, StackFrame::INTERNAL);
2259 __ push(v0); // Push the heap number, not the untagged int32.
2260 __ CallRuntime(Runtime::kNumberAlloc, 0);
2261 __ mov(a2, v0); // Move the new heap number into a2.
2262 // Get the heap number into v0, now that the new heap number is in a2.
2266 // Convert the heap number in v0 to an untagged integer in a1.
2267 // This can't go to the slow case because it's the same number we have
2268 // already converted once before.
2269 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2270 // Negate the result.
2273 __ bind(&heapnumber_allocated);
2274 __ mov(v0, a2); // Move newly allocated heap number to v0.
2277 if (CpuFeatures::IsSupported(FPU)) {
2278 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
2279 CpuFeatures::Scope scope(FPU);
2282 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2285 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2286 // have to set up a frame.
2287 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2288 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2291 __ bind(&impossible);
2292 if (FLAG_debug_code) {
2293 __ stop("Incorrect assumption in bit-not stub");
2298 // TODO(svenpanne): Use virtual functions instead of switch.
2299 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2302 GenerateGenericStubSub(masm);
2304 case Token::BIT_NOT:
2305 GenerateGenericStubBitNot(masm);
2313 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2314 Label non_smi, slow;
2315 GenerateSmiCodeSub(masm, &non_smi, &slow);
2317 GenerateHeapNumberCodeSub(masm, &slow);
2319 GenerateGenericCodeFallback(masm);
2323 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2324 Label non_smi, slow;
2325 GenerateSmiCodeBitNot(masm, &non_smi);
2327 GenerateHeapNumberCodeBitNot(masm, &slow);
2329 GenerateGenericCodeFallback(masm);
2333 void UnaryOpStub::GenerateGenericCodeFallback(
2334 MacroAssembler* masm) {
2335 // Handle the slow case by jumping to the JavaScript builtin.
2339 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2341 case Token::BIT_NOT:
2342 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2350 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2355 __ li(a2, Operand(Smi::FromInt(MinorKey())));
2356 __ li(a1, Operand(Smi::FromInt(op_)));
2357 __ li(a0, Operand(Smi::FromInt(operands_type_)));
2358 __ Push(a2, a1, a0);
2360 __ TailCallExternalReference(
2361 ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2368 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2369 MacroAssembler* masm) {
2374 void BinaryOpStub::Generate(MacroAssembler* masm) {
2375 // Explicitly allow generation of nested stubs. It is safe here because
2376 // generation code does not use any raw pointers.
2377 AllowStubCallsScope allow_stub_calls(masm, true);
2378 switch (operands_type_) {
2379 case BinaryOpIC::UNINITIALIZED:
2380 GenerateTypeTransition(masm);
2382 case BinaryOpIC::SMI:
2383 GenerateSmiStub(masm);
2385 case BinaryOpIC::INT32:
2386 GenerateInt32Stub(masm);
2388 case BinaryOpIC::HEAP_NUMBER:
2389 GenerateHeapNumberStub(masm);
2391 case BinaryOpIC::ODDBALL:
2392 GenerateOddballStub(masm);
2394 case BinaryOpIC::BOTH_STRING:
2395 GenerateBothStringStub(masm);
2397 case BinaryOpIC::STRING:
2398 GenerateStringStub(masm);
2400 case BinaryOpIC::GENERIC:
2401 GenerateGeneric(masm);
2409 void BinaryOpStub::PrintName(StringStream* stream) {
2410 const char* op_name = Token::Name(op_);
2411 const char* overwrite_name;
2413 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2414 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2415 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2416 default: overwrite_name = "UnknownOverwrite"; break;
2418 stream->Add("BinaryOpStub_%s_%s_%s",
2421 BinaryOpIC::GetName(operands_type_));
2426 void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2428 Register right = a0;
2430 Register scratch1 = t0;
2431 Register scratch2 = t1;
2433 ASSERT(right.is(a0));
2434 STATIC_ASSERT(kSmiTag == 0);
2436 Label not_smi_result;
2439 __ AdduAndCheckForOverflow(v0, left, right, scratch1);
2440 __ RetOnNoOverflow(scratch1);
2441 // No need to revert anything - right and left are intact.
2444 __ SubuAndCheckForOverflow(v0, left, right, scratch1);
2445 __ RetOnNoOverflow(scratch1);
2446 // No need to revert anything - right and left are intact.
2449 // Remove tag from one of the operands. This way the multiplication result
2450 // will be a smi if it fits the smi range.
2451 __ SmiUntag(scratch1, right);
2452 // Do multiplication.
2453 // lo = lower 32 bits of scratch1 * left.
2454 // hi = higher 32 bits of scratch1 * left.
2455 __ Mult(left, scratch1);
2456 // Check for overflowing the smi range - no overflow if higher 33 bits of
2457 // the result are identical.
2460 __ sra(scratch1, scratch1, 31);
2461 __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
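// Note: the check above exploits the fact that a 64-bit product fits in 32
// signed bits exactly when its upper 33 bits are all copies of the sign bit,
// i.e. when the high word equals the arithmetic sign-extension of the low word.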
2462 // Go slow on zero result to handle -0.
2464 __ Ret(ne, v0, Operand(zero_reg));
2465 // We need -0 if we were multiplying a negative number by 0 to get 0.
2466 // We know one of them was zero.
2467 __ Addu(scratch2, right, left);
2469 // ARM uses the 'pl' condition, which is 'ge'.
2470 // Negating it results in 'lt'.
2471 __ Branch(&skip, lt, scratch2, Operand(zero_reg));
2472 ASSERT(Smi::FromInt(0) == 0);
2473 __ Ret(USE_DELAY_SLOT);
2474 __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
2476 // We fall through here if we multiplied a negative number with 0, because
2477 // that would mean we should produce -0.
2482 __ SmiUntag(scratch2, right);
2483 __ SmiUntag(scratch1, left);
2484 __ Div(scratch1, scratch2);
2485 // A minor optimization: div may be calculated asynchronously, so we check
2486 // for division by zero before getting the result.
2487 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2488 // If the result is 0, we need to make sure the divisor (right) is
2489 // positive, otherwise it is a -0 case.
2490 // Quotient is in 'lo', remainder is in 'hi'.
2491 // Check for no remainder first.
2493 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2495 __ Branch(&done, ne, scratch1, Operand(zero_reg));
2496 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2498 // Check that the signed result fits in a Smi.
2499 __ Addu(scratch2, scratch1, Operand(0x40000000));
2500 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2501 __ SmiTag(v0, scratch1);
2507 __ SmiUntag(scratch2, right);
2508 __ SmiUntag(scratch1, left);
2509 __ Div(scratch1, scratch2);
2510 // A minor optimization: div may be calculated asynchronously, so we check
2511 // for division by 0 before calling mfhi.
2512 // Check for zero on the right hand side.
2513 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2514 // If the result is 0, we need to make sure the dividend (left) is
2515 // positive (or 0), otherwise it is a -0 case.
2516 // Remainder is in 'hi'.
2518 __ Branch(&done, ne, scratch2, Operand(zero_reg));
2519 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2521 // Check that the signed result fits in a Smi.
2522 __ Addu(scratch1, scratch2, Operand(0x40000000));
2523 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2524 __ SmiTag(v0, scratch2);
2529 __ Ret(USE_DELAY_SLOT);
2530 __ or_(v0, left, right);
2532 case Token::BIT_AND:
2533 __ Ret(USE_DELAY_SLOT);
2534 __ and_(v0, left, right);
2536 case Token::BIT_XOR:
2537 __ Ret(USE_DELAY_SLOT);
2538 __ xor_(v0, left, right);
2541 // Remove tags from right operand.
2542 __ GetLeastBitsFromSmi(scratch1, right, 5);
2543 __ srav(scratch1, left, scratch1);
2545 __ And(v0, scratch1, ~kSmiTagMask);
2549 // Remove tags from operands. We can't do this on a 31 bit number
2550 // because then the 0s get shifted into bit 30 instead of bit 31.
2551 __ SmiUntag(scratch1, left);
2552 __ GetLeastBitsFromSmi(scratch2, right, 5);
2553 __ srlv(v0, scratch1, scratch2);
2554 // Unsigned shift is not allowed to produce a negative number, so
2555 // check the sign bit and the sign bit after Smi tagging.
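// The 0xc0000000 mask below covers bit 31 (the sign of the 32-bit result)
// and bit 30 (which would become the sign bit after smi tagging).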
2556 __ And(scratch1, v0, Operand(0xc0000000));
2557 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2563 // Remove tags from operands.
2564 __ SmiUntag(scratch1, left);
2565 __ GetLeastBitsFromSmi(scratch2, right, 5);
2566 __ sllv(scratch1, scratch1, scratch2);
2567 // Check that the signed result fits in a Smi.
2568 __ Addu(scratch2, scratch1, Operand(0x40000000));
2569 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2570 __ SmiTag(v0, scratch1);
2576 __ bind(&not_smi_result);
2580 void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2583 Label* gc_required) {
2585 Register right = a0;
2586 Register scratch1 = t3;
2587 Register scratch2 = t5;
2588 Register scratch3 = t0;
2590 ASSERT(smi_operands || (not_numbers != NULL));
2591 if (smi_operands && FLAG_debug_code) {
2592 __ AbortIfNotSmi(left);
2593 __ AbortIfNotSmi(right);
2596 Register heap_number_map = t2;
2597 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2605 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
2606 // depending on whether FPU is available or not.
2607 FloatingPointHelper::Destination destination =
2608 CpuFeatures::IsSupported(FPU) &&
2610 FloatingPointHelper::kFPURegisters :
2611 FloatingPointHelper::kCoreRegisters;
2613 // Allocate new heap number for result.
2614 Register result = s0;
2615 GenerateHeapResultAllocation(
2616 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2618 // Load the operands.
2620 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2622 FloatingPointHelper::LoadOperands(masm,
2630 // Calculate the result.
2631 if (destination == FloatingPointHelper::kFPURegisters) {
2632 // Using FPU registers:
2634 // f14: Right value.
2635 CpuFeatures::Scope scope(FPU);
2638 __ add_d(f10, f12, f14);
2641 __ sub_d(f10, f12, f14);
2644 __ mul_d(f10, f12, f14);
2647 __ div_d(f10, f12, f14);
2653 // ARM uses a workaround here because of the unaligned HeapNumber
2654 // kValueOffset. On MIPS this workaround is built into sdc1 so
2655 // there's no point in generating even more instructions.
2656 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
2657 __ Ret(USE_DELAY_SLOT);
2660 // Call the C function to handle the double operation.
2661 FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2665 if (FLAG_debug_code) {
2666 __ stop("Unreachable code.");
2672 case Token::BIT_XOR:
2673 case Token::BIT_AND:
2678 __ SmiUntag(a3, left);
2679 __ SmiUntag(a2, right);
2681 // Convert operands to 32-bit integers. Right in a2 and left in a3.
2682 FloatingPointHelper::ConvertNumberToInt32(masm,
2691 FloatingPointHelper::ConvertNumberToInt32(masm,
2701 Label result_not_a_smi;
2704 __ Or(a2, a3, Operand(a2));
2706 case Token::BIT_XOR:
2707 __ Xor(a2, a3, Operand(a2));
2709 case Token::BIT_AND:
2710 __ And(a2, a3, Operand(a2));
2713 // Use only the 5 least significant bits of the shift count.
2714 __ GetLeastBitsFromInt32(a2, a2, 5);
2715 __ srav(a2, a3, a2);
2718 // Use only the 5 least significant bits of the shift count.
2719 __ GetLeastBitsFromInt32(a2, a2, 5);
2720 __ srlv(a2, a3, a2);
2721 // SHR is special because it is required to produce a positive answer.
2722 // The code below for writing into heap numbers isn't capable of
2723 // writing the register as an unsigned int so we go to slow case if we
2725 if (CpuFeatures::IsSupported(FPU)) {
2726 __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
2728 __ Branch(not_numbers, lt, a2, Operand(zero_reg));
2732 // Use only the 5 least significant bits of the shift count.
2733 __ GetLeastBitsFromInt32(a2, a2, 5);
2734 __ sllv(a2, a3, a2);
2739 // Check that the *signed* result fits in a smi.
2740 __ Addu(a3, a2, Operand(0x40000000));
2741 __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
2745 // Allocate new heap number for result.
2746 __ bind(&result_not_a_smi);
2747 Register result = t1;
2749 __ AllocateHeapNumber(
2750 result, scratch1, scratch2, heap_number_map, gc_required);
2752 GenerateHeapResultAllocation(
2753 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2756 // a2: Answer as signed int32.
2757 // t1: Heap number to write answer into.
2759 // Nothing can go wrong now, so move the heap number to v0, which is the return register.
2763 if (CpuFeatures::IsSupported(FPU)) {
2764 // Convert the int32 in a2 to the heap number in a0. As
2765 // mentioned above SHR needs to always produce a positive result.
2766 CpuFeatures::Scope scope(FPU);
2768 if (op_ == Token::SHR) {
2769 __ Cvt_d_uw(f0, f0, f22);
2773 // ARM uses a workaround here because of the unaligned HeapNumber
2774 // kValueOffset. On MIPS this workaround is built into sdc1 so
2775 // there's no point in generating even more instructions.
2776 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2779 // Tail call that writes the int32 in a2 to the heap number in v0, using
2780 // a3 and a0 as scratch. v0 is preserved and returned.
2781 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
2782 __ TailCallStub(&stub);
2792 // Generate the smi code. If the operation on smis is successful, a return is
2793 // generated. If the result is not a smi and heap number allocation is not
2794 // requested, the code falls through. If number allocation is requested but a
2795 // heap number cannot be allocated, the code jumps to the label gc_required.
2796 void BinaryOpStub::GenerateSmiCode(
2797 MacroAssembler* masm,
2800 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2804 Register right = a0;
2805 Register scratch1 = t3;
2807 // Perform combined smi check on both operands.
2808 __ Or(scratch1, left, Operand(right));
2809 STATIC_ASSERT(kSmiTag == 0);
2810 __ JumpIfNotSmi(scratch1, &not_smis);
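// Note: since kSmiTag is 0 and occupies the low bit, the OR of the operands
// has its low bit clear only if both operands are smis, so one test suffices.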
2812 // If the smi-smi operation results in a smi, a return is generated.
2813 GenerateSmiSmiOperation(masm);
2815 // If heap number results are possible, generate the result in an allocated heap number.
2817 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2818 GenerateFPOperation(masm, true, use_runtime, gc_required);
2824 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2825 Label not_smis, call_runtime;
2827 if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2828 result_type_ == BinaryOpIC::SMI) {
2829 // Only allow smi results.
2830 GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2832 // Allow heap number result and don't make a transition if a heap number
2833 // cannot be allocated.
2834 GenerateSmiCode(masm,
2837 ALLOW_HEAPNUMBER_RESULTS);
2840 // Code falls through if the result is not returned as either a smi or heap number.
2842 GenerateTypeTransition(masm);
2844 __ bind(&call_runtime);
2845 GenerateCallRuntime(masm);
2849 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2850 ASSERT(operands_type_ == BinaryOpIC::STRING);
2851 // Try to add arguments as strings; otherwise, transition to the generic BinaryOpIC type.
2853 GenerateAddStrings(masm);
2854 GenerateTypeTransition(masm);
2858 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2860 ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2861 ASSERT(op_ == Token::ADD);
2862 // If both arguments are strings, call the string add stub.
2863 // Otherwise, do a transition.
2865 // Registers containing left and right operands respectively.
2867 Register right = a0;
2869 // Test if left operand is a string.
2870 __ JumpIfSmi(left, &call_runtime);
2871 __ GetObjectType(left, a2, a2);
2872 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2874 // Test if right operand is a string.
2875 __ JumpIfSmi(right, &call_runtime);
2876 __ GetObjectType(right, a2, a2);
2877 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2879 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2880 GenerateRegisterArgsPush(masm);
2881 __ TailCallStub(&string_add_stub);
2883 __ bind(&call_runtime);
2884 GenerateTypeTransition(masm);
2888 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2889 ASSERT(operands_type_ == BinaryOpIC::INT32);
2892 Register right = a0;
2893 Register scratch1 = t3;
2894 Register scratch2 = t5;
2895 FPURegister double_scratch = f0;
2896 FPURegister single_scratch = f6;
2898 Register heap_number_result = no_reg;
2899 Register heap_number_map = t2;
2900 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2903 // Labels for type transition, used for wrong input or output types.
2904 // Both labels are currently bound to the same position. We use two
2905 // different labels to distinguish the cause leading to the type transition.
2908 // Smi-smi fast case.
2910 __ Or(scratch1, left, right);
2911 __ JumpIfNotSmi(scratch1, &skip);
2912 GenerateSmiSmiOperation(masm);
2913 // Fall through if the result is not a smi.
2922 // Load both operands and check that they are 32-bit integer.
2923 // Jump to type transition if they are not. The registers a0 and a1 (right
2924 // and left) are preserved for the runtime call.
2925 FloatingPointHelper::Destination destination =
2926 (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
2927 ? FloatingPointHelper::kFPURegisters
2928 : FloatingPointHelper::kCoreRegisters;
2930 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2941 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2953 if (destination == FloatingPointHelper::kFPURegisters) {
2954 CpuFeatures::Scope scope(FPU);
2955 Label return_heap_number;
2958 __ add_d(f10, f12, f14);
2961 __ sub_d(f10, f12, f14);
2964 __ mul_d(f10, f12, f14);
2967 __ div_d(f10, f12, f14);
2973 if (op_ != Token::DIV) {
2974 // These operations produce an integer result.
2975 // Try to return a smi if we can.
2976 // Otherwise return a heap number if allowed, or jump to type
2979 Register except_flag = scratch2;
2980 __ EmitFPUTruncate(kRoundToZero,
2986 if (result_type_ <= BinaryOpIC::INT32) {
2987 // If except_flag != 0, result does not fit in a 32-bit integer.
2988 __ Branch(&transition, ne, except_flag, Operand(zero_reg));
2991 // Check if the result fits in a smi.
2992 __ mfc1(scratch1, single_scratch);
2993 __ Addu(scratch2, scratch1, Operand(0x40000000));
2994 // If not try to return a heap number.
2995 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
2996 // Check for minus zero. Return heap number for minus zero.
2998 __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
2999 __ mfc1(scratch2, f11);
3000 __ And(scratch2, scratch2, HeapNumber::kSignMask);
3001 __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
3004 // Tag the result and return.
3005 __ SmiTag(v0, scratch1);
3008 // DIV just falls through to allocating a heap number.
3011 __ bind(&return_heap_number);
3012 // Return a heap number, or fall through to type transition or runtime
3013 // call if we can't.
3014 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
3015 : BinaryOpIC::INT32)) {
3016 // We are using FPU registers so s0 is available.
3017 heap_number_result = s0;
3018 GenerateHeapResultAllocation(masm,
3024 __ mov(v0, heap_number_result);
3025 __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
3029 // A DIV operation expecting an integer result falls through
3030 // to type transition.
3033 // We preserved a0 and a1 to be able to call runtime.
3034 // Save the left value on the stack.
3037 Label pop_and_call_runtime;
3039 // Allocate a heap number to store the result.
3040 heap_number_result = s0;
3041 GenerateHeapResultAllocation(masm,
3046 &pop_and_call_runtime);
3048 // Load the left value from the value saved on the stack.
3051 // Call the C function to handle the double operation.
3052 FloatingPointHelper::CallCCodeForDoubleOperation(
3053 masm, op_, heap_number_result, scratch1);
3054 if (FLAG_debug_code) {
3055 __ stop("Unreachable code.");
3058 __ bind(&pop_and_call_runtime);
3060 __ Branch(&call_runtime);
3067 case Token::BIT_XOR:
3068 case Token::BIT_AND:
3072 Label return_heap_number;
3073 Register scratch3 = t1;
3074 // Convert operands to 32-bit integers. Right in a2 and left in a3. The
3075 // registers a0 and a1 (right and left) are preserved for the runtime call.
3077 FloatingPointHelper::LoadNumberAsInt32(masm,
3086 FloatingPointHelper::LoadNumberAsInt32(masm,
3096 // The ECMA-262 standard specifies that, for shift operations, only the
3097 // 5 least significant bits of the shift value should be used.
3100 __ Or(a2, a3, Operand(a2));
3102 case Token::BIT_XOR:
3103 __ Xor(a2, a3, Operand(a2));
3105 case Token::BIT_AND:
3106 __ And(a2, a3, Operand(a2));
3109 __ And(a2, a2, Operand(0x1f));
3110 __ srav(a2, a3, a2);
3113 __ And(a2, a2, Operand(0x1f));
3114 __ srlv(a2, a3, a2);
3115 // SHR is special because it is required to produce a positive answer.
3116 // We only get a negative result if the shift value (a2) is 0.
3117 // This result cannot be represented as a signed 32-bit integer, so try
3118 // to return a heap number if we can.
3119 // The non-FPU code does not support this special case, so jump to
3120 // runtime if we don't support it.
3121 if (CpuFeatures::IsSupported(FPU)) {
3122 __ Branch((result_type_ <= BinaryOpIC::INT32)
3124 : &return_heap_number,
3129 __ Branch((result_type_ <= BinaryOpIC::INT32)
3138 __ And(a2, a2, Operand(0x1f));
3139 __ sllv(a2, a3, a2);
3145 // Check if the result fits in a smi.
3146 __ Addu(scratch1, a2, Operand(0x40000000));
3147 // If not try to return a heap number. (We know the result is an int32.)
3148 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
3149 // Tag the result and return.
3153 __ bind(&return_heap_number);
3154 heap_number_result = t1;
3155 GenerateHeapResultAllocation(masm,
3162 if (CpuFeatures::IsSupported(FPU)) {
3163 CpuFeatures::Scope scope(FPU);
3165 if (op_ != Token::SHR) {
3166 // Convert the result to a floating point value.
3167 __ mtc1(a2, double_scratch);
3168 __ cvt_d_w(double_scratch, double_scratch);
3170 // The result must be interpreted as an unsigned 32-bit integer.
3171 __ mtc1(a2, double_scratch);
3172 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
3175 // Store the result.
3176 __ mov(v0, heap_number_result);
3177 __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
3180 // Tail call that writes the int32 in a2 to the heap number in v0, using
3181 // a3 and a0 as scratch. v0 is preserved and returned.
3183 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
3184 __ TailCallStub(&stub);
3194 // We never expect DIV to yield an integer result, so we always generate
3195 // type transition code for DIV operations expecting an integer result: the
3196 // code will fall through to this type transition.
3197 if (transition.is_linked() ||
3198 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
3199 __ bind(&transition);
3200 GenerateTypeTransition(masm);
3203 __ bind(&call_runtime);
3204 GenerateCallRuntime(masm);
3208 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3211 if (op_ == Token::ADD) {
3212 // Handle string addition here, because it is the only operation
3213 // that does not do a ToNumber conversion on the operands.
3214 GenerateAddStrings(masm);
3217 // Convert oddball arguments to numbers.
3219 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3220 __ Branch(&check, ne, a1, Operand(t0));
3221 if (Token::IsBitOp(op_)) {
3222 __ li(a1, Operand(Smi::FromInt(0)));
3224 __ LoadRoot(a1, Heap::kNanValueRootIndex);
3228 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3229 __ Branch(&done, ne, a0, Operand(t0));
3230 if (Token::IsBitOp(op_)) {
3231 __ li(a0, Operand(Smi::FromInt(0)));
3233 __ LoadRoot(a0, Heap::kNanValueRootIndex);
3237 GenerateHeapNumberStub(masm);
3241 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3243 GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3245 __ bind(&call_runtime);
3246 GenerateCallRuntime(masm);
3250 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3251 Label call_runtime, call_string_add_or_runtime;
3253 GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3255 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3257 __ bind(&call_string_add_or_runtime);
3258 if (op_ == Token::ADD) {
3259 GenerateAddStrings(masm);
3262 __ bind(&call_runtime);
3263 GenerateCallRuntime(masm);
3267 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3268 ASSERT(op_ == Token::ADD);
3269 Label left_not_string, call_runtime;
3272 Register right = a0;
3274 // Check if left argument is a string.
3275 __ JumpIfSmi(left, &left_not_string);
3276 __ GetObjectType(left, a2, a2);
3277 __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3279 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3280 GenerateRegisterArgsPush(masm);
3281 __ TailCallStub(&string_add_left_stub);
3283 // Left operand is not a string, test right.
3284 __ bind(&left_not_string);
3285 __ JumpIfSmi(right, &call_runtime);
3286 __ GetObjectType(right, a2, a2);
3287 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3289 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3290 GenerateRegisterArgsPush(masm);
3291 __ TailCallStub(&string_add_right_stub);
3293 // At least one argument is not a string.
3294 __ bind(&call_runtime);
3298 void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3299 GenerateRegisterArgsPush(masm);
3302 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3305 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3308 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3311 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3314 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3317 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3319 case Token::BIT_AND:
3320 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3322 case Token::BIT_XOR:
3323 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3326 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3329 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3332 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3340 void BinaryOpStub::GenerateHeapResultAllocation(
3341 MacroAssembler* masm,
3343 Register heap_number_map,
3346 Label* gc_required) {
3348 // The code below will clobber result if allocation fails. To keep both
3349 // arguments intact for the runtime call, result cannot be one of them.
3350 ASSERT(!result.is(a0) && !result.is(a1));
3352 if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3353 Label skip_allocation, allocated;
3354 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
3355 // If the overwritable operand is already an object, we skip the
3356 // allocation of a heap number.
3357 __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3358 // Allocate a heap number for the result.
3359 __ AllocateHeapNumber(
3360 result, scratch1, scratch2, heap_number_map, gc_required);
3361 __ Branch(&allocated);
3362 __ bind(&skip_allocation);
3363 // Use object holding the overwritable operand for result.
3364 __ mov(result, overwritable_operand);
3365 __ bind(&allocated);
3367 ASSERT(mode_ == NO_OVERWRITE);
3368 __ AllocateHeapNumber(
3369 result, scratch1, scratch2, heap_number_map, gc_required);
3374 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3380 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3381 // Untagged case: double input in f4, double result goes into f4.
3383 // Tagged case: tagged input on top of stack and in a0,
3384 // tagged result (heap number) goes into v0.
3386 Label input_not_smi;
3389 Label invalid_cache;
3390 const Register scratch0 = t5;
3391 const Register scratch1 = t3;
3392 const Register cache_entry = a0;
3393 const bool tagged = (argument_type_ == TAGGED);
3395 if (CpuFeatures::IsSupported(FPU)) {
3396 CpuFeatures::Scope scope(FPU);
3399 // Argument is a number and is on stack and in a0.
3400 // Load argument and check if it is a smi.
3401 __ JumpIfNotSmi(a0, &input_not_smi);
3403 // Input is a smi. Convert to double and load the low and high words
3404 // of the double into a2, a3.
3405 __ sra(t0, a0, kSmiTagSize);
3408 __ Move(a2, a3, f4);
3411 __ bind(&input_not_smi);
3412 // Check if input is a HeapNumber.
3415 Heap::kHeapNumberMapRootIndex,
3418 // Input is a HeapNumber. Store the
3419 // low and high words into a2, a3.
3420 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
3421 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
3423 // Input is untagged double in f4. Output goes to f4.
3424 __ Move(a2, a3, f4);
3427 // a2 = low 32 bits of double value.
3428 // a3 = high 32 bits of double value.
3429 // Compute hash (the shifts are arithmetic):
3430 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3436 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3437 __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
3439 // a2 = low 32 bits of double value.
3440 // a3 = high 32 bits of double value.
3441 // a1 = TranscendentalCache::hash(double value).
3442 __ li(cache_entry, Operand(
3443 ExternalReference::transcendental_cache_array_address(
3445 // a0 points to cache array.
3446 __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3447 Isolate::Current()->transcendental_cache()->caches_[0])));
3448 // a0 points to the cache for the type type_.
3449 // If NULL, the cache hasn't been initialized yet, so go through runtime.
3450 __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
3453 // Check that the layout of cache elements match expectations.
3454 { TranscendentalCache::SubCache::Element test_elem[2];
3455 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3456 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3457 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3458 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3459 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3460 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
3461 CHECK_EQ(0, elem_in0 - elem_start);
3462 CHECK_EQ(kIntSize, elem_in1 - elem_start);
3463 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3467 // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
3469 __ Addu(a1, a1, t0);
3471 __ Addu(cache_entry, cache_entry, t0);
3473 // Check if cache matches: Double value is stored in uint32_t[2] array.
3474 __ lw(t0, MemOperand(cache_entry, 0));
3475 __ lw(t1, MemOperand(cache_entry, 4));
3476 __ lw(t2, MemOperand(cache_entry, 8));
3477 __ Branch(&calculate, ne, a2, Operand(t0));
3478 __ Branch(&calculate, ne, a3, Operand(t1));
3479 // Cache hit. Load result, cleanup and return.
3480 Counters* counters = masm->isolate()->counters();
3481 __ IncrementCounter(
3482 counters->transcendental_cache_hit(), 1, scratch0, scratch1);
3484 // Pop input value from stack and load result into v0.
3488 // Load result into f4.
3489 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3492 } // if (CpuFeatures::IsSupported(FPU))
3494 __ bind(&calculate);
3495 Counters* counters = masm->isolate()->counters();
3496 __ IncrementCounter(
3497 counters->transcendental_cache_miss(), 1, scratch0, scratch1);
3499 __ bind(&invalid_cache);
3500 __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3505 if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
3506 CpuFeatures::Scope scope(FPU);
3511 // Call C function to calculate the result and update the cache.
3512 // Register a0 holds precalculated cache entry address; preserve
3513 // it on the stack and pop it into register cache_entry after the call.
3515 __ Push(cache_entry, a2, a3);
3516 GenerateCallCFunction(masm, scratch0);
3517 __ GetCFunctionDoubleResult(f4);
3519 // Try to update the cache. If we cannot allocate a
3520 // heap number, we return the result without updating.
3521 __ Pop(cache_entry, a2, a3);
3522 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3523 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3524 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3526 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3527 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3528 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3530 __ Ret(USE_DELAY_SLOT);
3531 __ mov(v0, cache_entry);
3533 __ bind(&invalid_cache);
3534 // The cache is invalid. Call runtime which will recreate the cache.
3536 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3537 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3538 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
3540 FrameScope scope(masm, StackFrame::INTERNAL);
3542 __ CallRuntime(RuntimeFunction(), 1);
3544 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3547 __ bind(&skip_cache);
3548 // Call C function to calculate the result and answer directly
3549 // without updating the cache.
3550 GenerateCallCFunction(masm, scratch0);
3551 __ GetCFunctionDoubleResult(f4);
3552 __ bind(&no_update);
3554 // We return the value in f4 without adding it to the cache, but
3555 // we cause a scavenging GC so that future allocations will succeed.
3557 FrameScope scope(masm, StackFrame::INTERNAL);
3559 // Allocate an aligned object larger than a HeapNumber.
3560 ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3561 __ li(scratch0, Operand(4 * kPointerSize));
3563 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3570 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3573 __ PrepareCallCFunction(2, scratch);
3574 if (IsMipsSoftFloatABI) {
3575 __ Move(a0, a1, f4);
3579 AllowExternalCallThatCantCauseGC scope(masm);
3580 Isolate* isolate = masm->isolate();
3582 case TranscendentalCache::SIN:
3584 ExternalReference::math_sin_double_function(isolate),
3587 case TranscendentalCache::COS:
3589 ExternalReference::math_cos_double_function(isolate),
3592 case TranscendentalCache::TAN:
3593 __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
3596 case TranscendentalCache::LOG:
3598 ExternalReference::math_log_double_function(isolate),
3609 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
3611 // Add more cases when necessary.
3612 case TranscendentalCache::SIN: return Runtime::kMath_sin;
3613 case TranscendentalCache::COS: return Runtime::kMath_cos;
3614 case TranscendentalCache::TAN: return Runtime::kMath_tan;
3615 case TranscendentalCache::LOG: return Runtime::kMath_log;
3618 return Runtime::kAbort;
3623 void StackCheckStub::Generate(MacroAssembler* masm) {
3624 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3628 void InterruptStub::Generate(MacroAssembler* masm) {
3629 __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
3633 void MathPowStub::Generate(MacroAssembler* masm) {
3634 CpuFeatures::Scope fpu_scope(FPU);
3635 const Register base = a1;
3636 const Register exponent = a2;
3637 const Register heapnumbermap = t1;
3638 const Register heapnumber = v0;
3639 const DoubleRegister double_base = f2;
3640 const DoubleRegister double_exponent = f4;
3641 const DoubleRegister double_result = f0;
3642 const DoubleRegister double_scratch = f6;
3643 const FPURegister single_scratch = f8;
3644 const Register scratch = t5;
3645 const Register scratch2 = t3;
3647 Label call_runtime, done, int_exponent;
3648 if (exponent_type_ == ON_STACK) {
3649 Label base_is_smi, unpack_exponent;
3650 // The exponent and base are supplied as arguments on the stack.
3651 // This can only happen if the stub is called from non-optimized code.
3652 // Load input parameters from stack to double registers.
3653 __ lw(base, MemOperand(sp, 1 * kPointerSize));
3654 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
3656 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3658 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
3659 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3660 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3662 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3663 __ jmp(&unpack_exponent);
3665 __ bind(&base_is_smi);
3666 __ mtc1(scratch, single_scratch);
3667 __ cvt_d_w(double_base, single_scratch);
3668 __ bind(&unpack_exponent);
3670 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3672 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3673 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3674 __ ldc1(double_exponent,
3675 FieldMemOperand(exponent, HeapNumber::kValueOffset));
3676 } else if (exponent_type_ == TAGGED) {
3677 // Base is already in double_base.
3678 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3680 __ ldc1(double_exponent,
3681 FieldMemOperand(exponent, HeapNumber::kValueOffset));
3684 if (exponent_type_ != INTEGER) {
3685 Label int_exponent_convert;
3686 // Detect integer exponents stored as double.
3687 __ EmitFPUTruncate(kRoundToMinusInf,
3692 kCheckForInexactConversion);
3693 // scratch2 == 0 means there was no conversion error.
3694 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
3696 if (exponent_type_ == ON_STACK) {
3697 // Detect square root case. Crankshaft detects constant +/-0.5 at
3698 // compile time and uses DoMathPowHalf instead. We then skip this check
3699 // for non-constant cases of +/-0.5 as these hardly occur.
3700 Label not_plus_half;
3703 __ Move(double_scratch, 0.5);
3704 __ BranchF(USE_DELAY_SLOT,
3710 // double_scratch can be overwritten in the delay slot.
3711 // Calculates square root of base. Check for the special case of
3712 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3713 __ Move(double_scratch, -V8_INFINITY);
3714 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3715 __ neg_d(double_result, double_scratch);
3717 // Add +0 to convert -0 to +0.
3718 __ add_d(double_scratch, double_base, kDoubleRegZero);
3719 __ sqrt_d(double_result, double_scratch);
3722 __ bind(&not_plus_half);
3723 __ Move(double_scratch, -0.5);
3724 __ BranchF(USE_DELAY_SLOT,
3730 // double_scratch can be overwritten in the delay slot.
3731 // Calculates square root of base. Check for the special case of
3732 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3733 __ Move(double_scratch, -V8_INFINITY);
3734 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3735 __ Move(double_result, kDoubleRegZero);
3737 // Add +0 to convert -0 to +0.
3738 __ add_d(double_scratch, double_base, kDoubleRegZero);
3739 __ Move(double_result, 1);
3740 __ sqrt_d(double_scratch, double_scratch);
3741 __ div_d(double_result, double_result, double_scratch);
3747 AllowExternalCallThatCantCauseGC scope(masm);
3748 __ PrepareCallCFunction(0, 2, scratch);
3749 __ SetCallCDoubleArguments(double_base, double_exponent);
3751 ExternalReference::power_double_double_function(masm->isolate()),
3755 __ GetCFunctionDoubleResult(double_result);
3758 __ bind(&int_exponent_convert);
3759 __ mfc1(scratch, single_scratch);
3762 // Calculate power with integer exponent.
3763 __ bind(&int_exponent);
3765 // Get two copies of exponent in the registers scratch and exponent.
3766 if (exponent_type_ == INTEGER) {
3767 __ mov(scratch, exponent);
3769 // Exponent has previously been stored into scratch as untagged integer.
3770 __ mov(exponent, scratch);
3773 __ mov_d(double_scratch, double_base); // Back up base.
3774 __ Move(double_result, 1.0);
3776 // Get absolute value of exponent.
3777 Label positive_exponent;
3778 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
3779 __ Subu(scratch, zero_reg, scratch);
3780 __ bind(&positive_exponent);
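// The loop below is standard exponentiation by squaring; roughly (sketch
// only, not generated code):
//   double result = 1.0, b = base;
//   for (int e = abs(exponent); e != 0; e >>= 1) {
//     if (e & 1) result *= b;
//     b *= b;
//   }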
3782 Label while_true, no_carry, loop_end;
3783 __ bind(&while_true);
3785 __ And(scratch2, scratch, 1);
3787 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
3788 __ mul_d(double_result, double_result, double_scratch);
3791 __ sra(scratch, scratch, 1);
3793 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
3794 __ mul_d(double_scratch, double_scratch, double_scratch);
3796 __ Branch(&while_true);
3800 __ Branch(&done, ge, exponent, Operand(zero_reg));
3801 __ Move(double_scratch, 1.0);
3802 __ div_d(double_result, double_scratch, double_result);
3803 // Test whether result is zero. Bail out to check for subnormal result.
3804 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3805 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
3807 // double_exponent may not contain the exponent value if the input was a
3808 // smi. We set it to the exponent value before bailing out.
3809 __ mtc1(exponent, single_scratch);
3810 __ cvt_d_w(double_exponent, single_scratch);
3812 // Returning or bailing out.
3813 Counters* counters = masm->isolate()->counters();
3814 if (exponent_type_ == ON_STACK) {
3815 // The arguments are still on the stack.
3816 __ bind(&call_runtime);
3817 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3819 // The stub is called from non-optimized code, which expects the result
3820 // as heap number in exponent.
3822 __ AllocateHeapNumber(
3823 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
3824 __ sdc1(double_result,
3825 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3826 ASSERT(heapnumber.is(v0));
3827 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3832 AllowExternalCallThatCantCauseGC scope(masm);
3833 __ PrepareCallCFunction(0, 2, scratch);
3834 __ SetCallCDoubleArguments(double_base, double_exponent);
3836 ExternalReference::power_double_double_function(masm->isolate()),
3840 __ GetCFunctionDoubleResult(double_result);
3843 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3849 bool CEntryStub::NeedsImmovableCode() {
3854 bool CEntryStub::IsPregenerated() {
3855 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
3860 void CodeStub::GenerateStubsAheadOfTime() {
3861 CEntryStub::GenerateAheadOfTime();
3862 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
3863 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
3864 RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
3868 void CodeStub::GenerateFPStubs() {
3869 CEntryStub save_doubles(1, kSaveFPRegs);
3870 Handle<Code> code = save_doubles.GetCode();
3871 code->set_is_pregenerated(true);
3872 StoreBufferOverflowStub stub(kSaveFPRegs);
3873 stub.GetCode()->set_is_pregenerated(true);
3874 code->GetIsolate()->set_fp_stubs_generated(true);
3878 void CEntryStub::GenerateAheadOfTime() {
3879 CEntryStub stub(1, kDontSaveFPRegs);
3880 Handle<Code> code = stub.GetCode();
3881 code->set_is_pregenerated(true);
3885 void CEntryStub::GenerateCore(MacroAssembler* masm,
3886 Label* throw_normal_exception,
3887 Label* throw_termination_exception,
3888 Label* throw_out_of_memory_exception,
3890 bool always_allocate) {
3891 // v0: result parameter for PerformGC, if any
3892 // s0: number of arguments including receiver (C callee-saved)
3893 // s1: pointer to the first argument (C callee-saved)
3894 // s2: pointer to builtin function (C callee-saved)
3896 Isolate* isolate = masm->isolate();
3899 // Move result passed in v0 into a0 to call PerformGC.
3901 __ PrepareCallCFunction(1, 0, a1);
3902 __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
3905 ExternalReference scope_depth =
3906 ExternalReference::heap_always_allocate_scope_depth(isolate);
3907 if (always_allocate) {
3908 __ li(a0, Operand(scope_depth));
3909 __ lw(a1, MemOperand(a0));
3910 __ Addu(a1, a1, Operand(1));
3911 __ sw(a1, MemOperand(a0));
3914 // Prepare arguments for C routine.
3917 // a1 = argv (set in the delay slot after find_ra below).
3919 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3920 // also need to reserve the 4 argument slots on the stack.
3922 __ AssertStackIsAligned();
3924 __ li(a2, Operand(ExternalReference::isolate_address()));
3926 // To let the GC traverse the return address of the exit frames, we need to
3927 // know where the return address is. The CEntryStub is unmovable, so
3928 // we can store the address on the stack to be able to find it again and
3929 // we never have to restore it, because it will not change.
3930 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
3931 // This branch-and-link sequence is needed to find the current PC on mips,
3932 // saved to the ra register.
3933 // Use masm-> here instead of the double-underscore macro since extra
3934 // coverage code can interfere with the proper calculation of ra.
3936 masm->bal(&find_ra); // bal exposes branch delay slot.
3938 masm->bind(&find_ra);
3940 // Adjust the value in ra to point to the correct return location, 2nd
3941 // instruction past the real call into C code (the jalr(t9)), and push it.
3942 // This is the return address of the exit frame.
3943 const int kNumInstructionsToJump = 5;
3944 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3945 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
3946 // Stack space reservation moved to the branch delay slot below.
3947 // Stack is still aligned.
3949 // Call the C routine.
3950 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
3951 masm->jalr(t9);
3952 // Set up sp in the delay slot.
3953 masm->addiu(sp, sp, -kCArgsSlotsSize);
3954 // Make sure the stored 'ra' points to this position.
3955 ASSERT_EQ(kNumInstructionsToJump,
3956 masm->InstructionsGeneratedSince(&find_ra));
3959 if (always_allocate) {
3960 // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3961 __ li(a2, Operand(scope_depth));
3962 __ lw(a3, MemOperand(a2));
3963 __ Subu(a3, a3, Operand(1));
3964 __ sw(a3, MemOperand(a2));
3967 // Check for failure result.
3968 Label failure_returned;
3969 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3970 __ addiu(a2, v0, 1);
3971 __ andi(t0, a2, kFailureTagMask);
3972 __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
3973 // Restore stack (remove arg slots) in branch delay slot.
3974 __ addiu(sp, sp, kCArgsSlotsSize);
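// The failure check above follows from the STATIC_ASSERT: failure objects
// have all kFailureTagMask bits set, so adding 1 clears exactly those bits.
// A C++ sketch of the predicate (constant names as used in this file):
//
//   bool LooksLikeFailure(int32_t value) {
//     return ((value + 1) & kFailureTagMask) == 0;
//   }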
3977 // Exit C frame and return.
3979 // sp: stack pointer
3980 // fp: frame pointer
3981 __ LeaveExitFrame(save_doubles_, s0, true);
3983 // Check if we should retry or throw exception.
3985 __ bind(&failure_returned);
3986 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3987 __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
3988 __ Branch(&retry, eq, t0, Operand(zero_reg));
3990 // Special handling of out of memory exceptions.
3991 Failure* out_of_memory = Failure::OutOfMemoryException();
3992 __ Branch(USE_DELAY_SLOT,
3993 throw_out_of_memory_exception,
3994 eq,
3995 v0,
3996 Operand(reinterpret_cast<int32_t>(out_of_memory)));
3997 // If we throw the OOM exception, the value of a3 doesn't matter.
3998 // Any instruction can be in the delay slot that's not a jump.
4000 // Retrieve the pending exception and clear the variable.
4001 __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
4002 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4003 isolate)));
4004 __ lw(v0, MemOperand(t0));
4005 __ sw(a3, MemOperand(t0));
4007 // Special handling of termination exceptions which are uncatchable
4008 // by JavaScript code.
4009 __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
4010 __ Branch(throw_termination_exception, eq, v0, Operand(t0));
4012 // Handle normal exception.
4013 __ jmp(throw_normal_exception);
4016 // Last failure (v0) will be moved to (a0) for parameter when retrying.
4020 void CEntryStub::Generate(MacroAssembler* masm) {
4021 // Called from JavaScript; parameters are on stack as if calling JS function
4022 // s0: number of arguments including receiver
4023 // s1: size of arguments excluding receiver
4024 // s2: pointer to builtin function
4025 // fp: frame pointer (restored after C call)
4026 // sp: stack pointer (restored as callee's sp after C call)
4027 // cp: current context (C callee-saved)
4029 // NOTE: Invocations of builtins may return failure objects
4030 // instead of a proper result. The builtin entry handles
4031 // this by performing a garbage collection and retrying the
4032 // builtin once.
4034 // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
4035 // The reason for this is that these arguments would need to be saved anyway
4036 // so it's faster to set them up directly.
4037 // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
4039 // Compute the argv pointer in a callee-saved register.
4040 __ Addu(s1, sp, s1);
4042 // Enter the exit frame that transitions from JavaScript to C++.
4043 FrameScope scope(masm, StackFrame::MANUAL);
4044 __ EnterExitFrame(save_doubles_);
4046 // s0: number of arguments (C callee-saved)
4047 // s1: pointer to first argument (C callee-saved)
4048 // s2: pointer to builtin function (C callee-saved)
4050 Label throw_normal_exception;
4051 Label throw_termination_exception;
4052 Label throw_out_of_memory_exception;
4054 // Call into the runtime system.
4055 GenerateCore(masm,
4056 &throw_normal_exception,
4057 &throw_termination_exception,
4058 &throw_out_of_memory_exception,
4059 false,
4060 false);
4062 // Do space-specific GC and retry runtime call.
4063 GenerateCore(masm,
4064 &throw_normal_exception,
4065 &throw_termination_exception,
4066 &throw_out_of_memory_exception,
4067 true,
4068 false);
4070 // Do full GC and retry runtime call one final time.
4071 Failure* failure = Failure::InternalError();
4072 __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
4073 GenerateCore(masm,
4074 &throw_normal_exception,
4075 &throw_termination_exception,
4076 &throw_out_of_memory_exception,
4077 true,
4078 true);
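// Summary of the retry ladder above (a sketch of the control flow only):
// the runtime call is attempted up to three times, each retry preceded by a
// more aggressive garbage collection, and only after the last failure do we
// fall through to the throw labels below.
//
//   attempt 1: do_gc = false                          (no collection first)
//   attempt 2: do_gc = true                           (space-specific GC)
//   attempt 3: do_gc = true, always_allocate = true   (full GC, last chance)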
4080 __ bind(&throw_out_of_memory_exception);
4081 // Set external caught exception to false.
4082 Isolate* isolate = masm->isolate();
4083 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
4084 isolate);
4085 __ li(a0, Operand(false, RelocInfo::NONE));
4086 __ li(a2, Operand(external_caught));
4087 __ sw(a0, MemOperand(a2));
4089 // Set pending exception and v0 to out of memory exception.
4090 Failure* out_of_memory = Failure::OutOfMemoryException();
4091 __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
4092 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4093 isolate)));
4094 __ sw(v0, MemOperand(a2));
4095 // Fall through to the next label.
4097 __ bind(&throw_termination_exception);
4098 __ ThrowUncatchable(v0);
4100 __ bind(&throw_normal_exception);
4105 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4106 Label invoke, handler_entry, exit;
4107 Isolate* isolate = masm->isolate();
4110 // a0: entry address
4119 // Save callee saved registers on the stack.
4120 __ MultiPush(kCalleeSaved | ra.bit());
4122 if (CpuFeatures::IsSupported(FPU)) {
4123 CpuFeatures::Scope scope(FPU);
4124 // Save callee-saved FPU registers.
4125 __ MultiPushFPU(kCalleeSavedFPU);
4126 // Set up the reserved register for 0.0.
4127 __ Move(kDoubleRegZero, 0.0);
4131 // Load argv in s0 register.
4132 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
4133 if (CpuFeatures::IsSupported(FPU)) {
4134 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
4137 __ InitializeRootRegister();
4138 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
4140 // We build an EntryFrame.
4141 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
4142 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4143 __ li(t2, Operand(Smi::FromInt(marker)));
4144 __ li(t1, Operand(Smi::FromInt(marker)));
4145 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4146 isolate)));
4147 __ lw(t0, MemOperand(t0));
4148 __ Push(t3, t2, t1, t0);
4149 // Set up frame pointer for the frame to be pushed.
4150 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
4152 // Registers:
4153 //   a0: entry_address
4155 //   a2: receiver_pointer
4159 // Stack:
4161 //   function slot               | entry frame
4163 //   bad fp (0xff...f)           |
4164 //   callee saved registers + ra
4168 // If this is the outermost JS call, set js_entry_sp value.
4169 Label non_outermost_js;
4170 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
4171 __ li(t1, Operand(ExternalReference(js_entry_sp)));
4172 __ lw(t2, MemOperand(t1));
4173 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
4174 __ sw(fp, MemOperand(t1));
4175 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4178 __ nop(); // Branch delay slot nop.
4179 __ bind(&non_outermost_js);
4180 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
4184 // Jump to a faked try block that does the invoke, with a faked catch
4185 // block that sets the pending exception.
4186 __ jmp(&invoke);
4187 __ bind(&handler_entry);
4188 handler_offset_ = handler_entry.pos();
4189 // Caught exception: Store result (exception) in the pending exception
4190 // field in the JSEnv and return a failure sentinel. Coming in here the
4191 // fp will be invalid because the PushTryHandler below sets it to 0 to
4192 // signal the existence of the JSEntry frame.
4193 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4194 isolate)));
4195 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
4196 __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
4197 __ b(&exit); // b exposes branch delay slot.
4198 __ nop(); // Branch delay slot nop.
4200 // Invoke: Link this frame into the handler chain. There's only one
4201 // handler block in this code object, so its index is 0.
4202 __ bind(&invoke);
4203 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
4204 // If an exception not caught by another handler occurs, this handler
4205 // returns control to the code after the bal(&invoke) above, which
4206 // restores all kCalleeSaved registers (including cp and fp) to their
4207 // saved values before returning a failure to C.
4209 // Clear any pending exceptions.
4210 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
4211 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4212 isolate)));
4213 __ sw(t1, MemOperand(t0));
4215 // Invoke the function by calling through JS entry trampoline builtin.
4216 // Notice that we cannot store a reference to the trampoline code directly in
4217 // this stub, because runtime stubs are not traversed when doing GC.
4219 // Registers:
4220 //   a0: entry_address
4222 //   a2: receiver_pointer
4228 // Stack (as set up above):
4229 //   callee saved registers + ra
4233 if (is_construct) {
4234 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4235 isolate);
4236 __ li(t0, Operand(construct_entry));
4237 } else {
4238 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
4239 __ li(t0, Operand(entry));
4240 }
4241 __ lw(t9, MemOperand(t0)); // Deref address.
4243 // Call JSEntryTrampoline.
4244 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
4245 __ Call(t9);
4247 // Unlink this frame from the handler chain.
4248 __ PopTryHandler();
4250 __ bind(&exit); // v0 holds result
4251 // Check if the current stack frame is marked as the outermost JS frame.
4252 Label non_outermost_js_2;
4253 __ pop(t1);
4254 __ Branch(&non_outermost_js_2,
4255 ne,
4256 t1,
4257 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4258 __ li(t1, Operand(ExternalReference(js_entry_sp)));
4259 __ sw(zero_reg, MemOperand(t1));
4260 __ bind(&non_outermost_js_2);
4262 // Restore the top frame descriptors from the stack.
4263 __ pop(t1);
4264 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4265 isolate)));
4266 __ sw(t1, MemOperand(t0));
4268 // Reset the stack to the callee saved registers.
4269 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
4271 if (CpuFeatures::IsSupported(FPU)) {
4272 CpuFeatures::Scope scope(FPU);
4273 // Restore callee-saved fpu registers.
4274 __ MultiPopFPU(kCalleeSavedFPU);
4277 // Restore callee saved registers from the stack.
4278 __ MultiPop(kCalleeSaved | ra.bit());
4284 // Uses registers a0 to t0.
4285 // Expected input (depending on whether args are in registers or on the stack):
4286 // * object: a0 or at sp + 1 * kPointerSize.
4287 // * function: a1 or at sp.
4289 // An inlined call site may have been generated before calling this stub.
4290 // In this case the offset to the inline site to patch is passed on the stack,
4291 // in the safepoint slot for register t0.
4292 void InstanceofStub::Generate(MacroAssembler* masm) {
4293 // Call site inlining and patching implies arguments in registers.
4294 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4295 // ReturnTrueFalse is only implemented for inlined call sites.
4296 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4298 // Fixed register usage throughout the stub:
4299 const Register object = a0; // Object (lhs).
4300 Register map = a3; // Map of the object.
4301 const Register function = a1; // Function (rhs).
4302 const Register prototype = t0; // Prototype of the function.
4303 const Register inline_site = t5;
4304 const Register scratch = a2;
4306 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
4308 Label slow, loop, is_instance, is_not_instance, not_js_object;
4310 if (!HasArgsInRegisters()) {
4311 __ lw(object, MemOperand(sp, 1 * kPointerSize));
4312 __ lw(function, MemOperand(sp, 0));
4315 // Check that the left hand is a JS object and load map.
4316 __ JumpIfSmi(object, &not_js_object);
4317 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4319 // If there is a call site cache don't look in the global cache, but do the
4320 // real lookup and update the call site cache.
4321 if (!HasCallSiteInlineCheck()) {
4323 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
4324 __ Branch(&miss, ne, function, Operand(at));
4325 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
4326 __ Branch(&miss, ne, map, Operand(at));
4327 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4328 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4333 // Get the prototype of the function.
4334 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
4336 // Check that the function prototype is a JS object.
4337 __ JumpIfSmi(prototype, &slow);
4338 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4340 // Update the global instanceof or call site inlined cache with the current
4341 // map and function. The cached answer will be set when it is known below.
4342 if (!HasCallSiteInlineCheck()) {
4343 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4344 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4345 } else {
4346 ASSERT(HasArgsInRegisters());
4347 // Patch the (relocated) inlined map check.
4349 // The offset was stored in t0 safepoint slot.
4350 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
4351 __ LoadFromSafepointRegisterSlot(scratch, t0);
4352 __ Subu(inline_site, ra, scratch);
4353 // Get the map location in scratch and patch it.
4354 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
4355 __ sw(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
4358 // Register mapping: a3 is object map and t0 is function prototype.
4359 // Get prototype of object into a2.
4360 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
4362 // We don't need map any more. Use it as a scratch register.
4363 Register scratch2 = map;
4366 // Loop through the prototype chain looking for the function prototype.
4367 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4368 __ bind(&loop);
4369 __ Branch(&is_instance, eq, scratch, Operand(prototype));
4370 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
4371 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
4372 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
4373 __ Branch(&loop);
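// The loop above walks the prototype chain. A C++-flavoured sketch of what
// it computes (accessor names are illustrative; the caching and call-site
// patching around it are omitted):
//
//   bool IsInstanceSketch(Object* object, Object* prototype) {
//     Object* p = object->map()->prototype();
//     while (p != null_value) {
//       if (p == prototype) return true;
//       p = HeapObject::cast(p)->map()->prototype();
//     }
//     return false;
//   }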
4375 __ bind(&is_instance);
4376 ASSERT(Smi::FromInt(0) == 0);
4377 if (!HasCallSiteInlineCheck()) {
4378 __ mov(v0, zero_reg);
4379 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4380 } else {
4381 // Patch the call site to return true.
4382 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4383 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4384 // Get the boolean result location in scratch and patch it.
4385 __ PatchRelocatedValue(inline_site, scratch, v0);
4387 if (!ReturnTrueFalseObject()) {
4388 ASSERT_EQ(Smi::FromInt(0), 0);
4389 __ mov(v0, zero_reg);
4392 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4394 __ bind(&is_not_instance);
4395 if (!HasCallSiteInlineCheck()) {
4396 __ li(v0, Operand(Smi::FromInt(1)));
4397 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4398 } else {
4399 // Patch the call site to return false.
4400 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4401 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4402 // Get the boolean result location in scratch and patch it.
4403 __ PatchRelocatedValue(inline_site, scratch, v0);
4405 if (!ReturnTrueFalseObject()) {
4406 __ li(v0, Operand(Smi::FromInt(1)));
4410 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4412 Label object_not_null, object_not_null_or_smi;
4413 __ bind(&not_js_object);
4414 // Before null, smi and string value checks, check that the rhs is a function
4415 // as for a non-function rhs an exception needs to be thrown.
4416 __ JumpIfSmi(function, &slow);
4417 __ GetObjectType(function, scratch2, scratch);
4418 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
4420 // Null is not instance of anything.
4421 __ Branch(&object_not_null,
4424 Operand(masm->isolate()->factory()->null_value()));
4425 __ li(v0, Operand(Smi::FromInt(1)));
4426 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4428 __ bind(&object_not_null);
4429 // Smi values are not instances of anything.
4430 __ JumpIfNotSmi(object, &object_not_null_or_smi);
4431 __ li(v0, Operand(Smi::FromInt(1)));
4432 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4434 __ bind(&object_not_null_or_smi);
4435 // String values are not instances of anything.
4436 __ IsObjectJSStringType(object, scratch, &slow);
4437 __ li(v0, Operand(Smi::FromInt(1)));
4438 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4440 // Slow-case. Tail call builtin.
4442 if (!ReturnTrueFalseObject()) {
4443 if (HasArgsInRegisters()) {
4446 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4449 FrameScope scope(masm, StackFrame::INTERNAL);
4451 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4454 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4455 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
4456 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4457 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4462 Register InstanceofStub::left() { return a0; }
4465 Register InstanceofStub::right() { return a1; }
4468 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
4469 // The displacement is the offset of the last parameter (if any)
4470 // relative to the frame pointer.
4471 const int kDisplacement =
4472 StandardFrameConstants::kCallerSPOffset - kPointerSize;
4474 // Check that the key is a smi.
4476 __ JumpIfNotSmi(a1, &slow);
4478 // Check if the calling frame is an arguments adaptor frame.
4480 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4481 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4482 __ Branch(&adaptor,
4483 eq,
4484 a3,
4485 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4487 // Check index (a1) against formal parameters count limit passed in
4488 // through register a0. Use unsigned comparison to get negative
4489 // check for free.
4490 __ Branch(&slow, hs, a1, Operand(a0));
4492 // Read the argument from the stack and return it.
4493 __ subu(a3, a0, a1);
4494 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4495 __ Addu(a3, fp, Operand(t3));
4496 __ lw(v0, MemOperand(a3, kDisplacement));
4499 // Arguments adaptor case: Check index (a1) against actual arguments
4500 // limit found in the arguments adaptor frame. Use unsigned
4501 // comparison to get negative check for free.
4502 __ bind(&adaptor);
4503 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4504 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
4506 // Read the argument from the adaptor frame and return it.
4507 __ subu(a3, a0, a1);
4508 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4509 __ Addu(a3, a2, Operand(t3));
4510 __ lw(v0, MemOperand(a3, kDisplacement));
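// Address computation used by both cases above (sketch): the element is read
// from
//
//   base + (length - index) * kPointerSize + kDisplacement
//
// where base is the caller's frame pointer (or the adaptor frame pointer).
// Because (length - index) is computed on smi-tagged values, i.e. already
// multiplied by 2, the code shifts by kPointerSizeLog2 - kSmiTagSize rather
// than the full kPointerSizeLog2.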
4513 // Slow-case: Handle non-smi or out-of-bounds access to arguments
4514 // by calling the runtime system.
4517 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4521 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
4522 // sp[0] : number of parameters
4523 // sp[4] : receiver displacement
4525 // Check if the calling frame is an arguments adaptor frame.
4527 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4528 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4529 __ Branch(&runtime,
4530 ne,
4531 a2,
4532 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4534 // Patch the arguments.length and the parameters pointer in the current frame.
4535 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4536 __ sw(a2, MemOperand(sp, 0 * kPointerSize));
4538 __ Addu(a3, a3, Operand(t3));
4539 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
4540 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4542 __ bind(&runtime);
4543 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4547 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4549 // sp[0] : number of parameters (tagged)
4550 // sp[4] : address of receiver argument
4552 // Registers used over whole function:
4553 // t2 : allocated object (tagged)
4554 // t5 : mapped parameter count (tagged)
4556 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4557 // a1 = parameter count (tagged)
4559 // Check if the calling frame is an arguments adaptor frame.
4561 Label adaptor_frame, try_allocate;
4562 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4563 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4564 __ Branch(&adaptor_frame,
4565 eq,
4566 a2,
4567 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4569 // No adaptor, parameter count = argument count.
4570 __ mov(a2, a1);
4571 __ b(&try_allocate);
4572 __ nop(); // Branch delay slot nop.
4574 // We have an adaptor frame. Patch the parameters pointer.
4575 __ bind(&adaptor_frame);
4576 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4578 __ Addu(a3, a3, Operand(t6));
4579 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4580 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4582 // a1 = parameter count (tagged)
4583 // a2 = argument count (tagged)
4584 // Compute the mapped parameter count = min(a1, a2) in a1.
4585 Label skip_min;
4586 __ Branch(&skip_min, lt, a1, Operand(a2));
4587 __ mov(a1, a2);
4588 __ bind(&skip_min);
4590 __ bind(&try_allocate);
4592 // Compute the sizes of backing store, parameter map, and arguments object.
4593 // 1. Parameter map, has 2 extra words containing context and backing store.
4594 const int kParameterMapHeaderSize =
4595 FixedArray::kHeaderSize + 2 * kPointerSize;
4596 // If there are no mapped parameters, we do not need the parameter_map.
4597 Label param_map_size;
4598 ASSERT_EQ(0, Smi::FromInt(0));
4599 __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
4600 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
4601 __ sll(t5, a1, 1);
4602 __ addiu(t5, t5, kParameterMapHeaderSize);
4603 __ bind(&param_map_size);
4605 // 2. Backing store.
4607 __ Addu(t5, t5, Operand(t6));
4608 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
4610 // 3. Arguments object.
4611 __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
4613 // Do the allocation of all three objects in one go.
4614 __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
4616 // v0 = address of new object(s) (tagged)
4617 // a2 = argument count (tagged)
4618 // Get the arguments boilerplate from the current (global) context into t0.
4619 const int kNormalOffset =
4620 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
4621 const int kAliasedOffset =
4622 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
4624 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4625 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4626 Label skip2_ne, skip2_eq;
4627 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
4628 __ lw(t0, MemOperand(t0, kNormalOffset));
4631 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
4632 __ lw(t0, MemOperand(t0, kAliasedOffset));
4635 // v0 = address of new object (tagged)
4636 // a1 = mapped parameter count (tagged)
4637 // a2 = argument count (tagged)
4638 // t0 = address of boilerplate object (tagged)
4639 // Copy the JS object part.
4640 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
4641 __ lw(a3, FieldMemOperand(t0, i));
4642 __ sw(a3, FieldMemOperand(v0, i));
4645 // Set up the callee in-object property.
4646 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
4647 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
4648 const int kCalleeOffset = JSObject::kHeaderSize +
4649 Heap::kArgumentsCalleeIndex * kPointerSize;
4650 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
4652 // Use the length (smi tagged) and set that as an in-object property too.
4653 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4654 const int kLengthOffset = JSObject::kHeaderSize +
4655 Heap::kArgumentsLengthIndex * kPointerSize;
4656 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
4658 // Set up the elements pointer in the allocated arguments object.
4659 // If we allocated a parameter map, t0 will point there, otherwise
4660 // it will point to the backing store.
4661 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
4662 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4664 // v0 = address of new object (tagged)
4665 // a1 = mapped parameter count (tagged)
4666 // a2 = argument count (tagged)
4667 // t0 = address of parameter map or backing store (tagged)
4668 // Initialize parameter map. If there are no mapped arguments, we're done.
4669 Label skip_parameter_map;
4670 Label skip3;
4671 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
4672 // Move backing store address to a3, because it is
4673 // expected there when filling in the unmapped arguments.
4674 __ mov(a3, t0);
4675 __ bind(&skip3);
4677 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
4679 __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
4680 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
4681 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
4682 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
4683 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
4685 __ Addu(t2, t0, Operand(t6));
4686 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
4687 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
4689 // Copy the parameter slots and the holes in the arguments.
4690 // We need to fill in mapped_parameter_count slots. They index the context,
4691 // where parameters are stored in reverse order, at
4692 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4693 // The mapped parameters thus need to get indices
4694 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
4695 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4696 // We loop from right to left.
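// Worked example of the indexing described above (illustrative numbers only):
// with parameter_count == 4 and mapped_parameter_count == 2, the two mapped
// slots receive the context indices
//   MIN_CONTEXT_SLOTS + 3  and  MIN_CONTEXT_SLOTS + 2
// i.e. the last two entries of the reversed parameter area, written from
// right to left by the loop below.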
4697 Label parameters_loop, parameters_test;
4699 __ lw(t5, MemOperand(sp, 0 * kPointerSize));
4700 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4701 __ Subu(t5, t5, Operand(a1));
4702 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
4704 __ Addu(a3, t0, Operand(t6));
4705 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
4707 // t2 = loop variable (tagged)
4708 // a1 = mapping index (tagged)
4709 // a3 = address of backing store (tagged)
4710 // t0 = address of parameter map (tagged)
4711 // t1 = temporary scratch (a.o., for address calculation)
4712 // t3 = the hole value
4713 __ jmp(&parameters_test);
4715 __ bind(&parameters_loop);
4716 __ Subu(t2, t2, Operand(Smi::FromInt(1)));
4718 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4719 __ Addu(t6, t0, t1);
4720 __ sw(t5, MemOperand(t6));
4721 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4722 __ Addu(t6, a3, t1);
4723 __ sw(t3, MemOperand(t6));
4724 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4725 __ bind(&parameters_test);
4726 __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
4728 __ bind(&skip_parameter_map);
4729 // a2 = argument count (tagged)
4730 // a3 = address of backing store (tagged)
4732 // Copy arguments header and remaining slots (if there are any).
4733 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4734 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
4735 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4737 Label arguments_loop, arguments_test;
4739 __ lw(t0, MemOperand(sp, 1 * kPointerSize));
4741 __ Subu(t0, t0, Operand(t6));
4742 __ jmp(&arguments_test);
4744 __ bind(&arguments_loop);
4745 __ Subu(t0, t0, Operand(kPointerSize));
4746 __ lw(t2, MemOperand(t0, 0));
4748 __ Addu(t1, a3, Operand(t6));
4749 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
4750 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4752 __ bind(&arguments_test);
4753 __ Branch(&arguments_loop, lt, t5, Operand(a2));
4755 // Return and remove the on-stack parameters.
4758 // Do the runtime call to allocate the arguments object.
4759 // a2 = argument count (tagged)
4761 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
4762 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4766 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4767 // sp[0] : number of parameters
4768 // sp[4] : receiver displacement
4770 // Check if the calling frame is an arguments adaptor frame.
4771 Label adaptor_frame, try_allocate, runtime;
4772 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4773 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4774 __ Branch(&adaptor_frame,
4775 eq,
4776 a3,
4777 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4779 // Get the length from the frame.
4780 __ lw(a1, MemOperand(sp, 0));
4781 __ Branch(&try_allocate);
4783 // Patch the arguments.length and the parameters pointer.
4784 __ bind(&adaptor_frame);
4785 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4786 __ sw(a1, MemOperand(sp, 0));
4787 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
4788 __ Addu(a3, a2, Operand(at));
4790 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4791 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4793 // Try the new space allocation. Start out with computing the size
4794 // of the arguments object and the elements array in words.
4795 Label add_arguments_object;
4796 __ bind(&try_allocate);
4797 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
4798 __ srl(a1, a1, kSmiTagSize);
4800 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
4801 __ bind(&add_arguments_object);
4802 __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
4804 // Do the allocation of both objects in one go.
4805 __ AllocateInNewSpace(a1,
4806 v0,
4807 a2,
4808 a3,
4809 &runtime,
4810 static_cast<AllocationFlags>(TAG_OBJECT |
4811 SIZE_IN_WORDS));
4813 // Get the arguments boilerplate from the current (global) context.
4814 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4815 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4816 __ lw(t0, MemOperand(t0, Context::SlotOffset(
4817 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
4819 // Copy the JS object part.
4820 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4822 // Get the length (smi tagged) and set that as an in-object property too.
4823 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4824 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4825 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
4826 Heap::kArgumentsLengthIndex * kPointerSize));
4829 __ Branch(&done, eq, a1, Operand(zero_reg));
4831 // Get the parameters pointer from the stack.
4832 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
4834 // Set up the elements pointer in the allocated arguments object and
4835 // initialize the header in the elements fixed array.
4836 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
4837 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4838 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4839 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
4840 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
4841 // Untag the length for the loop.
4842 __ srl(a1, a1, kSmiTagSize);
4844 // Copy the fixed array slots.
4846 // Set up t0 to point to the first array slot.
4847 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4849 // Pre-decrement a2 with kPointerSize on each iteration.
4850 // Pre-decrement in order to skip receiver.
4851 __ Addu(a2, a2, Operand(-kPointerSize));
4852 __ lw(a3, MemOperand(a2));
4853 // Post-increment t0 with kPointerSize on each iteration.
4854 __ sw(a3, MemOperand(t0));
4855 __ Addu(t0, t0, Operand(kPointerSize));
4856 __ Subu(a1, a1, Operand(1));
4857 __ Branch(&loop, ne, a1, Operand(zero_reg));
4859 // Return and remove the on-stack parameters.
4863 // Do the runtime call to allocate the arguments object.
4865 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
4869 void RegExpExecStub::Generate(MacroAssembler* masm) {
4870 // Just jump directly to runtime if native RegExp is not selected at compile
4871 // time or if regexp entry in generated code is turned off by a runtime
4872 // switch or at compilation.
4873 #ifdef V8_INTERPRETED_REGEXP
4874 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4875 #else // V8_INTERPRETED_REGEXP
4877 // Stack frame on entry.
4878 // sp[0]: last_match_info (expected JSArray)
4879 // sp[4]: previous index
4880 // sp[8]: subject string
4881 // sp[12]: JSRegExp object
4883 const int kLastMatchInfoOffset = 0 * kPointerSize;
4884 const int kPreviousIndexOffset = 1 * kPointerSize;
4885 const int kSubjectOffset = 2 * kPointerSize;
4886 const int kJSRegExpOffset = 3 * kPointerSize;
4888 Isolate* isolate = masm->isolate();
4890 Label runtime, invoke_regexp;
4892 // Allocation of registers for this function. These are in callee save
4893 // registers and will be preserved by the call to the native RegExp code, as
4894 // this code is called using the normal C calling convention. When calling
4895 // directly from generated code the native RegExp code will not do a GC and
4896 // therefore the contents of these registers are safe to use after the call.
4897 // MIPS - using s0..s2, since we are not using CEntry Stub.
4898 Register subject = s0;
4899 Register regexp_data = s1;
4900 Register last_match_info_elements = s2;
4902 // Ensure that a RegExp stack is allocated.
4903 ExternalReference address_of_regexp_stack_memory_address =
4904 ExternalReference::address_of_regexp_stack_memory_address(
4905 isolate);
4906 ExternalReference address_of_regexp_stack_memory_size =
4907 ExternalReference::address_of_regexp_stack_memory_size(isolate);
4908 __ li(a0, Operand(address_of_regexp_stack_memory_size));
4909 __ lw(a0, MemOperand(a0, 0));
4910 __ Branch(&runtime, eq, a0, Operand(zero_reg));
4912 // Check that the first argument is a JSRegExp object.
4913 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
4914 STATIC_ASSERT(kSmiTag == 0);
4915 __ JumpIfSmi(a0, &runtime);
4916 __ GetObjectType(a0, a1, a1);
4917 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
4919 // Check that the RegExp has been compiled (data contains a fixed array).
4920 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
4921 if (FLAG_debug_code) {
4922 __ And(t0, regexp_data, Operand(kSmiTagMask));
4924 "Unexpected type for RegExp data, FixedArray expected",
4927 __ GetObjectType(regexp_data, a0, a0);
4929 "Unexpected type for RegExp data, FixedArray expected",
4931 Operand(FIXED_ARRAY_TYPE));
4934 // regexp_data: RegExp data (FixedArray)
4935 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4936 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4937 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4939 // regexp_data: RegExp data (FixedArray)
4940 // Check that the number of captures fit in the static offsets vector buffer.
4941 __ lw(a2,
4942 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4943 // Calculate number of capture registers (number_of_captures + 1) * 2. This
4944 // uses the assumption that smis are 2 * their untagged value.
4945 STATIC_ASSERT(kSmiTag == 0);
4946 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4947 __ Addu(a2, a2, Operand(2)); // a2 was a smi.
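// Worked example: number_of_captures == 3 is stored as the smi 6 (2 * 3),
// and 6 + 2 == 8 == (3 + 1) * 2, the number of capture registers needed.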
4948 // Check that the static offsets vector buffer is large enough.
4949 __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
4951 // a2: Number of capture registers
4952 // regexp_data: RegExp data (FixedArray)
4953 // Check that the second argument is a string.
4954 __ lw(subject, MemOperand(sp, kSubjectOffset));
4955 __ JumpIfSmi(subject, &runtime);
4956 __ GetObjectType(subject, a0, a0);
4957 __ And(a0, a0, Operand(kIsNotStringMask));
4958 STATIC_ASSERT(kStringTag == 0);
4959 __ Branch(&runtime, ne, a0, Operand(zero_reg));
4961 // Get the length of the string to r3.
4962 __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
4964 // a2: Number of capture registers
4965 // a3: Length of subject string as a smi
4966 // subject: Subject string
4967 // regexp_data: RegExp data (FixedArray)
4968 // Check that the third argument is a positive smi less than the subject
4969 // string length. A negative value will be greater (unsigned comparison).
4970 __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
4971 __ JumpIfNotSmi(a0, &runtime);
4972 __ Branch(&runtime, ls, a3, Operand(a0));
4974 // a2: Number of capture registers
4975 // subject: Subject string
4976 // regexp_data: RegExp data (FixedArray)
4977 // Check that the fourth object is a JSArray object.
4978 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
4979 __ JumpIfSmi(a0, &runtime);
4980 __ GetObjectType(a0, a1, a1);
4981 __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
4982 // Check that the JSArray is in fast case.
4983 __ lw(last_match_info_elements,
4984 FieldMemOperand(a0, JSArray::kElementsOffset));
4985 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4986 __ Branch(&runtime, ne, a0, Operand(
4987 isolate->factory()->fixed_array_map()));
4988 // Check that the last match info has space for the capture registers and the
4989 // additional information.
4990 __ lw(a0,
4991 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4992 __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
4993 __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
4994 __ Branch(&runtime, gt, a2, Operand(at));
4996 // Reset offset for possibly sliced string.
4997 __ mov(t0, zero_reg);
4998 // subject: Subject string
4999 // regexp_data: RegExp data (FixedArray)
5000 // Check the representation and encoding of the subject string.
5002 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5003 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
5004 // First check for flat string. None of the following string type tests will
5005 // succeed if subject is not a string or a short external string.
5006 Label seq_string;
5007 __ And(a1, a0,
5008 Operand(kIsNotStringMask |
5009 kStringRepresentationMask |
5010 kShortExternalStringMask));
5011 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
5012 __ Branch(&seq_string, eq, a1, Operand(zero_reg));
5014 // subject: Subject string
5015 // a0: instance type of Subject string
5016 // regexp_data: RegExp data (FixedArray)
5017 // a1: whether subject is a string and if yes, its string representation
5018 // Check for flat cons string or sliced string.
5019 // A flat cons string is a cons string where the second part is the empty
5020 // string. In that case the subject string is just the first part of the cons
5021 // string. Also in this case the first part of the cons string is known to be
5022 // a sequential string or an external string.
5023 // In the case of a sliced string its offset has to be taken into account.
5024 Label cons_string, external_string, check_encoding;
5025 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
5026 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
5027 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
5028 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
5029 __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
5030 __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
5032 // Catch non-string subject or short external string.
5033 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
5034 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
5035 __ Branch(&runtime, ne, at, Operand(zero_reg));
5037 // String is sliced.
5038 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
5039 __ sra(t0, t0, kSmiTagSize);
5040 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
5041 // t0: offset of sliced string, untagged.
5042 __ jmp(&check_encoding);
5043 // String is a cons string, check whether it is flat.
5044 __ bind(&cons_string);
5045 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
5046 __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
5047 __ Branch(&runtime, ne, a0, Operand(a1));
5048 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
5049 // Is first part of cons or parent of slice a flat string?
5050 __ bind(&check_encoding);
5051 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5052 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
5053 STATIC_ASSERT(kSeqStringTag == 0);
5054 __ And(at, a0, Operand(kStringRepresentationMask));
5055 __ Branch(&external_string, ne, at, Operand(zero_reg));
5057 __ bind(&seq_string);
5058 // subject: Subject string
5059 // regexp_data: RegExp data (FixedArray)
5060 // a0: Instance type of subject string
5061 STATIC_ASSERT(kStringEncodingMask == 4);
5062 STATIC_ASSERT(kAsciiStringTag == 4);
5063 STATIC_ASSERT(kTwoByteStringTag == 0);
5064 // Find the code object based on the assumptions above.
5065 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
5066 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
5067 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
5068 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
5069 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
5071 // Check that the irregexp code has been generated for the actual string
5072 // encoding. If it has, the field contains a code object otherwise it contains
5073 // a smi (code flushing support).
5074 __ JumpIfSmi(t9, &runtime);
5076 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5078 // subject: Subject string
5079 // regexp_data: RegExp data (FixedArray)
5080 // Load used arguments before starting to push arguments for call to native
5081 // RegExp code to avoid handling changing stack height.
5082 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
5083 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
5085 // a1: previous index
5086 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5088 // subject: Subject string
5089 // regexp_data: RegExp data (FixedArray)
5090 // All checks done. Now push arguments for native regexp code.
5091 __ IncrementCounter(isolate->counters()->regexp_entry_native(),
5094 // Isolates: note we add an additional parameter here (isolate pointer).
5095 const int kRegExpExecuteArguments = 8;
5096 const int kParameterRegisters = 4;
5097 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
5099 // Stack pointer now points to cell where return address is to be written.
5100 // Arguments are before that on the stack or in registers, meaning we
5101 // treat the return address as argument 5. Thus every argument after that
5102 // needs to be shifted back by 1. Since DirectCEntryStub will handle
5103 // allocating space for the c argument slots, we don't need to calculate
5104 // that into the argument positions on the stack. This is how the stack will
5105 // look (sp meaning the value of sp at this moment):
5106 // [sp + 4] - Argument 8
5107 // [sp + 3] - Argument 7
5108 // [sp + 2] - Argument 6
5109 // [sp + 1] - Argument 5
5110 // [sp + 0] - saved ra
5112 // Argument 8: Pass current isolate address.
5113 // CFunctionArgumentOperand handles MIPS stack argument slots.
5114 __ li(a0, Operand(ExternalReference::isolate_address()));
5115 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
5117 // Argument 7: Indicate that this is a direct call from JavaScript.
5118 __ li(a0, Operand(1));
5119 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
5121 // Argument 6: Start (high end) of backtracking stack memory area.
5122 __ li(a0, Operand(address_of_regexp_stack_memory_address));
5123 __ lw(a0, MemOperand(a0, 0));
5124 __ li(a2, Operand(address_of_regexp_stack_memory_size));
5125 __ lw(a2, MemOperand(a2, 0));
5126 __ addu(a0, a0, a2);
5127 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
5129 // Argument 5: static offsets vector buffer.
5131 ExternalReference::address_of_static_offsets_vector(isolate)));
5132 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
5134 // For arguments 4 and 3 get string length, calculate start of string data
5135 // and calculate the shift of the index (0 for ASCII and 1 for two byte).
5136 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
5137 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
5138 // Load the length from the original subject string from the previous stack
5139 // frame. Therefore we have to use fp, which points exactly to two pointer
5140 // sizes below the previous sp. (Because creating a new stack frame pushes
5141 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
5142 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
5143 // If slice offset is not 0, load the length from the original sliced string.
5144 // Argument 4, a3: End of string data
5145 // Argument 3, a2: Start of string data
5146 // Prepare start and end index of the input.
5147 __ sllv(t1, t0, a3);
5148 __ addu(t0, t2, t1);
5149 __ sllv(t1, a1, a3);
5150 __ addu(a2, t0, t1);
5152 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
5153 __ sra(t2, t2, kSmiTagSize);
5154 __ sllv(t1, t2, a3);
5155 __ addu(a3, t0, t1);
5156 // Argument 2 (a1): Previous index.
5159 // Argument 1 (a0): Subject string.
5160 __ mov(a0, subject);
5162 // Locate the code entry and call it.
5163 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
5164 DirectCEntryStub stub;
5165 stub.GenerateCall(masm, t9);
5167 __ LeaveExitFrame(false, no_reg);
5170 // subject: subject string (callee saved)
5171 // regexp_data: RegExp data (callee saved)
5172 // last_match_info_elements: Last match info elements (callee saved)
5174 // Check the result.
5177 __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
5179 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
5180 // If not exception it can only be retry. Handle that in the runtime system.
5181 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
5182 // Result must now be exception. If there is no pending exception already, a
5183 // stack overflow (on the backtrack stack) was detected in RegExp code but
5184 // the exception has not been created yet. Handle that in the runtime system.
5185 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
5186 __ li(a1, Operand(isolate->factory()->the_hole_value()));
5187 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
5188 isolate)));
5189 __ lw(v0, MemOperand(a2, 0));
5190 __ Branch(&runtime, eq, v0, Operand(a1));
5192 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
5194 // Check if the exception is a termination. If so, throw as uncatchable.
5195 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
5196 Label termination_exception;
5197 __ Branch(&termination_exception, eq, v0, Operand(a0));
5199 __ Throw(v0);
5201 __ bind(&termination_exception);
5202 __ ThrowUncatchable(v0);
5205 // For failure and exception return null.
5206 __ li(v0, Operand(isolate->factory()->null_value()));
5209 // Process the result from the native regexp code.
5210 __ bind(&success);
5211 __ lw(a1,
5212 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
5213 // Calculate number of capture registers (number_of_captures + 1) * 2.
5214 STATIC_ASSERT(kSmiTag == 0);
5215 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5216 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
5218 // a1: number of capture registers
5219 // subject: subject string
5220 // Store the capture count.
5221 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
5222 __ sw(a2, FieldMemOperand(last_match_info_elements,
5223 RegExpImpl::kLastCaptureCountOffset));
5224 // Store last subject and last input.
5225 __ sw(subject,
5226 FieldMemOperand(last_match_info_elements,
5227 RegExpImpl::kLastSubjectOffset));
5228 __ mov(a2, subject);
5229 __ RecordWriteField(last_match_info_elements,
5230 RegExpImpl::kLastSubjectOffset,
5235 __ sw(subject,
5236 FieldMemOperand(last_match_info_elements,
5237 RegExpImpl::kLastInputOffset));
5238 __ RecordWriteField(last_match_info_elements,
5239 RegExpImpl::kLastInputOffset,
5245 // Get the static offsets vector filled by the native regexp code.
5246 ExternalReference address_of_static_offsets_vector =
5247 ExternalReference::address_of_static_offsets_vector(isolate);
5248 __ li(a2, Operand(address_of_static_offsets_vector));
5250 // a1: number of capture registers
5251 // a2: offsets vector
5252 Label next_capture, done;
5253 // Capture register counter starts from number of capture registers and
5254 // counts down until wrapping after zero.
5255 __ Addu(a0,
5256 last_match_info_elements,
5257 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
5258 __ bind(&next_capture);
5259 __ Subu(a1, a1, Operand(1));
5260 __ Branch(&done, lt, a1, Operand(zero_reg));
5261 // Read the value from the static offsets vector buffer.
5262 __ lw(a3, MemOperand(a2, 0));
5263 __ addiu(a2, a2, kPointerSize);
5264 // Store the smi value in the last match info.
5265 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
5266 __ sw(a3, MemOperand(a0, 0));
5267 __ Branch(&next_capture, USE_DELAY_SLOT);
5268 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
5272 // Return last match info.
5273 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
5276 // External string. Short external strings have already been ruled out.
5278 __ bind(&external_string);
5279 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5280 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
5281 if (FLAG_debug_code) {
5282 // Assert that we do not have a cons or slice (indirect strings) here.
5283 // Sequential strings have already been ruled out.
5284 __ And(at, a0, Operand(kIsIndirectStringMask));
5286 "external string expected, but not found",
5291 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
5292 // Move the pointer so that offset-wise, it looks like a sequential string.
5293 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
5294 __ Subu(subject,
5295 subject,
5296 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
5297 __ jmp(&seq_string);
5299 // Do the runtime call to execute the regexp.
5301 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5302 #endif // V8_INTERPRETED_REGEXP
5306 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
5307 const int kMaxInlineLength = 100;
5310 __ lw(a1, MemOperand(sp, kPointerSize * 2));
5311 STATIC_ASSERT(kSmiTag == 0);
5312 STATIC_ASSERT(kSmiTagSize == 1);
5313 __ JumpIfNotSmi(a1, &slowcase);
5314 __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
5315 // Smi-tagging is equivalent to multiplying by 2.
5316 // Allocate RegExpResult followed by FixedArray with size in ebx.
5317 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
5318 // Elements: [Map][Length][..elements..]
5319 // Size of JSArray with two in-object properties and the header of a
5320 // FixedArray.
5321 const int objects_size =
5322 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
5323 __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
5324 __ Addu(a2, t1, Operand(objects_size));
5325 __ AllocateInNewSpace(
5326 a2, // In: Size, in words.
5327 v0, // Out: Start of allocation (tagged).
5328 a3, // Scratch register.
5329 t0, // Scratch register.
5330 &slowcase,
5331 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
5332 // v0: Start of allocated area, object-tagged.
5333 // a1: Number of elements in array, as smi.
5334 // t1: Number of elements, untagged.
5336 // Set JSArray map to global.regexp_result_map().
5337 // Set empty properties FixedArray.
5338 // Set elements to point to FixedArray allocated right after the JSArray.
5339 // Interleave operations for better latency.
5340 __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
5341 __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
5342 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
5343 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
5344 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
5345 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
5346 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
5347 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
5349 // Set input, index and length fields from arguments.
5350 __ lw(a1, MemOperand(sp, kPointerSize * 0));
5351 __ lw(a2, MemOperand(sp, kPointerSize * 1));
5352 __ lw(t2, MemOperand(sp, kPointerSize * 2));
5353 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
5354 __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
5355 __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
5357 // Fill out the elements FixedArray.
5358 // v0: JSArray, tagged.
5359 // a3: FixedArray, tagged.
5360 // t1: Number of elements in array, untagged.
5363 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
5364 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
5365 // Set FixedArray length.
5366 __ sll(t2, t1, kSmiTagSize);
5367 __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
5368 // Fill contents of fixed-array with the-hole.
5369 __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
5370 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5371 // Fill fixed array elements with hole.
5372 // v0: JSArray, tagged.
5374 // a3: Start of elements in FixedArray.
5375 // t1: Number of elements to fill.
5377 __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
5378 __ addu(t1, t1, a3); // Point past last element to store.
5380 __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
5381 __ sw(a2, MemOperand(a3));
5382 __ Branch(&loop, USE_DELAY_SLOT);
5383 __ addiu(a3, a3, kPointerSize); // In branch delay slot.
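// The loop above is the usual fill pattern; a C++ sketch (pointer names are
// illustrative):
//
//   for (Object** p = elements_start; p < elements_end; p++) *p = the_hole;
//
// with the pointer increment placed in the branch delay slot.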
5389 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
5393 static void GenerateRecordCallTarget(MacroAssembler* masm) {
5394 // Cache the called function in a global property cell. Cache states
5395 // are uninitialized, monomorphic (indicated by a JSFunction), and
5396 // megamorphic.
5397 // a1 : the function to call
5398 // a2 : cache cell for call target
5401 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
5402 masm->isolate()->heap()->undefined_value());
5403 ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
5404 masm->isolate()->heap()->the_hole_value());
5406 // Load the cache state into a3.
5407 __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
5409 // A monomorphic cache hit or an already megamorphic state: invoke the
5410 // function without changing the state.
5411 __ Branch(&done, eq, a3, Operand(a1));
5412 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5413 __ Branch(&done, eq, a3, Operand(at));
5415 // A monomorphic miss (i.e., here the cache is not uninitialized) goes
5416 // megamorphic.
5417 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5419 __ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
5420 // An uninitialized cache is patched with the function.
5421 // Store a1 in the delay slot. This may or may not get overwritten depending
5422 // on the result of the comparison.
5423 __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
5424 // No need for a write barrier here - cells are rescanned.
5426 // MegamorphicSentinel is an immortal immovable object (undefined) so no
5427 // write-barrier is needed.
5428 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5429 __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
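// State machine implemented above for the call-target cell (sketch; the
// sentinels are the ones asserted at the top of this function):
//
//   the_hole  (uninitialized)  --call f-->  f          (monomorphic)
//   f         (monomorphic)    --call f-->  f          (unchanged)
//   f         (monomorphic)    --call g-->  undefined  (megamorphic)
//   undefined (megamorphic)    --call *-->  undefined  (unchanged)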
5435 void CallFunctionStub::Generate(MacroAssembler* masm) {
5436 // a1 : the function to call
5437 // a2 : cache cell for call target
5438 Label slow, non_function;
5440 // The receiver might implicitly be the global object. This is
5441 // indicated by passing the hole as the receiver to the call function stub.
5443 if (ReceiverMightBeImplicit()) {
5445 // Get the receiver from the stack.
5446 // function, receiver [, arguments]
5447 __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
5448 // Call as function is indicated with the hole.
5449 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5450 __ Branch(&call, ne, t0, Operand(at));
5451 // Patch the receiver on the stack with the global receiver object.
5452 __ lw(a3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
5453 __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
5454 __ sw(a3, MemOperand(sp, argc_ * kPointerSize));
5458 // Check that the function is really a JavaScript function.
5459 // a1: pushed function (to be verified)
5460 __ JumpIfSmi(a1, &non_function);
5461 // Get the map of the function object.
5462 __ GetObjectType(a1, a3, a3);
5463 __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
5465 if (RecordCallTarget()) {
5466 GenerateRecordCallTarget(masm);
5469 // Fast-case: Invoke the function now.
5470 // a1: pushed function
5471 ParameterCount actual(argc_);
5473 if (ReceiverMightBeImplicit()) {
5474 Label call_as_function;
5475 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5476 __ Branch(&call_as_function, eq, t0, Operand(at));
5477 __ InvokeFunction(a1,
5482 __ bind(&call_as_function);
5484 __ InvokeFunction(a1,
5490 // Slow-case: Non-function called.
5492 if (RecordCallTarget()) {
5493 // If there is a call target cache, mark it megamorphic in the
5494 // non-function case. MegamorphicSentinel is an immortal immovable
5495 // object (undefined) so no write barrier is needed.
5496 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
5497 masm->isolate()->heap()->undefined_value());
5498 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5499 __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
5501 // Check for function proxy.
5502 __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
5503 __ push(a1); // Put proxy as additional argument.
5504 __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
5505 __ li(a2, Operand(0, RelocInfo::NONE));
5506 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
5507 __ SetCallKind(t1, CALL_AS_METHOD);
5509 Handle<Code> adaptor =
5510 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5511 __ Jump(adaptor, RelocInfo::CODE_TARGET);
5514 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5515 // of the original receiver from the call site).
5516 __ bind(&non_function);
5517 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
5518 __ li(a0, Operand(argc_)); // Set up the number of arguments.
5519 __ mov(a2, zero_reg);
5520 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
5521 __ SetCallKind(t1, CALL_AS_METHOD);
5522 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5523 RelocInfo::CODE_TARGET);
5527 void CallConstructStub::Generate(MacroAssembler* masm) {
5528 // a0 : number of arguments
5529 // a1 : the function to call
5530 // a2 : cache cell for call target
5531 Label slow, non_function_call;
5533 // Check that the function is not a smi.
5534 __ JumpIfSmi(a1, &non_function_call);
5535 // Check that the function is a JSFunction.
5536 __ GetObjectType(a1, a3, a3);
5537 __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
5539 if (RecordCallTarget()) {
5540 GenerateRecordCallTarget(masm);
5543 // Jump to the function-specific construct stub.
5544 __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
5545 __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
5546 __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
5549 // a0: number of arguments
5550 // a1: called object
5554 __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
5555 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
5558 __ bind(&non_function_call);
5559 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
5561 // Set expected number of arguments to zero (not changing a0).
5562 __ li(a2, Operand(0, RelocInfo::NONE));
5563 __ SetCallKind(t1, CALL_AS_METHOD);
5564 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5565 RelocInfo::CODE_TARGET);
5569 // Unfortunately you have to run without snapshots to see most of these
5570 // names in the profile since most compare stubs end up in the snapshot.
5571 void CompareStub::PrintName(StringStream* stream) {
5572 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5573 (lhs_.is(a1) && rhs_.is(a0)));
5574 const char* cc_name;
5576 case lt: cc_name = "LT"; break;
5577 case gt: cc_name = "GT"; break;
5578 case le: cc_name = "LE"; break;
5579 case ge: cc_name = "GE"; break;
5580 case eq: cc_name = "EQ"; break;
5581 case ne: cc_name = "NE"; break;
5582 default: cc_name = "UnknownCondition"; break;
5584 bool is_equality = cc_ == eq || cc_ == ne;
5585 stream->Add("CompareStub_%s", cc_name);
5586 stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
5587 stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
5588 if (strict_ && is_equality) stream->Add("_STRICT");
5589 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5590 if (!include_number_compare_) stream->Add("_NO_NUMBER");
5591 if (!include_smi_compare_) stream->Add("_NO_SMI");
5595 int CompareStub::MinorKey() {
5596 // Encode the two parameters in a unique 16 bit value.
5597 ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
5598 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5599 (lhs_.is(a1) && rhs_.is(a0)));
5600 return ConditionField::encode(static_cast<unsigned>(cc_))
5601 | RegisterField::encode(lhs_.is(a0))
5602 | StrictField::encode(strict_)
5603 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5604 | IncludeSmiCompareField::encode(include_smi_compare_);
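
// Illustrative sketch of the kind of bit packing MinorKey() performs. The
// real field offsets and widths come from the BitField definitions used
// above (ConditionField, RegisterField, ...); the positions below are
// assumptions for the sketch only, not the actual encoding.
//
static inline int PackCompareMinorKeySketch(unsigned condition,
                                            bool lhs_is_a0,
                                            bool strict,
                                            bool never_nan_nan,
                                            bool include_smi_compare) {
  return static_cast<int>(condition) |       // Low bits: the condition code.
         (lhs_is_a0 ? 1 << 14 : 0) |         // Which register holds lhs.
         (strict ? 1 << 15 : 0) |
         (never_nan_nan ? 1 << 16 : 0) |
         (include_smi_compare ? 1 << 17 : 0);
}
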
5608 // StringCharCodeAtGenerator.
5609 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5612 Label got_char_code;
5613 Label sliced_string;
5615 ASSERT(!t0.is(index_));
5616 ASSERT(!t0.is(result_));
5617 ASSERT(!t0.is(object_));
5619 // If the receiver is a smi trigger the non-string case.
5620 __ JumpIfSmi(object_, receiver_not_string_);
5622 // Fetch the instance type of the receiver into result register.
5623 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5624 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5625 // If the receiver is not a string trigger the non-string case.
5626 __ And(t0, result_, Operand(kIsNotStringMask));
5627 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
5629 // If the index is non-smi trigger the non-smi case.
5630 __ JumpIfNotSmi(index_, &index_not_smi_);
5632 __ bind(&got_smi_index_);
5634 // Check for index out of range.
5635 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
5636 __ Branch(index_out_of_range_, ls, t0, Operand(index_));
5638 __ sra(index_, index_, kSmiTagSize);
5640 StringCharLoadGenerator::Generate(masm,
5646 __ sll(result_, result_, kSmiTagSize);
5651 void StringCharCodeAtGenerator::GenerateSlow(
5652 MacroAssembler* masm,
5653 const RuntimeCallHelper& call_helper) {
5654 __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5656 // Index is not a smi.
5657 __ bind(&index_not_smi_);
5658 // If index is a heap number, try converting it to an integer.
5661 Heap::kHeapNumberMapRootIndex,
5664 call_helper.BeforeCall(masm);
5665 // Consumed by runtime conversion function:
5666 __ Push(object_, index_);
5667 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5668 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5670 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5671 // NumberToSmi discards numbers that are not exact integers.
5672 __ CallRuntime(Runtime::kNumberToSmi, 1);
5675 // Save the conversion result before the pop instructions below
5676 // have a chance to overwrite it.
5678 __ Move(index_, v0);
5680 // Reload the instance type.
5681 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5682 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5683 call_helper.AfterCall(masm);
5684 // If index is still not a smi, it must be out of range.
5685 __ JumpIfNotSmi(index_, index_out_of_range_);
5686 // Otherwise, return to the fast path.
5687 __ Branch(&got_smi_index_);
5689 // Call runtime. We get here when the receiver is a string and the
5690 // index is a number, but getting the actual character is too complex
5691 // (e.g., when the string needs to be flattened).
5692 __ bind(&call_runtime_);
5693 call_helper.BeforeCall(masm);
5694 __ sll(index_, index_, kSmiTagSize);
5695 __ Push(object_, index_);
5696 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5698 __ Move(result_, v0);
5700 call_helper.AfterCall(masm);
5703 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5707 // -------------------------------------------------------------------------
5708 // StringCharFromCodeGenerator
5710 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5711 // Fast case of Heap::LookupSingleCharacterStringFromCode.
5713 ASSERT(!t0.is(result_));
5714 ASSERT(!t0.is(code_));
5716 STATIC_ASSERT(kSmiTag == 0);
5717 STATIC_ASSERT(kSmiShiftSize == 0);
5718 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5721 Operand(kSmiTagMask |
5722 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5723 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
5725 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5726 // At this point code register contains smi tagged ASCII char code.
5727 STATIC_ASSERT(kSmiTag == 0);
5728 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
5729 __ Addu(result_, result_, t0);
5730 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5731 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
5732 __ Branch(&slow_case_, eq, result_, Operand(t0));
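
// Scalar sketch of the fast path above: a single masked test rejects both
// non-smi codes and codes above the maximum ASCII char code, then the
// single-character string cache is indexed by the untagged code. All names
// and the 127 limit are assumptions for the sketch only.
//
static inline bool SingleCharCacheLookupSketch(unsigned tagged_code,
                                               const unsigned* cache,
                                               unsigned undefined_value,
                                               unsigned* result) {
  const unsigned kMaxAsciiCharCodeSketch = 127;  // Assumed limit.
  const unsigned kSmiTagMaskSketch = 1;          // Low bit set => not a smi.
  if ((tagged_code &
       (kSmiTagMaskSketch | (~kMaxAsciiCharCodeSketch << 1))) != 0) {
    return false;                                // Non-smi or out of range.
  }
  unsigned entry = cache[tagged_code >> 1];      // Untag and index the cache.
  if (entry == undefined_value) return false;    // Not cached: slow case.
  *result = entry;
  return true;
}
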
5737 void StringCharFromCodeGenerator::GenerateSlow(
5738 MacroAssembler* masm,
5739 const RuntimeCallHelper& call_helper) {
5740 __ Abort("Unexpected fallthrough to CharFromCode slow case");
5742 __ bind(&slow_case_);
5743 call_helper.BeforeCall(masm);
5745 __ CallRuntime(Runtime::kCharFromCode, 1);
5746 __ Move(result_, v0);
5748 call_helper.AfterCall(masm);
5751 __ Abort("Unexpected fallthrough from CharFromCode slow case");
5755 // -------------------------------------------------------------------------
5756 // StringCharAtGenerator
5758 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5759 char_code_at_generator_.GenerateFast(masm);
5760 char_from_code_generator_.GenerateFast(masm);
5764 void StringCharAtGenerator::GenerateSlow(
5765 MacroAssembler* masm,
5766 const RuntimeCallHelper& call_helper) {
5767 char_code_at_generator_.GenerateSlow(masm, call_helper);
5768 char_from_code_generator_.GenerateSlow(masm, call_helper);
5772 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5780 // This loop just copies one character at a time, as it is only used for
5781 // very short strings.
5783 __ addu(count, count, count);
5785 __ Branch(&done, eq, count, Operand(zero_reg));
5786 __ addu(count, dest, count); // Count now points to the last dest byte.
5789 __ lbu(scratch, MemOperand(src));
5790 __ addiu(src, src, 1);
5791 __ sb(scratch, MemOperand(dest));
5792 __ addiu(dest, dest, 1);
5793 __ Branch(&loop, lt, dest, Operand(count));
5799 enum CopyCharactersFlags {
5801 DEST_ALWAYS_ALIGNED = 2
5805 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5815 bool ascii = (flags & COPY_ASCII) != 0;
5816 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5818 if (dest_always_aligned && FLAG_debug_code) {
5819 // Check that the destination is actually word aligned if the flag says that it is.
5821 __ And(scratch4, dest, Operand(kPointerAlignmentMask));
5823 "Destination of copy not aligned.",
5828 const int kReadAlignment = 4;
5829 const int kReadAlignmentMask = kReadAlignment - 1;
5830 // Ensure that reading an entire aligned word containing the last character
5831 // of a string will not read outside the allocated area (because we pad up
5832 // to kObjectAlignment).
5833 STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5834 // Assumes word reads and writes are little endian.
5835 // Nothing to do for zero characters.
5839 __ addu(count, count, count);
5841 __ Branch(&done, eq, count, Operand(zero_reg));
5844 // Must copy at least eight bytes, otherwise just do it one byte at a time.
5845 __ Subu(scratch1, count, Operand(8));
5846 __ Addu(count, dest, Operand(count));
5847 Register limit = count; // Read until src equals this.
5848 __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
5850 if (!dest_always_aligned) {
5851 // Align dest by byte copying. Copies between zero and three bytes.
5852 __ And(scratch4, dest, Operand(kReadAlignmentMask));
5854 __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
5856 __ bind(&aligned_loop);
5857 __ lbu(scratch1, MemOperand(src));
5858 __ addiu(src, src, 1);
5859 __ sb(scratch1, MemOperand(dest));
5860 __ addiu(dest, dest, 1);
5861 __ addiu(scratch4, scratch4, 1);
5862 __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
5863 __ bind(&dest_aligned);
5868 __ And(scratch4, src, Operand(kReadAlignmentMask));
5869 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
5871 // Loop for src/dst that are not aligned the same way.
5872 // This loop uses lwl and lwr instructions. These instructions
5873 // depend on the endianness, and the implementation assumes little-endian.
5877 __ lwr(scratch1, MemOperand(src));
5878 __ Addu(src, src, Operand(kReadAlignment));
5879 __ lwl(scratch1, MemOperand(src, -1));
5880 __ sw(scratch1, MemOperand(dest));
5881 __ Addu(dest, dest, Operand(kReadAlignment));
5882 __ Subu(scratch2, limit, dest);
5883 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5886 __ Branch(&byte_loop);
5889 // Copy words from src to dest, until less than four bytes left.
5890 // Both src and dest are word aligned.
5891 __ bind(&simple_loop);
5895 __ lw(scratch1, MemOperand(src));
5896 __ Addu(src, src, Operand(kReadAlignment));
5897 __ sw(scratch1, MemOperand(dest));
5898 __ Addu(dest, dest, Operand(kReadAlignment));
5899 __ Subu(scratch2, limit, dest);
5900 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5903 // Copy bytes from src to dest until dest hits limit.
5904 __ bind(&byte_loop);
5905 // Test if dest has already reached the limit.
5906 __ Branch(&done, ge, dest, Operand(limit));
5907 __ lbu(scratch1, MemOperand(src));
5908 __ addiu(src, src, 1);
5909 __ sb(scratch1, MemOperand(dest));
5910 __ addiu(dest, dest, 1);
5911 __ Branch(&byte_loop);
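
// Plain C++ sketch of the copy strategy implemented above (illustrative
// only): short copies go byte by byte, longer copies align the destination
// with a byte loop, copy whole words while at least a word remains, then
// finish with a byte tail. The generated code reads possibly unaligned
// source words with lwr/lwl where the sketch copies bytes.
//
static inline void CopyCharsLongSketch(char* dest, const char* src, int count) {
  const int kWordSize = 4;
  if (count < 8) {                                     // Too short: byte copy.
    while (count-- > 0) *dest++ = *src++;
    return;
  }
  while ((reinterpret_cast<unsigned long>(dest) & (kWordSize - 1)) != 0) {
    *dest++ = *src++;                                  // Align the destination.
    count--;
  }
  while (count >= kWordSize) {                         // Word-at-a-time copy.
    for (int i = 0; i < kWordSize; i++) dest[i] = src[i];
    dest += kWordSize;
    src += kWordSize;
    count -= kWordSize;
  }
  while (count-- > 0) *dest++ = *src++;                // Byte tail.
}
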
5917 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5926 // Register scratch3 is the general scratch register in this function.
5927 Register scratch = scratch3;
5929 // Make sure that neither character is a digit, as such strings have a
5930 // different hash algorithm. Don't try to look for these in the symbol table.
5931 Label not_array_index;
5932 __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5933 __ Branch(&not_array_index,
5936 Operand(static_cast<int>('9' - '0')));
5937 __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5939 // If the check failed, combine both characters into a single halfword.
5940 // This is required by the contract of the method: code at the
5941 // not_found branch expects this combination in c1 register.
5943 __ sll(scratch1, c2, kBitsPerByte);
5944 __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5945 __ Or(c1, c1, scratch1);
5948 not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
5950 __ bind(&not_array_index);
5951 // Calculate the two character string hash.
5952 Register hash = scratch1;
5953 StringHelper::GenerateHashInit(masm, hash, c1);
5954 StringHelper::GenerateHashAddCharacter(masm, hash, c2);
5955 StringHelper::GenerateHashGetHash(masm, hash);
5957 // Collect the two characters in a register.
5958 Register chars = c1;
5959 __ sll(scratch, c2, kBitsPerByte);
5960 __ Or(chars, chars, scratch);
5962 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5963 // hash: hash of two character string.
5965 // Load symbol table.
5966 // Load address of first element of the symbol table.
5967 Register symbol_table = c2;
5968 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5970 Register undefined = scratch4;
5971 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5973 // Calculate capacity mask from the symbol table capacity.
5974 Register mask = scratch2;
5975 __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5976 __ sra(mask, mask, 1);
5977 __ Addu(mask, mask, -1);
5979 // Calculate untagged address of the first element of the symbol table.
5980 Register first_symbol_table_element = symbol_table;
5981 __ Addu(first_symbol_table_element, symbol_table,
5982 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5985 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5986 // hash: hash of two character string
5987 // mask: capacity mask
5988 // first_symbol_table_element: address of the first element of the symbol table
5990 // undefined: the undefined object
5993 // Perform a number of probes in the symbol table.
5994 const int kProbes = 4;
5995 Label found_in_symbol_table;
5996 Label next_probe[kProbes];
5997 Register candidate = scratch5; // Scratch register contains candidate.
5998 for (int i = 0; i < kProbes; i++) {
5999 // Calculate entry in symbol table.
6001 __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
6003 __ mov(candidate, hash);
6006 __ And(candidate, candidate, Operand(mask));
6008 // Load the entry from the symbol table.
6009 STATIC_ASSERT(SymbolTable::kEntrySize == 1);
6010 __ sll(scratch, candidate, kPointerSizeLog2);
6011 __ Addu(scratch, scratch, first_symbol_table_element);
6012 __ lw(candidate, MemOperand(scratch));
6014 // If entry is undefined no string with this hash can be found.
6016 __ GetObjectType(candidate, scratch, scratch);
6017 __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
6019 __ Branch(not_found, eq, undefined, Operand(candidate));
6020 // Must be the hole (deleted entry).
6021 if (FLAG_debug_code) {
6022 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
6023 __ Assert(eq, "oddball in symbol table is not undefined or the hole",
6024 scratch, Operand(candidate));
6026 __ jmp(&next_probe[i]);
6028 __ bind(&is_string);
6030 // Check that the candidate is a non-external ASCII string. The instance
6031 // type is still in the scratch register from the CompareObjectType operation above.
6033 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
6035 // If length is not 2 the string is not a candidate.
6036 __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
6037 __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
6039 // Check if the two characters match.
6040 // Assumes that word load is little endian.
6041 __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
6042 __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
6043 __ bind(&next_probe[i]);
6046 // No matching 2 character string found by probing.
6049 // Scratch register contains result when we fall through to here.
6050 Register result = candidate;
6051 __ bind(&found_in_symbol_table);
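
// Scalar sketch of the probing loop above, over a flattened table of
// two-character entries (illustrative only; the real table holds heap
// strings, the sentinels stand in for the undefined and the-hole values,
// and the probe offsets are assumed to match SymbolTable::GetProbeOffset):
//
static inline int ProbeTwoCharTableSketch(const unsigned short* entries,
                                          unsigned capacity_mask,
                                          unsigned hash,
                                          unsigned short chars,
                                          unsigned short empty_sentinel,
                                          unsigned short deleted_sentinel) {
  const int kProbesSketch = 4;
  for (int i = 0; i < kProbesSketch; i++) {
    // Quadratic probe sequence: (hash + (i + i*i)/2) & mask.
    unsigned entry = (hash + static_cast<unsigned>(i + i * i) / 2) & capacity_mask;
    unsigned short candidate = entries[entry];
    if (candidate == empty_sentinel) return -1;   // Hash not present at all.
    if (candidate == deleted_sentinel) continue;  // Deleted entry: keep going.
    if (candidate == chars) return static_cast<int>(entry);  // Characters match.
  }
  return -1;  // Not found within the inlined probes.
}
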
6056 void StringHelper::GenerateHashInit(MacroAssembler* masm,
6058 Register character) {
6059 // hash = seed + character + ((seed + character) << 10);
6060 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
6061 // Untag smi seed and add the character.
6063 __ addu(hash, hash, character);
6064 __ sll(at, hash, 10);
6065 __ addu(hash, hash, at);
6066 // hash ^= hash >> 6;
6067 __ srl(at, hash, 6);
6068 __ xor_(hash, hash, at);
6072 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
6074 Register character) {
6075 // hash += character;
6076 __ addu(hash, hash, character);
6077 // hash += hash << 10;
6078 __ sll(at, hash, 10);
6079 __ addu(hash, hash, at);
6080 // hash ^= hash >> 6;
6081 __ srl(at, hash, 6);
6082 __ xor_(hash, hash, at);
6086 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
6088 // hash += hash << 3;
6089 __ sll(at, hash, 3);
6090 __ addu(hash, hash, at);
6091 // hash ^= hash >> 11;
6092 __ srl(at, hash, 11);
6093 __ xor_(hash, hash, at);
6094 // hash += hash << 15;
6095 __ sll(at, hash, 15);
6096 __ addu(hash, hash, at);
6098 __ li(at, Operand(String::kHashBitMask));
6099 __ and_(hash, hash, at);
6101 // if (hash == 0) hash = 27;
6102 __ ori(at, zero_reg, StringHasher::kZeroHash);
6103 __ Movz(hash, at, hash);
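
// Scalar form of the running hash computed by GenerateHashInit,
// GenerateHashAddCharacter and GenerateHashGetHash above (illustrative
// sketch; the 0x3FFFFFFF mask is an assumed value for String::kHashBitMask,
// the final shifts and the kZeroHash fallback mirror the generated code):
//
static inline unsigned StringHashSketch(unsigned seed,
                                        const unsigned char* chars,
                                        int length) {
  unsigned hash = seed;
  for (int i = 0; i < length; i++) {
    hash += chars[i];
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= 0x3FFFFFFF;            // Assumed String::kHashBitMask.
  return hash == 0 ? 27 : hash;  // StringHasher::kZeroHash fallback.
}
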
6107 void SubStringStub::Generate(MacroAssembler* masm) {
6109 // Stack frame on entry.
6110 // ra: return address
6115 // This stub is called from the native-call %_SubString(...), so
6116 // nothing can be assumed about the arguments. It is tested that:
6117 // "string" is a sequential string,
6118 // both "from" and "to" are smis, and
6119 // 0 <= from <= to <= string.length.
6120 // If any of these assumptions fail, we call the runtime system.
6122 const int kToOffset = 0 * kPointerSize;
6123 const int kFromOffset = 1 * kPointerSize;
6124 const int kStringOffset = 2 * kPointerSize;
6126 __ lw(a2, MemOperand(sp, kToOffset));
6127 __ lw(a3, MemOperand(sp, kFromOffset));
6128 STATIC_ASSERT(kFromOffset == kToOffset + 4);
6129 STATIC_ASSERT(kSmiTag == 0);
6130 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
6132 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
6133 // safe in this case.
6134 __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
6135 __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
6136 // Both a2 and a3 are untagged integers.
6138 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
6140 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
6141 __ Subu(a2, a2, a3);
6143 // Make sure first argument is a string.
6144 __ lw(v0, MemOperand(sp, kStringOffset));
6145 __ JumpIfSmi(v0, &runtime);
6146 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
6147 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
6148 __ And(t0, a1, Operand(kIsNotStringMask));
6150 __ Branch(&runtime, ne, t0, Operand(zero_reg));
6152 // Short-cut for the case of trivial substring.
6154 // v0: original string
6155 // a2: result string length
6156 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
6158 // Return original string.
6159 __ Branch(&return_v0, eq, a2, Operand(t0));
6160 // Longer than original string's length or negative: unsafe arguments.
6161 __ Branch(&runtime, hi, a2, Operand(t0));
6162 // Shorter than original string's length: an actual substring.
6164 // Deal with different string types: update the index if necessary
6165 // and put the underlying string into t1.
6166 // v0: original string
6167 // a1: instance type
6169 // a3: from index (untagged)
6170 Label underlying_unpacked, sliced_string, seq_or_external_string;
6171 // If the string is not indirect, it can only be sequential or external.
6172 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
6173 STATIC_ASSERT(kIsIndirectStringMask != 0);
6174 __ And(t0, a1, Operand(kIsIndirectStringMask));
6175 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
6176 // t0 is used as a scratch register and can be overwritten in either case.
6177 __ And(t0, a1, Operand(kSlicedNotConsMask));
6178 __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
6179 // Cons string. Check whether it is flat, then fetch first part.
6180 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
6181 __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
6182 __ Branch(&runtime, ne, t1, Operand(t0));
6183 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
6184 // Update instance type.
6185 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
6186 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
6187 __ jmp(&underlying_unpacked);
6189 __ bind(&sliced_string);
6190 // Sliced string. Fetch parent and correct start index by offset.
6191 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6192 __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6193 __ sra(t0, t0, 1); // Add offset to index.
6194 __ Addu(a3, a3, t0);
6195 // Update instance type.
6196 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
6197 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
6198 __ jmp(&underlying_unpacked);
6200 __ bind(&seq_or_external_string);
6201 // Sequential or external string. Just move string to the expected register.
6204 __ bind(&underlying_unpacked);
6206 if (FLAG_string_slices) {
6208 // t1: underlying subject string
6209 // a1: instance type of underlying subject string
6211 // a3: adjusted start index (untagged)
6212 // Short slice. Copy instead of slicing.
6213 __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
6214 // Allocate new sliced string. At this point we do not reload the instance
6215 // type including the string encoding because we simply rely on the info
6216 // provided by the original string. It does not matter if the original
6217 // string's encoding is wrong because we always have to recheck the encoding
6218 // of the newly created string's parent anyway due to externalized strings.
6219 Label two_byte_slice, set_slice_header;
6220 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
6221 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
6222 __ And(t0, a1, Operand(kStringEncodingMask));
6223 __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
6224 __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
6225 __ jmp(&set_slice_header);
6226 __ bind(&two_byte_slice);
6227 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
6228 __ bind(&set_slice_header);
6230 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6231 __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6234 __ bind(&copy_routine);
6237 // t1: underlying subject string
6238 // a1: instance type of underlying subject string
6240 // a3: adjusted start index (untagged)
6241 Label two_byte_sequential, sequential_string, allocate_result;
6242 STATIC_ASSERT(kExternalStringTag != 0);
6243 STATIC_ASSERT(kSeqStringTag == 0);
6244 __ And(t0, a1, Operand(kExternalStringTag));
6245 __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
6247 // Handle external string.
6248 // Rule out short external strings.
6249 STATIC_CHECK(kShortExternalStringTag != 0);
6250 __ And(t0, a1, Operand(kShortExternalStringTag));
6251 __ Branch(&runtime, ne, t0, Operand(zero_reg));
6252 __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
6253 // t1 already points to the first character of underlying string.
6254 __ jmp(&allocate_result);
6256 __ bind(&sequential_string);
6257 // Locate first character of underlying subject string.
6258 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
6259 __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6261 __ bind(&allocate_result);
6262 // Sequential ASCII string. Allocate the result.
6263 STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
6264 __ And(t0, a1, Operand(kStringEncodingMask));
6265 __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
6267 // Allocate and copy the resulting ASCII string.
6268 __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
6270 // Locate first character of substring to copy.
6271 __ Addu(t1, t1, a3);
6273 // Locate first character of result.
6274 __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6276 // v0: result string
6277 // a1: first character of result string
6278 // a2: result string length
6279 // t1: first character of substring to copy
6280 STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
6281 StringHelper::GenerateCopyCharactersLong(
6282 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
6285 // Allocate and copy the resulting two-byte string.
6286 __ bind(&two_byte_sequential);
6287 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
6289 // Locate first character of substring to copy.
6290 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
6292 __ Addu(t1, t1, t0);
6293 // Locate first character of result.
6294 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6296 // v0: result string.
6297 // a1: first character of result.
6298 // a2: result length.
6299 // t1: first character of substring to copy.
6300 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
6301 StringHelper::GenerateCopyCharactersLong(
6302 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
6304 __ bind(&return_v0);
6305 Counters* counters = masm->isolate()->counters();
6306 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
6309 // Just jump to runtime to create the substring.
6311 __ TailCallRuntime(Runtime::kSubString, 3, 1);
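
// Illustrative scalar sketch of the decisions the fast path above makes.
// Only the SlicedString::kMinLength threshold and FLAG_string_slices mirror
// constants referenced by the generated code; everything else is schematic.
//
enum SubStringKindSketch { kReturnOriginal, kMakeSlice, kCopyCharacters };
static inline SubStringKindSketch ClassifySubStringSketch(int from, int to,
                                                          int string_length,
                                                          bool string_slices_enabled,
                                                          int sliced_min_length) {
  int length = to - from;                    // Already validated: 0 <= from <= to.
  if (length == string_length) return kReturnOriginal;   // Trivial substring.
  if (string_slices_enabled && length >= sliced_min_length) return kMakeSlice;
  return kCopyCharacters;                    // Short result: copy into a new string.
}
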
6315 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6320 Register scratch3) {
6321 Register length = scratch1;
6324 Label strings_not_equal, check_zero_length;
6325 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
6326 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6327 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
6328 __ bind(&strings_not_equal);
6329 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
6332 // Check if the length is zero.
6333 Label compare_chars;
6334 __ bind(&check_zero_length);
6335 STATIC_ASSERT(kSmiTag == 0);
6336 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
6337 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6340 // Compare characters.
6341 __ bind(&compare_chars);
6343 GenerateAsciiCharsCompareLoop(masm,
6344 left, right, length, scratch2, scratch3, v0,
6345 &strings_not_equal);
6347 // Characters are equal.
6348 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6353 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6359 Register scratch4) {
6360 Label result_not_equal, compare_lengths;
6361 // Find minimum length and length difference.
6362 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
6363 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6364 __ Subu(scratch3, scratch1, Operand(scratch2));
6365 Register length_delta = scratch3;
6366 __ slt(scratch4, scratch2, scratch1);
6367 __ Movn(scratch1, scratch2, scratch4);
6368 Register min_length = scratch1;
6369 STATIC_ASSERT(kSmiTag == 0);
6370 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
6373 GenerateAsciiCharsCompareLoop(masm,
6374 left, right, min_length, scratch2, scratch4, v0,
6377 // Compare lengths - strings up to min-length are equal.
6378 __ bind(&compare_lengths);
6379 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6380 // Use length_delta as result if it's zero.
6381 __ mov(scratch2, length_delta);
6382 __ mov(scratch4, zero_reg);
6383 __ mov(v0, zero_reg);
6385 __ bind(&result_not_equal);
6386 // Conditionally update the result based on either length_delta or
6387 // the last comparison performed in the loop above.
6389 __ Branch(&ret, eq, scratch2, Operand(scratch4));
6390 __ li(v0, Operand(Smi::FromInt(GREATER)));
6391 __ Branch(&ret, gt, scratch2, Operand(scratch4));
6392 __ li(v0, Operand(Smi::FromInt(LESS)));
6398 void StringCompareStub::GenerateAsciiCharsCompareLoop(
6399 MacroAssembler* masm,
6406 Label* chars_not_equal) {
6407 // Change index to run from -length to -1 by adding length to string
6408 // start. This means that loop ends when index reaches zero, which
6409 // doesn't need an additional compare.
6410 __ SmiUntag(length);
6411 __ Addu(scratch1, length,
6412 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6413 __ Addu(left, left, Operand(scratch1));
6414 __ Addu(right, right, Operand(scratch1));
6415 __ Subu(length, zero_reg, length);
6416 Register index = length; // index = -length;
6422 __ Addu(scratch3, left, index);
6423 __ lbu(scratch1, MemOperand(scratch3));
6424 __ Addu(scratch3, right, index);
6425 __ lbu(scratch2, MemOperand(scratch3));
6426 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
6427 __ Addu(index, index, 1);
6428 __ Branch(&loop, ne, index, Operand(zero_reg));
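
// Scalar sketch of the negative-index compare loop above: both string
// pointers are advanced to their ends and the index runs from -length up to
// zero, so the loop termination test is simply "index == 0" (illustrative
// sketch only).
//
static inline bool AsciiCharsEqualSketch(const char* left,
                                         const char* right,
                                         int length) {
  left += length;
  right += length;
  for (int index = -length; index != 0; index++) {
    if (left[index] != right[index]) return false;
  }
  return true;
}
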
6432 void StringCompareStub::Generate(MacroAssembler* masm) {
6435 Counters* counters = masm->isolate()->counters();
6437 // Stack frame on entry.
6438 // sp[0]: right string
6439 // sp[4]: left string
6440 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
6441 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
6444 __ Branch(&not_same, ne, a0, Operand(a1));
6445 STATIC_ASSERT(EQUAL == 0);
6446 STATIC_ASSERT(kSmiTag == 0);
6447 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6448 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
6453 // Check that both objects are sequential ASCII strings.
6454 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
6456 // Compare flat ASCII strings natively. Remove arguments from stack first.
6457 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
6458 __ Addu(sp, sp, Operand(2 * kPointerSize));
6459 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
6462 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6466 void StringAddStub::Generate(MacroAssembler* masm) {
6467 Label call_runtime, call_builtin;
6468 Builtins::JavaScript builtin_id = Builtins::ADD;
6470 Counters* counters = masm->isolate()->counters();
6473 // sp[0]: second argument (right).
6474 // sp[4]: first argument (left).
6476 // Load the two arguments.
6477 __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
6478 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
6480 // Make sure that both arguments are strings if not known in advance.
6481 if (flags_ == NO_STRING_ADD_FLAGS) {
6482 __ JumpIfEitherSmi(a0, a1, &call_runtime);
6483 // Load instance types.
6484 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6485 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6486 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6487 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6488 STATIC_ASSERT(kStringTag == 0);
6489 // If either is not a string, go to runtime.
6490 __ Or(t4, t0, Operand(t1));
6491 __ And(t4, t4, Operand(kIsNotStringMask));
6492 __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6494 // Here at least one of the arguments is definitely a string.
6495 // We convert the one that is not known to be a string.
6496 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6497 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6498 GenerateConvertArgument(
6499 masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
6500 builtin_id = Builtins::STRING_ADD_RIGHT;
6501 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6502 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6503 GenerateConvertArgument(
6504 masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
6505 builtin_id = Builtins::STRING_ADD_LEFT;
6509 // Both arguments are strings.
6511 // a1: second string
6512 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6513 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6515 Label strings_not_empty;
6516 // Check if either of the strings is empty. In that case return the other.
6517 // These tests use a zero-length check on the string length, which is a Smi.
6518 // Assert that Smi::FromInt(0) is really 0.
6519 STATIC_ASSERT(kSmiTag == 0);
6520 ASSERT(Smi::FromInt(0) == 0);
6521 __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
6522 __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
6523 __ mov(v0, a0); // Assume we'll return first string (from a0).
6524 __ Movz(v0, a1, a2); // If first is empty, return second (from a1).
6525 __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
6526 __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
6527 __ and_(t4, t4, t5); // Branch if both strings were non-empty.
6528 __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
6530 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6533 __ bind(&strings_not_empty);
6536 // Untag both string-lengths.
6537 __ sra(a2, a2, kSmiTagSize);
6538 __ sra(a3, a3, kSmiTagSize);
6540 // Both strings are non-empty.
6542 // a1: second string
6543 // a2: length of first string
6544 // a3: length of second string
6545 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6546 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6547 // Look at the length of the result of adding the two strings.
6548 Label string_add_flat_result, longer_than_two;
6549 // Adding two lengths can't overflow.
6550 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
6551 __ Addu(t2, a2, Operand(a3));
6552 // Use the symbol table when adding two one-character strings, as it
6553 // helps later optimizations to return a symbol here.
6554 __ Branch(&longer_than_two, ne, t2, Operand(2));
6556 // Check that both strings are non-external ASCII strings.
6557 if (flags_ != NO_STRING_ADD_FLAGS) {
6558 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6559 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6560 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6561 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6563 __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
6566 // Get the two characters forming the new string.
6567 __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
6568 __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
6570 // Try to lookup two character string in symbol table. If it is not found
6571 // just allocate a new one.
6572 Label make_two_character_string;
6573 StringHelper::GenerateTwoCharacterSymbolTableProbe(
6574 masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
6575 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6578 __ bind(&make_two_character_string);
6579 // The resulting string has length 2 and the first characters of the two
6580 // strings are combined into a single halfword in the a2 register.
6581 // This lets us fill the resulting string with a single halfword store
6582 // instruction instead of two loops (which assumes the processor is
6583 // little-endian).
6584 __ li(t2, Operand(2));
6585 __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6586 __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6587 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6590 __ bind(&longer_than_two);
6591 // Check if resulting string will be flat.
6592 __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
6593 // Handle exceptionally long strings in the runtime system.
6594 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6595 ASSERT(IsPowerOf2(String::kMaxLength + 1));
6596 // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
6597 __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
6599 // If result is not supposed to be flat, allocate a cons string object.
6600 // If both strings are ASCII the result is an ASCII cons string.
6601 if (flags_ != NO_STRING_ADD_FLAGS) {
6602 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6603 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6604 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6605 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6607 Label non_ascii, allocated, ascii_data;
6608 STATIC_ASSERT(kTwoByteStringTag == 0);
6609 // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
6610 __ And(t4, t0, Operand(t1));
6611 __ And(t4, t4, Operand(kStringEncodingMask));
6612 __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
6614 // Allocate an ASCII cons string.
6615 __ bind(&ascii_data);
6616 __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
6617 __ bind(&allocated);
6618 // Fill the fields of the cons string.
6619 __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
6620 __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
6621 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6624 __ bind(&non_ascii);
6625 // At least one of the strings is two-byte. Check whether it happens
6626 // to contain only ASCII characters.
6627 // t0: first instance type.
6628 // t1: second instance type.
6629 // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
6630 __ And(at, t0, Operand(kAsciiDataHintMask));
6631 __ and_(at, at, t1);
6632 __ Branch(&ascii_data, ne, at, Operand(zero_reg));
6634 __ xor_(t0, t0, t1);
6635 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
6636 __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6637 __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6639 // Allocate a two byte cons string.
6640 __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
6641 __ Branch(&allocated);
6643 // We cannot encounter sliced strings or cons strings here since:
6644 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
6645 // Handle creating a flat result from either external or sequential strings.
6646 // Locate the first characters' locations.
6648 // a1: second string
6649 // a2: length of first string
6650 // a3: length of second string
6651 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6652 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6653 // t2: sum of lengths.
6654 Label first_prepared, second_prepared;
6655 __ bind(&string_add_flat_result);
6656 if (flags_ != NO_STRING_ADD_FLAGS) {
6657 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6658 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6659 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6660 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6662 // Check whether both strings have the same encoding.
6663 __ Xor(t3, t0, Operand(t1));
6664 __ And(t3, t3, Operand(kStringEncodingMask));
6665 __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
6667 STATIC_ASSERT(kSeqStringTag == 0);
6668 __ And(t4, t0, Operand(kStringRepresentationMask));
6670 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
6671 Label skip_first_add;
6672 __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
6673 __ Branch(USE_DELAY_SLOT, &first_prepared);
6674 __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
6675 __ bind(&skip_first_add);
6676 // External string: rule out short external string and load string resource.
6677 STATIC_ASSERT(kShortExternalStringTag != 0);
6678 __ And(t4, t0, Operand(kShortExternalStringMask));
6679 __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6680 __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
6681 __ bind(&first_prepared);
6683 STATIC_ASSERT(kSeqStringTag == 0);
6684 __ And(t4, t1, Operand(kStringRepresentationMask));
6685 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
6686 Label skip_second_add;
6687 __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
6688 __ Branch(USE_DELAY_SLOT, &second_prepared);
6689 __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
6690 __ bind(&skip_second_add);
6691 // External string: rule out short external string and load string resource.
6692 STATIC_ASSERT(kShortExternalStringTag != 0);
6693 __ And(t4, t1, Operand(kShortExternalStringMask));
6694 __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6695 __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
6696 __ bind(&second_prepared);
6698 Label non_ascii_string_add_flat_result;
6699 // t3: first character of first string
6700 // a1: first character of second string
6701 // a2: length of first string
6702 // a3: length of second string
6703 // t2: sum of lengths.
6704 // Both strings have the same encoding.
6705 STATIC_ASSERT(kTwoByteStringTag == 0);
6706 __ And(t4, t1, Operand(kStringEncodingMask));
6707 __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
6709 __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6710 __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6711 // v0: result string.
6712 // t3: first character of first string.
6713 // a1: first character of second string
6714 // a2: length of first string.
6715 // a3: length of second string.
6716 // t2: first character of result.
6718 StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
6719 // t2: next character of result.
6720 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
6721 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6724 __ bind(&non_ascii_string_add_flat_result);
6725 __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
6726 __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6727 // v0: result string.
6728 // t3: first character of first string.
6729 // a1: first character of second string.
6730 // a2: length of first string.
6731 // a3: length of second string.
6732 // t2: first character of result.
6733 StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
6734 // t2: next character of result.
6735 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
6737 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6740 // Just jump to runtime to add the two strings.
6741 __ bind(&call_runtime);
6742 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6744 if (call_builtin.is_linked()) {
6745 __ bind(&call_builtin);
6746 __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
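
// Rough scalar sketch of how the fast paths above pick a result kind
// (illustrative only; cons_min_length and max_length mirror
// ConsString::kMinLength and String::kMaxLength, the rest is schematic):
//
enum StringAddKindSketch {
  kReturnLeft, kReturnRight, kTwoCharacterString, kConsString, kFlatCopy, kRuntime
};
static inline StringAddKindSketch ClassifyStringAddSketch(int left_length,
                                                          int right_length,
                                                          int cons_min_length,
                                                          int max_length) {
  if (left_length == 0) return kReturnRight;
  if (right_length == 0) return kReturnLeft;
  int length = left_length + right_length;
  if (length == 2) return kTwoCharacterString;  // Probe the symbol table first.
  if (length > max_length) return kRuntime;     // Exceptionally long: runtime.
  if (length >= cons_min_length) return kConsString;
  return kFlatCopy;                             // Short result: flat string + copy.
}
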
6751 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6759 // First check if the argument is already a string.
6760 Label not_string, done;
6761 __ JumpIfSmi(arg, &not_string);
6762 __ GetObjectType(arg, scratch1, scratch1);
6763 __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
6765 // Check the number to string cache.
6767 __ bind(&not_string);
6768 // Puts the cached result into scratch1.
6769 NumberToStringStub::GenerateLookupNumberStringCache(masm,
6777 __ mov(arg, scratch1);
6778 __ sw(arg, MemOperand(sp, stack_offset));
6781 // Check if the argument is a safe string wrapper.
6782 __ bind(&not_cached);
6783 __ JumpIfSmi(arg, slow);
6784 __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
6785 __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
6786 __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6787 __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
6788 __ And(scratch2, scratch2, scratch4);
6789 __ Branch(slow, ne, scratch2, Operand(scratch4));
6790 __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6791 __ sw(arg, MemOperand(sp, stack_offset));
6797 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6798 ASSERT(state_ == CompareIC::SMIS);
6801 __ JumpIfNotSmi(a2, &miss);
6803 if (GetCondition() == eq) {
6804 // For equality we do not care about the sign of the result.
6805 __ Subu(v0, a0, a1);
6807 // Untag before subtracting to avoid handling overflow.
6810 __ Subu(v0, a1, a0);
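
// Scalar sketch of the smi comparison above (illustrative only): for
// equality the raw subtraction of the two tagged values suffices, since
// only zero vs. non-zero matters; for ordered comparisons the values are
// untagged first (kSmiTagSize == 1) so the subtraction cannot overflow.
// The operand order follows the stub's calling convention.
//
static inline int CompareSmisSketch(int tagged_lhs, int tagged_rhs,
                                    bool equality_only) {
  if (equality_only) {
    // Wrapping subtraction is fine here: only zero vs. non-zero is used.
    return static_cast<int>(static_cast<unsigned>(tagged_lhs) -
                            static_cast<unsigned>(tagged_rhs));
  }
  return (tagged_lhs >> 1) - (tagged_rhs >> 1);  // Untag, then subtract.
}
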
6819 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6820 ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6823 Label unordered, maybe_undefined1, maybe_undefined2;
6825 __ And(a2, a1, Operand(a0));
6826 __ JumpIfSmi(a2, &generic_stub);
6828 __ GetObjectType(a0, a2, a2);
6829 __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
6830 __ GetObjectType(a1, a2, a2);
6831 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
6833 // Inlining the double comparison and falling back to the general compare
6834 // stub if NaN is involved or FPU is unsupported.
6835 if (CpuFeatures::IsSupported(FPU)) {
6836 CpuFeatures::Scope scope(FPU);
6838 // Load left and right operand.
6839 __ Subu(a2, a1, Operand(kHeapObjectTag));
6840 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
6841 __ Subu(a2, a0, Operand(kHeapObjectTag));
6842 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
6844 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
6845 Label fpu_eq, fpu_lt;
6846 // Test if equal, and also handle the unordered/NaN case.
6847 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
6849 // Test if less (unordered case is already handled).
6850 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
6852 // Otherwise it's greater, so just fall thru, and return.
6853 __ li(v0, Operand(GREATER));
6857 __ li(v0, Operand(EQUAL));
6861 __ li(v0, Operand(LESS));
6865 __ bind(&unordered);
6867 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
6868 __ bind(&generic_stub);
6869 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6871 __ bind(&maybe_undefined1);
6872 if (Token::IsOrderedRelationalCompareOp(op_)) {
6873 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6874 __ Branch(&miss, ne, a0, Operand(at));
6875 __ GetObjectType(a1, a2, a2);
6876 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
6880 __ bind(&maybe_undefined2);
6881 if (Token::IsOrderedRelationalCompareOp(op_)) {
6882 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6883 __ Branch(&unordered, eq, a1, Operand(at));
6891 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6892 ASSERT(state_ == CompareIC::SYMBOLS);
6895 // Registers containing left and right operands respectively.
6897 Register right = a0;
6901 // Check that both operands are heap objects.
6902 __ JumpIfEitherSmi(left, right, &miss);
6904 // Check that both operands are symbols.
6905 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6906 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6907 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6908 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6909 STATIC_ASSERT(kSymbolTag != 0);
6910 __ And(tmp1, tmp1, Operand(tmp2));
6911 __ And(tmp1, tmp1, kIsSymbolMask);
6912 __ Branch(&miss, eq, tmp1, Operand(zero_reg));
6913 // Make sure a0 is non-zero. At this point input operands are
6914 // guaranteed to be non-zero.
6915 ASSERT(right.is(a0));
6916 STATIC_ASSERT(EQUAL == 0);
6917 STATIC_ASSERT(kSmiTag == 0);
6919 // Symbols are compared by identity.
6920 __ Ret(ne, left, Operand(right));
6921 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6929 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6930 ASSERT(state_ == CompareIC::STRINGS);
6933 bool equality = Token::IsEqualityOp(op_);
6935 // Registers containing left and right operands respectively.
6937 Register right = a0;
6944 // Check that both operands are heap objects.
6945 __ JumpIfEitherSmi(left, right, &miss);
6947 // Check that both operands are strings. This leaves the instance
6948 // types loaded in tmp1 and tmp2.
6949 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6950 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6951 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6952 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6953 STATIC_ASSERT(kNotStringTag != 0);
6954 __ Or(tmp3, tmp1, tmp2);
6955 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
6956 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
6958 // Fast check for identical strings.
6959 Label left_ne_right;
6960 STATIC_ASSERT(EQUAL == 0);
6961 STATIC_ASSERT(kSmiTag == 0);
6962 __ Branch(&left_ne_right, ne, left, Operand(right));
6963 __ Ret(USE_DELAY_SLOT);
6964 __ mov(v0, zero_reg); // In the delay slot.
6965 __ bind(&left_ne_right);
6967 // Handle not identical strings.
6969 // Check that both strings are symbols. If they are, we're done
6970 // because we already know they are not identical.
6972 ASSERT(GetCondition() == eq);
6973 STATIC_ASSERT(kSymbolTag != 0);
6974 __ And(tmp3, tmp1, Operand(tmp2));
6975 __ And(tmp5, tmp3, Operand(kIsSymbolMask));
6977 __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
6978 // Make sure a0 is non-zero. At this point input operands are
6979 // guaranteed to be non-zero.
6980 ASSERT(right.is(a0));
6981 __ Ret(USE_DELAY_SLOT);
6982 __ mov(v0, a0); // In the delay slot.
6983 __ bind(&is_symbol);
6986 // Check that both strings are sequential ASCII.
6988 __ JumpIfBothInstanceTypesAreNotSequentialAscii(
6989 tmp1, tmp2, tmp3, tmp4, &runtime);
6991 // Compare flat ASCII strings. Returns when done.
6993 StringCompareStub::GenerateFlatAsciiStringEquals(
6994 masm, left, right, tmp1, tmp2, tmp3);
6996 StringCompareStub::GenerateCompareFlatAsciiStrings(
6997 masm, left, right, tmp1, tmp2, tmp3, tmp4);
7000 // Handle more complex cases in runtime.
7002 __ Push(left, right);
7004 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
7006 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
7014 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
7015 ASSERT(state_ == CompareIC::OBJECTS);
7017 __ And(a2, a1, Operand(a0));
7018 __ JumpIfSmi(a2, &miss);
7020 // Compare lhs, a2 holds the map, a3 holds the type_reg
7021 __ GetObjectType(a0, a2, a3);
7022 __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
7023 __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
7024 __ And(a2, a2, Operand(1 << Map::kUseUserObjectComparison));
7025 __ Branch(&miss, eq, a2, Operand(1 << Map::kUseUserObjectComparison));
7028 // Compare rhs, a2 holds the map, a3 holds the type_reg
7029 __ GetObjectType(a1, a2, a3);
7030 __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
7031 __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
7032 __ And(a2, a2, Operand(1 << Map::kUseUserObjectComparison));
7033 __ Branch(&miss, eq, a2, Operand(1 << Map::kUseUserObjectComparison));
7035 ASSERT(GetCondition() == eq);
7036 __ Ret(USE_DELAY_SLOT);
7037 __ subu(v0, a0, a1);
7044 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
7047 __ JumpIfSmi(a2, &miss);
7048 __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
7049 __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
7051 // Check object in a0
7052 __ Branch(&miss, ne, a2, Operand(known_map_));
7053 __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
7054 __ And(a2, a2, Operand(1 << Map::kUseUserObjectComparison));
7055 __ Branch(&miss, eq, a2, Operand(1 << Map::kUseUserObjectComparison));
7057 // Check object in a1
7058 __ Branch(&miss, ne, a3, Operand(known_map_));
7059 __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
7060 __ And(a3, a3, Operand(1 << Map::kUseUserObjectComparison));
7061 __ Branch(&miss, eq, a3, Operand(1 << Map::kUseUserObjectComparison));
7063 __ Ret(USE_DELAY_SLOT);
7064 __ subu(v0, a0, a1);
7070 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
7072 // Call the runtime system in a fresh internal frame.
7073 ExternalReference miss =
7074 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
7075 FrameScope scope(masm, StackFrame::INTERNAL);
7079 __ li(t0, Operand(Smi::FromInt(op_)));
7080 __ addiu(sp, sp, -kPointerSize);
7081 __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
7082 __ sw(t0, MemOperand(sp)); // In the delay slot.
7083 // Compute the entry point of the rewritten stub.
7084 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
7085 // Restore registers.
7092 void DirectCEntryStub::Generate(MacroAssembler* masm) {
7093 // No need to pop or drop anything, LeaveExitFrame will restore the old
7094 // stack, thus dropping the allocated space for the return value.
7095 // The saved ra is after the reserved stack space for the 4 args.
7096 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
7098 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
7099 // In case of an error the return address may point to a memory area
7100 // filled with kZapValue by the GC.
7101 // Dereference the address and check for this.
7102 __ lw(t0, MemOperand(t9));
7103 __ Assert(ne, "Received invalid return address.", t0,
7104 Operand(reinterpret_cast<uint32_t>(kZapValue)));
7110 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
7111 ExternalReference function) {
7112 __ li(t9, Operand(function));
7113 this->GenerateCall(masm, t9);
7117 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
7119 __ Move(t9, target);
7120 __ AssertStackIsAligned();
7121 // Allocate space for arg slots.
7122 __ Subu(sp, sp, kCArgsSlotsSize);
7124 // Block the trampoline pool through the whole function to make sure the
7125 // number of generated instructions is constant.
7126 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
7128 // We need to get the current 'pc' value, which is not available on MIPS.
7130 masm->bal(&find_ra); // ra = pc + 8.
7131 masm->nop(); // Branch delay slot nop.
7132 masm->bind(&find_ra);
7134 const int kNumInstructionsToJump = 6;
7135 masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
7136 // Push return address (accessible to GC through exit frame pc).
7137 // This spot for ra was reserved in EnterExitFrame.
7138 masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
7140 Operand(reinterpret_cast<intptr_t>(GetCode().location()),
7141 RelocInfo::CODE_TARGET),
7143 // Call the function.
7145 // Make sure the stored 'ra' points to this position.
7146 ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
7150 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
7154 Register properties,
7155 Handle<String> name,
7156 Register scratch0) {
7157 // If none of the slots probed for the hash value (probes 1 through
7158 // kProbes - 1) holds the name, and the kProbes-th slot is unused (its name
7159 // is the undefined value), then the hash table is guaranteed not to contain
7160 // the property. This holds even if some of the probed slots represent
7161 // deleted properties (their names are the hole value).
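//
// A rough sketch of that rule in plain C++ (illustrative only; Slot,
// DefinitelyAbsent and kProbes here are stand-ins, not types or constants
// defined in this file):
//
//   bool DefinitelyAbsent(const Slot* slots, uint32_t capacity,
//                         uint32_t hash, Handle<String> name) {
//     uint32_t mask = capacity - 1;  // Capacity is a power of two.
//     for (int i = 0; i < kProbes; i++) {
//       const Slot& s = slots[(hash + i + i * i) & mask];
//       if (s.IsUndefined()) return true;   // Free slot: name cannot be present.
//       if (s.Equals(*name)) return false;  // Name found.
//       // A hole (deleted entry) does not terminate the probe sequence.
//     }
//     return false;  // Unknown: the caller falls back to the slow path.
//   }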
7162 for (int i = 0; i < kInlinedProbes; i++) {
7163 // scratch0 points to properties hash.
7164 // Compute the masked index: (hash + i + i * i) & mask.
7165 Register index = scratch0;
7166 // Capacity is smi 2^n.
7167 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
7168 __ Subu(index, index, Operand(1));
7169 __ And(index, index, Operand(
7170 Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
7172 // Scale the index by multiplying by the entry size.
7173 ASSERT(StringDictionary::kEntrySize == 3);
7174 __ sll(at, index, 1);
7175 __ Addu(index, index, at);
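// The two instructions above compute index * StringDictionary::kEntrySize
// without a multiply: index + (index << 1) == index * 3. Each entry is
// assumed to occupy three consecutive slots (key, value, details), which is
// what kEntrySize == 3 implies.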
7177 Register entity_name = scratch0;
7178 // Having undefined at this place means the name is not contained.
7179 ASSERT_EQ(kSmiTagSize, 1);
7180 Register tmp = properties;
7181 __ sll(scratch0, index, 1);
7182 __ Addu(tmp, properties, scratch0);
7183 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
7185 ASSERT(!tmp.is(entity_name));
7186 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
7187 __ Branch(done, eq, entity_name, Operand(tmp));
7189 if (i != kInlinedProbes - 1) {
7190 // Load the hole ready for use below:
7191 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
7193 // Stop if we found the property.
7194 __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
7197 __ Branch(&the_hole, eq, entity_name, Operand(tmp));
7199 // Check if the entry name is not a symbol.
7200 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
7202 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
7203 __ And(scratch0, entity_name, Operand(kIsSymbolMask));
7204 __ Branch(miss, eq, scratch0, Operand(zero_reg));
7208 // Restore the properties.
7210 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7214 const int spill_mask =
7215 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
7216 a2.bit() | a1.bit() | a0.bit() | v0.bit());
7218 __ MultiPush(spill_mask);
7219 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7220 __ li(a1, Operand(Handle<String>(name)));
7221 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
7224 __ MultiPop(spill_mask);
7226 __ Branch(done, eq, at, Operand(zero_reg));
7227 __ Branch(miss, ne, at, Operand(zero_reg));
7231 // Probe the string dictionary in the |elements| register. Jump to the
7232 // |done| label if a property with the given name is found. Jump to
7233 // the |miss| label otherwise.
7234 // If the lookup was successful, |scratch2| will be equal to elements + 4 * index.
7235 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
7241 Register scratch2) {
7242 ASSERT(!elements.is(scratch1));
7243 ASSERT(!elements.is(scratch2));
7244 ASSERT(!name.is(scratch1));
7245 ASSERT(!name.is(scratch2));
7247 // Assert that name contains a string.
7248 if (FLAG_debug_code) __ AbortIfNotString(name);
7250 // Compute the capacity mask.
7251 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
7252 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
7253 __ Subu(scratch1, scratch1, Operand(1));
7255 // Generate an unrolled loop that performs a few probes before
7256 // giving up. Measurements done on Gmail indicate that 2 probes
7257 // cover ~93% of loads from dictionaries.
7258 for (int i = 0; i < kInlinedProbes; i++) {
7259 // Compute the masked index: (hash + i + i * i) & mask.
7260 __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
7262 // Add the probe offset (i + i * i) left-shifted, to avoid right-shifting
7263 // the hash in a separate instruction. The value hash + i + i * i is
7264 // right-shifted below and then masked by the And instruction.
7265 ASSERT(StringDictionary::GetProbeOffset(i) <
7266 1 << (32 - String::kHashFieldOffset));
7267 __ Addu(scratch2, scratch2, Operand(
7268 StringDictionary::GetProbeOffset(i) << String::kHashShift));
7270 __ srl(scratch2, scratch2, String::kHashShift);
7271 __ And(scratch2, scratch1, scratch2);
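// Taken together, the Addu/srl/And above compute, in effect:
//
//   index = ((hash_field + (GetProbeOffset(i) << kHashShift)) >> kHashShift) & mask
//         = ((hash_field >> kHashShift) + GetProbeOffset(i)) & mask
//
// assuming the shifted probe offset does not overflow into the discarded bits
// (guarded by the ASSERT above).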
7273 // Scale the index by multiplying by the element size.
7274 ASSERT(StringDictionary::kEntrySize == 3);
7275 // scratch2 = scratch2 * 3.
7277 __ sll(at, scratch2, 1);
7278 __ Addu(scratch2, scratch2, at);
7280 // Check if the key is identical to the name.
7281 __ sll(at, scratch2, 2);
7282 __ Addu(scratch2, elements, at);
7283 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
7284 __ Branch(done, eq, name, Operand(at));
7287 const int spill_mask =
7288 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
7289 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
7290 ~(scratch1.bit() | scratch2.bit());
7292 __ MultiPush(spill_mask);
7294 ASSERT(!elements.is(a1));
7296 __ Move(a0, elements);
7298 __ Move(a0, elements);
7301 StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
7303 __ mov(scratch2, a2);
7305 __ MultiPop(spill_mask);
7307 __ Branch(done, ne, at, Operand(zero_reg));
7308 __ Branch(miss, eq, at, Operand(zero_reg));
7312 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
7313 // This stub overrides SometimesSetsUpAFrame() to return false. That means
7314 // we cannot call anything that could cause a GC from this stub.
7316 // Registers (as assigned below):
7318 //  dictionary: StringDictionary to probe.
7319 //  index: will hold the index of the entry if the lookup is successful;
7320 //         it might alias with result.
7322 // Returns: result is zero if the lookup failed, non-zero otherwise.
7324 Register result = v0;
7325 Register dictionary = a0;
7327 Register index = a2;
7330 Register undefined = t1;
7331 Register entry_key = t2;
7333 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
7335 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
7336 __ sra(mask, mask, kSmiTagSize);
7337 __ Subu(mask, mask, Operand(1));
7339 __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
7341 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
7343 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
7344 // Compute the masked index: (hash + i + i * i) & mask.
7345 // Capacity is smi 2^n.
7347 // Add the probe offset (i + i * i) left-shifted, to avoid right-shifting
7348 // the hash in a separate instruction. The value hash + i + i * i is
7349 // right-shifted below and then masked by the And instruction.
7350 ASSERT(StringDictionary::GetProbeOffset(i) <
7351 1 << (32 - String::kHashFieldOffset));
7352 __ Addu(index, hash, Operand(
7353 StringDictionary::GetProbeOffset(i) << String::kHashShift));
7355 __ mov(index, hash);
7357 __ srl(index, index, String::kHashShift);
7358 __ And(index, mask, index);
7360 // Scale the index by multiplying by the entry size.
7361 ASSERT(StringDictionary::kEntrySize == 3);
7364 __ sll(index, index, 1);
7365 __ Addu(index, index, at);
7368 ASSERT_EQ(kSmiTagSize, 1);
7369 __ sll(index, index, 2);
7370 __ Addu(index, index, dictionary);
7371 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
7373 // Having undefined at this place means the name is not contained.
7374 __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
7376 // Stop if we found the property.
7377 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
7379 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
7380 // Check if the entry name is not a symbol.
7381 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
7383 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
7384 __ And(result, entry_key, Operand(kIsSymbolMask));
7385 __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
7389 __ bind(&maybe_in_dictionary);
7390 // If we are doing a negative lookup, then a probing failure should be
7391 // treated as a lookup success. For a positive lookup, a probing failure
7392 // should be treated as a lookup failure.
7393 if (mode_ == POSITIVE_LOOKUP) {
7394 __ Ret(USE_DELAY_SLOT);
7395 __ mov(result, zero_reg);
7398 __ bind(&in_dictionary);
7399 __ Ret(USE_DELAY_SLOT);
7402 __ bind(&not_in_dictionary);
7403 __ Ret(USE_DELAY_SLOT);
7404 __ mov(result, zero_reg);
7408 struct AheadOfTimeWriteBarrierStubList {
7409 Register object, value, address;
7410 RememberedSetAction action;
7413 #define REG(Name) { kRegister_ ## Name ## _Code }
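// For example, REG(a3) expands to { kRegister_a3_Code }, i.e. an initializer
// containing the register code of a3.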
7415 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7416 // Used in RegExpExecStub.
7417 { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
7418 { REG(s2), REG(a2), REG(t3), EMIT_REMEMBERED_SET },
7419 // Used in CompileArrayPushCall.
7420 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
7421 // Also used in KeyedStoreIC::GenerateGeneric.
7422 { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
7423 // Used in CompileStoreGlobal.
7424 { REG(t0), REG(a1), REG(a2), OMIT_REMEMBERED_SET },
7425 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
7426 { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
7427 { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
7428 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7429 { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
7430 { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
7431 // KeyedStoreStubCompiler::GenerateStoreFastElement.
7432 { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
7433 { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
7434 // ElementsTransitionGenerator::GenerateSmiOnlyToObject
7435 // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
7436 // and ElementsTransitionGenerator::GenerateDoubleToObject
7437 { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
7438 { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
7439 // ElementsTransitionGenerator::GenerateDoubleToObject
7440 { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
7441 { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
7442 // StoreArrayLiteralElementStub::Generate
7443 { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
7444 // Null termination.
7445 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
7451 bool RecordWriteStub::IsPregenerated() {
7452 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7453 !entry->object.is(no_reg);
7455 if (object_.is(entry->object) &&
7456 value_.is(entry->value) &&
7457 address_.is(entry->address) &&
7458 remembered_set_action_ == entry->action &&
7459 save_fp_regs_mode_ == kDontSaveFPRegs) {
7467 bool StoreBufferOverflowStub::IsPregenerated() {
7468 return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
7472 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
7473 StoreBufferOverflowStub stub1(kDontSaveFPRegs);
7474 stub1.GetCode()->set_is_pregenerated(true);
7478 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
7479 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7480 !entry->object.is(no_reg);
7482 RecordWriteStub stub(entry->object,
7487 stub.GetCode()->set_is_pregenerated(true);
7492 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
7493 // the value has just been written into the object, now this stub makes sure
7494 // we keep the GC informed. The word in the object where the value has been
7495 // written is in the address register.
7496 void RecordWriteStub::Generate(MacroAssembler* masm) {
7497 Label skip_to_incremental_noncompacting;
7498 Label skip_to_incremental_compacting;
7500 // The first two branch+nop instructions are generated with labels so as to
7501 // get the offset fixed up correctly by the bind(Label*) call. We patch it
7502 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
7503 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
7504 // incremental heap marking.
7505 // See RecordWriteStub::Patch for details.
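// A summary of the three patchable states (see RecordWriteStub::Patch for the
// authoritative logic):
//
//   STORE_BUFFER_ONLY:      both branches are "bne zero_reg, zero_reg" (never
//                           taken, i.e. nops); only the remembered set code runs.
//   INCREMENTAL:            the first branch is "beq zero_reg, zero_reg" (always
//                           taken) to skip_to_incremental_noncompacting.
//   INCREMENTAL_COMPACTION: the first branch stays a nop and the second is an
//                           always-taken branch to skip_to_incremental_compacting.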
7506 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
7508 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
7511 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7512 __ RememberedSetHelper(object_,
7516 MacroAssembler::kReturnAtEnd);
7520 __ bind(&skip_to_incremental_noncompacting);
7521 GenerateIncremental(masm, INCREMENTAL);
7523 __ bind(&skip_to_incremental_compacting);
7524 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7526 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7527 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
7529 PatchBranchIntoNop(masm, 0);
7530 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
7534 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7537 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7538 Label dont_need_remembered_set;
7540 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7541 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
7543 &dont_need_remembered_set);
7545 __ CheckPageFlag(regs_.object(),
7547 1 << MemoryChunk::SCAN_ON_SCAVENGE,
7549 &dont_need_remembered_set);
7551 // First notify the incremental marker if necessary, then update the remembered set.
7553 CheckNeedsToInformIncrementalMarker(
7554 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
7555 InformIncrementalMarker(masm, mode);
7556 regs_.Restore(masm);
7557 __ RememberedSetHelper(object_,
7561 MacroAssembler::kReturnAtEnd);
7563 __ bind(&dont_need_remembered_set);
7566 CheckNeedsToInformIncrementalMarker(
7567 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
7568 InformIncrementalMarker(masm, mode);
7569 regs_.Restore(masm);
7574 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7575 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7576 int argument_count = 3;
7577 __ PrepareCallCFunction(argument_count, regs_.scratch0());
7579 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
7580 ASSERT(!address.is(regs_.object()));
7581 ASSERT(!address.is(a0));
7582 __ Move(address, regs_.address());
7583 __ Move(a0, regs_.object());
7584 if (mode == INCREMENTAL_COMPACTION) {
7585 __ Move(a1, address);
7587 ASSERT(mode == INCREMENTAL);
7588 __ lw(a1, MemOperand(address, 0));
7590 __ li(a2, Operand(ExternalReference::isolate_address()));
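// At this point the C call arguments are:
//   a0: the object containing the slot,
//   a1: the slot address (for INCREMENTAL_COMPACTION) or the value stored in
//       the slot (for INCREMENTAL),
//   a2: the isolate address.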
7592 AllowExternalCallThatCantCauseGC scope(masm);
7593 if (mode == INCREMENTAL_COMPACTION) {
7595 ExternalReference::incremental_evacuation_record_write_function(
7599 ASSERT(mode == INCREMENTAL);
7601 ExternalReference::incremental_marking_record_write_function(
7605 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
7609 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7610 MacroAssembler* masm,
7611 OnNoNeedToInformIncrementalMarker on_no_need,
7614 Label need_incremental;
7615 Label need_incremental_pop_scratch;
7617 // Let's look at the color of the object: if it is not black, we don't have
7618 // to inform the incremental marker.
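// (Under incremental marking, objects are white = not yet visited, grey =
// queued for scanning, or black = fully scanned. Only a write into a black
// object can hide a white value from the marker, so non-black objects need no
// further work here.)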
7619 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
7621 regs_.Restore(masm);
7622 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7623 __ RememberedSetHelper(object_,
7627 MacroAssembler::kReturnAtEnd);
7634 // Get the value from the slot.
7635 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7637 if (mode == INCREMENTAL_COMPACTION) {
7638 Label ensure_not_white;
7640 __ CheckPageFlag(regs_.scratch0(), // Contains value.
7641 regs_.scratch1(), // Scratch.
7642 MemoryChunk::kEvacuationCandidateMask,
7646 __ CheckPageFlag(regs_.object(),
7647 regs_.scratch1(), // Scratch.
7648 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
7652 __ bind(&ensure_not_white);
7655 // We need extra registers for this, so we push the object and the address
7656 // register temporarily.
7657 __ Push(regs_.object(), regs_.address());
7658 __ EnsureNotWhite(regs_.scratch0(), // The value.
7659 regs_.scratch1(), // Scratch.
7660 regs_.object(), // Scratch.
7661 regs_.address(), // Scratch.
7662 &need_incremental_pop_scratch);
7663 __ Pop(regs_.object(), regs_.address());
7665 regs_.Restore(masm);
7666 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7667 __ RememberedSetHelper(object_,
7671 MacroAssembler::kReturnAtEnd);
7676 __ bind(&need_incremental_pop_scratch);
7677 __ Pop(regs_.object(), regs_.address());
7679 __ bind(&need_incremental);
7681 // Fall through when we need to inform the incremental marker.
7685 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
7686 // ----------- S t a t e -------------
7687 // -- a0 : element value to store
7688 // -- a1 : array literal
7689 // -- a2 : map of array literal
7690 // -- a3 : element index as smi
7691 // -- t0 : array literal index in function as smi
7692 // -----------------------------------
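// Dispatch performed below, based on the literal's elements kind and the type
// of the value:
//   FAST_DOUBLE_ELEMENTS                              -> double_elements
//   value is a Smi                                    -> smi_element (plain store)
//   FAST_ELEMENTS with a heap object value            -> fast_elements (store + write barrier)
//   FAST_SMI_ONLY_ELEMENTS with a heap object value   -> slow_elements (runtime transition)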
7695 Label double_elements;
7697 Label slow_elements;
7698 Label fast_elements;
7700 __ CheckFastElements(a2, t1, &double_elements);
7701 // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
7702 __ JumpIfSmi(a0, &smi_element);
7703 __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
7705 // Storing into the array literal requires an elements transition; call into the runtime.
7707 __ bind(&slow_elements);
7709 __ Push(a1, a3, a0);
7710 __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
7711 __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
7713 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
7715 // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
7716 __ bind(&fast_elements);
7717 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
7718 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7719 __ Addu(t2, t1, t2);
7720 __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
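// t2 now holds the untagged address of the element slot:
//   elements + index * kPointerSize + FixedArray::kHeaderSize - kHeapObjectTag
// (the smi-tagged index in a3 was converted to a byte offset by the sll above).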
7721 __ sw(a0, MemOperand(t2, 0));
7722 // Update the write barrier for the array store.
7723 __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
7724 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
7725 __ Ret(USE_DELAY_SLOT);
7728 // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
7729 // FAST_ELEMENTS, and value is Smi.
7730 __ bind(&smi_element);
7731 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
7732 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7733 __ Addu(t2, t1, t2);
7734 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
7735 __ Ret(USE_DELAY_SLOT);
7738 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
7739 __ bind(&double_elements);
7740 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
7741 __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
7743 __ Ret(USE_DELAY_SLOT);
7750 } } // namespace v8::internal
7752 #endif // V8_TARGET_ARCH_MIPS