1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #if defined(V8_TARGET_ARCH_MIPS)
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
35 #include "regexp-macro-assembler.h"
41 #define __ ACCESS_MASM(masm)
43 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
47 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
53 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
54 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
59 // Check if the operand is a heap number.
60 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
61 Register scratch1, Register scratch2,
62 Label* not_a_heap_number) {
63 __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
64 __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
65 __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
69 void ToNumberStub::Generate(MacroAssembler* masm) {
70 // The ToNumber stub takes one argument in a0.
71 Label check_heap_number, call_builtin;
72 __ JumpIfNotSmi(a0, &check_heap_number);
73 __ Ret(USE_DELAY_SLOT);
76 __ bind(&check_heap_number);
77 EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
78 __ Ret(USE_DELAY_SLOT);
81 __ bind(&call_builtin);
83 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
87 void FastNewClosureStub::Generate(MacroAssembler* masm) {
88 // Create a new closure from the given function info in new
89 // space. Set the context to the current context in cp.
92 // Pop the function info from the stack.
95 // Attempt to allocate new JSFunction in new space.
96 __ AllocateInNewSpace(JSFunction::kSize,
103 int map_index = (language_mode_ == CLASSIC_MODE)
104 ? Context::FUNCTION_MAP_INDEX
105 : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
107 // Compute the function map in the current global context and set that
108 // as the map of the allocated object.
109 __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
110 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
111 __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
112 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
114 // Initialize the rest of the function. We don't have to update the
115 // write barrier because the allocated object is in new space.
116 __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
117 __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
118 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
119 __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
120 __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
121 __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
122 __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
123 __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
124 __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
125 __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
127 // Initialize the code pointer in the function to be the one
128 // found in the shared function info object.
129 __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
130 __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
132 // Return result. The argument function info has been popped already.
133 __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
136 // Create a new closure through the slower runtime call.
138 __ LoadRoot(t0, Heap::kFalseValueRootIndex);
140 __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
144 void FastNewContextStub::Generate(MacroAssembler* masm) {
145 // Try to allocate the context in new space.
147 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
149 // Attempt to allocate the context in new space.
150 __ AllocateInNewSpace(FixedArray::SizeFor(length),
157 // Load the function from the stack.
158 __ lw(a3, MemOperand(sp, 0));
160 // Set up the object header.
161 __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
162 __ li(a2, Operand(Smi::FromInt(length)));
163 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
164 __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
166 // Set up the fixed slots, copy the global object from the previous context.
167 __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
168 __ li(a1, Operand(Smi::FromInt(0)));
169 __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
170 __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
171 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
172 __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
174 // Initialize the rest of the slots to undefined.
175 __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
176 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
177 __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
180 // Remove the on-stack argument and return.
184 // Need to collect. Call into runtime system.
186 __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
190 void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
191 // Stack layout on entry:
194 // [sp + kPointerSize]: serialized scope info
196 // Try to allocate the context in new space.
198 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
199 __ AllocateInNewSpace(FixedArray::SizeFor(length),
200 v0, a1, a2, &gc, TAG_OBJECT);
202 // Load the function from the stack.
203 __ lw(a3, MemOperand(sp, 0));
205 // Load the serialized scope info from the stack.
206 __ lw(a1, MemOperand(sp, 1 * kPointerSize));
208 // Set up the object header.
209 __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
210 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
211 __ li(a2, Operand(Smi::FromInt(length)));
212 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
214 // If this block context is nested in the global context we get a smi
215 // sentinel instead of a function. The block context should get the
216 // canonical empty function of the global context as its closure which
217 // we still have to look up.
218 Label after_sentinel;
219 __ JumpIfNotSmi(a3, &after_sentinel);
220 if (FLAG_debug_code) {
221 const char* message = "Expected 0 as a Smi sentinel";
222 __ Assert(eq, message, a3, Operand(zero_reg));
224 __ lw(a3, GlobalObjectOperand());
225 __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
226 __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
227 __ bind(&after_sentinel);
229 // Set up the fixed slots, copy the global object from the previous context.
230 __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
231 __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
232 __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
233 __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
234 __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));
236 // Initialize the rest of the slots to the hole value.
237 __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
238 for (int i = 0; i < slots_; i++) {
239 __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
242 // Remove the on-stack argument and return.
246 // Need to collect. Call into runtime system.
248 __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
252 static void GenerateFastCloneShallowArrayCommon(
253 MacroAssembler* masm,
255 FastCloneShallowArrayStub::Mode mode,
257 // Registers on entry:
258 // a3: boilerplate literal array.
259 ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
261 // All sizes here are multiples of kPointerSize.
262 int elements_size = 0;
264 elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
265 ? FixedDoubleArray::SizeFor(length)
266 : FixedArray::SizeFor(length);
268 int size = JSArray::kSize + elements_size;
270 // Allocate both the JS array and the elements array in one big
271 // allocation. This avoids multiple limit checks.
272 __ AllocateInNewSpace(size,
279 // Copy the JS array part.
280 for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
281 if ((i != JSArray::kElementsOffset) || (length == 0)) {
282 __ lw(a1, FieldMemOperand(a3, i));
283 __ sw(a1, FieldMemOperand(v0, i));
288 // Get hold of the elements array of the boilerplate and setup the
289 // elements pointer in the resulting object.
290 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
291 __ Addu(a2, v0, Operand(JSArray::kSize));
292 __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
294 // Copy the elements array.
295 ASSERT((elements_size % kPointerSize) == 0);
296 __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
300 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
301 // Stack layout on entry:
303 // [sp]: constant elements.
304 // [sp + kPointerSize]: literal index.
305 // [sp + (2 * kPointerSize)]: literals array.
307 // Load boilerplate object into r3 and check if we need to create a
310 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
311 __ lw(a0, MemOperand(sp, 1 * kPointerSize));
312 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
313 __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
315 __ lw(a3, MemOperand(t0));
316 __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
317 __ Branch(&slow_case, eq, a3, Operand(t1));
319 FastCloneShallowArrayStub::Mode mode = mode_;
320 if (mode == CLONE_ANY_ELEMENTS) {
321 Label double_elements, check_fast_elements;
322 __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
323 __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
324 __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
325 __ Branch(&check_fast_elements, ne, v0, Operand(t1));
326 GenerateFastCloneShallowArrayCommon(masm, 0,
327 COPY_ON_WRITE_ELEMENTS, &slow_case);
328 // Return and remove the on-stack parameters.
331 __ bind(&check_fast_elements);
332 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
333 __ Branch(&double_elements, ne, v0, Operand(t1));
334 GenerateFastCloneShallowArrayCommon(masm, length_,
335 CLONE_ELEMENTS, &slow_case);
336 // Return and remove the on-stack parameters.
339 __ bind(&double_elements);
340 mode = CLONE_DOUBLE_ELEMENTS;
341 // Fall through to generate the code to handle double elements.
344 if (FLAG_debug_code) {
346 Heap::RootListIndex expected_map_index;
347 if (mode == CLONE_ELEMENTS) {
348 message = "Expected (writable) fixed array";
349 expected_map_index = Heap::kFixedArrayMapRootIndex;
350 } else if (mode == CLONE_DOUBLE_ELEMENTS) {
351 message = "Expected (writable) fixed double array";
352 expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
354 ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
355 message = "Expected copy-on-write fixed array";
356 expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
359 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
360 __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
361 __ LoadRoot(at, expected_map_index);
362 __ Assert(eq, message, a3, Operand(at));
366 GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
368 // Return and remove the on-stack parameters.
372 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
376 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
377 // Stack layout on entry:
379 // [sp]: object literal flags.
380 // [sp + kPointerSize]: constant properties.
381 // [sp + (2 * kPointerSize)]: literal index.
382 // [sp + (3 * kPointerSize)]: literals array.
384 // Load boilerplate object into a3 and check if we need to create a
387 __ lw(a3, MemOperand(sp, 3 * kPointerSize));
388 __ lw(a0, MemOperand(sp, 2 * kPointerSize));
389 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
390 __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
392 __ lw(a3, MemOperand(a3));
393 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
394 __ Branch(&slow_case, eq, a3, Operand(t0));
396 // Check that the boilerplate contains only fast properties and we can
397 // statically determine the instance size.
398 int size = JSObject::kHeaderSize + length_ * kPointerSize;
399 __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
400 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
401 __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
403 // Allocate the JS object and copy header together with all in-object
404 // properties from the boilerplate.
405 __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
406 for (int i = 0; i < size; i += kPointerSize) {
407 __ lw(a1, FieldMemOperand(a3, i));
408 __ sw(a1, FieldMemOperand(v0, i));
411 // Return and remove the on-stack parameters.
415 __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
419 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
420 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
421 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
422 // scratch register. Destroys the source register. No GC occurs during this
423 // stub so you don't have to set up the frame.
424 class ConvertToDoubleStub : public CodeStub {
426 ConvertToDoubleStub(Register result_reg_1,
427 Register result_reg_2,
429 Register scratch_reg)
430 : result1_(result_reg_1),
431 result2_(result_reg_2),
433 zeros_(scratch_reg) { }
441 // Minor key encoding in 16 bits.
442 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
443 class OpBits: public BitField<Token::Value, 2, 14> {};
445 Major MajorKey() { return ConvertToDouble; }
447 // Encode the parameters in a unique 16 bit value.
448 return result1_.code() +
449 (result2_.code() << 4) +
450 (source_.code() << 8) +
451 (zeros_.code() << 12);
454 void Generate(MacroAssembler* masm);
458 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
459 #ifndef BIG_ENDIAN_FLOATING_POINT
460 Register exponent = result1_;
461 Register mantissa = result2_;
463 Register exponent = result2_;
464 Register mantissa = result1_;
467 // Convert from Smi to integer.
468 __ sra(source_, source_, kSmiTagSize);
469 // Move sign bit from source to destination. This works because the sign bit
470 // in the exponent word of the double has the same position and polarity as
471 // the 2's complement sign bit in a Smi.
472 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
473 __ And(exponent, source_, Operand(HeapNumber::kSignMask));
474 // Subtract from 0 if source was negative.
475 __ subu(at, zero_reg, source_);
476 __ Movn(source_, at, exponent);
478 // We have -1, 0 or 1, which we treat specially. Register source_ contains
479 // absolute value: it is either equal to 1 (special case of -1 and 1),
480 // greater than 1 (not a special case) or less than 1 (special case of 0).
481 __ Branch(¬_special, gt, source_, Operand(1));
483 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
484 const uint32_t exponent_word_for_1 =
485 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
486 // Safe to use 'at' as dest reg here.
487 __ Or(at, exponent, Operand(exponent_word_for_1));
488 __ Movn(exponent, at, source_); // Write exp when source not 0.
489 // 1, 0 and -1 all have 0 for the second word.
490 __ Ret(USE_DELAY_SLOT);
491 __ mov(mantissa, zero_reg);
493 __ bind(¬_special);
494 // Count leading zeros.
495 // Gets the wrong answer for 0, but we already checked for that case above.
496 __ Clz(zeros_, source_);
497 // Compute exponent and or it into the exponent register.
498 // We use mantissa as a scratch register here.
499 __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
500 __ subu(mantissa, mantissa, zeros_);
501 __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
502 __ Or(exponent, exponent, mantissa);
504 // Shift up the source chopping the top bit off.
505 __ Addu(zeros_, zeros_, Operand(1));
506 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
507 __ sllv(source_, source_, zeros_);
508 // Compute lower part of fraction (last 12 bits).
509 __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
510 // And the top (top 20 bits).
511 __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
513 __ Ret(USE_DELAY_SLOT);
514 __ or_(exponent, exponent, source_);
518 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
519 FloatingPointHelper::Destination destination,
522 if (CpuFeatures::IsSupported(FPU)) {
523 CpuFeatures::Scope scope(FPU);
524 __ sra(scratch1, a0, kSmiTagSize);
525 __ mtc1(scratch1, f14);
526 __ cvt_d_w(f14, f14);
527 __ sra(scratch1, a1, kSmiTagSize);
528 __ mtc1(scratch1, f12);
529 __ cvt_d_w(f12, f12);
530 if (destination == kCoreRegisters) {
531 __ Move(a2, a3, f14);
532 __ Move(a0, a1, f12);
535 ASSERT(destination == kCoreRegisters);
536 // Write Smi from a0 to a3 and a2 in double format.
537 __ mov(scratch1, a0);
538 ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
540 __ Call(stub1.GetCode());
541 // Write Smi from a1 to a1 and a0 in double format.
542 __ mov(scratch1, a1);
543 ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
544 __ Call(stub2.GetCode());
550 void FloatingPointHelper::LoadOperands(
551 MacroAssembler* masm,
552 FloatingPointHelper::Destination destination,
553 Register heap_number_map,
558 // Load right operand (a0) to f12 or a2/a3.
559 LoadNumber(masm, destination,
560 a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
562 // Load left operand (a1) to f14 or a0/a1.
563 LoadNumber(masm, destination,
564 a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
568 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
569 Destination destination,
574 Register heap_number_map,
578 if (FLAG_debug_code) {
579 __ AbortIfNotRootValue(heap_number_map,
580 Heap::kHeapNumberMapRootIndex,
581 "HeapNumberMap register clobbered.");
587 __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
589 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
591 // Handle loading a double from a heap number.
592 if (CpuFeatures::IsSupported(FPU) &&
593 destination == kFPURegisters) {
594 CpuFeatures::Scope scope(FPU);
595 // Load the double from tagged HeapNumber to double register.
597 // ARM uses a workaround here because of the unaligned HeapNumber
598 // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
599 // point in generating even more instructions.
600 __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
602 ASSERT(destination == kCoreRegisters);
603 // Load the double from heap number to dst1 and dst2 in double format.
604 __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
605 __ lw(dst2, FieldMemOperand(object,
606 HeapNumber::kValueOffset + kPointerSize));
610 // Handle loading a double from a smi.
612 if (CpuFeatures::IsSupported(FPU)) {
613 CpuFeatures::Scope scope(FPU);
614 // Convert smi to double using FPU instructions.
615 __ mtc1(scratch1, dst);
616 __ cvt_d_w(dst, dst);
617 if (destination == kCoreRegisters) {
618 // Load the converted smi to dst1 and dst2 in double format.
619 __ Move(dst1, dst2, dst);
622 ASSERT(destination == kCoreRegisters);
623 // Write smi to dst1 and dst2 double format.
624 __ mov(scratch1, object);
625 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
627 __ Call(stub.GetCode());
635 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
638 Register heap_number_map,
642 FPURegister double_scratch,
644 if (FLAG_debug_code) {
645 __ AbortIfNotRootValue(heap_number_map,
646 Heap::kHeapNumberMapRootIndex,
647 "HeapNumberMap register clobbered.");
650 Label not_in_int32_range;
652 __ UntagAndJumpIfSmi(dst, object, &done);
653 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
654 __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
655 __ ConvertToInt32(object,
660 ¬_in_int32_range);
663 __ bind(¬_in_int32_range);
664 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
665 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
667 __ EmitOutOfInt32RangeTruncate(dst,
676 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
677 Register int_scratch,
678 Destination destination,
679 FPURegister double_dst,
683 FPURegister single_scratch) {
684 ASSERT(!int_scratch.is(scratch2));
685 ASSERT(!int_scratch.is(dst1));
686 ASSERT(!int_scratch.is(dst2));
690 if (CpuFeatures::IsSupported(FPU)) {
691 CpuFeatures::Scope scope(FPU);
692 __ mtc1(int_scratch, single_scratch);
693 __ cvt_d_w(double_dst, single_scratch);
694 if (destination == kCoreRegisters) {
695 __ Move(dst1, dst2, double_dst);
698 Label fewer_than_20_useful_bits;
701 // | s | exp | mantissa |
704 __ mov(dst2, int_scratch);
705 __ mov(dst1, int_scratch);
706 __ Branch(&done, eq, int_scratch, Operand(zero_reg));
708 // Preload the sign of the value.
709 __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
710 // Get the absolute value of the object (as an unsigned integer).
712 __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
713 __ Subu(int_scratch, zero_reg, int_scratch);
716 // Get mantissa[51:20].
718 // Get the position of the first set bit.
719 __ Clz(dst1, int_scratch);
721 __ Subu(dst1, scratch2, dst1);
724 __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
725 __ Ins(dst2, scratch2,
726 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
728 // Clear the first non null bit.
729 __ li(scratch2, Operand(1));
730 __ sllv(scratch2, scratch2, dst1);
732 __ Xor(scratch2, scratch2, at);
733 __ And(int_scratch, int_scratch, scratch2);
735 // Get the number of bits to set in the lower part of the mantissa.
736 __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
737 __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
738 // Set the higher 20 bits of the mantissa.
739 __ srlv(at, int_scratch, scratch2);
740 __ or_(dst2, dst2, at);
742 __ subu(scratch2, at, scratch2);
743 __ sllv(dst1, int_scratch, scratch2);
746 __ bind(&fewer_than_20_useful_bits);
747 __ li(at, HeapNumber::kMantissaBitsInTopWord);
748 __ subu(scratch2, at, dst1);
749 __ sllv(scratch2, int_scratch, scratch2);
750 __ Or(dst2, dst2, scratch2);
752 __ mov(dst1, zero_reg);
758 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
760 Destination destination,
761 DoubleRegister double_dst,
764 Register heap_number_map,
767 FPURegister single_scratch,
769 ASSERT(!scratch1.is(object) && !scratch2.is(object));
770 ASSERT(!scratch1.is(scratch2));
771 ASSERT(!heap_number_map.is(object) &&
772 !heap_number_map.is(scratch1) &&
773 !heap_number_map.is(scratch2));
775 Label done, obj_is_not_smi;
777 __ JumpIfNotSmi(object, &obj_is_not_smi);
778 __ SmiUntag(scratch1, object);
779 ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
780 scratch2, single_scratch);
783 __ bind(&obj_is_not_smi);
784 if (FLAG_debug_code) {
785 __ AbortIfNotRootValue(heap_number_map,
786 Heap::kHeapNumberMapRootIndex,
787 "HeapNumberMap register clobbered.");
789 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
792 if (CpuFeatures::IsSupported(FPU)) {
793 CpuFeatures::Scope scope(FPU);
794 // Load the double value.
795 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
797 Register except_flag = scratch2;
798 __ EmitFPUTruncate(kRoundToZero,
803 kCheckForInexactConversion);
805 // Jump to not_int32 if the operation did not succeed.
806 __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
808 if (destination == kCoreRegisters) {
809 __ Move(dst1, dst2, double_dst);
813 ASSERT(!scratch1.is(object) && !scratch2.is(object));
814 // Load the double value in the destination registers.
815 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
816 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
818 // Check for 0 and -0.
819 __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
820 __ Or(scratch1, scratch1, Operand(dst2));
821 __ Branch(&done, eq, scratch1, Operand(zero_reg));
823 // Check that the value can be exactly represented by a 32-bit integer.
824 // Jump to not_int32 if that's not the case.
825 DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
827 // dst1 and dst2 were trashed. Reload the double value.
828 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
829 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
836 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
839 Register heap_number_map,
843 DoubleRegister double_scratch,
845 ASSERT(!dst.is(object));
846 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
847 ASSERT(!scratch1.is(scratch2) &&
848 !scratch1.is(scratch3) &&
849 !scratch2.is(scratch3));
853 __ UntagAndJumpIfSmi(dst, object, &done);
855 if (FLAG_debug_code) {
856 __ AbortIfNotRootValue(heap_number_map,
857 Heap::kHeapNumberMapRootIndex,
858 "HeapNumberMap register clobbered.");
860 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
862 // Object is a heap number.
863 // Convert the floating point value to a 32-bit integer.
864 if (CpuFeatures::IsSupported(FPU)) {
865 CpuFeatures::Scope scope(FPU);
866 // Load the double value.
867 __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
869 FPURegister single_scratch = double_scratch.low();
870 Register except_flag = scratch2;
871 __ EmitFPUTruncate(kRoundToZero,
876 kCheckForInexactConversion);
878 // Jump to not_int32 if the operation did not succeed.
879 __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
880 // Get the result in the destination register.
881 __ mfc1(dst, single_scratch);
884 // Load the double value in the destination registers.
885 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
886 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
888 // Check for 0 and -0.
889 __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
890 __ Or(dst, scratch2, Operand(dst));
891 __ Branch(&done, eq, dst, Operand(zero_reg));
893 DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
895 // Registers state after DoubleIs32BitInteger.
896 // dst: mantissa[51:20].
899 // Shift back the higher bits of the mantissa.
900 __ srlv(dst, dst, scratch3);
901 // Set the implicit first bit.
903 __ subu(scratch3, at, scratch3);
904 __ sllv(scratch2, scratch2, scratch3);
905 __ Or(dst, dst, scratch2);
907 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
908 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
910 __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
911 __ Subu(dst, zero_reg, dst);
919 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
925 // Get exponent alone in scratch.
928 HeapNumber::kExponentShift,
929 HeapNumber::kExponentBits);
931 // Substract the bias from the exponent.
932 __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
934 // src1: higher (exponent) part of the double value.
935 // src2: lower (mantissa) part of the double value.
936 // scratch: unbiased exponent.
938 // Fast cases. Check for obvious non 32-bit integer values.
939 // Negative exponent cannot yield 32-bit integers.
940 __ Branch(not_int32, lt, scratch, Operand(zero_reg));
941 // Exponent greater than 31 cannot yield 32-bit integers.
942 // Also, a positive value with an exponent equal to 31 is outside of the
943 // signed 32-bit integer range.
944 // Another way to put it is that if (exponent - signbit) > 30 then the
945 // number cannot be represented as an int32.
947 __ srl(at, src1, 31);
948 __ subu(tmp, scratch, at);
949 __ Branch(not_int32, gt, tmp, Operand(30));
950 // - Bits [21:0] in the mantissa are not null.
951 __ And(tmp, src2, 0x3fffff);
952 __ Branch(not_int32, ne, tmp, Operand(zero_reg));
954 // Otherwise the exponent needs to be big enough to shift left all the
955 // non zero bits left. So we need the (30 - exponent) last bits of the
956 // 31 higher bits of the mantissa to be null.
957 // Because bits [21:0] are null, we can check instead that the
958 // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
960 // Get the 32 higher bits of the mantissa in dst.
963 HeapNumber::kMantissaBitsInTopWord,
964 32 - HeapNumber::kMantissaBitsInTopWord);
965 __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
966 __ or_(dst, dst, at);
968 // Create the mask and test the lower bits (of the higher bits).
970 __ subu(scratch, at, scratch);
972 __ sllv(src1, src2, scratch);
973 __ Subu(src1, src1, Operand(1));
974 __ And(src1, dst, src1);
975 __ Branch(not_int32, ne, src1, Operand(zero_reg));
979 void FloatingPointHelper::CallCCodeForDoubleOperation(
980 MacroAssembler* masm,
982 Register heap_number_result,
984 // Using core registers:
985 // a0: Left value (least significant part of mantissa).
986 // a1: Left value (sign, exponent, top of mantissa).
987 // a2: Right value (least significant part of mantissa).
988 // a3: Right value (sign, exponent, top of mantissa).
990 // Assert that heap_number_result is saved.
991 // We currently always use s0 to pass it.
992 ASSERT(heap_number_result.is(s0));
994 // Push the current return address before the C call.
996 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
997 if (!IsMipsSoftFloatABI) {
998 CpuFeatures::Scope scope(FPU);
999 // We are not using MIPS FPU instructions, and parameters for the runtime
1000 // function call are prepaired in a0-a3 registers, but function we are
1001 // calling is compiled with hard-float flag and expecting hard float ABI
1002 // (parameters in f12/f14 registers). We need to copy parameters from
1003 // a0-a3 registers to f12/f14 register pairs.
1004 __ Move(f12, a0, a1);
1005 __ Move(f14, a2, a3);
1008 AllowExternalCallThatCantCauseGC scope(masm);
1010 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
1012 // Store answer in the overwritable heap number.
1013 if (!IsMipsSoftFloatABI) {
1014 CpuFeatures::Scope scope(FPU);
1015 // Double returned in register f0.
1016 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
1018 // Double returned in registers v0 and v1.
1019 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
1020 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
1022 // Place heap_number_result in v0 and return to the pushed return address.
1024 __ Ret(USE_DELAY_SLOT);
1025 __ mov(v0, heap_number_result);
1029 bool WriteInt32ToHeapNumberStub::IsPregenerated() {
1030 // These variants are compiled ahead of time. See next method.
1031 if (the_int_.is(a1) &&
1032 the_heap_number_.is(v0) &&
1037 if (the_int_.is(a2) &&
1038 the_heap_number_.is(v0) &&
1043 // Other register combinations are generated as and when they are needed,
1044 // so it is unsafe to call them from stubs (we can't generate a stub while
1045 // we are generating a stub).
1050 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
1051 WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
1052 WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
1053 stub1.GetCode()->set_is_pregenerated(true);
1054 stub2.GetCode()->set_is_pregenerated(true);
1058 // See comment for class, this does NOT work for int32's that are in Smi range.
1059 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
1060 Label max_negative_int;
1061 // the_int_ has the answer which is a signed int32 but not a Smi.
1062 // We test for the special value that has a different exponent.
1063 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
1064 // Test sign, and save for later conditionals.
1065 __ And(sign_, the_int_, Operand(0x80000000u));
1066 __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
1068 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
1069 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
1070 uint32_t non_smi_exponent =
1071 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
1072 __ li(scratch_, Operand(non_smi_exponent));
1073 // Set the sign bit in scratch_ if the value was negative.
1074 __ or_(scratch_, scratch_, sign_);
1075 // Subtract from 0 if the value was negative.
1076 __ subu(at, zero_reg, the_int_);
1077 __ Movn(the_int_, at, sign_);
1078 // We should be masking the implict first digit of the mantissa away here,
1079 // but it just ends up combining harmlessly with the last digit of the
1080 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
1081 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
1082 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
1083 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1084 __ srl(at, the_int_, shift_distance);
1085 __ or_(scratch_, scratch_, at);
1086 __ sw(scratch_, FieldMemOperand(the_heap_number_,
1087 HeapNumber::kExponentOffset));
1088 __ sll(scratch_, the_int_, 32 - shift_distance);
1089 __ sw(scratch_, FieldMemOperand(the_heap_number_,
1090 HeapNumber::kMantissaOffset));
1093 __ bind(&max_negative_int);
1094 // The max negative int32 is stored as a positive number in the mantissa of
1095 // a double because it uses a sign bit instead of using two's complement.
1096 // The actual mantissa bits stored are all 0 because the implicit most
1097 // significant 1 bit is not stored.
1098 non_smi_exponent += 1 << HeapNumber::kExponentShift;
1099 __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
1101 FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
1102 __ mov(scratch_, zero_reg);
1104 FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
1109 // Handle the case where the lhs and rhs are the same object.
1110 // Equality is almost reflexive (everything but NaN), so this is a test
1111 // for "identity and not NaN".
1112 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
1115 bool never_nan_nan) {
1116 Label not_identical;
1117 Label heap_number, return_equal;
1118 Register exp_mask_reg = t5;
1120 __ Branch(¬_identical, ne, a0, Operand(a1));
1122 // The two objects are identical. If we know that one of them isn't NaN then
1123 // we now know they test equal.
1124 if (cc != eq || !never_nan_nan) {
1125 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
1127 // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
1128 // so we do the second best thing - test it ourselves.
1129 // They are both equal and they are not both Smis so both of them are not
1130 // Smis. If it's not a heap number, then return equal.
1131 if (cc == less || cc == greater) {
1132 __ GetObjectType(a0, t4, t4);
1133 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
1135 __ GetObjectType(a0, t4, t4);
1136 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
1137 // Comparing JS objects with <=, >= is complicated.
1139 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
1140 // Normally here we fall through to return_equal, but undefined is
1141 // special: (undefined == undefined) == true, but
1142 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
1143 if (cc == less_equal || cc == greater_equal) {
1144 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
1145 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
1146 __ Branch(&return_equal, ne, a0, Operand(t2));
1148 // undefined <= undefined should fail.
1149 __ li(v0, Operand(GREATER));
1151 // undefined >= undefined should fail.
1152 __ li(v0, Operand(LESS));
1160 __ bind(&return_equal);
1163 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
1164 } else if (cc == greater) {
1165 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
1167 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
1171 if (cc != eq || !never_nan_nan) {
1172 // For less and greater we don't have to check for NaN since the result of
1173 // x < x is false regardless. For the others here is some code to check
1175 if (cc != lt && cc != gt) {
1176 __ bind(&heap_number);
1177 // It is a heap number, so return non-equal if it's NaN and equal if it's
1180 // The representation of NaN values has all exponent bits (52..62) set,
1181 // and not all mantissa bits (0..51) clear.
1182 // Read top bits of double representation (second word of value).
1183 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1184 // Test that exponent bits are all set.
1185 __ And(t3, t2, Operand(exp_mask_reg));
1186 // If all bits not set (ne cond), then not a NaN, objects are equal.
1187 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
1189 // Shift out flag and all exponent bits, retaining only mantissa.
1190 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
1191 // Or with all low-bits of mantissa.
1192 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
1193 __ Or(v0, t3, Operand(t2));
1194 // For equal we already have the right value in v0: Return zero (equal)
1195 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
1196 // not (it's a NaN). For <= and >= we need to load v0 with the failing
1197 // value if it's a NaN.
1199 // All-zero means Infinity means equal.
1200 __ Ret(eq, v0, Operand(zero_reg));
1202 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
1204 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
1209 // No fall through here.
1212 __ bind(¬_identical);
1216 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
1219 Label* both_loaded_as_doubles,
1222 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1223 (lhs.is(a1) && rhs.is(a0)));
1226 __ JumpIfSmi(lhs, &lhs_is_smi);
1228 // Check whether the non-smi is a heap number.
1229 __ GetObjectType(lhs, t4, t4);
1231 // If lhs was not a number and rhs was a Smi then strict equality cannot
1232 // succeed. Return non-equal (lhs is already not zero).
1233 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
1236 // Smi compared non-strictly with a non-Smi non-heap-number. Call
1238 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1241 // Rhs is a smi, lhs is a number.
1242 // Convert smi rhs to double.
1243 if (CpuFeatures::IsSupported(FPU)) {
1244 CpuFeatures::Scope scope(FPU);
1245 __ sra(at, rhs, kSmiTagSize);
1247 __ cvt_d_w(f14, f14);
1248 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1250 // Load lhs to a double in a2, a3.
1251 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1252 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1254 // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
1256 ConvertToDoubleStub stub1(a1, a0, t6, t5);
1258 __ Call(stub1.GetCode());
1263 // We now have both loaded as doubles.
1264 __ jmp(both_loaded_as_doubles);
1266 __ bind(&lhs_is_smi);
1267 // Lhs is a Smi. Check whether the non-smi is a heap number.
1268 __ GetObjectType(rhs, t4, t4);
1270 // If lhs was not a number and rhs was a Smi then strict equality cannot
1271 // succeed. Return non-equal.
1272 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
1273 __ li(v0, Operand(1));
1275 // Smi compared non-strictly with a non-Smi non-heap-number. Call
1277 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1280 // Lhs is a smi, rhs is a number.
1281 // Convert smi lhs to double.
1282 if (CpuFeatures::IsSupported(FPU)) {
1283 CpuFeatures::Scope scope(FPU);
1284 __ sra(at, lhs, kSmiTagSize);
1286 __ cvt_d_w(f12, f12);
1287 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1289 // Convert lhs to a double format. t5 is scratch.
1291 ConvertToDoubleStub stub2(a3, a2, t6, t5);
1293 __ Call(stub2.GetCode());
1295 // Load rhs to a double in a1, a0.
1297 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1298 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1300 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1301 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1304 // Fall through to both_loaded_as_doubles.
1308 void EmitNanCheck(MacroAssembler* masm, Condition cc) {
1309 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1310 if (CpuFeatures::IsSupported(FPU)) {
1311 CpuFeatures::Scope scope(FPU);
1312 // Lhs and rhs are already loaded to f12 and f14 register pairs.
1313 __ Move(t0, t1, f14);
1314 __ Move(t2, t3, f12);
1316 // Lhs and rhs are already loaded to GP registers.
1317 __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1318 __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1319 __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1320 __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1322 Register rhs_exponent = exp_first ? t0 : t1;
1323 Register lhs_exponent = exp_first ? t2 : t3;
1324 Register rhs_mantissa = exp_first ? t1 : t0;
1325 Register lhs_mantissa = exp_first ? t3 : t2;
1326 Label one_is_nan, neither_is_nan;
1327 Label lhs_not_nan_exp_mask_is_loaded;
1329 Register exp_mask_reg = t4;
1330 __ li(exp_mask_reg, HeapNumber::kExponentMask);
1331 __ and_(t5, lhs_exponent, exp_mask_reg);
1332 __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
1334 __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1335 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1337 __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
1339 __ li(exp_mask_reg, HeapNumber::kExponentMask);
1340 __ bind(&lhs_not_nan_exp_mask_is_loaded);
1341 __ and_(t5, rhs_exponent, exp_mask_reg);
1343 __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
1345 __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1346 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1348 __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
1350 __ bind(&one_is_nan);
1351 // NaN comparisons always fail.
1352 // Load whatever we need in v0 to make the comparison fail.
1354 if (cc == lt || cc == le) {
1355 __ li(v0, Operand(GREATER));
1357 __ li(v0, Operand(LESS));
1361 __ bind(&neither_is_nan);
1365 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
1366 // f12 and f14 have the two doubles. Neither is a NaN.
1367 // Call a native function to do a comparison between two non-NaNs.
1368 // Call C routine that may not cause GC or other trouble.
1369 // We use a call_was and return manually because we need arguments slots to
1372 Label return_result_not_equal, return_result_equal;
1374 // Doubles are not equal unless they have the same bit pattern.
1375 // Exception: 0 and -0.
1376 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1377 if (CpuFeatures::IsSupported(FPU)) {
1378 CpuFeatures::Scope scope(FPU);
1379 // Lhs and rhs are already loaded to f12 and f14 register pairs.
1380 __ Move(t0, t1, f14);
1381 __ Move(t2, t3, f12);
1383 // Lhs and rhs are already loaded to GP registers.
1384 __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1385 __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1386 __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1387 __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1389 Register rhs_exponent = exp_first ? t0 : t1;
1390 Register lhs_exponent = exp_first ? t2 : t3;
1391 Register rhs_mantissa = exp_first ? t1 : t0;
1392 Register lhs_mantissa = exp_first ? t3 : t2;
1394 __ xor_(v0, rhs_mantissa, lhs_mantissa);
1395 __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
1397 __ subu(v0, rhs_exponent, lhs_exponent);
1398 __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
1400 __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
1401 __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
1402 __ or_(t4, rhs_exponent, lhs_exponent);
1403 __ or_(t4, t4, rhs_mantissa);
1405 __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
1407 __ bind(&return_result_equal);
1409 __ li(v0, Operand(EQUAL));
1413 __ bind(&return_result_not_equal);
1415 if (!CpuFeatures::IsSupported(FPU)) {
1417 __ PrepareCallCFunction(0, 2, t4);
1418 if (!IsMipsSoftFloatABI) {
1419 // We are not using MIPS FPU instructions, and parameters for the runtime
1420 // function call are prepaired in a0-a3 registers, but function we are
1421 // calling is compiled with hard-float flag and expecting hard float ABI
1422 // (parameters in f12/f14 registers). We need to copy parameters from
1423 // a0-a3 registers to f12/f14 register pairs.
1424 __ Move(f12, a0, a1);
1425 __ Move(f14, a2, a3);
1428 AllowExternalCallThatCantCauseGC scope(masm);
1429 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
1431 __ pop(ra); // Because this function returns int, result is in v0.
1434 CpuFeatures::Scope scope(FPU);
1435 Label equal, less_than;
1436 __ BranchF(&equal, NULL, eq, f12, f14);
1437 __ BranchF(&less_than, NULL, lt, f12, f14);
1439 // Not equal, not less, not NaN, must be greater.
1441 __ li(v0, Operand(GREATER));
1445 __ li(v0, Operand(EQUAL));
1448 __ bind(&less_than);
1449 __ li(v0, Operand(LESS));
1455 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1458 // If either operand is a JS object or an oddball value, then they are
1459 // not equal since their pointers are different.
1460 // There is no test for undetectability in strict equality.
1461 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
1462 Label first_non_object;
1463 // Get the type of the first operand into a2 and compare it with
1464 // FIRST_SPEC_OBJECT_TYPE.
1465 __ GetObjectType(lhs, a2, a2);
1466 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1469 Label return_not_equal;
1470 __ bind(&return_not_equal);
1471 __ Ret(USE_DELAY_SLOT);
1472 __ li(v0, Operand(1));
1474 __ bind(&first_non_object);
1475 // Check for oddballs: true, false, null, undefined.
1476 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
1478 __ GetObjectType(rhs, a3, a3);
1479 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1481 // Check for oddballs: true, false, null, undefined.
1482 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
1484 // Now that we have the types we might as well check for symbol-symbol.
1485 // Ensure that no non-strings have the symbol bit set.
1486 STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
1487 STATIC_ASSERT(kSymbolTag != 0);
1488 __ And(t2, a2, Operand(a3));
1489 __ And(t0, t2, Operand(kIsSymbolMask));
1490 __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
1494 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1497 Label* both_loaded_as_doubles,
1498 Label* not_heap_numbers,
1500 __ GetObjectType(lhs, a3, a2);
1501 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
1502 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
1503 // If first was a heap number & second wasn't, go to slow case.
1504 __ Branch(slow, ne, a3, Operand(a2));
1506 // Both are heap numbers. Load them up then jump to the code we have
1508 if (CpuFeatures::IsSupported(FPU)) {
1509 CpuFeatures::Scope scope(FPU);
1510 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1511 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1513 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1514 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1516 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1517 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1519 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1520 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1523 __ jmp(both_loaded_as_doubles);
1527 // Fast negative check for symbol-to-symbol equality.
1528 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1531 Label* possible_strings,
1532 Label* not_both_strings) {
1533 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1534 (lhs.is(a1) && rhs.is(a0)));
1536 // a2 is object type of lhs.
1537 // Ensure that no non-strings have the symbol bit set.
1539 STATIC_ASSERT(kSymbolTag != 0);
1540 __ And(at, a2, Operand(kIsNotStringMask));
1541 __ Branch(&object_test, ne, at, Operand(zero_reg));
1542 __ And(at, a2, Operand(kIsSymbolMask));
1543 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1544 __ GetObjectType(rhs, a3, a3);
1545 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1546 __ And(at, a3, Operand(kIsSymbolMask));
1547 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1549 // Both are symbols. We already checked they weren't the same pointer
1550 // so they are not equal.
1551 __ Ret(USE_DELAY_SLOT);
1552 __ li(v0, Operand(1)); // Non-zero indicates not equal.
1554 __ bind(&object_test);
1555 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1556 __ GetObjectType(rhs, a2, a3);
1557 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1559 // If both objects are undetectable, they are equal. Otherwise, they
1560 // are not equal, since they are different objects and an object is not
1561 // equal to undefined.
1562 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1563 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1564 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1565 __ and_(a0, a2, a3);
1566 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1567 __ Ret(USE_DELAY_SLOT);
1568 __ xori(v0, a0, 1 << Map::kIsUndetectable);
1572 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1580 // Use of registers. Register result is used as a temporary.
1581 Register number_string_cache = result;
1582 Register mask = scratch3;
1584 // Load the number string cache.
1585 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1587 // Make the hash mask from the length of the number string cache. It
1588 // contains two elements (number and string) for each cache entry.
1589 __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
1590 // Divide length by two (length is a smi).
1591 __ sra(mask, mask, kSmiTagSize + 1);
1592 __ Addu(mask, mask, -1); // Make mask.
1594 // Calculate the entry in the number string cache. The hash value in the
1595 // number string cache for smis is just the smi value, and the hash for
1596 // doubles is the xor of the upper and lower words. See
1597 // Heap::GetNumberStringCache.
1598 Isolate* isolate = masm->isolate();
1600 Label load_result_from_cache;
1601 if (!object_is_smi) {
1602 __ JumpIfSmi(object, &is_smi);
1603 if (CpuFeatures::IsSupported(FPU)) {
1604 CpuFeatures::Scope scope(FPU);
1607 Heap::kHeapNumberMapRootIndex,
1611 STATIC_ASSERT(8 == kDoubleSize);
1614 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
1615 __ lw(scratch2, MemOperand(scratch1, kPointerSize));
1616 __ lw(scratch1, MemOperand(scratch1, 0));
1617 __ Xor(scratch1, scratch1, Operand(scratch2));
1618 __ And(scratch1, scratch1, Operand(mask));
1620 // Calculate address of entry in string cache: each entry consists
1621 // of two pointer sized fields.
1622 __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
1623 __ Addu(scratch1, number_string_cache, scratch1);
1625 Register probe = mask;
1627 FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1628 __ JumpIfSmi(probe, not_found);
1629 __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
1630 __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
1631 __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
1632 __ Branch(not_found);
1634 // Note that there is no cache check for non-FPU case, even though
1635 // it seems there could be. May be a tiny opimization for non-FPU
1637 __ Branch(not_found);
1642 Register scratch = scratch1;
1643 __ sra(scratch, object, 1); // Shift away the tag.
1644 __ And(scratch, mask, Operand(scratch));
1646 // Calculate address of entry in string cache: each entry consists
1647 // of two pointer sized fields.
1648 __ sll(scratch, scratch, kPointerSizeLog2 + 1);
1649 __ Addu(scratch, number_string_cache, scratch);
1651 // Check if the entry is the smi we are looking for.
1652 Register probe = mask;
1653 __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1654 __ Branch(not_found, ne, object, Operand(probe));
1656 // Get the result from the cache.
1657 __ bind(&load_result_from_cache);
1659 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1661 __ IncrementCounter(isolate->counters()->number_to_string_native(),
1668 void NumberToStringStub::Generate(MacroAssembler* masm) {
1671 __ lw(a1, MemOperand(sp, 0));
1673 // Generate code to lookup number in the number string cache.
1674 GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
1678 // Handle number to string in the runtime system if not found in the cache.
1679 __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
1683 // On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
1684 // On exit, v0 is 0, positive, or negative (smi) to indicate the result
1685 // of the comparison.
1686 void CompareStub::Generate(MacroAssembler* masm) {
1687 Label slow; // Call builtin.
1688 Label not_smis, both_loaded_as_doubles;
1691 if (include_smi_compare_) {
1692 Label not_two_smis, smi_done;
1694 __ JumpIfNotSmi(a2, ¬_two_smis);
1697 __ Ret(USE_DELAY_SLOT);
1698 __ subu(v0, a1, a0);
1699 __ bind(¬_two_smis);
1700 } else if (FLAG_debug_code) {
1702 __ And(a2, a2, kSmiTagMask);
1703 __ Assert(ne, "CompareStub: unexpected smi operands.",
1704 a2, Operand(zero_reg));
1708 // NOTICE! This code is only reached after a smi-fast-case check, so
1709 // it is certain that at least one operand isn't a smi.
1711 // Handle the case where the objects are identical. Either returns the answer
1712 // or goes to slow. Only falls through if the objects were not identical.
1713 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1715 // If either is a Smi (we know that not both are), then they can only
1716 // be strictly equal if the other is a HeapNumber.
1717 STATIC_ASSERT(kSmiTag == 0);
1718 ASSERT_EQ(0, Smi::FromInt(0));
1719 __ And(t2, lhs_, Operand(rhs_));
1720 __ JumpIfNotSmi(t2, ¬_smis, t0);
1721 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1722 // 1) Return the answer.
1724 // 3) Fall through to both_loaded_as_doubles.
1725 // 4) Jump to rhs_not_nan.
1726 // In cases 3 and 4 we have found out we were dealing with a number-number
1727 // comparison and the numbers have been loaded into f12 and f14 as doubles,
1728 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1729 EmitSmiNonsmiComparison(masm, lhs_, rhs_,
1730 &both_loaded_as_doubles, &slow, strict_);
1732 __ bind(&both_loaded_as_doubles);
1733 // f12, f14 are the double representations of the left hand side
1734 // and the right hand side if we have FPU. Otherwise a2, a3 represent
1735 // left hand side and a0, a1 represent right hand side.
1737 Isolate* isolate = masm->isolate();
1738 if (CpuFeatures::IsSupported(FPU)) {
1739 CpuFeatures::Scope scope(FPU);
1741 __ li(t0, Operand(LESS));
1742 __ li(t1, Operand(GREATER));
1743 __ li(t2, Operand(EQUAL));
1745 // Check if either rhs or lhs is NaN.
1746 __ BranchF(NULL, &nan, eq, f12, f14);
1748 // Check if LESS condition is satisfied. If true, move conditionally
1750 __ c(OLT, D, f12, f14);
1752 // Use previous check to store conditionally to v0 oposite condition
1753 // (GREATER). If rhs is equal to lhs, this will be corrected in next
1756 // Check if EQUAL condition is satisfied. If true, move conditionally
1758 __ c(EQ, D, f12, f14);
1764 // NaN comparisons always fail.
1765 // Load whatever we need in v0 to make the comparison fail.
1766 if (cc_ == lt || cc_ == le) {
1767 __ li(v0, Operand(GREATER));
1769 __ li(v0, Operand(LESS));
1773 // Checks for NaN in the doubles we have loaded. Can return the answer or
1774 // fall through if neither is a NaN. Also binds rhs_not_nan.
1775 EmitNanCheck(masm, cc_);
1777 // Compares two doubles that are not NaNs. Returns the answer.
1778 // Never falls through.
1779 EmitTwoNonNanDoubleComparison(masm, cc_);
1783 // At this point we know we are dealing with two different objects,
1784 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1786 // This returns non-equal for some object types, or falls through if it
1788 EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1791 Label check_for_symbols;
1792 Label flat_string_check;
1793 // Check for heap-number-heap-number comparison. Can jump to slow case,
1794 // or load both doubles and jump to the code that handles
1795 // that case. If the inputs are not doubles then jumps to check_for_symbols.
1796 // In this case a2 will contain the type of lhs_.
1797 EmitCheckForTwoHeapNumbers(masm,
1800 &both_loaded_as_doubles,
1802 &flat_string_check);
1804 __ bind(&check_for_symbols);
1805 if (cc_ == eq && !strict_) {
1806 // Returns an answer for two symbols or two detectable objects.
1807 // Otherwise jumps to string case or not both strings case.
1808 // Assumes that a2 is the type of lhs_ on entry.
1809 EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1812 // Check for both being sequential ASCII strings, and inline if that is the
1814 __ bind(&flat_string_check);
1816 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
1818 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1820 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1827 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1835 // Never falls through to here.
1838 // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1840 __ Push(lhs_, rhs_);
1841 // Figure out which native to call and setup the arguments.
1842 Builtins::JavaScript native;
1844 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1846 native = Builtins::COMPARE;
1847 int ncr; // NaN compare result.
1848 if (cc_ == lt || cc_ == le) {
1851 ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
1854 __ li(a0, Operand(Smi::FromInt(ncr)));
1858 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1859 // tagged as a small integer.
1860 __ InvokeBuiltin(native, JUMP_FUNCTION);
1864 // The stub expects its argument in the tos_ register and returns its result in
1865 // it, too: zero for false, and a non-zero value for true.
1866 void ToBooleanStub::Generate(MacroAssembler* masm) {
1867 // This stub uses FPU instructions.
1868 CpuFeatures::Scope scope(FPU);
1871 const Register map = t5.is(tos_) ? t3 : t5;
1873 // undefined -> false.
1874 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
1876 // Boolean -> its value.
1877 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1878 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
1881 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
1883 if (types_.Contains(SMI)) {
1884 // Smis: 0 -> false, all other -> true
1885 __ And(at, tos_, kSmiTagMask);
1886 // tos_ contains the correct return value already
1887 __ Ret(eq, at, Operand(zero_reg));
1888 } else if (types_.NeedsMap()) {
1889 // If we need a map later and have a Smi -> patch.
1890 __ JumpIfSmi(tos_, &patch);
1893 if (types_.NeedsMap()) {
1894 __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
1896 if (types_.CanBeUndetectable()) {
1897 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1898 __ And(at, at, Operand(1 << Map::kIsUndetectable));
1899 // Undetectable -> false.
1900 __ Movn(tos_, zero_reg, at);
1901 __ Ret(ne, at, Operand(zero_reg));
1905 if (types_.Contains(SPEC_OBJECT)) {
1906 // Spec object -> true.
1907 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1908 // tos_ contains the correct non-zero return value already.
1909 __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1912 if (types_.Contains(STRING)) {
1913 // String value -> false iff empty.
1914 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1916 __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
1917 __ Ret(USE_DELAY_SLOT); // the string length is OK as the return value
1918 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1922 if (types_.Contains(HEAP_NUMBER)) {
1923 // Heap number -> false iff +0, -0, or NaN.
1924 Label not_heap_number;
1925 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1926 __ Branch(¬_heap_number, ne, map, Operand(at));
1927 Label zero_or_nan, number;
1928 __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1929 __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
1930 // "tos_" is a register, and contains a non zero value by default.
1931 // Hence we only need to overwrite "tos_" with zero to return false for
1932 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1933 __ bind(&zero_or_nan);
1934 __ mov(tos_, zero_reg);
1937 __ bind(¬_heap_number);
1941 GenerateTypeTransition(masm);
1945 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
1947 Heap::RootListIndex value,
1949 if (types_.Contains(type)) {
1950 // If we see an expected oddball, return its ToBoolean value tos_.
1951 __ LoadRoot(at, value);
1952 __ Subu(at, at, tos_); // This is a check for equality for the movz below.
1953 // The value of a root is never NULL, so we can avoid loading a non-null
1954 // value into tos_ when we want to return 'true'.
1956 __ Movz(tos_, zero_reg, at);
1958 __ Ret(eq, at, Operand(zero_reg));
1963 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
1965 __ li(a2, Operand(Smi::FromInt(tos_.code())));
1966 __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
1967 __ Push(a3, a2, a1);
1968 // Patch the caller to an appropriate specialized stub and return the
1969 // operation result to the caller of the stub.
1970 __ TailCallExternalReference(
1971 ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
1977 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1978 // We don't allow a GC during a store buffer overflow so there is no need to
1979 // store the registers in any particular way, but we do have to store and
1981 __ MultiPush(kJSCallerSaved | ra.bit());
1982 if (save_doubles_ == kSaveFPRegs) {
1983 CpuFeatures::Scope scope(FPU);
1984 __ MultiPushFPU(kCallerSavedFPU);
1986 const int argument_count = 1;
1987 const int fp_argument_count = 0;
1988 const Register scratch = a1;
1990 AllowExternalCallThatCantCauseGC scope(masm);
1991 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1992 __ li(a0, Operand(ExternalReference::isolate_address()));
1994 ExternalReference::store_buffer_overflow_function(masm->isolate()),
1996 if (save_doubles_ == kSaveFPRegs) {
1997 CpuFeatures::Scope scope(FPU);
1998 __ MultiPopFPU(kCallerSavedFPU);
2001 __ MultiPop(kJSCallerSaved | ra.bit());
2006 void UnaryOpStub::PrintName(StringStream* stream) {
2007 const char* op_name = Token::Name(op_);
2008 const char* overwrite_name = NULL; // Make g++ happy.
2010 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
2011 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
2013 stream->Add("UnaryOpStub_%s_%s_%s",
2016 UnaryOpIC::GetName(operand_type_));
2020 // TODO(svenpanne): Use virtual functions instead of switch.
2021 void UnaryOpStub::Generate(MacroAssembler* masm) {
2022 switch (operand_type_) {
2023 case UnaryOpIC::UNINITIALIZED:
2024 GenerateTypeTransition(masm);
2026 case UnaryOpIC::SMI:
2027 GenerateSmiStub(masm);
2029 case UnaryOpIC::HEAP_NUMBER:
2030 GenerateHeapNumberStub(masm);
2032 case UnaryOpIC::GENERIC:
2033 GenerateGenericStub(masm);
2039 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2040 // Argument is in a0 and v0 at this point, so we can overwrite a0.
2041 __ li(a2, Operand(Smi::FromInt(op_)));
2042 __ li(a1, Operand(Smi::FromInt(mode_)));
2043 __ li(a0, Operand(Smi::FromInt(operand_type_)));
2044 __ Push(v0, a2, a1, a0);
2046 __ TailCallExternalReference(
2047 ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
2051 // TODO(svenpanne): Use virtual functions instead of switch.
2052 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2055 GenerateSmiStubSub(masm);
2057 case Token::BIT_NOT:
2058 GenerateSmiStubBitNot(masm);
2066 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
2067 Label non_smi, slow;
2068 GenerateSmiCodeSub(masm, &non_smi, &slow);
2071 GenerateTypeTransition(masm);
2075 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
2077 GenerateSmiCodeBitNot(masm, &non_smi);
2079 GenerateTypeTransition(masm);
2083 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2086 __ JumpIfNotSmi(a0, non_smi);
2088 // The result of negating zero or the smallest negative smi is not a smi.
2089 __ And(t0, a0, ~0x80000000);
2090 __ Branch(slow, eq, t0, Operand(zero_reg));
2092 // Return '0 - value'.
2093 __ Ret(USE_DELAY_SLOT);
2094 __ subu(v0, zero_reg, a0);
2098 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2100 __ JumpIfNotSmi(a0, non_smi);
2102 // Flip bits and revert inverted smi-tag.
2104 __ And(v0, v0, ~kSmiTagMask);
2109 // TODO(svenpanne): Use virtual functions instead of switch.
2110 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2113 GenerateHeapNumberStubSub(masm);
2115 case Token::BIT_NOT:
2116 GenerateHeapNumberStubBitNot(masm);
2124 void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
2125 Label non_smi, slow, call_builtin;
2126 GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
2128 GenerateHeapNumberCodeSub(masm, &slow);
2130 GenerateTypeTransition(masm);
2131 __ bind(&call_builtin);
2132 GenerateGenericCodeFallback(masm);
2136 void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
2137 Label non_smi, slow;
2138 GenerateSmiCodeBitNot(masm, &non_smi);
2140 GenerateHeapNumberCodeBitNot(masm, &slow);
2142 GenerateTypeTransition(masm);
2146 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
2148 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2149 // a0 is a heap number. Get a new heap number in a1.
2150 if (mode_ == UNARY_OVERWRITE) {
2151 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2152 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2153 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2155 Label slow_allocate_heapnumber, heapnumber_allocated;
2156 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
2157 __ jmp(&heapnumber_allocated);
2159 __ bind(&slow_allocate_heapnumber);
2161 FrameScope scope(masm, StackFrame::INTERNAL);
2163 __ CallRuntime(Runtime::kNumberAlloc, 0);
2168 __ bind(&heapnumber_allocated);
2169 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
2170 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2171 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
2172 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2173 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
2180 void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2181 MacroAssembler* masm,
2185 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2186 // Convert the heap number in a0 to an untagged integer in a1.
2187 __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
2189 // Do the bitwise operation and check if the result fits in a smi.
2192 __ Addu(a2, a1, Operand(0x40000000));
2193 __ Branch(&try_float, lt, a2, Operand(zero_reg));
2195 // Tag the result as a smi and we're done.
2199 // Try to store the result in a heap number.
2200 __ bind(&try_float);
2201 if (mode_ == UNARY_NO_OVERWRITE) {
2202 Label slow_allocate_heapnumber, heapnumber_allocated;
2203 // Allocate a new heap number without zapping v0, which we need if it fails.
2204 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
2205 __ jmp(&heapnumber_allocated);
2207 __ bind(&slow_allocate_heapnumber);
2209 FrameScope scope(masm, StackFrame::INTERNAL);
2210 __ push(v0); // Push the heap number, not the untagged int32.
2211 __ CallRuntime(Runtime::kNumberAlloc, 0);
2212 __ mov(a2, v0); // Move the new heap number into a2.
2213 // Get the heap number into v0, now that the new heap number is in a2.
2217 // Convert the heap number in v0 to an untagged integer in a1.
2218 // This can't go slow-case because it's the same number we already
2219 // converted once again.
2220 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2221 // Negate the result.
2224 __ bind(&heapnumber_allocated);
2225 __ mov(v0, a2); // Move newly allocated heap number to v0.
2228 if (CpuFeatures::IsSupported(FPU)) {
2229 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
2230 CpuFeatures::Scope scope(FPU);
2233 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2236 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2237 // have to set up a frame.
2238 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2239 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2242 __ bind(&impossible);
2243 if (FLAG_debug_code) {
2244 __ stop("Incorrect assumption in bit-not stub");
2249 // TODO(svenpanne): Use virtual functions instead of switch.
2250 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2253 GenerateGenericStubSub(masm);
2255 case Token::BIT_NOT:
2256 GenerateGenericStubBitNot(masm);
2264 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2265 Label non_smi, slow;
2266 GenerateSmiCodeSub(masm, &non_smi, &slow);
2268 GenerateHeapNumberCodeSub(masm, &slow);
2270 GenerateGenericCodeFallback(masm);
2274 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2275 Label non_smi, slow;
2276 GenerateSmiCodeBitNot(masm, &non_smi);
2278 GenerateHeapNumberCodeBitNot(masm, &slow);
2280 GenerateGenericCodeFallback(masm);
2284 void UnaryOpStub::GenerateGenericCodeFallback(
2285 MacroAssembler* masm) {
2286 // Handle the slow case by jumping to the JavaScript builtin.
2290 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2292 case Token::BIT_NOT:
2293 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2301 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2306 __ li(a2, Operand(Smi::FromInt(MinorKey())));
2307 __ li(a1, Operand(Smi::FromInt(op_)));
2308 __ li(a0, Operand(Smi::FromInt(operands_type_)));
2309 __ Push(a2, a1, a0);
2311 __ TailCallExternalReference(
2312 ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2319 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2320 MacroAssembler* masm) {
2325 void BinaryOpStub::Generate(MacroAssembler* masm) {
2326 // Explicitly allow generation of nested stubs. It is safe here because
2327 // generation code does not use any raw pointers.
2328 AllowStubCallsScope allow_stub_calls(masm, true);
2329 switch (operands_type_) {
2330 case BinaryOpIC::UNINITIALIZED:
2331 GenerateTypeTransition(masm);
2333 case BinaryOpIC::SMI:
2334 GenerateSmiStub(masm);
2336 case BinaryOpIC::INT32:
2337 GenerateInt32Stub(masm);
2339 case BinaryOpIC::HEAP_NUMBER:
2340 GenerateHeapNumberStub(masm);
2342 case BinaryOpIC::ODDBALL:
2343 GenerateOddballStub(masm);
2345 case BinaryOpIC::BOTH_STRING:
2346 GenerateBothStringStub(masm);
2348 case BinaryOpIC::STRING:
2349 GenerateStringStub(masm);
2351 case BinaryOpIC::GENERIC:
2352 GenerateGeneric(masm);
2360 void BinaryOpStub::PrintName(StringStream* stream) {
2361 const char* op_name = Token::Name(op_);
2362 const char* overwrite_name;
2364 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2365 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2366 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2367 default: overwrite_name = "UnknownOverwrite"; break;
2369 stream->Add("BinaryOpStub_%s_%s_%s",
2372 BinaryOpIC::GetName(operands_type_));
2377 void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2379 Register right = a0;
2381 Register scratch1 = t0;
2382 Register scratch2 = t1;
2384 ASSERT(right.is(a0));
2385 STATIC_ASSERT(kSmiTag == 0);
2387 Label not_smi_result;
2390 __ AdduAndCheckForOverflow(v0, left, right, scratch1);
2391 __ RetOnNoOverflow(scratch1);
2392 // No need to revert anything - right and left are intact.
2395 __ SubuAndCheckForOverflow(v0, left, right, scratch1);
2396 __ RetOnNoOverflow(scratch1);
2397 // No need to revert anything - right and left are intact.
2400 // Remove tag from one of the operands. This way the multiplication result
2401 // will be a smi if it fits the smi range.
2402 __ SmiUntag(scratch1, right);
2403 // Do multiplication.
2404 // lo = lower 32 bits of scratch1 * left.
2405 // hi = higher 32 bits of scratch1 * left.
2406 __ Mult(left, scratch1);
2407 // Check for overflowing the smi range - no overflow if higher 33 bits of
2408 // the result are identical.
2411 __ sra(scratch1, scratch1, 31);
2412 __ Branch(¬_smi_result, ne, scratch1, Operand(scratch2));
2413 // Go slow on zero result to handle -0.
2415 __ Ret(ne, v0, Operand(zero_reg));
2416 // We need -0 if we were multiplying a negative number with 0 to get 0.
2417 // We know one of them was zero.
2418 __ Addu(scratch2, right, left);
2420 // ARM uses the 'pl' condition, which is 'ge'.
2421 // Negating it results in 'lt'.
2422 __ Branch(&skip, lt, scratch2, Operand(zero_reg));
2423 ASSERT(Smi::FromInt(0) == 0);
2424 __ Ret(USE_DELAY_SLOT);
2425 __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
2427 // We fall through here if we multiplied a negative number with 0, because
2428 // that would mean we should produce -0.
2433 __ SmiUntag(scratch2, right);
2434 __ SmiUntag(scratch1, left);
2435 __ Div(scratch1, scratch2);
2436 // A minor optimization: div may be calculated asynchronously, so we check
2437 // for division by zero before getting the result.
2438 __ Branch(¬_smi_result, eq, scratch2, Operand(zero_reg));
2439 // If the result is 0, we need to make sure the dividsor (right) is
2440 // positive, otherwise it is a -0 case.
2441 // Quotient is in 'lo', remainder is in 'hi'.
2442 // Check for no remainder first.
2444 __ Branch(¬_smi_result, ne, scratch1, Operand(zero_reg));
2446 __ Branch(&done, ne, scratch1, Operand(zero_reg));
2447 __ Branch(¬_smi_result, lt, scratch2, Operand(zero_reg));
2449 // Check that the signed result fits in a Smi.
2450 __ Addu(scratch2, scratch1, Operand(0x40000000));
2451 __ Branch(¬_smi_result, lt, scratch2, Operand(zero_reg));
2452 __ SmiTag(v0, scratch1);
2458 __ SmiUntag(scratch2, right);
2459 __ SmiUntag(scratch1, left);
2460 __ Div(scratch1, scratch2);
2461 // A minor optimization: div may be calculated asynchronously, so we check
2462 // for division by 0 before calling mfhi.
2463 // Check for zero on the right hand side.
2464 __ Branch(¬_smi_result, eq, scratch2, Operand(zero_reg));
2465 // If the result is 0, we need to make sure the dividend (left) is
2466 // positive (or 0), otherwise it is a -0 case.
2467 // Remainder is in 'hi'.
2469 __ Branch(&done, ne, scratch2, Operand(zero_reg));
2470 __ Branch(¬_smi_result, lt, scratch1, Operand(zero_reg));
2472 // Check that the signed result fits in a Smi.
2473 __ Addu(scratch1, scratch2, Operand(0x40000000));
2474 __ Branch(¬_smi_result, lt, scratch1, Operand(zero_reg));
2475 __ SmiTag(v0, scratch2);
2480 __ Ret(USE_DELAY_SLOT);
2481 __ or_(v0, left, right);
2483 case Token::BIT_AND:
2484 __ Ret(USE_DELAY_SLOT);
2485 __ and_(v0, left, right);
2487 case Token::BIT_XOR:
2488 __ Ret(USE_DELAY_SLOT);
2489 __ xor_(v0, left, right);
2492 // Remove tags from right operand.
2493 __ GetLeastBitsFromSmi(scratch1, right, 5);
2494 __ srav(scratch1, left, scratch1);
2496 __ And(v0, scratch1, ~kSmiTagMask);
2500 // Remove tags from operands. We can't do this on a 31 bit number
2501 // because then the 0s get shifted into bit 30 instead of bit 31.
2502 __ SmiUntag(scratch1, left);
2503 __ GetLeastBitsFromSmi(scratch2, right, 5);
2504 __ srlv(v0, scratch1, scratch2);
2505 // Unsigned shift is not allowed to produce a negative number, so
2506 // check the sign bit and the sign bit after Smi tagging.
2507 __ And(scratch1, v0, Operand(0xc0000000));
2508 __ Branch(¬_smi_result, ne, scratch1, Operand(zero_reg));
2514 // Remove tags from operands.
2515 __ SmiUntag(scratch1, left);
2516 __ GetLeastBitsFromSmi(scratch2, right, 5);
2517 __ sllv(scratch1, scratch1, scratch2);
2518 // Check that the signed result fits in a Smi.
2519 __ Addu(scratch2, scratch1, Operand(0x40000000));
2520 __ Branch(¬_smi_result, lt, scratch2, Operand(zero_reg));
2521 __ SmiTag(v0, scratch1);
2527 __ bind(¬_smi_result);
2531 void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2534 Label* gc_required) {
2536 Register right = a0;
2537 Register scratch1 = t3;
2538 Register scratch2 = t5;
2539 Register scratch3 = t0;
2541 ASSERT(smi_operands || (not_numbers != NULL));
2542 if (smi_operands && FLAG_debug_code) {
2543 __ AbortIfNotSmi(left);
2544 __ AbortIfNotSmi(right);
2547 Register heap_number_map = t2;
2548 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2556 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
2557 // depending on whether FPU is available or not.
2558 FloatingPointHelper::Destination destination =
2559 CpuFeatures::IsSupported(FPU) &&
2561 FloatingPointHelper::kFPURegisters :
2562 FloatingPointHelper::kCoreRegisters;
2564 // Allocate new heap number for result.
2565 Register result = s0;
2566 GenerateHeapResultAllocation(
2567 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2569 // Load the operands.
2571 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2573 FloatingPointHelper::LoadOperands(masm,
2581 // Calculate the result.
2582 if (destination == FloatingPointHelper::kFPURegisters) {
2583 // Using FPU registers:
2585 // f14: Right value.
2586 CpuFeatures::Scope scope(FPU);
2589 __ add_d(f10, f12, f14);
2592 __ sub_d(f10, f12, f14);
2595 __ mul_d(f10, f12, f14);
2598 __ div_d(f10, f12, f14);
2604 // ARM uses a workaround here because of the unaligned HeapNumber
2605 // kValueOffset. On MIPS this workaround is built into sdc1 so
2606 // there's no point in generating even more instructions.
2607 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
2608 __ Ret(USE_DELAY_SLOT);
2611 // Call the C function to handle the double operation.
2612 FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2616 if (FLAG_debug_code) {
2617 __ stop("Unreachable code.");
2623 case Token::BIT_XOR:
2624 case Token::BIT_AND:
2629 __ SmiUntag(a3, left);
2630 __ SmiUntag(a2, right);
2632 // Convert operands to 32-bit integers. Right in a2 and left in a3.
2633 FloatingPointHelper::ConvertNumberToInt32(masm,
2642 FloatingPointHelper::ConvertNumberToInt32(masm,
2652 Label result_not_a_smi;
2655 __ Or(a2, a3, Operand(a2));
2657 case Token::BIT_XOR:
2658 __ Xor(a2, a3, Operand(a2));
2660 case Token::BIT_AND:
2661 __ And(a2, a3, Operand(a2));
2664 // Use only the 5 least significant bits of the shift count.
2665 __ GetLeastBitsFromInt32(a2, a2, 5);
2666 __ srav(a2, a3, a2);
2669 // Use only the 5 least significant bits of the shift count.
2670 __ GetLeastBitsFromInt32(a2, a2, 5);
2671 __ srlv(a2, a3, a2);
2672 // SHR is special because it is required to produce a positive answer.
2673 // The code below for writing into heap numbers isn't capable of
2674 // writing the register as an unsigned int so we go to slow case if we
2676 if (CpuFeatures::IsSupported(FPU)) {
2677 __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
2679 __ Branch(not_numbers, lt, a2, Operand(zero_reg));
2683 // Use only the 5 least significant bits of the shift count.
2684 __ GetLeastBitsFromInt32(a2, a2, 5);
2685 __ sllv(a2, a3, a2);
2690 // Check that the *signed* result fits in a smi.
2691 __ Addu(a3, a2, Operand(0x40000000));
2692 __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
2696 // Allocate new heap number for result.
2697 __ bind(&result_not_a_smi);
2698 Register result = t1;
2700 __ AllocateHeapNumber(
2701 result, scratch1, scratch2, heap_number_map, gc_required);
2703 GenerateHeapResultAllocation(
2704 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2707 // a2: Answer as signed int32.
2708 // t1: Heap number to write answer into.
2710 // Nothing can go wrong now, so move the heap number to v0, which is the
2714 if (CpuFeatures::IsSupported(FPU)) {
2715 // Convert the int32 in a2 to the heap number in a0. As
2716 // mentioned above SHR needs to always produce a positive result.
2717 CpuFeatures::Scope scope(FPU);
2719 if (op_ == Token::SHR) {
2720 __ Cvt_d_uw(f0, f0, f22);
2724 // ARM uses a workaround here because of the unaligned HeapNumber
2725 // kValueOffset. On MIPS this workaround is built into sdc1 so
2726 // there's no point in generating even more instructions.
2727 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2730 // Tail call that writes the int32 in a2 to the heap number in v0, using
2731 // a3 and a0 as scratch. v0 is preserved and returned.
2732 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
2733 __ TailCallStub(&stub);
2743 // Generate the smi code. If the operation on smis are successful this return is
2744 // generated. If the result is not a smi and heap number allocation is not
2745 // requested the code falls through. If number allocation is requested but a
2746 // heap number cannot be allocated the code jumps to the lable gc_required.
2747 void BinaryOpStub::GenerateSmiCode(
2748 MacroAssembler* masm,
2751 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2755 Register right = a0;
2756 Register scratch1 = t3;
2757 Register scratch2 = t5;
2759 // Perform combined smi check on both operands.
2760 __ Or(scratch1, left, Operand(right));
2761 STATIC_ASSERT(kSmiTag == 0);
2762 __ JumpIfNotSmi(scratch1, ¬_smis);
2764 // If the smi-smi operation results in a smi return is generated.
2765 GenerateSmiSmiOperation(masm);
2767 // If heap number results are possible generate the result in an allocated
2769 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2770 GenerateFPOperation(masm, true, use_runtime, gc_required);
2776 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2777 Label not_smis, call_runtime;
2779 if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2780 result_type_ == BinaryOpIC::SMI) {
2781 // Only allow smi results.
2782 GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2784 // Allow heap number result and don't make a transition if a heap number
2785 // cannot be allocated.
2786 GenerateSmiCode(masm,
2789 ALLOW_HEAPNUMBER_RESULTS);
2792 // Code falls through if the result is not returned as either a smi or heap
2794 GenerateTypeTransition(masm);
2796 __ bind(&call_runtime);
2797 GenerateCallRuntime(masm);
2801 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2802 ASSERT(operands_type_ == BinaryOpIC::STRING);
2803 // Try to add arguments as strings, otherwise, transition to the generic
2805 GenerateAddStrings(masm);
2806 GenerateTypeTransition(masm);
2810 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2812 ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2813 ASSERT(op_ == Token::ADD);
2814 // If both arguments are strings, call the string add stub.
2815 // Otherwise, do a transition.
2817 // Registers containing left and right operands respectively.
2819 Register right = a0;
2821 // Test if left operand is a string.
2822 __ JumpIfSmi(left, &call_runtime);
2823 __ GetObjectType(left, a2, a2);
2824 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2826 // Test if right operand is a string.
2827 __ JumpIfSmi(right, &call_runtime);
2828 __ GetObjectType(right, a2, a2);
2829 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2831 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2832 GenerateRegisterArgsPush(masm);
2833 __ TailCallStub(&string_add_stub);
2835 __ bind(&call_runtime);
2836 GenerateTypeTransition(masm);
2840 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2841 ASSERT(operands_type_ == BinaryOpIC::INT32);
2844 Register right = a0;
2845 Register scratch1 = t3;
2846 Register scratch2 = t5;
2847 FPURegister double_scratch = f0;
2848 FPURegister single_scratch = f6;
2850 Register heap_number_result = no_reg;
2851 Register heap_number_map = t2;
2852 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2855 // Labels for type transition, used for wrong input or output types.
2856 // Both label are currently actually bound to the same position. We use two
2857 // different label to differentiate the cause leading to type transition.
2860 // Smi-smi fast case.
2862 __ Or(scratch1, left, right);
2863 __ JumpIfNotSmi(scratch1, &skip);
2864 GenerateSmiSmiOperation(masm);
2865 // Fall through if the result is not a smi.
2874 // Load both operands and check that they are 32-bit integer.
2875 // Jump to type transition if they are not. The registers a0 and a1 (right
2876 // and left) are preserved for the runtime call.
2877 FloatingPointHelper::Destination destination =
2878 (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
2879 ? FloatingPointHelper::kFPURegisters
2880 : FloatingPointHelper::kCoreRegisters;
2882 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2893 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2905 if (destination == FloatingPointHelper::kFPURegisters) {
2906 CpuFeatures::Scope scope(FPU);
2907 Label return_heap_number;
2910 __ add_d(f10, f12, f14);
2913 __ sub_d(f10, f12, f14);
2916 __ mul_d(f10, f12, f14);
2919 __ div_d(f10, f12, f14);
2925 if (op_ != Token::DIV) {
2926 // These operations produce an integer result.
2927 // Try to return a smi if we can.
2928 // Otherwise return a heap number if allowed, or jump to type
2931 Register except_flag = scratch2;
2932 __ EmitFPUTruncate(kRoundToZero,
2938 if (result_type_ <= BinaryOpIC::INT32) {
2939 // If except_flag != 0, result does not fit in a 32-bit integer.
2940 __ Branch(&transition, ne, except_flag, Operand(zero_reg));
2943 // Check if the result fits in a smi.
2944 __ mfc1(scratch1, single_scratch);
2945 __ Addu(scratch2, scratch1, Operand(0x40000000));
2946 // If not try to return a heap number.
2947 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
2948 // Check for minus zero. Return heap number for minus zero.
2950 __ Branch(¬_zero, ne, scratch1, Operand(zero_reg));
2951 __ mfc1(scratch2, f11);
2952 __ And(scratch2, scratch2, HeapNumber::kSignMask);
2953 __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
2956 // Tag the result and return.
2957 __ SmiTag(v0, scratch1);
2960 // DIV just falls through to allocating a heap number.
2963 __ bind(&return_heap_number);
2964 // Return a heap number, or fall through to type transition or runtime
2965 // call if we can't.
2966 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
2967 : BinaryOpIC::INT32)) {
2968 // We are using FPU registers so s0 is available.
2969 heap_number_result = s0;
2970 GenerateHeapResultAllocation(masm,
2976 __ mov(v0, heap_number_result);
2977 __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
2981 // A DIV operation expecting an integer result falls through
2982 // to type transition.
2985 // We preserved a0 and a1 to be able to call runtime.
2986 // Save the left value on the stack.
2989 Label pop_and_call_runtime;
2991 // Allocate a heap number to store the result.
2992 heap_number_result = s0;
2993 GenerateHeapResultAllocation(masm,
2998 &pop_and_call_runtime);
3000 // Load the left value from the value saved on the stack.
3003 // Call the C function to handle the double operation.
3004 FloatingPointHelper::CallCCodeForDoubleOperation(
3005 masm, op_, heap_number_result, scratch1);
3006 if (FLAG_debug_code) {
3007 __ stop("Unreachable code.");
3010 __ bind(&pop_and_call_runtime);
3012 __ Branch(&call_runtime);
3019 case Token::BIT_XOR:
3020 case Token::BIT_AND:
3024 Label return_heap_number;
3025 Register scratch3 = t1;
3026 // Convert operands to 32-bit integers. Right in a2 and left in a3. The
3027 // registers a0 and a1 (right and left) are preserved for the runtime
3029 FloatingPointHelper::LoadNumberAsInt32(masm,
3038 FloatingPointHelper::LoadNumberAsInt32(masm,
3048 // The ECMA-262 standard specifies that, for shift operations, only the
3049 // 5 least significant bits of the shift value should be used.
3052 __ Or(a2, a3, Operand(a2));
3054 case Token::BIT_XOR:
3055 __ Xor(a2, a3, Operand(a2));
3057 case Token::BIT_AND:
3058 __ And(a2, a3, Operand(a2));
3061 __ And(a2, a2, Operand(0x1f));
3062 __ srav(a2, a3, a2);
3065 __ And(a2, a2, Operand(0x1f));
3066 __ srlv(a2, a3, a2);
3067 // SHR is special because it is required to produce a positive answer.
3068 // We only get a negative result if the shift value (a2) is 0.
3069 // This result cannot be respresented as a signed 32-bit integer, try
3070 // to return a heap number if we can.
3071 // The non FPU code does not support this special case, so jump to
3072 // runtime if we don't support it.
3073 if (CpuFeatures::IsSupported(FPU)) {
3074 __ Branch((result_type_ <= BinaryOpIC::INT32)
3076 : &return_heap_number,
3081 __ Branch((result_type_ <= BinaryOpIC::INT32)
3090 __ And(a2, a2, Operand(0x1f));
3091 __ sllv(a2, a3, a2);
3097 // Check if the result fits in a smi.
3098 __ Addu(scratch1, a2, Operand(0x40000000));
3099 // If not try to return a heap number. (We know the result is an int32.)
3100 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
3101 // Tag the result and return.
3105 __ bind(&return_heap_number);
3106 heap_number_result = t1;
3107 GenerateHeapResultAllocation(masm,
3114 if (CpuFeatures::IsSupported(FPU)) {
3115 CpuFeatures::Scope scope(FPU);
3117 if (op_ != Token::SHR) {
3118 // Convert the result to a floating point value.
3119 __ mtc1(a2, double_scratch);
3120 __ cvt_d_w(double_scratch, double_scratch);
3122 // The result must be interpreted as an unsigned 32-bit integer.
3123 __ mtc1(a2, double_scratch);
3124 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
3127 // Store the result.
3128 __ mov(v0, heap_number_result);
3129 __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
3132 // Tail call that writes the int32 in a2 to the heap number in v0, using
3133 // a3 and a0 as scratch. v0 is preserved and returned.
3135 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
3136 __ TailCallStub(&stub);
3146 // We never expect DIV to yield an integer result, so we always generate
3147 // type transition code for DIV operations expecting an integer result: the
3148 // code will fall through to this type transition.
3149 if (transition.is_linked() ||
3150 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
3151 __ bind(&transition);
3152 GenerateTypeTransition(masm);
3155 __ bind(&call_runtime);
3156 GenerateCallRuntime(masm);
3160 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3163 if (op_ == Token::ADD) {
3164 // Handle string addition here, because it is the only operation
3165 // that does not do a ToNumber conversion on the operands.
3166 GenerateAddStrings(masm);
3169 // Convert oddball arguments to numbers.
3171 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3172 __ Branch(&check, ne, a1, Operand(t0));
3173 if (Token::IsBitOp(op_)) {
3174 __ li(a1, Operand(Smi::FromInt(0)));
3176 __ LoadRoot(a1, Heap::kNanValueRootIndex);
3180 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3181 __ Branch(&done, ne, a0, Operand(t0));
3182 if (Token::IsBitOp(op_)) {
3183 __ li(a0, Operand(Smi::FromInt(0)));
3185 __ LoadRoot(a0, Heap::kNanValueRootIndex);
3189 GenerateHeapNumberStub(masm);
3193 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3195 GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3197 __ bind(&call_runtime);
3198 GenerateCallRuntime(masm);
3202 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3203 Label call_runtime, call_string_add_or_runtime;
3205 GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3207 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3209 __ bind(&call_string_add_or_runtime);
3210 if (op_ == Token::ADD) {
3211 GenerateAddStrings(masm);
3214 __ bind(&call_runtime);
3215 GenerateCallRuntime(masm);
3219 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3220 ASSERT(op_ == Token::ADD);
3221 Label left_not_string, call_runtime;
3224 Register right = a0;
3226 // Check if left argument is a string.
3227 __ JumpIfSmi(left, &left_not_string);
3228 __ GetObjectType(left, a2, a2);
3229 __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3231 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3232 GenerateRegisterArgsPush(masm);
3233 __ TailCallStub(&string_add_left_stub);
3235 // Left operand is not a string, test right.
3236 __ bind(&left_not_string);
3237 __ JumpIfSmi(right, &call_runtime);
3238 __ GetObjectType(right, a2, a2);
3239 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3241 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3242 GenerateRegisterArgsPush(masm);
3243 __ TailCallStub(&string_add_right_stub);
3245 // At least one argument is not a string.
3246 __ bind(&call_runtime);
3250 void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3251 GenerateRegisterArgsPush(masm);
3254 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3257 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3260 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3263 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3266 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3269 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3271 case Token::BIT_AND:
3272 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3274 case Token::BIT_XOR:
3275 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3278 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3281 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3284 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3292 void BinaryOpStub::GenerateHeapResultAllocation(
3293 MacroAssembler* masm,
3295 Register heap_number_map,
3298 Label* gc_required) {
3300 // Code below will scratch result if allocation fails. To keep both arguments
3301 // intact for the runtime call result cannot be one of these.
3302 ASSERT(!result.is(a0) && !result.is(a1));
3304 if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3305 Label skip_allocation, allocated;
3306 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
3307 // If the overwritable operand is already an object, we skip the
3308 // allocation of a heap number.
3309 __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3310 // Allocate a heap number for the result.
3311 __ AllocateHeapNumber(
3312 result, scratch1, scratch2, heap_number_map, gc_required);
3313 __ Branch(&allocated);
3314 __ bind(&skip_allocation);
3315 // Use object holding the overwritable operand for result.
3316 __ mov(result, overwritable_operand);
3317 __ bind(&allocated);
3319 ASSERT(mode_ == NO_OVERWRITE);
3320 __ AllocateHeapNumber(
3321 result, scratch1, scratch2, heap_number_map, gc_required);
3326 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3332 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3333 // Untagged case: double input in f4, double result goes
3335 // Tagged case: tagged input on top of stack and in a0,
3336 // tagged result (heap number) goes into v0.
3338 Label input_not_smi;
3341 Label invalid_cache;
3342 const Register scratch0 = t5;
3343 const Register scratch1 = t3;
3344 const Register cache_entry = a0;
3345 const bool tagged = (argument_type_ == TAGGED);
3347 if (CpuFeatures::IsSupported(FPU)) {
3348 CpuFeatures::Scope scope(FPU);
3351 // Argument is a number and is on stack and in a0.
3352 // Load argument and check if it is a smi.
3353 __ JumpIfNotSmi(a0, &input_not_smi);
3355 // Input is a smi. Convert to double and load the low and high words
3356 // of the double into a2, a3.
3357 __ sra(t0, a0, kSmiTagSize);
3360 __ Move(a2, a3, f4);
3363 __ bind(&input_not_smi);
3364 // Check if input is a HeapNumber.
3367 Heap::kHeapNumberMapRootIndex,
3370 // Input is a HeapNumber. Store the
3371 // low and high words into a2, a3.
3372 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
3373 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
3375 // Input is untagged double in f4. Output goes to f4.
3376 __ Move(a2, a3, f4);
3379 // a2 = low 32 bits of double value.
3380 // a3 = high 32 bits of double value.
3381 // Compute hash (the shifts are arithmetic):
3382 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3388 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3389 __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
3391 // a2 = low 32 bits of double value.
3392 // a3 = high 32 bits of double value.
3393 // a1 = TranscendentalCache::hash(double value).
3394 __ li(cache_entry, Operand(
3395 ExternalReference::transcendental_cache_array_address(
3397 // a0 points to cache array.
3398 __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3399 Isolate::Current()->transcendental_cache()->caches_[0])));
3400 // a0 points to the cache for the type type_.
3401 // If NULL, the cache hasn't been initialized yet, so go through runtime.
3402 __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
3405 // Check that the layout of cache elements match expectations.
3406 { TranscendentalCache::SubCache::Element test_elem[2];
3407 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3408 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3409 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3410 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3411 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3412 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
3413 CHECK_EQ(0, elem_in0 - elem_start);
3414 CHECK_EQ(kIntSize, elem_in1 - elem_start);
3415 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3419 // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
3421 __ Addu(a1, a1, t0);
3423 __ Addu(cache_entry, cache_entry, t0);
3425 // Check if cache matches: Double value is stored in uint32_t[2] array.
3426 __ lw(t0, MemOperand(cache_entry, 0));
3427 __ lw(t1, MemOperand(cache_entry, 4));
3428 __ lw(t2, MemOperand(cache_entry, 8));
3429 __ Branch(&calculate, ne, a2, Operand(t0));
3430 __ Branch(&calculate, ne, a3, Operand(t1));
3431 // Cache hit. Load result, cleanup and return.
3432 Counters* counters = masm->isolate()->counters();
3433 __ IncrementCounter(
3434 counters->transcendental_cache_hit(), 1, scratch0, scratch1);
3436 // Pop input value from stack and load result into v0.
3440 // Load result into f4.
3441 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3444 } // if (CpuFeatures::IsSupported(FPU))
3446 __ bind(&calculate);
3447 Counters* counters = masm->isolate()->counters();
3448 __ IncrementCounter(
3449 counters->transcendental_cache_miss(), 1, scratch0, scratch1);
3451 __ bind(&invalid_cache);
3452 __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3457 if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
3458 CpuFeatures::Scope scope(FPU);
3462 const Register heap_number_map = t2;
3464 // Call C function to calculate the result and update the cache.
3465 // Register a0 holds precalculated cache entry address; preserve
3466 // it on the stack and pop it into register cache_entry after the
3468 __ Push(cache_entry, a2, a3);
3469 GenerateCallCFunction(masm, scratch0);
3470 __ GetCFunctionDoubleResult(f4);
3472 // Try to update the cache. If we cannot allocate a
3473 // heap number, we return the result without updating.
3474 __ Pop(cache_entry, a2, a3);
3475 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3476 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3477 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3479 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3480 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3481 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3483 __ Ret(USE_DELAY_SLOT);
3484 __ mov(v0, cache_entry);
3486 __ bind(&invalid_cache);
3487 // The cache is invalid. Call runtime which will recreate the
3489 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3490 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3491 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
3493 FrameScope scope(masm, StackFrame::INTERNAL);
3495 __ CallRuntime(RuntimeFunction(), 1);
3497 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3500 __ bind(&skip_cache);
3501 // Call C function to calculate the result and answer directly
3502 // without updating the cache.
3503 GenerateCallCFunction(masm, scratch0);
3504 __ GetCFunctionDoubleResult(f4);
3505 __ bind(&no_update);
3507 // We return the value in f4 without adding it to the cache, but
3508 // we cause a scavenging GC so that future allocations will succeed.
3510 FrameScope scope(masm, StackFrame::INTERNAL);
3512 // Allocate an aligned object larger than a HeapNumber.
3513 ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3514 __ li(scratch0, Operand(4 * kPointerSize));
3516 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3523 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3526 __ PrepareCallCFunction(2, scratch);
3527 if (IsMipsSoftFloatABI) {
3528 __ Move(a0, a1, f4);
3532 AllowExternalCallThatCantCauseGC scope(masm);
3533 Isolate* isolate = masm->isolate();
3535 case TranscendentalCache::SIN:
3537 ExternalReference::math_sin_double_function(isolate),
3540 case TranscendentalCache::COS:
3542 ExternalReference::math_cos_double_function(isolate),
3545 case TranscendentalCache::TAN:
3546 __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
3549 case TranscendentalCache::LOG:
3551 ExternalReference::math_log_double_function(isolate),
3562 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
3564 // Add more cases when necessary.
3565 case TranscendentalCache::SIN: return Runtime::kMath_sin;
3566 case TranscendentalCache::COS: return Runtime::kMath_cos;
3567 case TranscendentalCache::TAN: return Runtime::kMath_tan;
3568 case TranscendentalCache::LOG: return Runtime::kMath_log;
3571 return Runtime::kAbort;
3576 void StackCheckStub::Generate(MacroAssembler* masm) {
3577 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3581 void InterruptStub::Generate(MacroAssembler* masm) {
3582 __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
3586 void MathPowStub::Generate(MacroAssembler* masm) {
3587 CpuFeatures::Scope fpu_scope(FPU);
3588 const Register base = a1;
3589 const Register exponent = a2;
3590 const Register heapnumbermap = t1;
3591 const Register heapnumber = v0;
3592 const DoubleRegister double_base = f2;
3593 const DoubleRegister double_exponent = f4;
3594 const DoubleRegister double_result = f0;
3595 const DoubleRegister double_scratch = f6;
3596 const FPURegister single_scratch = f8;
3597 const Register scratch = t5;
3598 const Register scratch2 = t3;
3600 Label call_runtime, done, int_exponent;
3601 if (exponent_type_ == ON_STACK) {
3602 Label base_is_smi, unpack_exponent;
3603 // The exponent and base are supplied as arguments on the stack.
3604 // This can only happen if the stub is called from non-optimized code.
3605 // Load input parameters from stack to double registers.
3606 __ lw(base, MemOperand(sp, 1 * kPointerSize));
3607 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
3609 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3611 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
3612 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3613 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3615 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3616 __ jmp(&unpack_exponent);
3618 __ bind(&base_is_smi);
3619 __ mtc1(scratch, single_scratch);
3620 __ cvt_d_w(double_base, single_scratch);
3621 __ bind(&unpack_exponent);
3623 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3625 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3626 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3627 __ ldc1(double_exponent,
3628 FieldMemOperand(exponent, HeapNumber::kValueOffset));
3629 } else if (exponent_type_ == TAGGED) {
3630 // Base is already in double_base.
3631 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3633 __ ldc1(double_exponent,
3634 FieldMemOperand(exponent, HeapNumber::kValueOffset));
3637 if (exponent_type_ != INTEGER) {
3638 Label int_exponent_convert;
3639 // Detect integer exponents stored as double.
3640 __ EmitFPUTruncate(kRoundToMinusInf,
3645 kCheckForInexactConversion);
3646 // scratch2 == 0 means there was no conversion error.
3647 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
3649 if (exponent_type_ == ON_STACK) {
3650 // Detect square root case. Crankshaft detects constant +/-0.5 at
3651 // compile time and uses DoMathPowHalf instead. We then skip this check
3652 // for non-constant cases of +/-0.5 as these hardly occur.
3653 Label not_plus_half;
3656 __ Move(double_scratch, 0.5);
3657 __ BranchF(USE_DELAY_SLOT,
3663 // double_scratch can be overwritten in the delay slot.
3664 // Calculates square root of base. Check for the special case of
3665 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3666 __ Move(double_scratch, -V8_INFINITY);
3667 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3668 __ neg_d(double_result, double_scratch);
3670 // Add +0 to convert -0 to +0.
3671 __ add_d(double_scratch, double_base, kDoubleRegZero);
3672 __ sqrt_d(double_result, double_scratch);
3675 __ bind(¬_plus_half);
3676 __ Move(double_scratch, -0.5);
3677 __ BranchF(USE_DELAY_SLOT,
3683 // double_scratch can be overwritten in the delay slot.
3684 // Calculates square root of base. Check for the special case of
3685 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3686 __ Move(double_scratch, -V8_INFINITY);
3687 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3688 __ Move(double_result, kDoubleRegZero);
3690 // Add +0 to convert -0 to +0.
3691 __ add_d(double_scratch, double_base, kDoubleRegZero);
3692 __ Move(double_result, 1);
3693 __ sqrt_d(double_scratch, double_scratch);
3694 __ div_d(double_result, double_result, double_scratch);
3700 AllowExternalCallThatCantCauseGC scope(masm);
3701 __ PrepareCallCFunction(0, 2, scratch);
3702 __ SetCallCDoubleArguments(double_base, double_exponent);
3704 ExternalReference::power_double_double_function(masm->isolate()),
3708 __ GetCFunctionDoubleResult(double_result);
3711 __ bind(&int_exponent_convert);
3712 __ mfc1(scratch, single_scratch);
3715 // Calculate power with integer exponent.
3716 __ bind(&int_exponent);
3718 // Get two copies of exponent in the registers scratch and exponent.
3719 if (exponent_type_ == INTEGER) {
3720 __ mov(scratch, exponent);
3722 // Exponent has previously been stored into scratch as untagged integer.
3723 __ mov(exponent, scratch);
3726 __ mov_d(double_scratch, double_base); // Back up base.
3727 __ Move(double_result, 1.0);
3729 // Get absolute value of exponent.
3730 Label positive_exponent;
3731 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
3732 __ Subu(scratch, zero_reg, scratch);
3733 __ bind(&positive_exponent);
3735 Label while_true, no_carry, loop_end;
3736 __ bind(&while_true);
3738 __ And(scratch2, scratch, 1);
3740 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
3741 __ mul_d(double_result, double_result, double_scratch);
3744 __ sra(scratch, scratch, 1);
3746 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
3747 __ mul_d(double_scratch, double_scratch, double_scratch);
3749 __ Branch(&while_true);
3753 __ Branch(&done, ge, exponent, Operand(zero_reg));
3754 __ Move(double_scratch, 1.0);
3755 __ div_d(double_result, double_scratch, double_result);
3756 // Test whether result is zero. Bail out to check for subnormal result.
3757 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3758 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
3760 // double_exponent may not contain the exponent value if the input was a
3761 // smi. We set it with exponent value before bailing out.
3762 __ mtc1(exponent, single_scratch);
3763 __ cvt_d_w(double_exponent, single_scratch);
3765 // Returning or bailing out.
3766 Counters* counters = masm->isolate()->counters();
3767 if (exponent_type_ == ON_STACK) {
3768 // The arguments are still on the stack.
3769 __ bind(&call_runtime);
3770 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3772 // The stub is called from non-optimized code, which expects the result
3773 // as heap number in exponent.
3775 __ AllocateHeapNumber(
3776 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
3777 __ sdc1(double_result,
3778 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3779 ASSERT(heapnumber.is(v0));
3780 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3785 AllowExternalCallThatCantCauseGC scope(masm);
3786 __ PrepareCallCFunction(0, 2, scratch);
3787 __ SetCallCDoubleArguments(double_base, double_exponent);
3789 ExternalReference::power_double_double_function(masm->isolate()),
3793 __ GetCFunctionDoubleResult(double_result);
3796 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3802 bool CEntryStub::NeedsImmovableCode() {
3807 bool CEntryStub::IsPregenerated() {
3808 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
3813 void CodeStub::GenerateStubsAheadOfTime() {
3814 CEntryStub::GenerateAheadOfTime();
3815 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
3816 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
3817 RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
3821 void CodeStub::GenerateFPStubs() {
3822 CEntryStub save_doubles(1, kSaveFPRegs);
3823 Handle<Code> code = save_doubles.GetCode();
3824 code->set_is_pregenerated(true);
3825 StoreBufferOverflowStub stub(kSaveFPRegs);
3826 stub.GetCode()->set_is_pregenerated(true);
3827 code->GetIsolate()->set_fp_stubs_generated(true);
3831 void CEntryStub::GenerateAheadOfTime() {
3832 CEntryStub stub(1, kDontSaveFPRegs);
3833 Handle<Code> code = stub.GetCode();
3834 code->set_is_pregenerated(true);
3838 void CEntryStub::GenerateCore(MacroAssembler* masm,
3839 Label* throw_normal_exception,
3840 Label* throw_termination_exception,
3841 Label* throw_out_of_memory_exception,
3843 bool always_allocate) {
3844 // v0: result parameter for PerformGC, if any
3845 // s0: number of arguments including receiver (C callee-saved)
3846 // s1: pointer to the first argument (C callee-saved)
3847 // s2: pointer to builtin function (C callee-saved)
3849 Isolate* isolate = masm->isolate();
3852 // Move result passed in v0 into a0 to call PerformGC.
3854 __ PrepareCallCFunction(1, 0, a1);
3855 __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
3858 ExternalReference scope_depth =
3859 ExternalReference::heap_always_allocate_scope_depth(isolate);
3860 if (always_allocate) {
3861 __ li(a0, Operand(scope_depth));
3862 __ lw(a1, MemOperand(a0));
3863 __ Addu(a1, a1, Operand(1));
3864 __ sw(a1, MemOperand(a0));
3867 // Prepare arguments for C routine.
3870 // a1 = argv (set in the delay slot after find_ra below).
3872 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3873 // also need to reserve the 4 argument slots on the stack.
3875 __ AssertStackIsAligned();
3877 __ li(a2, Operand(ExternalReference::isolate_address()));
3879 // To let the GC traverse the return address of the exit frames, we need to
3880 // know where the return address is. The CEntryStub is unmovable, so
3881 // we can store the address on the stack to be able to find it again and
3882 // we never have to restore it, because it will not change.
3883 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
3884 // This branch-and-link sequence is needed to find the current PC on mips,
3885 // saved to the ra register.
3886 // Use masm-> here instead of the double-underscore macro since extra
3887 // coverage code can interfere with the proper calculation of ra.
3889 masm->bal(&find_ra); // bal exposes branch delay slot.
3891 masm->bind(&find_ra);
3893 // Adjust the value in ra to point to the correct return location, 2nd
3894 // instruction past the real call into C code (the jalr(t9)), and push it.
3895 // This is the return address of the exit frame.
3896 const int kNumInstructionsToJump = 5;
3897 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3898 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
3899 // Stack space reservation moved to the branch delay slot below.
3900 // Stack is still aligned.
3902 // Call the C routine.
3903 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
3905 // Set up sp in the delay slot.
3906 masm->addiu(sp, sp, -kCArgsSlotsSize);
3907 // Make sure the stored 'ra' points to this position.
3908 ASSERT_EQ(kNumInstructionsToJump,
3909 masm->InstructionsGeneratedSince(&find_ra));
3912 if (always_allocate) {
3913 // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3914 __ li(a2, Operand(scope_depth));
3915 __ lw(a3, MemOperand(a2));
3916 __ Subu(a3, a3, Operand(1));
3917 __ sw(a3, MemOperand(a2));
3920 // Check for failure result.
3921 Label failure_returned;
3922 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3923 __ addiu(a2, v0, 1);
3924 __ andi(t0, a2, kFailureTagMask);
3925 __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
3926 // Restore stack (remove arg slots) in branch delay slot.
3927 __ addiu(sp, sp, kCArgsSlotsSize);
3930 // Exit C frame and return.
3932 // sp: stack pointer
3933 // fp: frame pointer
3934 __ LeaveExitFrame(save_doubles_, s0, true);
3936 // Check if we should retry or throw exception.
3938 __ bind(&failure_returned);
3939 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3940 __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
3941 __ Branch(&retry, eq, t0, Operand(zero_reg));
3943 // Special handling of out of memory exceptions.
3944 Failure* out_of_memory = Failure::OutOfMemoryException();
3945 __ Branch(USE_DELAY_SLOT,
3946 throw_out_of_memory_exception,
3949 Operand(reinterpret_cast<int32_t>(out_of_memory)));
3950 // If we throw the OOM exception, the value of a3 doesn't matter.
3951 // Any instruction can be in the delay slot that's not a jump.
3953 // Retrieve the pending exception and clear the variable.
3954 __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
3955 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3957 __ lw(v0, MemOperand(t0));
3958 __ sw(a3, MemOperand(t0));
3960 // Special handling of termination exceptions which are uncatchable
3961 // by javascript code.
3962 __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
3963 __ Branch(throw_termination_exception, eq, v0, Operand(t0));
3965 // Handle normal exception.
3966 __ jmp(throw_normal_exception);
3969 // Last failure (v0) will be moved to (a0) for parameter when retrying.
3973 void CEntryStub::Generate(MacroAssembler* masm) {
3974 // Called from JavaScript; parameters are on stack as if calling JS function
3975 // s0: number of arguments including receiver
3976 // s1: size of arguments excluding receiver
3977 // s2: pointer to builtin function
3978 // fp: frame pointer (restored after C call)
3979 // sp: stack pointer (restored as callee's sp after C call)
3980 // cp: current context (C callee-saved)
3982 // NOTE: Invocations of builtins may return failure objects
3983 // instead of a proper result. The builtin entry handles
3984 // this by performing a garbage collection and retrying the
3987 // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
3988 // The reason for this is that these arguments would need to be saved anyway
3989 // so it's faster to set them up directly.
3990 // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
3992 // Compute the argv pointer in a callee-saved register.
3993 __ Addu(s1, sp, s1);
3995 // Enter the exit frame that transitions from JavaScript to C++.
3996 FrameScope scope(masm, StackFrame::MANUAL);
3997 __ EnterExitFrame(save_doubles_);
3999 // s0: number of arguments (C callee-saved)
4000 // s1: pointer to first argument (C callee-saved)
4001 // s2: pointer to builtin function (C callee-saved)
4003 Label throw_normal_exception;
4004 Label throw_termination_exception;
4005 Label throw_out_of_memory_exception;
4007 // Call into the runtime system.
4009 &throw_normal_exception,
4010 &throw_termination_exception,
4011 &throw_out_of_memory_exception,
4015 // Do space-specific GC and retry runtime call.
4017 &throw_normal_exception,
4018 &throw_termination_exception,
4019 &throw_out_of_memory_exception,
4023 // Do full GC and retry runtime call one final time.
4024 Failure* failure = Failure::InternalError();
4025 __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
4027 &throw_normal_exception,
4028 &throw_termination_exception,
4029 &throw_out_of_memory_exception,
4033 __ bind(&throw_out_of_memory_exception);
4034 // Set external caught exception to false.
4035 Isolate* isolate = masm->isolate();
4036 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
4038 __ li(a0, Operand(false, RelocInfo::NONE));
4039 __ li(a2, Operand(external_caught));
4040 __ sw(a0, MemOperand(a2));
4042 // Set pending exception and v0 to out of memory exception.
4043 Failure* out_of_memory = Failure::OutOfMemoryException();
4044 __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
4045 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4047 __ sw(v0, MemOperand(a2));
4048 // Fall through to the next label.
4050 __ bind(&throw_termination_exception);
4051 __ ThrowUncatchable(v0);
4053 __ bind(&throw_normal_exception);
4058 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4059 Label invoke, handler_entry, exit;
4060 Isolate* isolate = masm->isolate();
4063 // a0: entry address
4072 // Save callee saved registers on the stack.
4073 __ MultiPush(kCalleeSaved | ra.bit());
4075 if (CpuFeatures::IsSupported(FPU)) {
4076 CpuFeatures::Scope scope(FPU);
4077 // Save callee-saved FPU registers.
4078 __ MultiPushFPU(kCalleeSavedFPU);
4079 // Set up the reserved register for 0.0.
4080 __ Move(kDoubleRegZero, 0.0);
4084 // Load argv in s0 register.
4085 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
4086 if (CpuFeatures::IsSupported(FPU)) {
4087 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
4090 __ InitializeRootRegister();
4091 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
4093 // We build an EntryFrame.
4094 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
4095 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4096 __ li(t2, Operand(Smi::FromInt(marker)));
4097 __ li(t1, Operand(Smi::FromInt(marker)));
4098 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4100 __ lw(t0, MemOperand(t0));
4101 __ Push(t3, t2, t1, t0);
4102 // Set up frame pointer for the frame to be pushed.
4103 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
4106 // a0: entry_address
4108 // a2: receiver_pointer
4114 // function slot | entry frame
4116 // bad fp (0xff...f) |
4117 // callee saved registers + ra
4121 // If this is the outermost JS call, set js_entry_sp value.
4122 Label non_outermost_js;
4123 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
4124 __ li(t1, Operand(ExternalReference(js_entry_sp)));
4125 __ lw(t2, MemOperand(t1));
4126 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
4127 __ sw(fp, MemOperand(t1));
4128 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4131 __ nop(); // Branch delay slot nop.
4132 __ bind(&non_outermost_js);
4133 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
4137 // Jump to a faked try block that does the invoke, with a faked catch
4138 // block that sets the pending exception.
4140 __ bind(&handler_entry);
4141 handler_offset_ = handler_entry.pos();
4142 // Caught exception: Store result (exception) in the pending exception
4143 // field in the JSEnv and return a failure sentinel. Coming in here the
4144 // fp will be invalid because the PushTryHandler below sets it to 0 to
4145 // signal the existence of the JSEntry frame.
4146 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4148 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
4149 __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
4150 __ b(&exit); // b exposes branch delay slot.
4151 __ nop(); // Branch delay slot nop.
4153 // Invoke: Link this frame into the handler chain. There's only one
4154 // handler block in this code object, so its index is 0.
4156 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
4157 // If an exception not caught by another handler occurs, this handler
4158 // returns control to the code after the bal(&invoke) above, which
4159 // restores all kCalleeSaved registers (including cp and fp) to their
4160 // saved values before returning a failure to C.
4162 // Clear any pending exceptions.
4163 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
4164 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4166 __ sw(t1, MemOperand(t0));
4168 // Invoke the function by calling through JS entry trampoline builtin.
4169 // Notice that we cannot store a reference to the trampoline code directly in
4170 // this stub, because runtime stubs are not traversed when doing GC.
4173 // a0: entry_address
4175 // a2: receiver_pointer
4182 // callee saved registers + ra
4187 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4189 __ li(t0, Operand(construct_entry));
4191 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
4192 __ li(t0, Operand(entry));
4194 __ lw(t9, MemOperand(t0)); // Deref address.
4196 // Call JSEntryTrampoline.
4197 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
4200 // Unlink this frame from the handler chain.
4203 __ bind(&exit); // v0 holds result
4204 // Check if the current stack frame is marked as the outermost JS frame.
4205 Label non_outermost_js_2;
4207 __ Branch(&non_outermost_js_2,
4210 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4211 __ li(t1, Operand(ExternalReference(js_entry_sp)));
4212 __ sw(zero_reg, MemOperand(t1));
4213 __ bind(&non_outermost_js_2);
4215 // Restore the top frame descriptors from the stack.
4217 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4219 __ sw(t1, MemOperand(t0));
4221 // Reset the stack to the callee saved registers.
4222 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
4224 if (CpuFeatures::IsSupported(FPU)) {
4225 CpuFeatures::Scope scope(FPU);
4226 // Restore callee-saved fpu registers.
4227 __ MultiPopFPU(kCalleeSavedFPU);
4230 // Restore callee saved registers from the stack.
4231 __ MultiPop(kCalleeSaved | ra.bit());
4237 // Uses registers a0 to t0.
4238 // Expected input (depending on whether args are in registers or on the stack):
4239 // * object: a0 or at sp + 1 * kPointerSize.
4240 // * function: a1 or at sp.
4242 // An inlined call site may have been generated before calling this stub.
4243 // In this case the offset to the inline site to patch is passed on the stack,
4244 // in the safepoint slot for register t0.
4245 void InstanceofStub::Generate(MacroAssembler* masm) {
4246 // Call site inlining and patching implies arguments in registers.
4247 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4248 // ReturnTrueFalse is only implemented for inlined call sites.
4249 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4251 // Fixed register usage throughout the stub:
4252 const Register object = a0; // Object (lhs).
4253 Register map = a3; // Map of the object.
4254 const Register function = a1; // Function (rhs).
4255 const Register prototype = t0; // Prototype of the function.
4256 const Register inline_site = t5;
4257 const Register scratch = a2;
4259 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
4261 Label slow, loop, is_instance, is_not_instance, not_js_object;
4263 if (!HasArgsInRegisters()) {
4264 __ lw(object, MemOperand(sp, 1 * kPointerSize));
4265 __ lw(function, MemOperand(sp, 0));
4268 // Check that the left hand is a JS object and load map.
4269 __ JumpIfSmi(object, ¬_js_object);
4270 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
4272 // If there is a call site cache don't look in the global cache, but do the
4273 // real lookup and update the call site cache.
4274 if (!HasCallSiteInlineCheck()) {
4276 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
4277 __ Branch(&miss, ne, function, Operand(at));
4278 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
4279 __ Branch(&miss, ne, map, Operand(at));
4280 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4281 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4286 // Get the prototype of the function.
4287 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
4289 // Check that the function prototype is a JS object.
4290 __ JumpIfSmi(prototype, &slow);
4291 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4293 // Update the global instanceof or call site inlined cache with the current
4294 // map and function. The cached answer will be set when it is known below.
4295 if (!HasCallSiteInlineCheck()) {
4296 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4297 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4299 ASSERT(HasArgsInRegisters());
4300 // Patch the (relocated) inlined map check.
4302 // The offset was stored in t0 safepoint slot.
4303 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
4304 __ LoadFromSafepointRegisterSlot(scratch, t0);
4305 __ Subu(inline_site, ra, scratch);
4306 // Get the map location in scratch and patch it.
4307 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
4308 __ sw(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
4311 // Register mapping: a3 is object map and t0 is function prototype.
4312 // Get prototype of object into a2.
4313 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
4315 // We don't need map any more. Use it as a scratch register.
4316 Register scratch2 = map;
4319 // Loop through the prototype chain looking for the function prototype.
4320 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4322 __ Branch(&is_instance, eq, scratch, Operand(prototype));
4323 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
4324 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
4325 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
4328 __ bind(&is_instance);
4329 ASSERT(Smi::FromInt(0) == 0);
4330 if (!HasCallSiteInlineCheck()) {
4331 __ mov(v0, zero_reg);
4332 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4334 // Patch the call site to return true.
4335 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4336 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4337 // Get the boolean result location in scratch and patch it.
4338 __ PatchRelocatedValue(inline_site, scratch, v0);
4340 if (!ReturnTrueFalseObject()) {
4341 ASSERT_EQ(Smi::FromInt(0), 0);
4342 __ mov(v0, zero_reg);
4345 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4347 __ bind(&is_not_instance);
4348 if (!HasCallSiteInlineCheck()) {
4349 __ li(v0, Operand(Smi::FromInt(1)));
4350 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4352 // Patch the call site to return false.
4353 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4354 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4355 // Get the boolean result location in scratch and patch it.
4356 __ PatchRelocatedValue(inline_site, scratch, v0);
4358 if (!ReturnTrueFalseObject()) {
4359 __ li(v0, Operand(Smi::FromInt(1)));
4363 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4365 Label object_not_null, object_not_null_or_smi;
4366 __ bind(¬_js_object);
4367 // Before null, smi and string value checks, check that the rhs is a function
4368 // as for a non-function rhs an exception needs to be thrown.
4369 __ JumpIfSmi(function, &slow);
4370 __ GetObjectType(function, scratch2, scratch);
4371 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
4373 // Null is not instance of anything.
4374 __ Branch(&object_not_null,
4377 Operand(masm->isolate()->factory()->null_value()));
4378 __ li(v0, Operand(Smi::FromInt(1)));
4379 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4381 __ bind(&object_not_null);
4382 // Smi values are not instances of anything.
4383 __ JumpIfNotSmi(object, &object_not_null_or_smi);
4384 __ li(v0, Operand(Smi::FromInt(1)));
4385 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4387 __ bind(&object_not_null_or_smi);
4388 // String values are not instances of anything.
4389 __ IsObjectJSStringType(object, scratch, &slow);
4390 __ li(v0, Operand(Smi::FromInt(1)));
4391 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4393 // Slow-case. Tail call builtin.
4395 if (!ReturnTrueFalseObject()) {
4396 if (HasArgsInRegisters()) {
4399 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4402 FrameScope scope(masm, StackFrame::INTERNAL);
4404 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4407 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4408 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
4409 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4410 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4415 Register InstanceofStub::left() { return a0; }
4418 Register InstanceofStub::right() { return a1; }
4421 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
4422 // The displacement is the offset of the last parameter (if any)
4423 // relative to the frame pointer.
4424 const int kDisplacement =
4425 StandardFrameConstants::kCallerSPOffset - kPointerSize;
4427 // Check that the key is a smiGenerateReadElement.
4429 __ JumpIfNotSmi(a1, &slow);
4431 // Check if the calling frame is an arguments adaptor frame.
4433 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4434 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4438 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4440 // Check index (a1) against formal parameters count limit passed in
4441 // through register a0. Use unsigned comparison to get negative
4443 __ Branch(&slow, hs, a1, Operand(a0));
4445 // Read the argument from the stack and return it.
4446 __ subu(a3, a0, a1);
4447 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4448 __ Addu(a3, fp, Operand(t3));
4449 __ lw(v0, MemOperand(a3, kDisplacement));
4452 // Arguments adaptor case: Check index (a1) against actual arguments
4453 // limit found in the arguments adaptor frame. Use unsigned
4454 // comparison to get negative check for free.
4456 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4457 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
4459 // Read the argument from the adaptor frame and return it.
4460 __ subu(a3, a0, a1);
4461 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4462 __ Addu(a3, a2, Operand(t3));
4463 __ lw(v0, MemOperand(a3, kDisplacement));
4466 // Slow-case: Handle non-smi or out-of-bounds access to arguments
4467 // by calling the runtime system.
4470 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4474 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
4475 // sp[0] : number of parameters
4476 // sp[4] : receiver displacement
4478 // Check if the calling frame is an arguments adaptor frame.
4480 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4481 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4485 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4487 // Patch the arguments.length and the parameters pointer in the current frame.
4488 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4489 __ sw(a2, MemOperand(sp, 0 * kPointerSize));
4491 __ Addu(a3, a3, Operand(t3));
4492 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
4493 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4496 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4500 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4502 // sp[0] : number of parameters (tagged)
4503 // sp[4] : address of receiver argument
4505 // Registers used over whole function:
4506 // t2 : allocated object (tagged)
4507 // t5 : mapped parameter count (tagged)
4509 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4510 // a1 = parameter count (tagged)
4512 // Check if the calling frame is an arguments adaptor frame.
4514 Label adaptor_frame, try_allocate;
4515 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4516 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4517 __ Branch(&adaptor_frame,
4520 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4522 // No adaptor, parameter count = argument count.
4524 __ b(&try_allocate);
4525 __ nop(); // Branch delay slot nop.
4527 // We have an adaptor frame. Patch the parameters pointer.
4528 __ bind(&adaptor_frame);
4529 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4531 __ Addu(a3, a3, Operand(t6));
4532 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4533 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4535 // a1 = parameter count (tagged)
4536 // a2 = argument count (tagged)
4537 // Compute the mapped parameter count = min(a1, a2) in a1.
4539 __ Branch(&skip_min, lt, a1, Operand(a2));
4543 __ bind(&try_allocate);
4545 // Compute the sizes of backing store, parameter map, and arguments object.
4546 // 1. Parameter map, has 2 extra words containing context and backing store.
4547 const int kParameterMapHeaderSize =
4548 FixedArray::kHeaderSize + 2 * kPointerSize;
4549 // If there are no mapped parameters, we do not need the parameter_map.
4550 Label param_map_size;
4551 ASSERT_EQ(0, Smi::FromInt(0));
4552 __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg));
4553 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
4555 __ addiu(t5, t5, kParameterMapHeaderSize);
4556 __ bind(¶m_map_size);
4558 // 2. Backing store.
4560 __ Addu(t5, t5, Operand(t6));
4561 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
4563 // 3. Arguments object.
4564 __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
4566 // Do the allocation of all three objects in one go.
4567 __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
4569 // v0 = address of new object(s) (tagged)
4570 // a2 = argument count (tagged)
4571 // Get the arguments boilerplate from the current (global) context into t0.
4572 const int kNormalOffset =
4573 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
4574 const int kAliasedOffset =
4575 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
4577 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4578 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4579 Label skip2_ne, skip2_eq;
4580 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
4581 __ lw(t0, MemOperand(t0, kNormalOffset));
4584 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
4585 __ lw(t0, MemOperand(t0, kAliasedOffset));
4588 // v0 = address of new object (tagged)
4589 // a1 = mapped parameter count (tagged)
4590 // a2 = argument count (tagged)
4591 // t0 = address of boilerplate object (tagged)
4592 // Copy the JS object part.
4593 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
4594 __ lw(a3, FieldMemOperand(t0, i));
4595 __ sw(a3, FieldMemOperand(v0, i));
4598 // Set up the callee in-object property.
4599 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
4600 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
4601 const int kCalleeOffset = JSObject::kHeaderSize +
4602 Heap::kArgumentsCalleeIndex * kPointerSize;
4603 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
4605 // Use the length (smi tagged) and set that as an in-object property too.
4606 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4607 const int kLengthOffset = JSObject::kHeaderSize +
4608 Heap::kArgumentsLengthIndex * kPointerSize;
4609 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
4611 // Set up the elements pointer in the allocated arguments object.
4612 // If we allocated a parameter map, t0 will point there, otherwise
4613 // it will point to the backing store.
4614 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
4615 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4617 // v0 = address of new object (tagged)
4618 // a1 = mapped parameter count (tagged)
4619 // a2 = argument count (tagged)
4620 // t0 = address of parameter map or backing store (tagged)
4621 // Initialize parameter map. If there are no mapped arguments, we're done.
4622 Label skip_parameter_map;
4624 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
4625 // Move backing store address to a3, because it is
4626 // expected there when filling in the unmapped arguments.
4630 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
4632 __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
4633 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
4634 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
4635 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
4636 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
4638 __ Addu(t2, t0, Operand(t6));
4639 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
4640 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
4642 // Copy the parameter slots and the holes in the arguments.
4643 // We need to fill in mapped_parameter_count slots. They index the context,
4644 // where parameters are stored in reverse order, at
4645 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4646 // The mapped parameter thus need to get indices
4647 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
4648 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4649 // We loop from right to left.
4650 Label parameters_loop, parameters_test;
4652 __ lw(t5, MemOperand(sp, 0 * kPointerSize));
4653 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4654 __ Subu(t5, t5, Operand(a1));
4655 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
4657 __ Addu(a3, t0, Operand(t6));
4658 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
4660 // t2 = loop variable (tagged)
4661 // a1 = mapping index (tagged)
4662 // a3 = address of backing store (tagged)
4663 // t0 = address of parameter map (tagged)
4664 // t1 = temporary scratch (a.o., for address calculation)
4665 // t3 = the hole value
4666 __ jmp(¶meters_test);
4668 __ bind(¶meters_loop);
4669 __ Subu(t2, t2, Operand(Smi::FromInt(1)));
4671 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4672 __ Addu(t6, t0, t1);
4673 __ sw(t5, MemOperand(t6));
4674 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4675 __ Addu(t6, a3, t1);
4676 __ sw(t3, MemOperand(t6));
4677 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4678 __ bind(¶meters_test);
4679 __ Branch(¶meters_loop, ne, t2, Operand(Smi::FromInt(0)));
4681 __ bind(&skip_parameter_map);
4682 // a2 = argument count (tagged)
4683 // a3 = address of backing store (tagged)
4685 // Copy arguments header and remaining slots (if there are any).
4686 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4687 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
4688 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4690 Label arguments_loop, arguments_test;
4692 __ lw(t0, MemOperand(sp, 1 * kPointerSize));
4694 __ Subu(t0, t0, Operand(t6));
4695 __ jmp(&arguments_test);
4697 __ bind(&arguments_loop);
4698 __ Subu(t0, t0, Operand(kPointerSize));
4699 __ lw(t2, MemOperand(t0, 0));
4701 __ Addu(t1, a3, Operand(t6));
4702 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
4703 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4705 __ bind(&arguments_test);
4706 __ Branch(&arguments_loop, lt, t5, Operand(a2));
4708 // Return and remove the on-stack parameters.
4711 // Do the runtime call to allocate the arguments object.
4712 // a2 = argument count (tagged)
4714 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
4715 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4719 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4720 // sp[0] : number of parameters
4721 // sp[4] : receiver displacement
4723 // Check if the calling frame is an arguments adaptor frame.
4724 Label adaptor_frame, try_allocate, runtime;
4725 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4726 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4727 __ Branch(&adaptor_frame,
4730 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4732 // Get the length from the frame.
4733 __ lw(a1, MemOperand(sp, 0));
4734 __ Branch(&try_allocate);
4736 // Patch the arguments.length and the parameters pointer.
4737 __ bind(&adaptor_frame);
4738 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4739 __ sw(a1, MemOperand(sp, 0));
4740 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
4741 __ Addu(a3, a2, Operand(at));
4743 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4744 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4746 // Try the new space allocation. Start out with computing the size
4747 // of the arguments object and the elements array in words.
4748 Label add_arguments_object;
4749 __ bind(&try_allocate);
4750 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
4751 __ srl(a1, a1, kSmiTagSize);
4753 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
4754 __ bind(&add_arguments_object);
4755 __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
4757 // Do the allocation of both objects in one go.
4758 __ AllocateInNewSpace(a1,
4763 static_cast<AllocationFlags>(TAG_OBJECT |
4766 // Get the arguments boilerplate from the current (global) context.
4767 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4768 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4769 __ lw(t0, MemOperand(t0, Context::SlotOffset(
4770 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
4772 // Copy the JS object part.
4773 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4775 // Get the length (smi tagged) and set that as an in-object property too.
4776 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4777 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4778 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
4779 Heap::kArgumentsLengthIndex * kPointerSize));
4782 __ Branch(&done, eq, a1, Operand(zero_reg));
4784 // Get the parameters pointer from the stack.
4785 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
4787 // Set up the elements pointer in the allocated arguments object and
4788 // initialize the header in the elements fixed array.
4789 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
4790 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4791 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4792 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
4793 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
4794 // Untag the length for the loop.
4795 __ srl(a1, a1, kSmiTagSize);
4797 // Copy the fixed array slots.
4799 // Set up t0 to point to the first array slot.
4800 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4802 // Pre-decrement a2 with kPointerSize on each iteration.
4803 // Pre-decrement in order to skip receiver.
4804 __ Addu(a2, a2, Operand(-kPointerSize));
4805 __ lw(a3, MemOperand(a2));
4806 // Post-increment t0 with kPointerSize on each iteration.
4807 __ sw(a3, MemOperand(t0));
4808 __ Addu(t0, t0, Operand(kPointerSize));
4809 __ Subu(a1, a1, Operand(1));
4810 __ Branch(&loop, ne, a1, Operand(zero_reg));
4812 // Return and remove the on-stack parameters.
4816 // Do the runtime call to allocate the arguments object.
4818 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
4822 void RegExpExecStub::Generate(MacroAssembler* masm) {
4823 // Just jump directly to runtime if native RegExp is not selected at compile
4824 // time or if regexp entry in generated code is turned off runtime switch or
4826 #ifdef V8_INTERPRETED_REGEXP
4827 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4828 #else // V8_INTERPRETED_REGEXP
4830 // Stack frame on entry.
4831 // sp[0]: last_match_info (expected JSArray)
4832 // sp[4]: previous index
4833 // sp[8]: subject string
4834 // sp[12]: JSRegExp object
4836 const int kLastMatchInfoOffset = 0 * kPointerSize;
4837 const int kPreviousIndexOffset = 1 * kPointerSize;
4838 const int kSubjectOffset = 2 * kPointerSize;
4839 const int kJSRegExpOffset = 3 * kPointerSize;
4841 Isolate* isolate = masm->isolate();
4843 Label runtime, invoke_regexp;
4845 // Allocation of registers for this function. These are in callee save
4846 // registers and will be preserved by the call to the native RegExp code, as
4847 // this code is called using the normal C calling convention. When calling
4848 // directly from generated code the native RegExp code will not do a GC and
4849 // therefore the content of these registers are safe to use after the call.
4850 // MIPS - using s0..s2, since we are not using CEntry Stub.
4851 Register subject = s0;
4852 Register regexp_data = s1;
4853 Register last_match_info_elements = s2;
4855 // Ensure that a RegExp stack is allocated.
4856 ExternalReference address_of_regexp_stack_memory_address =
4857 ExternalReference::address_of_regexp_stack_memory_address(
4859 ExternalReference address_of_regexp_stack_memory_size =
4860 ExternalReference::address_of_regexp_stack_memory_size(isolate);
4861 __ li(a0, Operand(address_of_regexp_stack_memory_size));
4862 __ lw(a0, MemOperand(a0, 0));
4863 __ Branch(&runtime, eq, a0, Operand(zero_reg));
4865 // Check that the first argument is a JSRegExp object.
4866 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
4867 STATIC_ASSERT(kSmiTag == 0);
4868 __ JumpIfSmi(a0, &runtime);
4869 __ GetObjectType(a0, a1, a1);
4870 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
4872 // Check that the RegExp has been compiled (data contains a fixed array).
4873 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
4874 if (FLAG_debug_code) {
4875 __ And(t0, regexp_data, Operand(kSmiTagMask));
4877 "Unexpected type for RegExp data, FixedArray expected",
4880 __ GetObjectType(regexp_data, a0, a0);
4882 "Unexpected type for RegExp data, FixedArray expected",
4884 Operand(FIXED_ARRAY_TYPE));
4887 // regexp_data: RegExp data (FixedArray)
4888 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4889 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4890 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4892 // regexp_data: RegExp data (FixedArray)
4893 // Check that the number of captures fit in the static offsets vector buffer.
4895 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4896 // Calculate number of capture registers (number_of_captures + 1) * 2. This
4897 // uses the asumption that smis are 2 * their untagged value.
4898 STATIC_ASSERT(kSmiTag == 0);
4899 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4900 __ Addu(a2, a2, Operand(2)); // a2 was a smi.
4901 // Check that the static offsets vector buffer is large enough.
4902 __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
4904 // a2: Number of capture registers
4905 // regexp_data: RegExp data (FixedArray)
4906 // Check that the second argument is a string.
4907 __ lw(subject, MemOperand(sp, kSubjectOffset));
4908 __ JumpIfSmi(subject, &runtime);
4909 __ GetObjectType(subject, a0, a0);
4910 __ And(a0, a0, Operand(kIsNotStringMask));
4911 STATIC_ASSERT(kStringTag == 0);
4912 __ Branch(&runtime, ne, a0, Operand(zero_reg));
4914 // Get the length of the string to r3.
4915 __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
4917 // a2: Number of capture registers
4918 // a3: Length of subject string as a smi
4919 // subject: Subject string
4920 // regexp_data: RegExp data (FixedArray)
4921 // Check that the third argument is a positive smi less than the subject
4922 // string length. A negative value will be greater (unsigned comparison).
4923 __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
4924 __ JumpIfNotSmi(a0, &runtime);
4925 __ Branch(&runtime, ls, a3, Operand(a0));
4927 // a2: Number of capture registers
4928 // subject: Subject string
4929 // regexp_data: RegExp data (FixedArray)
4930 // Check that the fourth object is a JSArray object.
4931 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
4932 __ JumpIfSmi(a0, &runtime);
4933 __ GetObjectType(a0, a1, a1);
4934 __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
4935 // Check that the JSArray is in fast case.
4936 __ lw(last_match_info_elements,
4937 FieldMemOperand(a0, JSArray::kElementsOffset));
4938 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4939 __ Branch(&runtime, ne, a0, Operand(
4940 isolate->factory()->fixed_array_map()));
4941 // Check that the last match info has space for the capture registers and the
4942 // additional information.
4944 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4945 __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
4946 __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
4947 __ Branch(&runtime, gt, a2, Operand(at));
4949 // Reset offset for possibly sliced string.
4950 __ mov(t0, zero_reg);
4951 // subject: Subject string
4952 // regexp_data: RegExp data (FixedArray)
4953 // Check the representation and encoding of the subject string.
4955 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4956 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
4957 // First check for flat string. None of the following string type tests will
4958 // succeed if subject is not a string or a short external string.
4961 Operand(kIsNotStringMask |
4962 kStringRepresentationMask |
4963 kShortExternalStringMask));
4964 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
4965 __ Branch(&seq_string, eq, a1, Operand(zero_reg));
4967 // subject: Subject string
4968 // a0: instance type if Subject string
4969 // regexp_data: RegExp data (FixedArray)
4970 // a1: whether subject is a string and if yes, its string representation
4971 // Check for flat cons string or sliced string.
4972 // A flat cons string is a cons string where the second part is the empty
4973 // string. In that case the subject string is just the first part of the cons
4974 // string. Also in this case the first part of the cons string is known to be
4975 // a sequential string or an external string.
4976 // In the case of a sliced string its offset has to be taken into account.
4977 Label cons_string, external_string, check_encoding;
4978 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
4979 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
4980 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
4981 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
4982 __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
4983 __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
4985 // Catch non-string subject or short external string.
4986 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
4987 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
4988 __ Branch(&runtime, ne, at, Operand(zero_reg));
4990 // String is sliced.
4991 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
4992 __ sra(t0, t0, kSmiTagSize);
4993 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
4994 // t5: offset of sliced string, smi-tagged.
4995 __ jmp(&check_encoding);
4996 // String is a cons string, check whether it is flat.
4997 __ bind(&cons_string);
4998 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
4999 __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
5000 __ Branch(&runtime, ne, a0, Operand(a1));
5001 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
5002 // Is first part of cons or parent of slice a flat string?
5003 __ bind(&check_encoding);
5004 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5005 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
5006 STATIC_ASSERT(kSeqStringTag == 0);
5007 __ And(at, a0, Operand(kStringRepresentationMask));
5008 __ Branch(&external_string, ne, at, Operand(zero_reg));
5010 __ bind(&seq_string);
5011 // subject: Subject string
5012 // regexp_data: RegExp data (FixedArray)
5013 // a0: Instance type of subject string
5014 STATIC_ASSERT(kStringEncodingMask == 4);
5015 STATIC_ASSERT(kAsciiStringTag == 4);
5016 STATIC_ASSERT(kTwoByteStringTag == 0);
5017 // Find the code object based on the assumptions above.
5018 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
5019 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
5020 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
5021 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
5022 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
5024 // Check that the irregexp code has been generated for the actual string
5025 // encoding. If it has, the field contains a code object otherwise it contains
5026 // a smi (code flushing support).
5027 __ JumpIfSmi(t9, &runtime);
5029 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5031 // subject: Subject string
5032 // regexp_data: RegExp data (FixedArray)
5033 // Load used arguments before starting to push arguments for call to native
5034 // RegExp code to avoid handling changing stack height.
5035 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
5036 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
5038 // a1: previous index
5039 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5041 // subject: Subject string
5042 // regexp_data: RegExp data (FixedArray)
5043 // All checks done. Now push arguments for native regexp code.
5044 __ IncrementCounter(isolate->counters()->regexp_entry_native(),
5047 // Isolates: note we add an additional parameter here (isolate pointer).
5048 const int kRegExpExecuteArguments = 8;
5049 const int kParameterRegisters = 4;
5050 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
5052 // Stack pointer now points to cell where return address is to be written.
5053 // Arguments are before that on the stack or in registers, meaning we
5054 // treat the return address as argument 5. Thus every argument after that
5055 // needs to be shifted back by 1. Since DirectCEntryStub will handle
5056 // allocating space for the c argument slots, we don't need to calculate
5057 // that into the argument positions on the stack. This is how the stack will
5058 // look (sp meaning the value of sp at this moment):
5059 // [sp + 4] - Argument 8
5060 // [sp + 3] - Argument 7
5061 // [sp + 2] - Argument 6
5062 // [sp + 1] - Argument 5
5063 // [sp + 0] - saved ra
5065 // Argument 8: Pass current isolate address.
5066 // CFunctionArgumentOperand handles MIPS stack argument slots.
5067 __ li(a0, Operand(ExternalReference::isolate_address()));
5068 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
5070 // Argument 7: Indicate that this is a direct call from JavaScript.
5071 __ li(a0, Operand(1));
5072 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
5074 // Argument 6: Start (high end) of backtracking stack memory area.
5075 __ li(a0, Operand(address_of_regexp_stack_memory_address));
5076 __ lw(a0, MemOperand(a0, 0));
5077 __ li(a2, Operand(address_of_regexp_stack_memory_size));
5078 __ lw(a2, MemOperand(a2, 0));
5079 __ addu(a0, a0, a2);
5080 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
5082 // Argument 5: static offsets vector buffer.
5084 ExternalReference::address_of_static_offsets_vector(isolate)));
5085 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
5087 // For arguments 4 and 3 get string length, calculate start of string data
5088 // and calculate the shift of the index (0 for ASCII and 1 for two byte).
5089 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
5090 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
5091 // Load the length from the original subject string from the previous stack
5092 // frame. Therefore we have to use fp, which points exactly to two pointer
5093 // sizes below the previous sp. (Because creating a new stack frame pushes
5094 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
5095 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
5096 // If slice offset is not 0, load the length from the original sliced string.
5097 // Argument 4, a3: End of string data
5098 // Argument 3, a2: Start of string data
5099 // Prepare start and end index of the input.
5100 __ sllv(t1, t0, a3);
5101 __ addu(t0, t2, t1);
5102 __ sllv(t1, a1, a3);
5103 __ addu(a2, t0, t1);
5105 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
5106 __ sra(t2, t2, kSmiTagSize);
5107 __ sllv(t1, t2, a3);
5108 __ addu(a3, t0, t1);
5109 // Argument 2 (a1): Previous index.
5112 // Argument 1 (a0): Subject string.
5113 __ mov(a0, subject);
5115 // Locate the code entry and call it.
5116 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
5117 DirectCEntryStub stub;
5118 stub.GenerateCall(masm, t9);
5120 __ LeaveExitFrame(false, no_reg);
5123 // subject: subject string (callee saved)
5124 // regexp_data: RegExp data (callee saved)
5125 // last_match_info_elements: Last match info elements (callee saved)
5127 // Check the result.
5130 __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
5132 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
5133 // If not exception it can only be retry. Handle that in the runtime system.
5134 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
5135 // Result must now be exception. If there is no pending exception already a
5136 // stack overflow (on the backtrack stack) was detected in RegExp code but
5137 // haven't created the exception yet. Handle that in the runtime system.
5138 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
5139 __ li(a1, Operand(isolate->factory()->the_hole_value()));
5140 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
5142 __ lw(v0, MemOperand(a2, 0));
5143 __ Branch(&runtime, eq, v0, Operand(a1));
5145 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
5147 // Check if the exception is a termination. If so, throw as uncatchable.
5148 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
5149 Label termination_exception;
5150 __ Branch(&termination_exception, eq, v0, Operand(a0));
5154 __ bind(&termination_exception);
5155 __ ThrowUncatchable(v0);
5158 // For failure and exception return null.
5159 __ li(v0, Operand(isolate->factory()->null_value()));
5162 // Process the result from the native regexp code.
5165 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
5166 // Calculate number of capture registers (number_of_captures + 1) * 2.
5167 STATIC_ASSERT(kSmiTag == 0);
5168 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5169 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
5171 // a1: number of capture registers
5172 // subject: subject string
5173 // Store the capture count.
5174 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
5175 __ sw(a2, FieldMemOperand(last_match_info_elements,
5176 RegExpImpl::kLastCaptureCountOffset));
5177 // Store last subject and last input.
5179 FieldMemOperand(last_match_info_elements,
5180 RegExpImpl::kLastSubjectOffset));
5181 __ mov(a2, subject);
5182 __ RecordWriteField(last_match_info_elements,
5183 RegExpImpl::kLastSubjectOffset,
5189 FieldMemOperand(last_match_info_elements,
5190 RegExpImpl::kLastInputOffset));
5191 __ RecordWriteField(last_match_info_elements,
5192 RegExpImpl::kLastInputOffset,
5198 // Get the static offsets vector filled by the native regexp code.
5199 ExternalReference address_of_static_offsets_vector =
5200 ExternalReference::address_of_static_offsets_vector(isolate);
5201 __ li(a2, Operand(address_of_static_offsets_vector));
5203 // a1: number of capture registers
5204 // a2: offsets vector
5205 Label next_capture, done;
5206 // Capture register counter starts from number of capture registers and
5207 // counts down until wrapping after zero.
5209 last_match_info_elements,
5210 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
5211 __ bind(&next_capture);
5212 __ Subu(a1, a1, Operand(1));
5213 __ Branch(&done, lt, a1, Operand(zero_reg));
5214 // Read the value from the static offsets vector buffer.
5215 __ lw(a3, MemOperand(a2, 0));
5216 __ addiu(a2, a2, kPointerSize);
5217 // Store the smi value in the last match info.
5218 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
5219 __ sw(a3, MemOperand(a0, 0));
5220 __ Branch(&next_capture, USE_DELAY_SLOT);
5221 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
5225 // Return last match info.
5226 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
5229 // External string. Short external strings have already been ruled out.
5231 __ bind(&external_string);
5232 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5233 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
5234 if (FLAG_debug_code) {
5235 // Assert that we do not have a cons or slice (indirect strings) here.
5236 // Sequential strings have already been ruled out.
5237 __ And(at, a0, Operand(kIsIndirectStringMask));
5239 "external string expected, but not found",
5244 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
5245 // Move the pointer so that offset-wise, it looks like a sequential string.
5246 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
5249 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
5250 __ jmp(&seq_string);
5252 // Do the runtime call to execute the regexp.
5254 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5255 #endif // V8_INTERPRETED_REGEXP
5259 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
5260 const int kMaxInlineLength = 100;
5263 __ lw(a1, MemOperand(sp, kPointerSize * 2));
5264 STATIC_ASSERT(kSmiTag == 0);
5265 STATIC_ASSERT(kSmiTagSize == 1);
5266 __ JumpIfNotSmi(a1, &slowcase);
5267 __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
5268 // Smi-tagging is equivalent to multiplying by 2.
5269 // Allocate RegExpResult followed by FixedArray with size in ebx.
5270 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
5271 // Elements: [Map][Length][..elements..]
5272 // Size of JSArray with two in-object properties and the header of a
5275 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
5276 __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
5277 __ Addu(a2, t1, Operand(objects_size));
5278 __ AllocateInNewSpace(
5279 a2, // In: Size, in words.
5280 v0, // Out: Start of allocation (tagged).
5281 a3, // Scratch register.
5282 t0, // Scratch register.
5284 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
5285 // v0: Start of allocated area, object-tagged.
5286 // a1: Number of elements in array, as smi.
5287 // t1: Number of elements, untagged.
5289 // Set JSArray map to global.regexp_result_map().
5290 // Set empty properties FixedArray.
5291 // Set elements to point to FixedArray allocated right after the JSArray.
5292 // Interleave operations for better latency.
5293 __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
5294 __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
5295 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
5296 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
5297 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
5298 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
5299 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
5300 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
5302 // Set input, index and length fields from arguments.
5303 __ lw(a1, MemOperand(sp, kPointerSize * 0));
5304 __ lw(a2, MemOperand(sp, kPointerSize * 1));
5305 __ lw(t2, MemOperand(sp, kPointerSize * 2));
5306 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
5307 __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
5308 __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
5310 // Fill out the elements FixedArray.
5311 // v0: JSArray, tagged.
5312 // a3: FixedArray, tagged.
5313 // t1: Number of elements in array, untagged.
5316 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
5317 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
5318 // Set FixedArray length.
5319 __ sll(t2, t1, kSmiTagSize);
5320 __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
5321 // Fill contents of fixed-array with the-hole.
5322 __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
5323 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5324 // Fill fixed array elements with hole.
5325 // v0: JSArray, tagged.
5327 // a3: Start of elements in FixedArray.
5328 // t1: Number of elements to fill.
5330 __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
5331 __ addu(t1, t1, a3); // Point past last element to store.
5333 __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
5334 __ sw(a2, MemOperand(a3));
5335 __ Branch(&loop, USE_DELAY_SLOT);
5336 __ addiu(a3, a3, kPointerSize); // In branch delay slot.
5342 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
5346 static void GenerateRecordCallTarget(MacroAssembler* masm) {
5347 // Cache the called function in a global property cell. Cache states
5348 // are uninitialized, monomorphic (indicated by a JSFunction), and
5350 // a1 : the function to call
5351 // a2 : cache cell for call target
5354 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
5355 masm->isolate()->heap()->undefined_value());
5356 ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
5357 masm->isolate()->heap()->the_hole_value());
5359 // Load the cache state into a3.
5360 __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
5362 // A monomorphic cache hit or an already megamorphic state: invoke the
5363 // function without changing the state.
5364 __ Branch(&done, eq, a3, Operand(a1));
5365 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5366 __ Branch(&done, eq, a3, Operand(at));
5368 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
5370 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5372 __ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
5373 // An uninitialized cache is patched with the function.
5374 // Store a1 in the delay slot. This may or may not get overwritten depending
5375 // on the result of the comparison.
5376 __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
5377 // No need for a write barrier here - cells are rescanned.
5379 // MegamorphicSentinel is an immortal immovable object (undefined) so no
5380 // write-barrier is needed.
5381 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5382 __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
5388 void CallFunctionStub::Generate(MacroAssembler* masm) {
5389 // a1 : the function to call
5390 // a2 : cache cell for call target
5391 Label slow, non_function;
5393 // The receiver might implicitly be the global object. This is
5394 // indicated by passing the hole as the receiver to the call
5396 if (ReceiverMightBeImplicit()) {
5398 // Get the receiver from the stack.
5399 // function, receiver [, arguments]
5400 __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
5401 // Call as function is indicated with the hole.
5402 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5403 __ Branch(&call, ne, t0, Operand(at));
5404 // Patch the receiver on the stack with the global receiver object.
5405 __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
5406 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
5407 __ sw(a2, MemOperand(sp, argc_ * kPointerSize));
5411 // Check that the function is really a JavaScript function.
5412 // a1: pushed function (to be verified)
5413 __ JumpIfSmi(a1, &non_function);
5414 // Get the map of the function object.
5415 __ GetObjectType(a1, a2, a2);
5416 __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
5418 // Fast-case: Invoke the function now.
5419 // a1: pushed function
5420 ParameterCount actual(argc_);
5422 if (ReceiverMightBeImplicit()) {
5423 Label call_as_function;
5424 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5425 __ Branch(&call_as_function, eq, t0, Operand(at));
5426 __ InvokeFunction(a1,
5431 __ bind(&call_as_function);
5433 __ InvokeFunction(a1,
5439 // Slow-case: Non-function called.
5441 // Check for function proxy.
5442 __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
5443 __ push(a1); // Put proxy as additional argument.
5444 __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
5445 __ li(a2, Operand(0, RelocInfo::NONE));
5446 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
5447 __ SetCallKind(t1, CALL_AS_METHOD);
5449 Handle<Code> adaptor =
5450 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5451 __ Jump(adaptor, RelocInfo::CODE_TARGET);
5454 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5455 // of the original receiver from the call site).
5456 __ bind(&non_function);
5457 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
5458 __ li(a0, Operand(argc_)); // Set up the number of arguments.
5459 __ mov(a2, zero_reg);
5460 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
5461 __ SetCallKind(t1, CALL_AS_METHOD);
5462 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5463 RelocInfo::CODE_TARGET);
5467 void CallConstructStub::Generate(MacroAssembler* masm) {
5468 // a0 : number of arguments
5469 // a1 : the function to call
5470 // a2 : cache cell for call target
5471 Label slow, non_function_call;
5473 // Check that the function is not a smi.
5474 __ JumpIfSmi(a1, &non_function_call);
5475 // Check that the function is a JSFunction.
5476 __ GetObjectType(a1, a3, a3);
5477 __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
5479 if (RecordCallTarget()) {
5480 GenerateRecordCallTarget(masm);
5483 // Jump to the function-specific construct stub.
5484 __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
5485 __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
5486 __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
5489 // a0: number of arguments
5490 // a1: called object
5494 __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
5495 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
5498 __ bind(&non_function_call);
5499 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
5501 // Set expected number of arguments to zero (not changing r0).
5502 __ li(a2, Operand(0, RelocInfo::NONE));
5503 __ SetCallKind(t1, CALL_AS_METHOD);
5504 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5505 RelocInfo::CODE_TARGET);
5509 // Unfortunately you have to run without snapshots to see most of these
5510 // names in the profile since most compare stubs end up in the snapshot.
5511 void CompareStub::PrintName(StringStream* stream) {
5512 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5513 (lhs_.is(a1) && rhs_.is(a0)));
5514 const char* cc_name;
5516 case lt: cc_name = "LT"; break;
5517 case gt: cc_name = "GT"; break;
5518 case le: cc_name = "LE"; break;
5519 case ge: cc_name = "GE"; break;
5520 case eq: cc_name = "EQ"; break;
5521 case ne: cc_name = "NE"; break;
5522 default: cc_name = "UnknownCondition"; break;
5524 bool is_equality = cc_ == eq || cc_ == ne;
5525 stream->Add("CompareStub_%s", cc_name);
5526 stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
5527 stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
5528 if (strict_ && is_equality) stream->Add("_STRICT");
5529 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5530 if (!include_number_compare_) stream->Add("_NO_NUMBER");
5531 if (!include_smi_compare_) stream->Add("_NO_SMI");
5535 int CompareStub::MinorKey() {
5536 // Encode the two parameters in a unique 16 bit value.
5537 ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
5538 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5539 (lhs_.is(a1) && rhs_.is(a0)));
5540 return ConditionField::encode(static_cast<unsigned>(cc_))
5541 | RegisterField::encode(lhs_.is(a0))
5542 | StrictField::encode(strict_)
5543 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5544 | IncludeSmiCompareField::encode(include_smi_compare_);
5548 // StringCharCodeAtGenerator.
5549 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5552 Label got_char_code;
5553 Label sliced_string;
5555 ASSERT(!t0.is(index_));
5556 ASSERT(!t0.is(result_));
5557 ASSERT(!t0.is(object_));
5559 // If the receiver is a smi trigger the non-string case.
5560 __ JumpIfSmi(object_, receiver_not_string_);
5562 // Fetch the instance type of the receiver into result register.
5563 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5564 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5565 // If the receiver is not a string trigger the non-string case.
5566 __ And(t0, result_, Operand(kIsNotStringMask));
5567 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
5569 // If the index is non-smi trigger the non-smi case.
5570 __ JumpIfNotSmi(index_, &index_not_smi_);
5572 __ bind(&got_smi_index_);
5574 // Check for index out of range.
5575 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
5576 __ Branch(index_out_of_range_, ls, t0, Operand(index_));
5578 __ sra(index_, index_, kSmiTagSize);
5580 StringCharLoadGenerator::Generate(masm,
5586 __ sll(result_, result_, kSmiTagSize);
5591 void StringCharCodeAtGenerator::GenerateSlow(
5592 MacroAssembler* masm,
5593 const RuntimeCallHelper& call_helper) {
5594 __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5596 // Index is not a smi.
5597 __ bind(&index_not_smi_);
5598 // If index is a heap number, try converting it to an integer.
5601 Heap::kHeapNumberMapRootIndex,
5604 call_helper.BeforeCall(masm);
5605 // Consumed by runtime conversion function:
5606 __ Push(object_, index_);
5607 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5608 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5610 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5611 // NumberToSmi discards numbers that are not exact integers.
5612 __ CallRuntime(Runtime::kNumberToSmi, 1);
5615 // Save the conversion result before the pop instructions below
5616 // have a chance to overwrite it.
5618 __ Move(index_, v0);
5620 // Reload the instance type.
5621 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5622 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5623 call_helper.AfterCall(masm);
5624 // If index is still not a smi, it must be out of range.
5625 __ JumpIfNotSmi(index_, index_out_of_range_);
5626 // Otherwise, return to the fast path.
5627 __ Branch(&got_smi_index_);
5629 // Call runtime. We get here when the receiver is a string and the
5630 // index is a number, but the code of getting the actual character
5631 // is too complex (e.g., when the string needs to be flattened).
5632 __ bind(&call_runtime_);
5633 call_helper.BeforeCall(masm);
5634 __ sll(index_, index_, kSmiTagSize);
5635 __ Push(object_, index_);
5636 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5638 __ Move(result_, v0);
5640 call_helper.AfterCall(masm);
5643 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5647 // -------------------------------------------------------------------------
5648 // StringCharFromCodeGenerator
5650 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5651 // Fast case of Heap::LookupSingleCharacterStringFromCode.
5653 ASSERT(!t0.is(result_));
5654 ASSERT(!t0.is(code_));
5656 STATIC_ASSERT(kSmiTag == 0);
5657 STATIC_ASSERT(kSmiShiftSize == 0);
5658 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5661 Operand(kSmiTagMask |
5662 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5663 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
5665 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5666 // At this point code register contains smi tagged ASCII char code.
5667 STATIC_ASSERT(kSmiTag == 0);
5668 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
5669 __ Addu(result_, result_, t0);
5670 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5671 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
5672 __ Branch(&slow_case_, eq, result_, Operand(t0));
5677 void StringCharFromCodeGenerator::GenerateSlow(
5678 MacroAssembler* masm,
5679 const RuntimeCallHelper& call_helper) {
5680 __ Abort("Unexpected fallthrough to CharFromCode slow case");
5682 __ bind(&slow_case_);
5683 call_helper.BeforeCall(masm);
5685 __ CallRuntime(Runtime::kCharFromCode, 1);
5686 __ Move(result_, v0);
5688 call_helper.AfterCall(masm);
5691 __ Abort("Unexpected fallthrough from CharFromCode slow case");
5695 // -------------------------------------------------------------------------
5696 // StringCharAtGenerator
5698 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5699 char_code_at_generator_.GenerateFast(masm);
5700 char_from_code_generator_.GenerateFast(masm);
5704 void StringCharAtGenerator::GenerateSlow(
5705 MacroAssembler* masm,
5706 const RuntimeCallHelper& call_helper) {
5707 char_code_at_generator_.GenerateSlow(masm, call_helper);
5708 char_from_code_generator_.GenerateSlow(masm, call_helper);
5712 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5720 // This loop just copies one character at a time, as it is only used for
5721 // very short strings.
5723 __ addu(count, count, count);
5725 __ Branch(&done, eq, count, Operand(zero_reg));
5726 __ addu(count, dest, count); // Count now points to the last dest byte.
5729 __ lbu(scratch, MemOperand(src));
5730 __ addiu(src, src, 1);
5731 __ sb(scratch, MemOperand(dest));
5732 __ addiu(dest, dest, 1);
5733 __ Branch(&loop, lt, dest, Operand(count));
5739 enum CopyCharactersFlags {
5741 DEST_ALWAYS_ALIGNED = 2
5745 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5755 bool ascii = (flags & COPY_ASCII) != 0;
5756 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5758 if (dest_always_aligned && FLAG_debug_code) {
5759 // Check that destination is actually word aligned if the flag says
5761 __ And(scratch4, dest, Operand(kPointerAlignmentMask));
5763 "Destination of copy not aligned.",
5768 const int kReadAlignment = 4;
5769 const int kReadAlignmentMask = kReadAlignment - 1;
5770 // Ensure that reading an entire aligned word containing the last character
5771 // of a string will not read outside the allocated area (because we pad up
5772 // to kObjectAlignment).
5773 STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5774 // Assumes word reads and writes are little endian.
5775 // Nothing to do for zero characters.
5779 __ addu(count, count, count);
5781 __ Branch(&done, eq, count, Operand(zero_reg));
5784 // Must copy at least eight bytes, otherwise just do it one byte at a time.
5785 __ Subu(scratch1, count, Operand(8));
5786 __ Addu(count, dest, Operand(count));
5787 Register limit = count; // Read until src equals this.
5788 __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
5790 if (!dest_always_aligned) {
5791 // Align dest by byte copying. Copies between zero and three bytes.
5792 __ And(scratch4, dest, Operand(kReadAlignmentMask));
5794 __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
5796 __ bind(&aligned_loop);
5797 __ lbu(scratch1, MemOperand(src));
5798 __ addiu(src, src, 1);
5799 __ sb(scratch1, MemOperand(dest));
5800 __ addiu(dest, dest, 1);
5801 __ addiu(scratch4, scratch4, 1);
5802 __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
5803 __ bind(&dest_aligned);
5808 __ And(scratch4, src, Operand(kReadAlignmentMask));
5809 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
5811 // Loop for src/dst that are not aligned the same way.
5812 // This loop uses lwl and lwr instructions. These instructions
5813 // depend on the endianness, and the implementation assumes little-endian.
5817 __ lwr(scratch1, MemOperand(src));
5818 __ Addu(src, src, Operand(kReadAlignment));
5819 __ lwl(scratch1, MemOperand(src, -1));
5820 __ sw(scratch1, MemOperand(dest));
5821 __ Addu(dest, dest, Operand(kReadAlignment));
5822 __ Subu(scratch2, limit, dest);
5823 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5826 __ Branch(&byte_loop);
5829 // Copy words from src to dest, until less than four bytes left.
5830 // Both src and dest are word aligned.
5831 __ bind(&simple_loop);
5835 __ lw(scratch1, MemOperand(src));
5836 __ Addu(src, src, Operand(kReadAlignment));
5837 __ sw(scratch1, MemOperand(dest));
5838 __ Addu(dest, dest, Operand(kReadAlignment));
5839 __ Subu(scratch2, limit, dest);
5840 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5843 // Copy bytes from src to dest until dest hits limit.
5844 __ bind(&byte_loop);
5845 // Test if dest has already reached the limit.
5846 __ Branch(&done, ge, dest, Operand(limit));
5847 __ lbu(scratch1, MemOperand(src));
5848 __ addiu(src, src, 1);
5849 __ sb(scratch1, MemOperand(dest));
5850 __ addiu(dest, dest, 1);
5851 __ Branch(&byte_loop);
5857 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5866 // Register scratch3 is the general scratch register in this function.
5867 Register scratch = scratch3;
5869 // Make sure that both characters are not digits as such strings has a
5870 // different hash algorithm. Don't try to look for these in the symbol table.
5871 Label not_array_index;
5872 __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5873 __ Branch(¬_array_index,
5876 Operand(static_cast<int>('9' - '0')));
5877 __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5879 // If check failed combine both characters into single halfword.
5880 // This is required by the contract of the method: code at the
5881 // not_found branch expects this combination in c1 register.
5883 __ sll(scratch1, c2, kBitsPerByte);
5884 __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5885 __ Or(c1, c1, scratch1);
5888 not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
5890 __ bind(¬_array_index);
5891 // Calculate the two character string hash.
5892 Register hash = scratch1;
5893 StringHelper::GenerateHashInit(masm, hash, c1);
5894 StringHelper::GenerateHashAddCharacter(masm, hash, c2);
5895 StringHelper::GenerateHashGetHash(masm, hash);
5897 // Collect the two characters in a register.
5898 Register chars = c1;
5899 __ sll(scratch, c2, kBitsPerByte);
5900 __ Or(chars, chars, scratch);
5902 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5903 // hash: hash of two character string.
5905 // Load symbol table.
5906 // Load address of first element of the symbol table.
5907 Register symbol_table = c2;
5908 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5910 Register undefined = scratch4;
5911 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5913 // Calculate capacity mask from the symbol table capacity.
5914 Register mask = scratch2;
5915 __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5916 __ sra(mask, mask, 1);
5917 __ Addu(mask, mask, -1);
5919 // Calculate untagged address of the first element of the symbol table.
5920 Register first_symbol_table_element = symbol_table;
5921 __ Addu(first_symbol_table_element, symbol_table,
5922 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5925 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5926 // hash: hash of two character string
5927 // mask: capacity mask
5928 // first_symbol_table_element: address of the first element of
5930 // undefined: the undefined object
5933 // Perform a number of probes in the symbol table.
5934 const int kProbes = 4;
5935 Label found_in_symbol_table;
5936 Label next_probe[kProbes];
5937 Register candidate = scratch5; // Scratch register contains candidate.
5938 for (int i = 0; i < kProbes; i++) {
5939 // Calculate entry in symbol table.
5941 __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5943 __ mov(candidate, hash);
5946 __ And(candidate, candidate, Operand(mask));
5948 // Load the entry from the symble table.
5949 STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5950 __ sll(scratch, candidate, kPointerSizeLog2);
5951 __ Addu(scratch, scratch, first_symbol_table_element);
5952 __ lw(candidate, MemOperand(scratch));
5954 // If entry is undefined no string with this hash can be found.
5956 __ GetObjectType(candidate, scratch, scratch);
5957 __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
5959 __ Branch(not_found, eq, undefined, Operand(candidate));
5960 // Must be the hole (deleted entry).
5961 if (FLAG_debug_code) {
5962 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
5963 __ Assert(eq, "oddball in symbol table is not undefined or the hole",
5964 scratch, Operand(candidate));
5966 __ jmp(&next_probe[i]);
5968 __ bind(&is_string);
5970 // Check that the candidate is a non-external ASCII string. The instance
5971 // type is still in the scratch register from the CompareObjectType
5973 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5975 // If length is not 2 the string is not a candidate.
5976 __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5977 __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
5979 // Check if the two characters match.
5980 // Assumes that word load is little endian.
5981 __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5982 __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
5983 __ bind(&next_probe[i]);
5986 // No matching 2 character string found by probing.
5989 // Scratch register contains result when we fall through to here.
5990 Register result = candidate;
5991 __ bind(&found_in_symbol_table);
5996 void StringHelper::GenerateHashInit(MacroAssembler* masm,
5998 Register character) {
5999 // hash = seed + character + ((seed + character) << 10);
6000 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
6001 // Untag smi seed and add the character.
6003 __ addu(hash, hash, character);
6004 __ sll(at, hash, 10);
6005 __ addu(hash, hash, at);
6006 // hash ^= hash >> 6;
6007 __ srl(at, hash, 6);
6008 __ xor_(hash, hash, at);
6012 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
6014 Register character) {
6015 // hash += character;
6016 __ addu(hash, hash, character);
6017 // hash += hash << 10;
6018 __ sll(at, hash, 10);
6019 __ addu(hash, hash, at);
6020 // hash ^= hash >> 6;
6021 __ srl(at, hash, 6);
6022 __ xor_(hash, hash, at);
6026 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
6028 // hash += hash << 3;
6029 __ sll(at, hash, 3);
6030 __ addu(hash, hash, at);
6031 // hash ^= hash >> 11;
6032 __ srl(at, hash, 11);
6033 __ xor_(hash, hash, at);
6034 // hash += hash << 15;
6035 __ sll(at, hash, 15);
6036 __ addu(hash, hash, at);
6038 __ li(at, Operand(String::kHashBitMask));
6039 __ and_(hash, hash, at);
6041 // if (hash == 0) hash = 27;
6042 __ ori(at, zero_reg, StringHasher::kZeroHash);
6043 __ Movz(hash, at, hash);
6047 void SubStringStub::Generate(MacroAssembler* masm) {
6049 // Stack frame on entry.
6050 // ra: return address
6055 // This stub is called from the native-call %_SubString(...), so
6056 // nothing can be assumed about the arguments. It is tested that:
6057 // "string" is a sequential string,
6058 // both "from" and "to" are smis, and
6059 // 0 <= from <= to <= string.length.
6060 // If any of these assumptions fail, we call the runtime system.
6062 const int kToOffset = 0 * kPointerSize;
6063 const int kFromOffset = 1 * kPointerSize;
6064 const int kStringOffset = 2 * kPointerSize;
6066 __ lw(a2, MemOperand(sp, kToOffset));
6067 __ lw(a3, MemOperand(sp, kFromOffset));
6068 STATIC_ASSERT(kFromOffset == kToOffset + 4);
6069 STATIC_ASSERT(kSmiTag == 0);
6070 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
6072 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
6073 // safe in this case.
6074 __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
6075 __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
6076 // Both a2 and a3 are untagged integers.
6078 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
6080 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
6081 __ Subu(a2, a2, a3);
6083 // Make sure first argument is a string.
6084 __ lw(v0, MemOperand(sp, kStringOffset));
6085 __ JumpIfSmi(v0, &runtime);
6086 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
6087 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
6088 __ And(t0, a1, Operand(kIsNotStringMask));
6090 __ Branch(&runtime, ne, t0, Operand(zero_reg));
6092 // Short-cut for the case of trivial substring.
6094 // v0: original string
6095 // a2: result string length
6096 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
6098 __ Branch(&return_v0, eq, a2, Operand(t0));
6101 Label result_longer_than_two;
6102 // Check for special case of two character ASCII string, in which case
6103 // we do a lookup in the symbol table first.
6105 __ Branch(&result_longer_than_two, gt, a2, Operand(t0));
6106 __ Branch(&runtime, lt, a2, Operand(t0));
6108 __ JumpIfInstanceTypeIsNotSequentialAscii(a1, a1, &runtime);
6110 // Get the two characters forming the sub string.
6111 __ Addu(v0, v0, Operand(a3));
6112 __ lbu(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6113 __ lbu(t0, FieldMemOperand(v0, SeqAsciiString::kHeaderSize + 1));
6115 // Try to lookup two character string in symbol table.
6116 Label make_two_character_string;
6117 StringHelper::GenerateTwoCharacterSymbolTableProbe(
6118 masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
6121 // a2: result string length.
6122 // a3: two characters combined into halfword in little endian byte order.
6123 __ bind(&make_two_character_string);
6124 __ AllocateAsciiString(v0, a2, t0, t1, t4, &runtime);
6125 __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6128 __ bind(&result_longer_than_two);
6130 // Deal with different string types: update the index if necessary
6131 // and put the underlying string into t1.
6132 // v0: original string
6133 // a1: instance type
6135 // a3: from index (untagged)
6136 Label underlying_unpacked, sliced_string, seq_or_external_string;
6137 // If the string is not indirect, it can only be sequential or external.
6138 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
6139 STATIC_ASSERT(kIsIndirectStringMask != 0);
6140 __ And(t0, a1, Operand(kIsIndirectStringMask));
6141 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
6142 // t0 is used as a scratch register and can be overwritten in either case.
6143 __ And(t0, a1, Operand(kSlicedNotConsMask));
6144 __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
6145 // Cons string. Check whether it is flat, then fetch first part.
6146 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
6147 __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
6148 __ Branch(&runtime, ne, t1, Operand(t0));
6149 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
6150 // Update instance type.
6151 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
6152 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
6153 __ jmp(&underlying_unpacked);
6155 __ bind(&sliced_string);
6156 // Sliced string. Fetch parent and correct start index by offset.
6157 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6158 __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6159 __ sra(t0, t0, 1); // Add offset to index.
6160 __ Addu(a3, a3, t0);
6161 // Update instance type.
6162 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
6163 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
6164 __ jmp(&underlying_unpacked);
6166 __ bind(&seq_or_external_string);
6167 // Sequential or external string. Just move string to the expected register.
6170 __ bind(&underlying_unpacked);
6172 if (FLAG_string_slices) {
6174 // t1: underlying subject string
6175 // a1: instance type of underlying subject string
6177 // a3: adjusted start index (untagged)
6178 // Short slice. Copy instead of slicing.
6179 __ Branch(©_routine, lt, a2, Operand(SlicedString::kMinLength));
6180 // Allocate new sliced string. At this point we do not reload the instance
6181 // type including the string encoding because we simply rely on the info
6182 // provided by the original string. It does not matter if the original
6183 // string's encoding is wrong because we always have to recheck encoding of
6184 // the newly created string's parent anyways due to externalized strings.
6185 Label two_byte_slice, set_slice_header;
6186 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
6187 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
6188 __ And(t0, a1, Operand(kStringEncodingMask));
6189 __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
6190 __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
6191 __ jmp(&set_slice_header);
6192 __ bind(&two_byte_slice);
6193 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
6194 __ bind(&set_slice_header);
6196 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6197 __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6200 __ bind(©_routine);
6203 // t1: underlying subject string
6204 // a1: instance type of underlying subject string
6206 // a3: adjusted start index (untagged)
6207 Label two_byte_sequential, sequential_string, allocate_result;
6208 STATIC_ASSERT(kExternalStringTag != 0);
6209 STATIC_ASSERT(kSeqStringTag == 0);
6210 __ And(t0, a1, Operand(kExternalStringTag));
6211 __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
6213 // Handle external string.
6214 // Rule out short external strings.
6215 STATIC_CHECK(kShortExternalStringTag != 0);
6216 __ And(t0, a1, Operand(kShortExternalStringTag));
6217 __ Branch(&runtime, ne, t0, Operand(zero_reg));
6218 __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
6219 // t1 already points to the first character of underlying string.
6220 __ jmp(&allocate_result);
6222 __ bind(&sequential_string);
6223 // Locate first character of underlying subject string.
6224 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
6225 __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6227 __ bind(&allocate_result);
6228 // Sequential acii string. Allocate the result.
6229 STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
6230 __ And(t0, a1, Operand(kStringEncodingMask));
6231 __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
6233 // Allocate and copy the resulting ASCII string.
6234 __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
6236 // Locate first character of substring to copy.
6237 __ Addu(t1, t1, a3);
6239 // Locate first character of result.
6240 __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6242 // v0: result string
6243 // a1: first character of result string
6244 // a2: result string length
6245 // t1: first character of substring to copy
6246 STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
6247 StringHelper::GenerateCopyCharactersLong(
6248 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
6251 // Allocate and copy the resulting two-byte string.
6252 __ bind(&two_byte_sequential);
6253 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
6255 // Locate first character of substring to copy.
6256 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
6258 __ Addu(t1, t1, t0);
6259 // Locate first character of result.
6260 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6262 // v0: result string.
6263 // a1: first character of result.
6264 // a2: result length.
6265 // t1: first character of substring to copy.
6266 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
6267 StringHelper::GenerateCopyCharactersLong(
6268 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
6270 __ bind(&return_v0);
6271 Counters* counters = masm->isolate()->counters();
6272 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
6275 // Just jump to runtime to create the sub string.
6277 __ TailCallRuntime(Runtime::kSubString, 3, 1);
6281 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6286 Register scratch3) {
6287 Register length = scratch1;
6290 Label strings_not_equal, check_zero_length;
6291 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
6292 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6293 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
6294 __ bind(&strings_not_equal);
6295 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
6298 // Check if the length is zero.
6299 Label compare_chars;
6300 __ bind(&check_zero_length);
6301 STATIC_ASSERT(kSmiTag == 0);
6302 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
6303 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6306 // Compare characters.
6307 __ bind(&compare_chars);
6309 GenerateAsciiCharsCompareLoop(masm,
6310 left, right, length, scratch2, scratch3, v0,
6311 &strings_not_equal);
6313 // Characters are equal.
6314 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6319 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6325 Register scratch4) {
6326 Label result_not_equal, compare_lengths;
6327 // Find minimum length and length difference.
6328 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
6329 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6330 __ Subu(scratch3, scratch1, Operand(scratch2));
6331 Register length_delta = scratch3;
6332 __ slt(scratch4, scratch2, scratch1);
6333 __ Movn(scratch1, scratch2, scratch4);
6334 Register min_length = scratch1;
6335 STATIC_ASSERT(kSmiTag == 0);
6336 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
6339 GenerateAsciiCharsCompareLoop(masm,
6340 left, right, min_length, scratch2, scratch4, v0,
6343 // Compare lengths - strings up to min-length are equal.
6344 __ bind(&compare_lengths);
6345 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6346 // Use length_delta as result if it's zero.
6347 __ mov(scratch2, length_delta);
6348 __ mov(scratch4, zero_reg);
6349 __ mov(v0, zero_reg);
6351 __ bind(&result_not_equal);
6352 // Conditionally update the result based either on length_delta or
6353 // the last comparion performed in the loop above.
6355 __ Branch(&ret, eq, scratch2, Operand(scratch4));
6356 __ li(v0, Operand(Smi::FromInt(GREATER)));
6357 __ Branch(&ret, gt, scratch2, Operand(scratch4));
6358 __ li(v0, Operand(Smi::FromInt(LESS)));
6364 void StringCompareStub::GenerateAsciiCharsCompareLoop(
6365 MacroAssembler* masm,
6372 Label* chars_not_equal) {
6373 // Change index to run from -length to -1 by adding length to string
6374 // start. This means that loop ends when index reaches zero, which
6375 // doesn't need an additional compare.
6376 __ SmiUntag(length);
6377 __ Addu(scratch1, length,
6378 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6379 __ Addu(left, left, Operand(scratch1));
6380 __ Addu(right, right, Operand(scratch1));
6381 __ Subu(length, zero_reg, length);
6382 Register index = length; // index = -length;
6388 __ Addu(scratch3, left, index);
6389 __ lbu(scratch1, MemOperand(scratch3));
6390 __ Addu(scratch3, right, index);
6391 __ lbu(scratch2, MemOperand(scratch3));
6392 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
6393 __ Addu(index, index, 1);
6394 __ Branch(&loop, ne, index, Operand(zero_reg));
6398 void StringCompareStub::Generate(MacroAssembler* masm) {
6401 Counters* counters = masm->isolate()->counters();
6403 // Stack frame on entry.
6404 // sp[0]: right string
6405 // sp[4]: left string
6406 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
6407 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
6410 __ Branch(¬_same, ne, a0, Operand(a1));
6411 STATIC_ASSERT(EQUAL == 0);
6412 STATIC_ASSERT(kSmiTag == 0);
6413 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6414 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
6419 // Check that both objects are sequential ASCII strings.
6420 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
6422 // Compare flat ASCII strings natively. Remove arguments from stack first.
6423 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
6424 __ Addu(sp, sp, Operand(2 * kPointerSize));
6425 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
6428 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6432 void StringAddStub::Generate(MacroAssembler* masm) {
6433 Label call_runtime, call_builtin;
6434 Builtins::JavaScript builtin_id = Builtins::ADD;
6436 Counters* counters = masm->isolate()->counters();
6439 // sp[0]: second argument (right).
6440 // sp[4]: first argument (left).
6442 // Load the two arguments.
6443 __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
6444 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
6446 // Make sure that both arguments are strings if not known in advance.
6447 if (flags_ == NO_STRING_ADD_FLAGS) {
6448 __ JumpIfEitherSmi(a0, a1, &call_runtime);
6449 // Load instance types.
6450 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6451 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6452 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6453 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6454 STATIC_ASSERT(kStringTag == 0);
6455 // If either is not a string, go to runtime.
6456 __ Or(t4, t0, Operand(t1));
6457 __ And(t4, t4, Operand(kIsNotStringMask));
6458 __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6460 // Here at least one of the arguments is definitely a string.
6461 // We convert the one that is not known to be a string.
6462 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6463 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6464 GenerateConvertArgument(
6465 masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
6466 builtin_id = Builtins::STRING_ADD_RIGHT;
6467 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6468 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6469 GenerateConvertArgument(
6470 masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
6471 builtin_id = Builtins::STRING_ADD_LEFT;
6475 // Both arguments are strings.
6477 // a1: second string
6478 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6479 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6481 Label strings_not_empty;
6482 // Check if either of the strings are empty. In that case return the other.
6483 // These tests use zero-length check on string-length whch is an Smi.
6484 // Assert that Smi::FromInt(0) is really 0.
6485 STATIC_ASSERT(kSmiTag == 0);
6486 ASSERT(Smi::FromInt(0) == 0);
6487 __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
6488 __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
6489 __ mov(v0, a0); // Assume we'll return first string (from a0).
6490 __ Movz(v0, a1, a2); // If first is empty, return second (from a1).
6491 __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
6492 __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
6493 __ and_(t4, t4, t5); // Branch if both strings were non-empty.
6494 __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
6496 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6499 __ bind(&strings_not_empty);
6502 // Untag both string-lengths.
6503 __ sra(a2, a2, kSmiTagSize);
6504 __ sra(a3, a3, kSmiTagSize);
6506 // Both strings are non-empty.
6508 // a1: second string
6509 // a2: length of first string
6510 // a3: length of second string
6511 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6512 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6513 // Look at the length of the result of adding the two strings.
6514 Label string_add_flat_result, longer_than_two;
6515 // Adding two lengths can't overflow.
6516 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
6517 __ Addu(t2, a2, Operand(a3));
6518 // Use the symbol table when adding two one character strings, as it
6519 // helps later optimizations to return a symbol here.
6520 __ Branch(&longer_than_two, ne, t2, Operand(2));
6522 // Check that both strings are non-external ASCII strings.
6523 if (flags_ != NO_STRING_ADD_FLAGS) {
6524 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6525 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6526 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6527 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6529 __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
6532 // Get the two characters forming the sub string.
6533 __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
6534 __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
6536 // Try to lookup two character string in symbol table. If it is not found
6537 // just allocate a new one.
6538 Label make_two_character_string;
6539 StringHelper::GenerateTwoCharacterSymbolTableProbe(
6540 masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
6541 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6544 __ bind(&make_two_character_string);
6545 // Resulting string has length 2 and first chars of two strings
6546 // are combined into single halfword in a2 register.
6547 // So we can fill resulting string without two loops by a single
6548 // halfword store instruction (which assumes that processor is
6549 // in a little endian mode).
6550 __ li(t2, Operand(2));
6551 __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6552 __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6553 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6556 __ bind(&longer_than_two);
6557 // Check if resulting string will be flat.
6558 __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
6559 // Handle exceptionally long strings in the runtime system.
6560 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6561 ASSERT(IsPowerOf2(String::kMaxLength + 1));
6562 // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
6563 __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
6565 // If result is not supposed to be flat, allocate a cons string object.
6566 // If both strings are ASCII the result is an ASCII cons string.
6567 if (flags_ != NO_STRING_ADD_FLAGS) {
6568 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6569 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6570 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6571 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6573 Label non_ascii, allocated, ascii_data;
6574 STATIC_ASSERT(kTwoByteStringTag == 0);
6575 // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
6576 __ And(t4, t0, Operand(t1));
6577 __ And(t4, t4, Operand(kStringEncodingMask));
6578 __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
6580 // Allocate an ASCII cons string.
6581 __ bind(&ascii_data);
6582 __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
6583 __ bind(&allocated);
6584 // Fill the fields of the cons string.
6585 __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
6586 __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
6587 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6590 __ bind(&non_ascii);
6591 // At least one of the strings is two-byte. Check whether it happens
6592 // to contain only ASCII characters.
6593 // t0: first instance type.
6594 // t1: second instance type.
6595 // Branch to if _both_ instances have kAsciiDataHintMask set.
6596 __ And(at, t0, Operand(kAsciiDataHintMask));
6597 __ and_(at, at, t1);
6598 __ Branch(&ascii_data, ne, at, Operand(zero_reg));
6600 __ xor_(t0, t0, t1);
6601 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
6602 __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6603 __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6605 // Allocate a two byte cons string.
6606 __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
6607 __ Branch(&allocated);
6609 // We cannot encounter sliced strings or cons strings here since:
6610 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
6611 // Handle creating a flat result from either external or sequential strings.
6612 // Locate the first characters' locations.
6614 // a1: second string
6615 // a2: length of first string
6616 // a3: length of second string
6617 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6618 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6619 // t2: sum of lengths.
6620 Label first_prepared, second_prepared;
6621 __ bind(&string_add_flat_result);
6622 if (flags_ != NO_STRING_ADD_FLAGS) {
6623 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6624 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6625 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6626 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6628 // Check whether both strings have same encoding
6629 __ Xor(t3, t0, Operand(t1));
6630 __ And(t3, t3, Operand(kStringEncodingMask));
6631 __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
6633 STATIC_ASSERT(kSeqStringTag == 0);
6634 __ And(t4, t0, Operand(kStringRepresentationMask));
6636 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
6637 Label skip_first_add;
6638 __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
6639 __ Branch(USE_DELAY_SLOT, &first_prepared);
6640 __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
6641 __ bind(&skip_first_add);
6642 // External string: rule out short external string and load string resource.
6643 STATIC_ASSERT(kShortExternalStringTag != 0);
6644 __ And(t4, t0, Operand(kShortExternalStringMask));
6645 __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6646 __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
6647 __ bind(&first_prepared);
6649 STATIC_ASSERT(kSeqStringTag == 0);
6650 __ And(t4, t1, Operand(kStringRepresentationMask));
6651 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
6652 Label skip_second_add;
6653 __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
6654 __ Branch(USE_DELAY_SLOT, &second_prepared);
6655 __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
6656 __ bind(&skip_second_add);
6657 // External string: rule out short external string and load string resource.
6658 STATIC_ASSERT(kShortExternalStringTag != 0);
6659 __ And(t4, t1, Operand(kShortExternalStringMask));
6660 __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6661 __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
6662 __ bind(&second_prepared);
6664 Label non_ascii_string_add_flat_result;
6665 // t3: first character of first string
6666 // a1: first character of second string
6667 // a2: length of first string
6668 // a3: length of second string
6669 // t2: sum of lengths.
6670 // Both strings have the same encoding.
6671 STATIC_ASSERT(kTwoByteStringTag == 0);
6672 __ And(t4, t1, Operand(kStringEncodingMask));
6673 __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
6675 __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6676 __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6677 // v0: result string.
6678 // t3: first character of first string.
6679 // a1: first character of second string
6680 // a2: length of first string.
6681 // a3: length of second string.
6682 // t2: first character of result.
6684 StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
6685 // t2: next character of result.
6686 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
6687 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6690 __ bind(&non_ascii_string_add_flat_result);
6691 __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
6692 __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6693 // v0: result string.
6694 // t3: first character of first string.
6695 // a1: first character of second string.
6696 // a2: length of first string.
6697 // a3: length of second string.
6698 // t2: first character of result.
6699 StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
6700 // t2: next character of result.
6701 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
6703 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6706 // Just jump to runtime to add the two strings.
6707 __ bind(&call_runtime);
6708 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6710 if (call_builtin.is_linked()) {
6711 __ bind(&call_builtin);
6712 __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
6717 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6725 // First check if the argument is already a string.
6726 Label not_string, done;
6727 __ JumpIfSmi(arg, ¬_string);
6728 __ GetObjectType(arg, scratch1, scratch1);
6729 __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
6731 // Check the number to string cache.
6733 __ bind(¬_string);
6734 // Puts the cached result into scratch1.
6735 NumberToStringStub::GenerateLookupNumberStringCache(masm,
6743 __ mov(arg, scratch1);
6744 __ sw(arg, MemOperand(sp, stack_offset));
6747 // Check if the argument is a safe string wrapper.
6748 __ bind(¬_cached);
6749 __ JumpIfSmi(arg, slow);
6750 __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
6751 __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
6752 __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6753 __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
6754 __ And(scratch2, scratch2, scratch4);
6755 __ Branch(slow, ne, scratch2, Operand(scratch4));
6756 __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6757 __ sw(arg, MemOperand(sp, stack_offset));
6763 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6764 ASSERT(state_ == CompareIC::SMIS);
6767 __ JumpIfNotSmi(a2, &miss);
6769 if (GetCondition() == eq) {
6770 // For equality we do not care about the sign of the result.
6771 __ Subu(v0, a0, a1);
6773 // Untag before subtracting to avoid handling overflow.
6776 __ Subu(v0, a1, a0);
6785 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6786 ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6789 Label unordered, maybe_undefined1, maybe_undefined2;
6791 __ And(a2, a1, Operand(a0));
6792 __ JumpIfSmi(a2, &generic_stub);
6794 __ GetObjectType(a0, a2, a2);
6795 __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
6796 __ GetObjectType(a1, a2, a2);
6797 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
6799 // Inlining the double comparison and falling back to the general compare
6800 // stub if NaN is involved or FPU is unsupported.
6801 if (CpuFeatures::IsSupported(FPU)) {
6802 CpuFeatures::Scope scope(FPU);
6804 // Load left and right operand.
6805 __ Subu(a2, a1, Operand(kHeapObjectTag));
6806 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
6807 __ Subu(a2, a0, Operand(kHeapObjectTag));
6808 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
6810 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
6811 Label fpu_eq, fpu_lt;
6812 // Test if equal, and also handle the unordered/NaN case.
6813 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
6815 // Test if less (unordered case is already handled).
6816 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
6818 // Otherwise it's greater, so just fall thru, and return.
6819 __ li(v0, Operand(GREATER));
6823 __ li(v0, Operand(EQUAL));
6827 __ li(v0, Operand(LESS));
6831 __ bind(&unordered);
6833 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
6834 __ bind(&generic_stub);
6835 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6837 __ bind(&maybe_undefined1);
6838 if (Token::IsOrderedRelationalCompareOp(op_)) {
6839 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6840 __ Branch(&miss, ne, a0, Operand(at));
6841 __ GetObjectType(a1, a2, a2);
6842 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
6846 __ bind(&maybe_undefined2);
6847 if (Token::IsOrderedRelationalCompareOp(op_)) {
6848 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6849 __ Branch(&unordered, eq, a1, Operand(at));
6857 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6858 ASSERT(state_ == CompareIC::SYMBOLS);
6861 // Registers containing left and right operands respectively.
6863 Register right = a0;
6867 // Check that both operands are heap objects.
6868 __ JumpIfEitherSmi(left, right, &miss);
6870 // Check that both operands are symbols.
6871 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6872 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6873 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6874 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6875 STATIC_ASSERT(kSymbolTag != 0);
6876 __ And(tmp1, tmp1, Operand(tmp2));
6877 __ And(tmp1, tmp1, kIsSymbolMask);
6878 __ Branch(&miss, eq, tmp1, Operand(zero_reg));
6879 // Make sure a0 is non-zero. At this point input operands are
6880 // guaranteed to be non-zero.
6881 ASSERT(right.is(a0));
6882 STATIC_ASSERT(EQUAL == 0);
6883 STATIC_ASSERT(kSmiTag == 0);
6885 // Symbols are compared by identity.
6886 __ Ret(ne, left, Operand(right));
6887 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6895 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6896 ASSERT(state_ == CompareIC::STRINGS);
6899 bool equality = Token::IsEqualityOp(op_);
6901 // Registers containing left and right operands respectively.
6903 Register right = a0;
6910 // Check that both operands are heap objects.
6911 __ JumpIfEitherSmi(left, right, &miss);
6913 // Check that both operands are strings. This leaves the instance
6914 // types loaded in tmp1 and tmp2.
6915 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6916 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6917 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6918 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6919 STATIC_ASSERT(kNotStringTag != 0);
6920 __ Or(tmp3, tmp1, tmp2);
6921 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
6922 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
6924 // Fast check for identical strings.
6925 Label left_ne_right;
6926 STATIC_ASSERT(EQUAL == 0);
6927 STATIC_ASSERT(kSmiTag == 0);
6928 __ Branch(&left_ne_right, ne, left, Operand(right));
6929 __ Ret(USE_DELAY_SLOT);
6930 __ mov(v0, zero_reg); // In the delay slot.
6931 __ bind(&left_ne_right);
6933 // Handle not identical strings.
6935 // Check that both strings are symbols. If they are, we're done
6936 // because we already know they are not identical.
6938 ASSERT(GetCondition() == eq);
6939 STATIC_ASSERT(kSymbolTag != 0);
6940 __ And(tmp3, tmp1, Operand(tmp2));
6941 __ And(tmp5, tmp3, Operand(kIsSymbolMask));
6943 __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
6944 // Make sure a0 is non-zero. At this point input operands are
6945 // guaranteed to be non-zero.
6946 ASSERT(right.is(a0));
6947 __ Ret(USE_DELAY_SLOT);
6948 __ mov(v0, a0); // In the delay slot.
6949 __ bind(&is_symbol);
6952 // Check that both strings are sequential ASCII.
6954 __ JumpIfBothInstanceTypesAreNotSequentialAscii(
6955 tmp1, tmp2, tmp3, tmp4, &runtime);
6957 // Compare flat ASCII strings. Returns when done.
6959 StringCompareStub::GenerateFlatAsciiStringEquals(
6960 masm, left, right, tmp1, tmp2, tmp3);
6962 StringCompareStub::GenerateCompareFlatAsciiStrings(
6963 masm, left, right, tmp1, tmp2, tmp3, tmp4);
6966 // Handle more complex cases in runtime.
6968 __ Push(left, right);
6970 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6972 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6980 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6981 ASSERT(state_ == CompareIC::OBJECTS);
6983 __ And(a2, a1, Operand(a0));
6984 __ JumpIfSmi(a2, &miss);
6986 __ GetObjectType(a0, a2, a2);
6987 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6988 __ GetObjectType(a1, a2, a2);
6989 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6991 ASSERT(GetCondition() == eq);
6992 __ Ret(USE_DELAY_SLOT);
6993 __ subu(v0, a0, a1);
7000 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
7003 __ JumpIfSmi(a2, &miss);
7004 __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
7005 __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
7006 __ Branch(&miss, ne, a2, Operand(known_map_));
7007 __ Branch(&miss, ne, a3, Operand(known_map_));
7009 __ Ret(USE_DELAY_SLOT);
7010 __ subu(v0, a0, a1);
7016 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
7018 // Call the runtime system in a fresh internal frame.
7019 ExternalReference miss =
7020 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
7021 FrameScope scope(masm, StackFrame::INTERNAL);
7025 __ li(t0, Operand(Smi::FromInt(op_)));
7026 __ addiu(sp, sp, -kPointerSize);
7027 __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
7028 __ sw(t0, MemOperand(sp)); // In the delay slot.
7029 // Compute the entry point of the rewritten stub.
7030 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
7031 // Restore registers.
7038 void DirectCEntryStub::Generate(MacroAssembler* masm) {
7039 // No need to pop or drop anything, LeaveExitFrame will restore the old
7040 // stack, thus dropping the allocated space for the return value.
7041 // The saved ra is after the reserved stack space for the 4 args.
7042 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
7044 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
7045 // In case of an error the return address may point to a memory area
7046 // filled with kZapValue by the GC.
7047 // Dereference the address and check for this.
7048 __ lw(t0, MemOperand(t9));
7049 __ Assert(ne, "Received invalid return address.", t0,
7050 Operand(reinterpret_cast<uint32_t>(kZapValue)));
7056 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
7057 ExternalReference function) {
7058 __ li(t9, Operand(function));
7059 this->GenerateCall(masm, t9);
7063 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
7065 __ Move(t9, target);
7066 __ AssertStackIsAligned();
7067 // Allocate space for arg slots.
7068 __ Subu(sp, sp, kCArgsSlotsSize);
7070 // Block the trampoline pool through the whole function to make sure the
7071 // number of generated instructions is constant.
7072 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
7074 // We need to get the current 'pc' value, which is not available on MIPS.
7076 masm->bal(&find_ra); // ra = pc + 8.
7077 masm->nop(); // Branch delay slot nop.
7078 masm->bind(&find_ra);
7080 const int kNumInstructionsToJump = 6;
7081 masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
7082 // Push return address (accessible to GC through exit frame pc).
7083 // This spot for ra was reserved in EnterExitFrame.
7084 masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
7086 Operand(reinterpret_cast<intptr_t>(GetCode().location()),
7087 RelocInfo::CODE_TARGET),
7089 // Call the function.
7091 // Make sure the stored 'ra' points to this position.
7092 ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
7096 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
7100 Register properties,
7101 Handle<String> name,
7102 Register scratch0) {
7103 // If names of slots in range from 1 to kProbes - 1 for the hash value are
7104 // not equal to the name and kProbes-th slot is not used (its name is the
7105 // undefined value), it guarantees the hash table doesn't contain the
7106 // property. It's true even if some slots represent deleted properties
7107 // (their names are the hole value).
7108 for (int i = 0; i < kInlinedProbes; i++) {
7109 // scratch0 points to properties hash.
7110 // Compute the masked index: (hash + i + i * i) & mask.
7111 Register index = scratch0;
7112 // Capacity is smi 2^n.
7113 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
7114 __ Subu(index, index, Operand(1));
7115 __ And(index, index, Operand(
7116 Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
7118 // Scale the index by multiplying by the entry size.
7119 ASSERT(StringDictionary::kEntrySize == 3);
7120 __ sll(at, index, 1);
7121 __ Addu(index, index, at);
7123 Register entity_name = scratch0;
7124 // Having undefined at this place means the name is not contained.
7125 ASSERT_EQ(kSmiTagSize, 1);
7126 Register tmp = properties;
7127 __ sll(scratch0, index, 1);
7128 __ Addu(tmp, properties, scratch0);
7129 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
7131 ASSERT(!tmp.is(entity_name));
7132 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
7133 __ Branch(done, eq, entity_name, Operand(tmp));
7135 if (i != kInlinedProbes - 1) {
7136 // Load the hole ready for use below:
7137 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
7139 // Stop if found the property.
7140 __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
7143 __ Branch(&the_hole, eq, entity_name, Operand(tmp));
7145 // Check if the entry name is not a symbol.
7146 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
7148 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
7149 __ And(scratch0, entity_name, Operand(kIsSymbolMask));
7150 __ Branch(miss, eq, scratch0, Operand(zero_reg));
7154 // Restore the properties.
7156 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7160 const int spill_mask =
7161 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
7162 a2.bit() | a1.bit() | a0.bit() | v0.bit());
7164 __ MultiPush(spill_mask);
7165 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7166 __ li(a1, Operand(Handle<String>(name)));
7167 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
7170 __ MultiPop(spill_mask);
7172 __ Branch(done, eq, at, Operand(zero_reg));
7173 __ Branch(miss, ne, at, Operand(zero_reg));
7177 // Probe the string dictionary in the |elements| register. Jump to the
7178 // |done| label if a property with the given name is found. Jump to
7179 // the |miss| label otherwise.
7180 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
7181 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
7187 Register scratch2) {
7188 ASSERT(!elements.is(scratch1));
7189 ASSERT(!elements.is(scratch2));
7190 ASSERT(!name.is(scratch1));
7191 ASSERT(!name.is(scratch2));
7193 // Assert that name contains a string.
7194 if (FLAG_debug_code) __ AbortIfNotString(name);
7196 // Compute the capacity mask.
7197 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
7198 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
7199 __ Subu(scratch1, scratch1, Operand(1));
7201 // Generate an unrolled loop that performs a few probes before
7202 // giving up. Measurements done on Gmail indicate that 2 probes
7203 // cover ~93% of loads from dictionaries.
7204 for (int i = 0; i < kInlinedProbes; i++) {
7205 // Compute the masked index: (hash + i + i * i) & mask.
7206 __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
7208 // Add the probe offset (i + i * i) left shifted to avoid right shifting
7209 // the hash in a separate instruction. The value hash + i + i * i is right
7210 // shifted in the following and instruction.
7211 ASSERT(StringDictionary::GetProbeOffset(i) <
7212 1 << (32 - String::kHashFieldOffset));
7213 __ Addu(scratch2, scratch2, Operand(
7214 StringDictionary::GetProbeOffset(i) << String::kHashShift));
7216 __ srl(scratch2, scratch2, String::kHashShift);
7217 __ And(scratch2, scratch1, scratch2);
7219 // Scale the index by multiplying by the element size.
7220 ASSERT(StringDictionary::kEntrySize == 3);
7221 // scratch2 = scratch2 * 3.
7223 __ sll(at, scratch2, 1);
7224 __ Addu(scratch2, scratch2, at);
7226 // Check if the key is identical to the name.
7227 __ sll(at, scratch2, 2);
7228 __ Addu(scratch2, elements, at);
7229 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
7230 __ Branch(done, eq, name, Operand(at));
7233 const int spill_mask =
7234 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
7235 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
7236 ~(scratch1.bit() | scratch2.bit());
7238 __ MultiPush(spill_mask);
7240 ASSERT(!elements.is(a1));
7242 __ Move(a0, elements);
7244 __ Move(a0, elements);
7247 StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
7249 __ mov(scratch2, a2);
7251 __ MultiPop(spill_mask);
7253 __ Branch(done, ne, at, Operand(zero_reg));
7254 __ Branch(miss, eq, at, Operand(zero_reg));
7258 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
7259 // This stub overrides SometimesSetsUpAFrame() to return false. That means
7260 // we cannot call anything that could cause a GC from this stub.
7262 // result: StringDictionary to probe
7264 // : StringDictionary to probe.
7265 // index_: will hold an index of entry if lookup is successful.
7266 // might alias with result_.
7268 // result_ is zero if lookup failed, non zero otherwise.
7270 Register result = v0;
7271 Register dictionary = a0;
7273 Register index = a2;
7276 Register undefined = t1;
7277 Register entry_key = t2;
7279 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
7281 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
7282 __ sra(mask, mask, kSmiTagSize);
7283 __ Subu(mask, mask, Operand(1));
7285 __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
7287 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
7289 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
7290 // Compute the masked index: (hash + i + i * i) & mask.
7291 // Capacity is smi 2^n.
7293 // Add the probe offset (i + i * i) left shifted to avoid right shifting
7294 // the hash in a separate instruction. The value hash + i + i * i is right
7295 // shifted in the following and instruction.
7296 ASSERT(StringDictionary::GetProbeOffset(i) <
7297 1 << (32 - String::kHashFieldOffset));
7298 __ Addu(index, hash, Operand(
7299 StringDictionary::GetProbeOffset(i) << String::kHashShift));
7301 __ mov(index, hash);
7303 __ srl(index, index, String::kHashShift);
7304 __ And(index, mask, index);
7306 // Scale the index by multiplying by the entry size.
7307 ASSERT(StringDictionary::kEntrySize == 3);
7310 __ sll(index, index, 1);
7311 __ Addu(index, index, at);
7314 ASSERT_EQ(kSmiTagSize, 1);
7315 __ sll(index, index, 2);
7316 __ Addu(index, index, dictionary);
7317 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
7319 // Having undefined at this place means the name is not contained.
7320 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined));
7322 // Stop if found the property.
7323 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
7325 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
7326 // Check if the entry name is not a symbol.
7327 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
7329 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
7330 __ And(result, entry_key, Operand(kIsSymbolMask));
7331 __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
7335 __ bind(&maybe_in_dictionary);
7336 // If we are doing negative lookup then probing failure should be
7337 // treated as a lookup success. For positive lookup probing failure
7338 // should be treated as lookup failure.
7339 if (mode_ == POSITIVE_LOOKUP) {
7340 __ Ret(USE_DELAY_SLOT);
7341 __ mov(result, zero_reg);
7344 __ bind(&in_dictionary);
7345 __ Ret(USE_DELAY_SLOT);
7348 __ bind(¬_in_dictionary);
7349 __ Ret(USE_DELAY_SLOT);
7350 __ mov(result, zero_reg);
7354 struct AheadOfTimeWriteBarrierStubList {
7355 Register object, value, address;
7356 RememberedSetAction action;
7359 #define REG(Name) { kRegister_ ## Name ## _Code }
7361 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7362 // Used in RegExpExecStub.
7363 { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
7364 { REG(s2), REG(a2), REG(t3), EMIT_REMEMBERED_SET },
7365 // Used in CompileArrayPushCall.
7366 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
7367 // Also used in KeyedStoreIC::GenerateGeneric.
7368 { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
7369 // Used in CompileStoreGlobal.
7370 { REG(t0), REG(a1), REG(a2), OMIT_REMEMBERED_SET },
7371 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
7372 { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
7373 { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
7374 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7375 { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
7376 { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
7377 // KeyedStoreStubCompiler::GenerateStoreFastElement.
7378 { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
7379 { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
7380 // ElementsTransitionGenerator::GenerateSmiOnlyToObject
7381 // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
7382 // and ElementsTransitionGenerator::GenerateDoubleToObject
7383 { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
7384 { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
7385 // ElementsTransitionGenerator::GenerateDoubleToObject
7386 { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
7387 { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
7388 // StoreArrayLiteralElementStub::Generate
7389 { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
7390 // Null termination.
7391 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
7397 bool RecordWriteStub::IsPregenerated() {
7398 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7399 !entry->object.is(no_reg);
7401 if (object_.is(entry->object) &&
7402 value_.is(entry->value) &&
7403 address_.is(entry->address) &&
7404 remembered_set_action_ == entry->action &&
7405 save_fp_regs_mode_ == kDontSaveFPRegs) {
7413 bool StoreBufferOverflowStub::IsPregenerated() {
7414 return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
7418 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
7419 StoreBufferOverflowStub stub1(kDontSaveFPRegs);
7420 stub1.GetCode()->set_is_pregenerated(true);
7424 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
7425 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7426 !entry->object.is(no_reg);
7428 RecordWriteStub stub(entry->object,
7433 stub.GetCode()->set_is_pregenerated(true);
7438 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
7439 // the value has just been written into the object, now this stub makes sure
7440 // we keep the GC informed. The word in the object where the value has been
7441 // written is in the address register.
7442 void RecordWriteStub::Generate(MacroAssembler* masm) {
7443 Label skip_to_incremental_noncompacting;
7444 Label skip_to_incremental_compacting;
7446 // The first two branch+nop instructions are generated with labels so as to
7447 // get the offset fixed up correctly by the bind(Label*) call. We patch it
7448 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
7449 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
7450 // incremental heap marking.
7451 // See RecordWriteStub::Patch for details.
7452 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
7454 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
7457 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7458 __ RememberedSetHelper(object_,
7462 MacroAssembler::kReturnAtEnd);
7466 __ bind(&skip_to_incremental_noncompacting);
7467 GenerateIncremental(masm, INCREMENTAL);
7469 __ bind(&skip_to_incremental_compacting);
7470 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7472 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7473 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
7475 PatchBranchIntoNop(masm, 0);
7476 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
7480 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7483 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7484 Label dont_need_remembered_set;
7486 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7487 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
7489 &dont_need_remembered_set);
7491 __ CheckPageFlag(regs_.object(),
7493 1 << MemoryChunk::SCAN_ON_SCAVENGE,
7495 &dont_need_remembered_set);
7497 // First notify the incremental marker if necessary, then update the
7499 CheckNeedsToInformIncrementalMarker(
7500 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
7501 InformIncrementalMarker(masm, mode);
7502 regs_.Restore(masm);
7503 __ RememberedSetHelper(object_,
7507 MacroAssembler::kReturnAtEnd);
7509 __ bind(&dont_need_remembered_set);
7512 CheckNeedsToInformIncrementalMarker(
7513 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
7514 InformIncrementalMarker(masm, mode);
7515 regs_.Restore(masm);
7520 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7521 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7522 int argument_count = 3;
7523 __ PrepareCallCFunction(argument_count, regs_.scratch0());
7525 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
7526 ASSERT(!address.is(regs_.object()));
7527 ASSERT(!address.is(a0));
7528 __ Move(address, regs_.address());
7529 __ Move(a0, regs_.object());
7530 if (mode == INCREMENTAL_COMPACTION) {
7531 __ Move(a1, address);
7533 ASSERT(mode == INCREMENTAL);
7534 __ lw(a1, MemOperand(address, 0));
7536 __ li(a2, Operand(ExternalReference::isolate_address()));
7538 AllowExternalCallThatCantCauseGC scope(masm);
7539 if (mode == INCREMENTAL_COMPACTION) {
7541 ExternalReference::incremental_evacuation_record_write_function(
7545 ASSERT(mode == INCREMENTAL);
7547 ExternalReference::incremental_marking_record_write_function(
7551 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
7555 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7556 MacroAssembler* masm,
7557 OnNoNeedToInformIncrementalMarker on_no_need,
7560 Label need_incremental;
7561 Label need_incremental_pop_scratch;
7563 // Let's look at the color of the object: If it is not black we don't have
7564 // to inform the incremental marker.
7565 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
7567 regs_.Restore(masm);
7568 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7569 __ RememberedSetHelper(object_,
7573 MacroAssembler::kReturnAtEnd);
7580 // Get the value from the slot.
7581 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7583 if (mode == INCREMENTAL_COMPACTION) {
7584 Label ensure_not_white;
7586 __ CheckPageFlag(regs_.scratch0(), // Contains value.
7587 regs_.scratch1(), // Scratch.
7588 MemoryChunk::kEvacuationCandidateMask,
7592 __ CheckPageFlag(regs_.object(),
7593 regs_.scratch1(), // Scratch.
7594 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
7598 __ bind(&ensure_not_white);
7601 // We need extra registers for this, so we push the object and the address
7602 // register temporarily.
7603 __ Push(regs_.object(), regs_.address());
7604 __ EnsureNotWhite(regs_.scratch0(), // The value.
7605 regs_.scratch1(), // Scratch.
7606 regs_.object(), // Scratch.
7607 regs_.address(), // Scratch.
7608 &need_incremental_pop_scratch);
7609 __ Pop(regs_.object(), regs_.address());
7611 regs_.Restore(masm);
7612 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7613 __ RememberedSetHelper(object_,
7617 MacroAssembler::kReturnAtEnd);
7622 __ bind(&need_incremental_pop_scratch);
7623 __ Pop(regs_.object(), regs_.address());
7625 __ bind(&need_incremental);
7627 // Fall through when we need to inform the incremental marker.
7631 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
7632 // ----------- S t a t e -------------
7633 // -- a0 : element value to store
7634 // -- a1 : array literal
7635 // -- a2 : map of array literal
7636 // -- a3 : element index as smi
7637 // -- t0 : array literal index in function as smi
7638 // -----------------------------------
7641 Label double_elements;
7643 Label slow_elements;
7644 Label fast_elements;
7646 __ CheckFastElements(a2, t1, &double_elements);
7647 // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
7648 __ JumpIfSmi(a0, &smi_element);
7649 __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
7651 // Store into the array literal requires a elements transition. Call into
7653 __ bind(&slow_elements);
7655 __ Push(a1, a3, a0);
7656 __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
7657 __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
7659 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
7661 // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
7662 __ bind(&fast_elements);
7663 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
7664 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7665 __ Addu(t2, t1, t2);
7666 __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
7667 __ sw(a0, MemOperand(t2, 0));
7668 // Update the write barrier for the array store.
7669 __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
7670 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
7671 __ Ret(USE_DELAY_SLOT);
7674 // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
7675 // FAST_ELEMENTS, and value is Smi.
7676 __ bind(&smi_element);
7677 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
7678 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7679 __ Addu(t2, t1, t2);
7680 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
7681 __ Ret(USE_DELAY_SLOT);
7684 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
7685 __ bind(&double_elements);
7686 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
7687 __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
7689 __ Ret(USE_DELAY_SLOT);
7696 } } // namespace v8::internal
7698 #endif // V8_TARGET_ARCH_MIPS