// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc, bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow, bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
                                           Register rhs);
// Check if the operand is a heap number.
static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
                                   Register scratch1, Register scratch2,
                                   Label* not_a_heap_number) {
  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
}
void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in a0.
  Label check_heap_number, call_builtin;
  __ JumpIfNotSmi(a0, &check_heap_number);
  __ mov(v0, a0);
  __ Ret();

  __ bind(&check_heap_number);
  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
  __ mov(v0, a0);
  __ Ret();

  __ bind(&call_builtin);
  __ push(a0);
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}
void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in cp.
  Label gc;

  // Pop the function info from the stack.
  __ pop(a3);

  // Attempt to allocate new JSFunction in new space.
  __ AllocateInNewSpace(JSFunction::kSize,
                        v0,
                        a1,
                        a2,
                        &gc,
                        TAG_OBJECT);

  int map_index = strict_mode_ == kStrictMode
      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
      : Context::FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
  __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
  __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
  __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
  __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
  __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));

  // Return result. The argument function info has been popped already.
  __ Ret();

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
  __ Push(cp, a3, t0);
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        v0, a1, a2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Set up the object header.
  __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));

  // Set up the fixed slots.
  __ li(a1, Operand(Smi::FromInt(0)));
  __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));

  // Copy the global object from the previous context.
  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));

  // Copy the qml global object from the surrounding context.
  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, v0);
  __ Addu(sp, sp, Operand(1 * kPointerSize));
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: function.
  // [sp + kPointerSize]: serialized scope info

  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        v0, a1, a2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Load the serialized scope info from the stack.
  __ lw(a1, MemOperand(sp, 1 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));

  // If this block context is nested in the global context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the global context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(a3, &after_sentinel);
  if (FLAG_debug_code) {
    const char* message = "Expected 0 as a Smi sentinel";
    __ Assert(eq, message, a3, Operand(zero_reg));
  }
  __ lw(a3, GlobalObjectOperand());
  __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
  __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots.
  __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
  __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
  __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));

  // Copy the global object from the previous context.
  __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX));

  // Copy the qml global object from the surrounding context.
  __ lw(a1, ContextOperand(cp, Context::QML_GLOBAL_INDEX));
  __ sw(a1, ContextOperand(v0, Context::QML_GLOBAL_INDEX));

  // Initialize the rest of the slots to the hole value.
  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
  for (int i = 0; i < slots_; i++) {
    __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, v0);
  __ Addu(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  // [sp]: constant elements.
  // [sp + kPointerSize]: literal index.
  // [sp + (2 * kPointerSize)]: literals array.

  // All sizes here are multiples of kPointerSize.
  int elements_size = 0;
  if (length_ > 0) {
    elements_size = mode_ == CLONE_DOUBLE_ELEMENTS
        ? FixedDoubleArray::SizeFor(length_)
        : FixedArray::SizeFor(length_);
  }
  int size = JSArray::kSize + elements_size;

  // Load boilerplate object into a3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t0, a3, t0);
  __ lw(a3, MemOperand(t0));
  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case, eq, a3, Operand(t1));

  if (FLAG_debug_code) {
    const char* message;
    Heap::RootListIndex expected_map_index;
    if (mode_ == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else if (mode_ == CLONE_DOUBLE_ELEMENTS) {
      message = "Expected (writable) fixed double array";
      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
    } else {
      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
    }
    __ push(a3);
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
    __ LoadRoot(at, expected_map_index);
    __ Assert(eq, message, a3, Operand(at));
    __ pop(a3);
  }

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  // Return new object in v0.
  __ AllocateInNewSpace(size,
                        v0,
                        a1,
                        a2,
                        &slow_case,
                        TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
      __ lw(a1, FieldMemOperand(a3, i));
      __ sw(a1, FieldMemOperand(v0, i));
    }
  }

  if (length_ > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ Addu(a2, v0, Operand(JSArray::kSize));
    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));

    // Copy the elements array.
    ASSERT((elements_size % kPointerSize) == 0);
    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
  }

  // Return and remove the on-stack parameters.
  __ Addu(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
// Takes a Smi and converts it to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
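//
// As an illustrative sketch (not part of the stub): the Smi 5 untags to
// the integer 5 = 0b101, which has 29 leading zeros in 32 bits, so the
// unbiased exponent is 31 - 29 = 2 and the biased exponent is
// 2 + 1023 = 1025 (0x401). After the implicit leading 1 is shifted away,
// the two result words are 0x40140000 (sign, exponent, mantissa[51:32])
// and 0x00000000 (mantissa[31:0]), i.e. the IEEE 754 encoding of 5.0.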
class ConvertToDoubleStub : public CodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);
};
void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
#ifndef BIG_ENDIAN_FLOATING_POINT
  Register exponent = result1_;
  Register mantissa = result2_;
#else
  Register exponent = result2_;
  Register mantissa = result1_;
#endif
  Label not_special;
  // Convert from Smi to integer.
  __ sra(source_, source_, kSmiTagSize);
  // Move sign bit from source to destination. This works because the sign
  // bit in the exponent word of the double has the same position and
  // polarity as the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
  // Subtract from 0 if source was negative.
  __ subu(at, zero_reg, source_);
  __ movn(source_, at, exponent);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // the absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ Branch(&not_special, gt, source_, Operand(1));

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  static const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  // Safe to use 'at' as dest reg here.
  __ Or(at, exponent, Operand(exponent_word_for_1));
  __ movn(exponent, at, source_);  // Write exponent when source is not 0.
  // 1, 0 and -1 all have 0 for the second word.
  __ mov(mantissa, zero_reg);
  __ Ret();

  __ bind(&not_special);
  // Count leading zeros.
  // Gets the wrong answer for 0, but we already checked for that case above.
  __ clz(zeros_, source_);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.
  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
  __ subu(mantissa, mantissa, zeros_);
  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
  __ Or(exponent, exponent, mantissa);

  // Shift up the source chopping the top bit off.
  __ Addu(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ sllv(source_, source_, zeros_);
  // Compute lower part of fraction (last 12 bits).
  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
  // And the top (top 20 bits).
  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
  __ or_(exponent, exponent, source_);
  __ Ret();
}
void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                   FloatingPointHelper::Destination destination,
                                   Register scratch1,
                                   Register scratch2) {
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(scratch1, a0, kSmiTagSize);
    __ mtc1(scratch1, f14);
    __ cvt_d_w(f14, f14);
    __ sra(scratch1, a1, kSmiTagSize);
    __ mtc1(scratch1, f12);
    __ cvt_d_w(f12, f12);
    if (destination == kCoreRegisters) {
      __ Move(a2, a3, f14);
      __ Move(a0, a1, f12);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write Smi from a0 to a3 and a2 in double format.
    __ mov(scratch1, a0);
    ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
    __ push(ra);
    __ Call(stub1.GetCode());
    // Write Smi from a1 to a1 and a0 in double format.
    __ mov(scratch1, a1);
    ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
    __ Call(stub2.GetCode());
    __ pop(ra);
  }
}
void FloatingPointHelper::LoadOperands(
    MacroAssembler* masm,
    FloatingPointHelper::Destination destination,
    Register heap_number_map,
    Register scratch1,
    Register scratch2,
    Label* slow) {
  // Load right operand (a0) to f14 or a2/a3.
  LoadNumber(masm, destination,
             a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);

  // Load left operand (a1) to f12 or a0/a1.
  LoadNumber(masm, destination,
             a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
}
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                     Destination destination,
                                     Register object,
                                     FPURegister dst,
                                     Register dst1,
                                     Register dst2,
                                     Register heap_number_map,
                                     Register scratch1,
                                     Register scratch2,
                                     Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }

  Label is_smi, done;

  __ JumpIfSmi(object, &is_smi);
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

  // Handle loading a double from a heap number.
  if (CpuFeatures::IsSupported(FPU) &&
      destination == kFPURegisters) {
    CpuFeatures::Scope scope(FPU);
    // Load the double from tagged HeapNumber to double register.

    // ARM uses a workaround here because of the unaligned HeapNumber
    // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
    // point in generating even more instructions.
    __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
  } else {
    ASSERT(destination == kCoreRegisters);
    // Load the double from heap number to dst1 and dst2 in double format.
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
    __ lw(dst2, FieldMemOperand(object,
                                HeapNumber::kValueOffset + kPointerSize));
  }
  __ Branch(&done);

  // Handle loading a double from a smi.
  __ bind(&is_smi);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Convert smi to double using FPU instructions.
    __ SmiUntag(scratch1, object);
    __ mtc1(scratch1, dst);
    __ cvt_d_w(dst, dst);
    if (destination == kCoreRegisters) {
      // Load the converted smi to dst1 and dst2 in double format.
      __ Move(dst1, dst2, dst);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write smi to dst1 and dst2 in double format.
    __ mov(scratch1, object);
    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
    __ push(ra);
    __ Call(stub.GetCode());
    __ pop(ra);
  }

  __ bind(&done);
}
void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
                                               Register object,
                                               Register dst,
                                               Register heap_number_map,
                                               Register scratch1,
                                               Register scratch2,
                                               Register scratch3,
                                               FPURegister double_scratch,
                                               Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  Label is_smi;
  Label done;
  Label not_in_int32_range;

  __ JumpIfSmi(object, &is_smi);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
  __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
  __ ConvertToInt32(object, dst, scratch1, scratch2, double_scratch,
                    &not_in_int32_range);
  __ jmp(&done);

  __ bind(&not_in_int32_range);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

  __ EmitOutOfInt32RangeTruncate(dst, scratch1, scratch2, scratch3);
  __ jmp(&done);

  __ bind(&is_smi);
  __ SmiUntag(dst, object);
  __ bind(&done);
}
void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
                                             Register int_scratch,
                                             Destination destination,
                                             FPURegister double_dst,
                                             Register dst1,
                                             Register dst2,
                                             Register scratch2,
                                             FPURegister single_scratch) {
  ASSERT(!int_scratch.is(scratch2));
  ASSERT(!int_scratch.is(dst1));
  ASSERT(!int_scratch.is(dst2));

  Label done;

  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ mtc1(int_scratch, single_scratch);
    __ cvt_d_w(double_dst, single_scratch);
    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }
  } else {
    Label fewer_than_20_useful_bits;
    // Expected output:
    // | s |   exp   |              mantissa              |
    __ mov(dst2, int_scratch);
    __ mov(dst1, int_scratch);
    __ Branch(&done, eq, int_scratch, Operand(zero_reg));

    // Preload the sign of the value.
    __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
    // Get the absolute value of the object (as an unsigned integer).
    Label skip_sub;
    __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
    __ Subu(int_scratch, zero_reg, int_scratch);
    __ bind(&skip_sub);

    // Get mantissa[51:20].

    // Get the position of the first set bit.
    __ clz(dst1, int_scratch);
    __ li(scratch2, 31);
    __ Subu(dst1, scratch2, dst1);

    // Set the exponent.
    __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
    __ Ins(dst2, scratch2,
           HeapNumber::kExponentShift, HeapNumber::kExponentBits);

    // Clear the first non-null bit.
    __ li(scratch2, Operand(1));
    __ sllv(scratch2, scratch2, dst1);
    __ li(at, -1);
    __ Xor(scratch2, scratch2, at);
    __ And(int_scratch, int_scratch, scratch2);

    // Get the number of bits to set in the lower part of the mantissa.
    __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
    __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
    // Set the higher 20 bits of the mantissa.
    __ srlv(at, int_scratch, scratch2);
    __ or_(dst2, dst2, at);
    __ li(at, 32);
    __ subu(scratch2, at, scratch2);
    __ sllv(dst1, int_scratch, scratch2);
    __ Branch(&done);

    __ bind(&fewer_than_20_useful_bits);
    __ li(at, HeapNumber::kMantissaBitsInTopWord);
    __ subu(scratch2, at, dst1);
    __ sllv(scratch2, int_scratch, scratch2);
    __ Or(dst2, dst2, scratch2);
    __ mov(dst1, zero_reg);
  }
  __ bind(&done);
}
void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                  Register object,
                                                  Destination destination,
                                                  DoubleRegister double_dst,
                                                  Register dst1,
                                                  Register dst2,
                                                  Register heap_number_map,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  FPURegister single_scratch,
                                                  Label* not_int32) {
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!heap_number_map.is(object) &&
         !heap_number_map.is(scratch1) &&
         !heap_number_map.is(scratch2));

  Label done, obj_is_not_smi;

  __ JumpIfNotSmi(object, &obj_is_not_smi);
  __ SmiUntag(scratch1, object);
  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
                     scratch2, single_scratch);
  __ Branch(&done);

  __ bind(&obj_is_not_smi);
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Load the double value.
    __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_dst,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));

    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }
  } else {
    ASSERT(!scratch1.is(object) && !scratch2.is(object));
    // Load the double value in the destination registers.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
    __ Or(scratch1, scratch1, Operand(dst2));
    __ Branch(&done, eq, scratch1, Operand(zero_reg));

    // Check that the value can be exactly represented by a 32-bit integer.
    // Jump to not_int32 if that's not the case.
    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);

    // dst1 and dst2 were trashed. Reload the double value.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
  }

  __ bind(&done);
}
void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
                                            Register object,
                                            Register dst,
                                            Register heap_number_map,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            DoubleRegister double_scratch,
                                            Label* not_int32) {
  ASSERT(!dst.is(object));
  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
  ASSERT(!scratch1.is(scratch2) &&
         !scratch1.is(scratch3) &&
         !scratch2.is(scratch3));

  Label done;

  // Untag the object into the destination register.
  __ SmiUntag(dst, object);
  // Just return if the object is a smi.
  __ JumpIfSmi(object, &done);

  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Object is a heap number.
  // Convert the floating point value to a 32-bit integer.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Load the double value.
    __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));

    FPURegister single_scratch = double_scratch.low();
    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_scratch,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
    // Get the result in the destination register.
    __ mfc1(dst, single_scratch);
  } else {
    // Load the double value in the destination registers.
    __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
    __ Or(dst, scratch2, Operand(dst));
    __ Branch(&done, eq, dst, Operand(zero_reg));

    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);

    // Registers state after DoubleIs32BitInteger.
    // dst: mantissa[51:20].
    // scratch2: 1.

    // Shift back the higher bits of the mantissa.
    __ srlv(dst, dst, scratch3);
    // Set the implicit first bit.
    __ li(at, 32);
    __ subu(scratch3, at, scratch3);
    __ sllv(scratch2, scratch2, scratch3);
    __ Or(dst, dst, scratch2);
    // Set the sign.
    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    Label skip_sub;
    __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
    __ Subu(dst, zero_reg, dst);
    __ bind(&skip_sub);
  }

  __ bind(&done);
}
void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
                                               Register src1,
                                               Register src2,
                                               Register dst,
                                               Register scratch,
                                               Label* not_int32) {
  // Get exponent alone in scratch.
  __ Ext(scratch,
         src1,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Subtract the bias from the exponent.
  __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));

  // src1: higher (exponent) part of the double value.
  // src2: lower (mantissa) part of the double value.
  // scratch: unbiased exponent.

  // Fast cases. Check for obvious non 32-bit integer values.
  // Negative exponent cannot yield 32-bit integers.
  __ Branch(not_int32, lt, scratch, Operand(zero_reg));
  // Exponent greater than 31 cannot yield 32-bit integers.
  // Also, a positive value with an exponent equal to 31 is outside of the
  // signed 32-bit integer range.
  // Another way to put it is that if (exponent - signbit) > 30 then the
  // number cannot be represented as an int32.
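  // Illustrative check of this bound (not part of the stub): +2^31 has
  // unbiased exponent 31 and sign bit 0, so exponent - signbit = 31 > 30
  // and it is rejected (INT32_MAX is 2^31 - 1). -2^31 has exponent 31 and
  // sign bit 1, so 31 - 1 = 30 passes, which is correct because -2^31 is
  // representable as an int32.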
  Register tmp = dst;
  __ srl(at, src1, 31);
  __ subu(tmp, scratch, at);
  __ Branch(not_int32, gt, tmp, Operand(30));
  // - Bits [21:0] in the mantissa are not null.
  __ And(tmp, src2, 0x3fffff);
  __ Branch(not_int32, ne, tmp, Operand(zero_reg));

  // Otherwise the exponent needs to be big enough to shift left all the
  // non zero bits left. So we need the (30 - exponent) last bits of the
  // 31 higher bits of the mantissa to be null.
  // Because bits [21:0] are null, we can check instead that the
  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.

  // Get the 32 higher bits of the mantissa in dst.
  __ Ext(dst,
         src2,
         HeapNumber::kMantissaBitsInTopWord,
         32 - HeapNumber::kMantissaBitsInTopWord);
  __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
  __ or_(dst, dst, at);

  // Create the mask and test the lower bits (of the higher bits).
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ li(src2, 1);
  __ sllv(src1, src2, scratch);
  __ Subu(src1, src1, Operand(1));
  __ And(src1, dst, src1);
  __ Branch(not_int32, ne, src1, Operand(zero_reg));
}
void FloatingPointHelper::CallCCodeForDoubleOperation(
    MacroAssembler* masm,
    Token::Value op,
    Register heap_number_result,
    Register scratch) {
  // Using core registers:
  // a0: Left value (least significant part of mantissa).
  // a1: Left value (sign, exponent, top of mantissa).
  // a2: Right value (least significant part of mantissa).
  // a3: Right value (sign, exponent, top of mantissa).

  // Assert that heap_number_result is saved.
  // We currently always use s0 to pass it.
  ASSERT(heap_number_result.is(s0));

  // Push the current return address before the C call.
  __ push(ra);
  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // The parameters for the runtime call are prepared in the a0-a3
    // registers, but the C function we are calling is compiled with the
    // hard-float flag and expects the hard-float ABI (parameters in the
    // f12/f14 registers), so we need to copy the parameters from a0-a3
    // to the f12/f14 register pairs.
    __ Move(f12, a0, a1);
    __ Move(f14, a2, a3);
  }
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(
        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
  }
  // Store answer in the overwritable heap number.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // Double returned in register f0.
    __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
  } else {
    // Double returned in registers v0 and v1.
    __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
    __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
  }
  // Place heap_number_result in v0 and return to the pushed return address.
  __ mov(v0, heap_number_result);
  __ pop(ra);
  __ Ret();
}
bool WriteInt32ToHeapNumberStub::IsPregenerated() {
  // These variants are compiled ahead of time. See next method.
  if (the_int_.is(a1) &&
      the_heap_number_.is(v0) &&
      scratch_.is(a2) &&
      sign_.is(a3)) {
    return true;
  }
  if (the_int_.is(a2) &&
      the_heap_number_.is(v0) &&
      scratch_.is(a3) &&
      sign_.is(a0)) {
    return true;
  }
  // Other register combinations are generated as and when they are needed,
  // so it is unsafe to call them from stubs (we can't generate a stub while
  // we are generating a stub).
  return false;
}


void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
  stub1.GetCode()->set_is_pregenerated(true);
  stub2.GetCode()->set_is_pregenerated(true);
}
// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  // Test sign, and save for later conditionals.
  __ And(sign_, the_int_, Operand(0x80000000u));
  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));

  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
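  // For illustration (not part of the stub): the smallest positive non-Smi
  // int32 is 2^30, so every such value normalizes to 1.xxx * 2^30 and gets
  // the biased exponent 30 + 1023 = 1053 (0x41D); 2^30 itself is stored
  // with high word 0x41D00000 and low word 0x00000000.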
  __ li(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ or_(scratch_, scratch_, sign_);
  // Subtract from 0 if the value was negative.
  __ subu(at, zero_reg, the_int_);
  __ movn(the_int_, at, sign_);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ srl(at, the_int_, shift_distance);
  __ or_(scratch_, scratch_, at);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kExponentOffset));
  __ sll(scratch_, the_int_, 32 - shift_distance);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
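  // E.g. (illustrative only): -2^31 = -2147483648 is encoded with sign = 1,
  // biased exponent = 31 + 1023 = 1054 (0x41E) and an all-zero mantissa,
  // giving the double bit pattern 0xC1E00000 00000000.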
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(scratch_, zero_reg);
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}
// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
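// For instance (JavaScript semantics, illustrative): with var x = NaN,
// both x == x and x === x evaluate to false, so even an identical-operands
// fast path must still exclude NaN before reporting equality.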
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t5;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  // The two objects are identical. If we know that one of them isn't NaN then
  // we now know they test equal.
  if (cc != eq || !never_nan_nan) {
    __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
    // so we do the second best thing - test it ourselves.
    // They are both equal and they are not both Smis so both of them are not
    // Smis. If it's not a heap number, then return equal.
    if (cc == less || cc == greater) {
      __ GetObjectType(a0, t4, t4);
      __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
    } else {
      __ GetObjectType(a0, t4, t4);
      __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
      // Comparing JS objects with <=, >= is complicated.
      if (cc != eq) {
        __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
        // Normally here we fall through to return_equal, but undefined is
        // special: (undefined == undefined) == true, but
        // (undefined <= undefined) == false! See ECMAScript 11.8.5.
        if (cc == less_equal || cc == greater_equal) {
          __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
          __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
          __ Branch(&return_equal, ne, a0, Operand(t2));
          if (cc == le) {
            // undefined <= undefined should fail.
            __ li(v0, Operand(GREATER));
          } else {
            // undefined >= undefined should fail.
            __ li(v0, Operand(LESS));
          }
          __ Ret();
        }
      }
    }
  }

  __ bind(&return_equal);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));  // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);  // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  if (cc != eq || !never_nan_nan) {
    // For less and greater we don't have to check for NaN since the result of
    // x < x is false regardless. For the others here is some code to check
    // for NaN.
    if (cc != lt && cc != gt) {
      __ bind(&heap_number);
      // It is a heap number, so return non-equal if it's NaN and equal if
      // it's not NaN.

      // The representation of NaN values has all exponent bits (52..62) set,
      // and not all mantissa bits (0..51) clear.
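      // For example (illustrative only): one common quiet NaN bit pattern
      // is 0x7FF80000 00000000, while +Infinity is 0x7FF00000 00000000;
      // both have the all-ones exponent, but Infinity has an all-zero
      // mantissa.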
      // Read top bits of double representation (second word of value).
      __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
      // Test that exponent bits are all set.
      __ And(t3, t2, Operand(exp_mask_reg));
      // If all bits not set (ne cond), then not a NaN, objects are equal.
      __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));

      // Shift out flag and all exponent bits, retaining only mantissa.
      __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
      // Or with all low-bits of mantissa.
      __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
      __ Or(v0, t3, Operand(t2));
      // For equal we already have the right value in v0: Return zero (equal)
      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
      // not (it's a NaN). For <= and >= we need to load v0 with the failing
      // value if it's a NaN.
      if (cc != eq) {
        // All-zero means Infinity means equal.
        __ Ret(eq, v0, Operand(zero_reg));
        if (cc == le) {
          __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
        } else {
          __ li(v0, Operand(LESS));  // NaN >= NaN should fail.
        }
      }
      __ Ret();
    }
    // No fall through here.
  }

  __ bind(&not_identical);
}
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ And(t0, lhs, Operand(kSmiTagMask));
  __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));

  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ mov(v0, lhs);
    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(at, rhs, kSmiTagSize);
    __ mtc1(at, f14);
    __ cvt_d_w(f14, f14);
    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  } else {
    // Load lhs to a double in a2, a3.
    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));

    // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
    __ mov(t6, rhs);
    ConvertToDoubleStub stub1(a1, a0, t6, t5);
    __ push(ra);
    __ Call(stub1.GetCode());
    __ pop(ra);
  }

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi. Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ li(v0, Operand(1));
    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(at, lhs, kSmiTagSize);
    __ mtc1(at, f12);
    __ cvt_d_w(f12, f12);
    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  } else {
    // Convert lhs to a double format. t5 is scratch.
    __ mov(t6, lhs);
    ConvertToDoubleStub stub2(a3, a2, t6, t5);
    __ push(ra);
    __ Call(stub2.GetCode());
    __ pop(ra);

    // Load rhs to a double in a1, a0.
    if (rhs.is(a0)) {
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    } else {
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
    }
  }
  // Fall through to both_loaded_as_doubles.
}
void EmitNanCheck(MacroAssembler* masm, Condition cc) {
  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Lhs and rhs are already loaded to f12 and f14 register pairs.
    __ Move(t0, t1, f14);
    __ Move(t2, t3, f12);
  } else {
    // Lhs and rhs are already loaded to GP registers.
    __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
    __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
    __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
    __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
  }
  Register rhs_exponent = exp_first ? t0 : t1;
  Register lhs_exponent = exp_first ? t2 : t3;
  Register rhs_mantissa = exp_first ? t1 : t0;
  Register lhs_mantissa = exp_first ? t3 : t2;
  Label one_is_nan, neither_is_nan;
  Label lhs_not_nan_exp_mask_is_loaded;

  Register exp_mask_reg = t4;
  __ li(exp_mask_reg, HeapNumber::kExponentMask);
  __ and_(t5, lhs_exponent, exp_mask_reg);
  __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));

  __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));

  __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));

  __ li(exp_mask_reg, HeapNumber::kExponentMask);
  __ bind(&lhs_not_nan_exp_mask_is_loaded);
  __ and_(t5, rhs_exponent, exp_mask_reg);

  __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));

  __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));

  __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));

  __ bind(&one_is_nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.
  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }
  __ Ret();

  __ bind(&neither_is_nan);
}
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
  // f12 and f14 have the two doubles. Neither is a NaN.
  // Call a native function to do a comparison between two non-NaNs.
  // Call C routine that may not cause GC or other trouble.
  // We call and return manually because we need argument slots to be freed.

  Label return_result_not_equal, return_result_equal;
  if (cc == eq) {
    // Doubles are not equal unless they have the same bit pattern.
    // Exception: 0 and -0.
    bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);
      // Lhs and rhs are already loaded to f12 and f14 register pairs.
      __ Move(t0, t1, f14);
      __ Move(t2, t3, f12);
    } else {
      // Lhs and rhs are already loaded to GP registers.
      __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
      __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
      __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
      __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
    }
    Register rhs_exponent = exp_first ? t0 : t1;
    Register lhs_exponent = exp_first ? t2 : t3;
    Register rhs_mantissa = exp_first ? t1 : t0;
    Register lhs_mantissa = exp_first ? t3 : t2;

    __ xor_(v0, rhs_mantissa, lhs_mantissa);
    __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));

    __ subu(v0, rhs_exponent, lhs_exponent);
    __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
    // 0 and -0 compare equal even though their bit patterns differ.
    __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
    __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
    __ or_(t4, rhs_exponent, lhs_exponent);
    __ or_(t4, t4, rhs_mantissa);

    __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));

    __ bind(&return_result_equal);
    __ li(v0, Operand(EQUAL));
    __ Ret();
  }

  __ bind(&return_result_not_equal);

  if (!CpuFeatures::IsSupported(FPU)) {
    __ push(ra);
    __ PrepareCallCFunction(0, 2, t4);
    if (!IsMipsSoftFloatABI) {
      // The parameters for the runtime call are prepared in the a0-a3
      // registers, but the C function we are calling is compiled with the
      // hard-float flag and expects the hard-float ABI (parameters in the
      // f12/f14 registers), so we need to copy the parameters from a0-a3
      // to the f12/f14 register pairs.
      __ Move(f12, a0, a1);
      __ Move(f14, a2, a3);
    }

    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
                     0, 2);
    __ pop(ra);  // Because this function returns int, result is in v0.
    __ Ret();
  } else {
    CpuFeatures::Scope scope(FPU);
    Label equal, less_than;
    __ BranchF(&equal, NULL, eq, f12, f14);
    __ BranchF(&less_than, NULL, lt, f12, f14);

    // Not equal, not less, not NaN, must be greater.
    __ li(v0, Operand(GREATER));
    __ Ret();

    __ bind(&equal);
    __ li(v0, Operand(EQUAL));
    __ Ret();

    __ bind(&less_than);
    __ li(v0, Operand(LESS));
    __ Ret();
  }
}
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
  Label first_non_object;
  // Get the type of the first operand into a2 and compare it with
  // FIRST_SPEC_OBJECT_TYPE.
  __ GetObjectType(lhs, a2, a2);
  __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Return non-zero.
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ li(v0, Operand(1));
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

  __ GetObjectType(rhs, a3, a3);
  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

  // Now that we have the types we might as well check for symbol-symbol.
  // Ensure that no non-strings have the symbol bit set.
  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
  STATIC_ASSERT(kSymbolTag != 0);
  __ And(t2, a2, Operand(a3));
  __ And(t0, t2, Operand(kIsSymbolMask));
  __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
}
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that case.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  } else {
    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
    if (rhs.is(a0)) {
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    } else {
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
    }
  }
  __ jmp(both_loaded_as_doubles);
}
// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
                                         Register lhs,
                                         Register rhs,
                                         Label* possible_strings,
                                         Label* not_both_strings) {
  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  // a2 is object type of lhs.
  // Ensure that no non-strings have the symbol bit set.
  Label object_test;
  STATIC_ASSERT(kSymbolTag != 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsSymbolMask));
  __ Branch(possible_strings, eq, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsSymbolMask));
  __ Branch(possible_strings, eq, at, Operand(zero_reg));

  // Both are symbols. We already checked they weren't the same pointer
  // so they are not equal.
  __ li(v0, Operand(1));  // Non-zero indicates not equal.
  __ Ret();

  __ bind(&object_test);
  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ GetObjectType(rhs, a2, a3);
  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ and_(a0, a2, a3);
  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
  __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
  __ Ret();
}
void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                         Register object,
                                                         Register result,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Register scratch3,
                                                         bool object_is_smi,
                                                         Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
  __ sra(mask, mask, kSmiTagSize + 1);
  __ Addu(mask, mask, -1);  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
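  // Worked example (illustrative only): for the smi 42 the hash is simply
  // 42, while for the double 2.5 (bit pattern 0x40040000 00000000) the
  // hash is 0x40040000 ^ 0x00000000 = 0x40040000. Either hash is then
  // masked with (cache length / 2) - 1 to select a cache entry.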
  Isolate* isolate = masm->isolate();
  Label is_smi;
  Label load_result_from_cache;
  if (!object_is_smi) {
    __ JumpIfSmi(object, &is_smi);
    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);
      __ CheckMap(object,
                  scratch1,
                  Heap::kHeapNumberMapRootIndex,
                  not_found,
                  DONT_DO_SMI_CHECK);

      STATIC_ASSERT(8 == kDoubleSize);
      __ Addu(scratch1,
              object,
              Operand(HeapNumber::kValueOffset - kHeapObjectTag));
      __ lw(scratch2, MemOperand(scratch1, kPointerSize));
      __ lw(scratch1, MemOperand(scratch1, 0));
      __ Xor(scratch1, scratch1, Operand(scratch2));
      __ And(scratch1, scratch1, Operand(mask));

      // Calculate address of entry in string cache: each entry consists
      // of two pointer sized fields.
      __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
      __ Addu(scratch1, number_string_cache, scratch1);

      Register probe = mask;
      __ lw(probe,
            FieldMemOperand(scratch1, FixedArray::kHeaderSize));
      __ JumpIfSmi(probe, not_found);
      __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
      __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
      __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
      __ Branch(not_found);
    } else {
      // Note that there is no cache check for non-FPU case, even though
      // it seems there could be. May be a tiny optimization for non-FPU
      // cases.
      __ Branch(not_found);
    }
  }

  __ bind(&is_smi);
  Register scratch = scratch1;
  __ sra(scratch, object, 1);  // Shift away the tag.
  __ And(scratch, mask, Operand(scratch));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  __ sll(scratch, scratch, kPointerSizeLog2 + 1);
  __ Addu(scratch, number_string_cache, scratch);

  // Check if the entry is the smi we are looking for.
  Register probe = mask;
  __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  __ Branch(not_found, ne, object, Operand(probe));

  // Get the result from the cache.
  __ bind(&load_result_from_cache);
  __ lw(result,
        FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
  __ IncrementCounter(isolate->counters()->number_to_string_native(),
                      1,
                      scratch1,
                      scratch2);
}
void NumberToStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  __ lw(a1, MemOperand(sp, 0));

  // Generate code to lookup number in the number string cache.
  GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
  __ Addu(sp, sp, Operand(1 * kPointerSize));
  __ Ret();

  __ bind(&runtime);
  // Handle number to string in the runtime system if not found in the cache.
  __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
}
// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
// On exit, v0 is 0, positive, or negative (smi) to indicate the result
// of the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;

  if (include_smi_compare_) {
    Label not_two_smis, smi_done;
    __ Or(a2, a1, a0);
    __ JumpIfNotSmi(a2, &not_two_smis);
    __ sra(a1, a1, 1);
    __ sra(a0, a0, 1);
    __ Subu(v0, a1, a0);
    __ Ret();
    __ bind(&not_two_smis);
  } else if (FLAG_debug_code) {
    __ Or(a2, a1, a0);
    __ And(a2, a2, kSmiTagMask);
    __ Assert(ne, "CompareStub: unexpected smi operands.",
              a2, Operand(zero_reg));
  }

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // This is optimized for reading the code and not benchmarked for
  // speed or amount of instructions. The code is not ordered for speed
  // or anything like this.
  Label miss, user_compare;

  // No global compare if both operands are SMIs.
  __ And(a2, a1, Operand(a0));
  __ JumpIfSmi(a2, &miss);

  // We need to check if lhs and rhs are both objects, if not we are
  // jumping out of the function. We will keep the 'map' in t0 (lhs) and
  // t1 (rhs) for later usage.
  __ GetObjectType(a0, t0, a3);
  __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));

  __ GetObjectType(a1, t1, a3);
  __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));

  // Check if the UseUserComparison flag is set by using the map of t0 for lhs.
  __ lbu(t0, FieldMemOperand(t0, Map::kBitField2Offset));
  __ And(t0, t0, Operand(1 << Map::kUseUserObjectComparison));
  __ Branch(&user_compare, eq, t0, Operand(1 << Map::kUseUserObjectComparison));

  // Check if the UseUserComparison flag is _not_ set by using the map of t1
  // for rhs and then jump to the miss label.
  __ lbu(t1, FieldMemOperand(t1, Map::kBitField2Offset));
  __ And(t1, t1, Operand(1 << Map::kUseUserObjectComparison));
  __ Branch(&miss, ne, t1, Operand(1 << Map::kUseUserObjectComparison));

  // Invoke the runtime function here.
  __ bind(&user_compare);
  __ Push(a0, a1);
  __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);

  // We exit here without doing anything.
  __ bind(&miss);

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(0, Smi::FromInt(0));
  __ And(t2, lhs_, Operand(rhs_));
  __ JumpIfNotSmi(t2, &not_smis, t0);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to rhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison and the numbers have been loaded into f12 and f14 as doubles,
  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
  EmitSmiNonsmiComparison(masm, lhs_, rhs_,
                          &both_loaded_as_doubles, &slow, strict_);

  __ bind(&both_loaded_as_doubles);
  // f12, f14 are the double representations of the left hand side
  // and the right hand side if we have FPU. Otherwise a2, a3 represent
  // left hand side and a0, a1 represent right hand side.

  Isolate* isolate = masm->isolate();
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    Label nan;
    __ li(t0, Operand(LESS));
    __ li(t1, Operand(GREATER));
    __ li(t2, Operand(EQUAL));

    // Check if either rhs or lhs is NaN.
    __ BranchF(NULL, &nan, eq, f12, f14);

    // Check if LESS condition is satisfied. If true, move conditionally
    // result to v0.
    __ c(OLT, D, f12, f14);
    __ Movt(v0, t0);
    // Use previous check to store conditionally to v0 opposite condition
    // (GREATER). If rhs is equal to lhs, this will be corrected in next
    // check.
    __ Movf(v0, t1);
    // Check if EQUAL condition is satisfied. If true, move conditionally
    // result to v0.
    __ c(EQ, D, f12, f14);
    __ Movt(v0, t2);

    __ Ret();

    __ bind(&nan);
    // NaN comparisons always fail.
    // Load whatever we need in v0 to make the comparison fail.
    if (cc_ == lt || cc_ == le) {
      __ li(v0, Operand(GREATER));
    } else {
      __ li(v0, Operand(LESS));
    }
    __ Ret();
  } else {
    // Checks for NaN in the doubles we have loaded. Can return the answer or
    // fall through if neither is a NaN. Also binds rhs_not_nan.
    EmitNanCheck(masm, cc_);

    // Compares two doubles that are not NaNs. Returns the answer.
    // Never falls through.
    EmitTwoNonNanDoubleComparison(masm, cc_);
  }

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
  if (strict_) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
  }

  Label check_for_symbols;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to check_for_symbols.
  // In this case a2 will contain the type of lhs_.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs_,
                             rhs_,
                             &both_loaded_as_doubles,
                             &check_for_symbols,
                             &flat_string_check);

  __ bind(&check_for_symbols);
  if (cc_ == eq && !strict_) {
    // Returns an answer for two symbols or two detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that a2 is the type of lhs_ on entry.
    EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
  }

  // Check for both being sequential ASCII strings, and inline if that is the
  // case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);

  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
  if (cc_ == eq) {
    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                     lhs_,
                                                     rhs_,
                                                     a2,
                                                     a3,
                                                     t0);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
                                                       lhs_,
                                                       rhs_,
                                                       a2,
                                                       a3,
                                                       t0,
                                                       t1);
  }
  // Never falls through to here.

  __ bind(&slow);
  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
  // then a1 (rhs).
  __ Push(lhs_, rhs_);
  // Figure out which native to call and set up the arguments.
  Builtins::JavaScript native;
  if (cc_ == eq) {
    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    native = Builtins::COMPARE;
    int ncr;  // NaN compare result.
    if (cc_ == lt || cc_ == le) {
      ncr = GREATER;
    } else {
      ASSERT(cc_ == gt || cc_ == ge);  // Remaining cases.
      ncr = LESS;
    }
    __ li(a0, Operand(Smi::FromInt(ncr)));
    __ push(a0);
  }

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_FUNCTION);
}
1845 // The stub expects its argument in the tos_ register and returns its result in
1846 // it, too: zero for false, and a non-zero value for true.
1847 void ToBooleanStub::Generate(MacroAssembler* masm) {
1848 // This stub uses FPU instructions.
1849 CpuFeatures::Scope scope(FPU);
1852 const Register map = t5.is(tos_) ? t3 : t5;
1854 // undefined -> false.
1855 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
1857 // Boolean -> its value.
1858 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1859 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
1862 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
1864 if (types_.Contains(SMI)) {
1865 // Smis: 0 -> false, all other -> true
1866 __ And(at, tos_, kSmiTagMask);
1867 // tos_ contains the correct return value already
1868 __ Ret(eq, at, Operand(zero_reg));
1869 } else if (types_.NeedsMap()) {
1870 // If we need a map later and have a Smi -> patch.
1871 __ JumpIfSmi(tos_, &patch);
1874 if (types_.NeedsMap()) {
1875 __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
1877 if (types_.CanBeUndetectable()) {
1878 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1879 __ And(at, at, Operand(1 << Map::kIsUndetectable));
1880 // Undetectable -> false.
1881 __ movn(tos_, zero_reg, at);
1882 __ Ret(ne, at, Operand(zero_reg));
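// Sketch of the movn/Ret pair above (assuming 'at' holds the masked
// undetectable bit): if the bit is set, clear tos_ and return false,
// otherwise fall through to the remaining checks:
//   if (bit_field & (1 << Map::kIsUndetectable)) { tos_ = 0; return; }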
1886 if (types_.Contains(SPEC_OBJECT)) {
1887 // Spec object -> true.
1888 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1889 // tos_ contains the correct non-zero return value already.
1890 __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1893 if (types_.Contains(STRING)) {
1894 // String value -> false iff empty.
1895 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1897 __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
1898 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1899 __ Ret(); // the string length is OK as the return value
1903 if (types_.Contains(HEAP_NUMBER)) {
1904 // Heap number -> false iff +0, -0, or NaN.
1905 Label not_heap_number;
1906 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1907 __ Branch(&not_heap_number, ne, map, Operand(at));
1908 Label zero_or_nan, number;
1909 __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1910 __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
1911 // "tos_" is a register, and contains a non zero value by default.
1912 // Hence we only need to overwrite "tos_" with zero to return false for
1913 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1914 __ bind(&zero_or_nan);
1915 __ mov(tos_, zero_reg);
1918 __ bind(&not_heap_number);
1922 GenerateTypeTransition(masm);
1926 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
1928 Heap::RootListIndex value,
1930 if (types_.Contains(type)) {
1931 // If we see an expected oddball, return its ToBoolean value tos_.
1932 __ LoadRoot(at, value);
1933 __ Subu(at, at, tos_); // This is a check for equality for the movz below.
1934 // The value of a root is never NULL, so we can avoid loading a non-null
1935 // value into tos_ when we want to return 'true'.
1937 __ movz(tos_, zero_reg, at);
1939 __ Ret(eq, at, Operand(zero_reg));
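// Rough sketch of the sequence above ('at' now holds root_value - tos_); the
// guard that makes the movz depend on the oddball's configured ToBoolean
// result is elided in this listing:
//   if (at == 0) {                          // tos_ was the expected oddball
//     /* when the answer is false */ tos_ = 0;   // movz
//     return;                                    // Ret(eq, at, zero_reg)
//   }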
1944 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
1946 __ li(a2, Operand(Smi::FromInt(tos_.code())));
1947 __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
1948 __ Push(a3, a2, a1);
1949 // Patch the caller to an appropriate specialized stub and return the
1950 // operation result to the caller of the stub.
1951 __ TailCallExternalReference(
1952 ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
1958 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1959 // We don't allow a GC during a store buffer overflow so there is no need to
1960 // store the registers in any particular way, but we do have to store and
1962 __ MultiPush(kJSCallerSaved | ra.bit());
1963 if (save_doubles_ == kSaveFPRegs) {
1964 CpuFeatures::Scope scope(FPU);
1965 __ MultiPushFPU(kCallerSavedFPU);
1967 const int argument_count = 1;
1968 const int fp_argument_count = 0;
1969 const Register scratch = a1;
1971 AllowExternalCallThatCantCauseGC scope(masm);
1972 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1973 __ li(a0, Operand(ExternalReference::isolate_address()));
1975 ExternalReference::store_buffer_overflow_function(masm->isolate()),
1977 if (save_doubles_ == kSaveFPRegs) {
1978 CpuFeatures::Scope scope(FPU);
1979 __ MultiPopFPU(kCallerSavedFPU);
1982 __ MultiPop(kJSCallerSaved | ra.bit());
1987 void UnaryOpStub::PrintName(StringStream* stream) {
1988 const char* op_name = Token::Name(op_);
1989 const char* overwrite_name = NULL; // Make g++ happy.
1991 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
1992 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
1994 stream->Add("UnaryOpStub_%s_%s_%s",
1997 UnaryOpIC::GetName(operand_type_));
2001 // TODO(svenpanne): Use virtual functions instead of switch.
2002 void UnaryOpStub::Generate(MacroAssembler* masm) {
2003 switch (operand_type_) {
2004 case UnaryOpIC::UNINITIALIZED:
2005 GenerateTypeTransition(masm);
2007 case UnaryOpIC::SMI:
2008 GenerateSmiStub(masm);
2010 case UnaryOpIC::HEAP_NUMBER:
2011 GenerateHeapNumberStub(masm);
2013 case UnaryOpIC::GENERIC:
2014 GenerateGenericStub(masm);
2020 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2021 // Argument is in a0 and v0 at this point, so we can overwrite a0.
2022 __ li(a2, Operand(Smi::FromInt(op_)));
2023 __ li(a1, Operand(Smi::FromInt(mode_)));
2024 __ li(a0, Operand(Smi::FromInt(operand_type_)));
2025 __ Push(v0, a2, a1, a0);
2027 __ TailCallExternalReference(
2028 ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
2032 // TODO(svenpanne): Use virtual functions instead of switch.
2033 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2036 GenerateSmiStubSub(masm);
2038 case Token::BIT_NOT:
2039 GenerateSmiStubBitNot(masm);
2047 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
2048 Label non_smi, slow;
2049 GenerateSmiCodeSub(masm, &non_smi, &slow);
2052 GenerateTypeTransition(masm);
2056 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
2058 GenerateSmiCodeBitNot(masm, &non_smi);
2060 GenerateTypeTransition(masm);
2064 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2067 __ JumpIfNotSmi(a0, non_smi);
2069 // The result of negating zero or the smallest negative smi is not a smi.
2070 __ And(t0, a0, ~0x80000000);
2071 __ Branch(slow, eq, t0, Operand(zero_reg));
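// Illustrative sketch: for a tagged smi a0 == 2*x, the And above clears only
// bit 31, so t0 == 0 exactly when a0 is 0 (smi zero) or 0x80000000 (the
// tagged form of the smallest smi), the two inputs whose negation is not a
// smi:
//   bool negation_overflows(uint32_t tagged) {
//     return (tagged & ~0x80000000u) == 0;
//   }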
2073 // Return '0 - value'.
2074 __ Subu(v0, zero_reg, a0);
2079 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2081 __ JumpIfNotSmi(a0, non_smi);
2083 // Flip bits and revert inverted smi-tag.
2085 __ And(v0, v0, ~kSmiTagMask);
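// Sketch of why operating on the tagged value works here (assuming the
// elided instruction flips all bits of a0 into v0, as the comment says, and
// that tagged == 2*x with kSmiTagMask == 1):
//   ~(2*x) == -2*x - 1                        // bit-not sets the tag bit
//   (~(2*x)) & ~1 == -2*x - 2 == 2 * (~x)     // clearing it re-tags ~x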
2090 // TODO(svenpanne): Use virtual functions instead of switch.
2091 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2094 GenerateHeapNumberStubSub(masm);
2096 case Token::BIT_NOT:
2097 GenerateHeapNumberStubBitNot(masm);
2105 void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
2106 Label non_smi, slow, call_builtin;
2107 GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
2109 GenerateHeapNumberCodeSub(masm, &slow);
2111 GenerateTypeTransition(masm);
2112 __ bind(&call_builtin);
2113 GenerateGenericCodeFallback(masm);
2117 void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
2118 Label non_smi, slow;
2119 GenerateSmiCodeBitNot(masm, &non_smi);
2121 GenerateHeapNumberCodeBitNot(masm, &slow);
2123 GenerateTypeTransition(masm);
2127 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
2129 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2130 // a0 is a heap number. Get a new heap number in a1.
2131 if (mode_ == UNARY_OVERWRITE) {
2132 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2133 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2134 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2136 Label slow_allocate_heapnumber, heapnumber_allocated;
2137 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
2138 __ jmp(&heapnumber_allocated);
2140 __ bind(&slow_allocate_heapnumber);
2142 FrameScope scope(masm, StackFrame::INTERNAL);
2144 __ CallRuntime(Runtime::kNumberAlloc, 0);
2149 __ bind(&heapnumber_allocated);
2150 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
2151 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2152 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
2153 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2154 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
2161 void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2162 MacroAssembler* masm,
2166 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2167 // Convert the heap number in a0 to an untagged integer in a1.
2168 __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
2170 // Do the bitwise operation and check if the result fits in a smi.
2173 __ Addu(a2, a1, Operand(0x40000000));
2174 __ Branch(&try_float, lt, a2, Operand(zero_reg));
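// Illustrative sketch of the smi-range check above: a 32-bit value v fits in
// a smi iff -2^30 <= v < 2^30, i.e. iff v + 0x40000000 keeps its sign bit
// clear:
//   bool fits_in_smi(int32_t v) {
//     return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
//   }
// The same Addu/Branch idiom is reused in the binary-op stubs below.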
2176 // Tag the result as a smi and we're done.
2180 // Try to store the result in a heap number.
2181 __ bind(&try_float);
2182 if (mode_ == UNARY_NO_OVERWRITE) {
2183 Label slow_allocate_heapnumber, heapnumber_allocated;
2184 // Allocate a new heap number without zapping v0, which we need if it fails.
2185 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
2186 __ jmp(&heapnumber_allocated);
2188 __ bind(&slow_allocate_heapnumber);
2190 FrameScope scope(masm, StackFrame::INTERNAL);
2191 __ push(v0); // Push the heap number, not the untagged int32.
2192 __ CallRuntime(Runtime::kNumberAlloc, 0);
2193 __ mov(a2, v0); // Move the new heap number into a2.
2194 // Get the heap number into v0, now that the new heap number is in a2.
2198 // Convert the heap number in v0 to an untagged integer in a1.
2199 // This can't go slow-case because it's the same number we already
2200 // converted once before.
2201 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2202 // Negate the result.
2205 __ bind(&heapnumber_allocated);
2206 __ mov(v0, a2); // Move newly allocated heap number to v0.
2209 if (CpuFeatures::IsSupported(FPU)) {
2210 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
2211 CpuFeatures::Scope scope(FPU);
2214 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2217 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2218 // have to set up a frame.
2219 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2220 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2223 __ bind(&impossible);
2224 if (FLAG_debug_code) {
2225 __ stop("Incorrect assumption in bit-not stub");
2230 // TODO(svenpanne): Use virtual functions instead of switch.
2231 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2234 GenerateGenericStubSub(masm);
2236 case Token::BIT_NOT:
2237 GenerateGenericStubBitNot(masm);
2245 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2246 Label non_smi, slow;
2247 GenerateSmiCodeSub(masm, &non_smi, &slow);
2249 GenerateHeapNumberCodeSub(masm, &slow);
2251 GenerateGenericCodeFallback(masm);
2255 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2256 Label non_smi, slow;
2257 GenerateSmiCodeBitNot(masm, &non_smi);
2259 GenerateHeapNumberCodeBitNot(masm, &slow);
2261 GenerateGenericCodeFallback(masm);
2265 void UnaryOpStub::GenerateGenericCodeFallback(
2266 MacroAssembler* masm) {
2267 // Handle the slow case by jumping to the JavaScript builtin.
2271 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2273 case Token::BIT_NOT:
2274 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2282 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2287 __ li(a2, Operand(Smi::FromInt(MinorKey())));
2288 __ li(a1, Operand(Smi::FromInt(op_)));
2289 __ li(a0, Operand(Smi::FromInt(operands_type_)));
2290 __ Push(a2, a1, a0);
2292 __ TailCallExternalReference(
2293 ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2300 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2301 MacroAssembler* masm) {
2306 void BinaryOpStub::Generate(MacroAssembler* masm) {
2307 // Explicitly allow generation of nested stubs. It is safe here because
2308 // generation code does not use any raw pointers.
2309 AllowStubCallsScope allow_stub_calls(masm, true);
2310 switch (operands_type_) {
2311 case BinaryOpIC::UNINITIALIZED:
2312 GenerateTypeTransition(masm);
2314 case BinaryOpIC::SMI:
2315 GenerateSmiStub(masm);
2317 case BinaryOpIC::INT32:
2318 GenerateInt32Stub(masm);
2320 case BinaryOpIC::HEAP_NUMBER:
2321 GenerateHeapNumberStub(masm);
2323 case BinaryOpIC::ODDBALL:
2324 GenerateOddballStub(masm);
2326 case BinaryOpIC::BOTH_STRING:
2327 GenerateBothStringStub(masm);
2329 case BinaryOpIC::STRING:
2330 GenerateStringStub(masm);
2332 case BinaryOpIC::GENERIC:
2333 GenerateGeneric(masm);
2341 void BinaryOpStub::PrintName(StringStream* stream) {
2342 const char* op_name = Token::Name(op_);
2343 const char* overwrite_name;
2345 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2346 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2347 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2348 default: overwrite_name = "UnknownOverwrite"; break;
2350 stream->Add("BinaryOpStub_%s_%s_%s",
2353 BinaryOpIC::GetName(operands_type_));
2358 void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2360 Register right = a0;
2362 Register scratch1 = t0;
2363 Register scratch2 = t1;
2365 ASSERT(right.is(a0));
2366 STATIC_ASSERT(kSmiTag == 0);
2368 Label not_smi_result;
2371 __ AdduAndCheckForOverflow(v0, left, right, scratch1);
2372 __ RetOnNoOverflow(scratch1);
2373 // No need to revert anything - right and left are intact.
2376 __ SubuAndCheckForOverflow(v0, left, right, scratch1);
2377 __ RetOnNoOverflow(scratch1);
2378 // No need to revert anything - right and left are intact.
2381 // Remove tag from one of the operands. This way the multiplication result
2382 // will be a smi if it fits the smi range.
2383 __ SmiUntag(scratch1, right);
2384 // Do multiplication.
2385 // lo = lower 32 bits of scratch1 * left.
2386 // hi = higher 32 bits of scratch1 * left.
2387 __ Mult(left, scratch1);
2388 // Check for overflowing the smi range - no overflow if higher 33 bits of
2389 // the result are identical.
2392 __ sra(scratch1, scratch1, 31);
2393 __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
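// Illustrative sketch: the 64-bit product hi:lo fits in 32 bits exactly when
// hi equals the sign extension of lo, which is what the sra/Branch pair
// above checks:
//   bool product_fits_in_32_bits(int32_t hi, int32_t lo) {
//     return hi == (lo >> 31);   // arithmetic shift
//   }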
2394 // Go slow on zero result to handle -0.
2396 __ Ret(ne, v0, Operand(zero_reg));
2397 // We need -0 if we were multiplying a negative number by 0 to get 0.
2398 // We know one of them was zero.
2399 __ Addu(scratch2, right, left);
2401 // ARM uses the 'pl' condition, which is 'ge'.
2402 // Negating it results in 'lt'.
2403 __ Branch(&skip, lt, scratch2, Operand(zero_reg));
2404 ASSERT(Smi::FromInt(0) == 0);
2405 __ mov(v0, zero_reg);
2406 __ Ret(); // Return smi 0 if the non-zero one was positive.
2408 // We fall through here if we multiplied a negative number by 0, because
2409 // that would mean we should produce -0.
2414 __ SmiUntag(scratch2, right);
2415 __ SmiUntag(scratch1, left);
2416 __ Div(scratch1, scratch2);
2417 // A minor optimization: div may be calculated asynchronously, so we check
2418 // for division by zero before getting the result.
2419 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2420 // If the result is 0, we need to make sure the divisor (right) is
2421 // positive, otherwise it is a -0 case.
2422 // Quotient is in 'lo', remainder is in 'hi'.
2423 // Check for no remainder first.
2425 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2427 __ Branch(&done, ne, scratch1, Operand(zero_reg));
2428 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2430 // Check that the signed result fits in a Smi.
2431 __ Addu(scratch2, scratch1, Operand(0x40000000));
2432 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2433 __ SmiTag(v0, scratch1);
2439 __ SmiUntag(scratch2, right);
2440 __ SmiUntag(scratch1, left);
2441 __ Div(scratch1, scratch2);
2442 // A minor optimization: div may be calculated asynchronously, so we check
2443 // for division by 0 before calling mfhi.
2444 // Check for zero on the right hand side.
2445 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2446 // If the result is 0, we need to make sure the dividend (left) is
2447 // positive (or 0), otherwise it is a -0 case.
2448 // Remainder is in 'hi'.
2450 __ Branch(&done, ne, scratch2, Operand(zero_reg));
2451 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2453 // Check that the signed result fits in a Smi.
2454 __ Addu(scratch1, scratch2, Operand(0x40000000));
2455 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2456 __ SmiTag(v0, scratch2);
2461 __ Or(v0, left, Operand(right));
2464 case Token::BIT_AND:
2465 __ And(v0, left, Operand(right));
2468 case Token::BIT_XOR:
2469 __ Xor(v0, left, Operand(right));
2473 // Remove tags from right operand.
2474 __ GetLeastBitsFromSmi(scratch1, right, 5);
2475 __ srav(scratch1, left, scratch1);
2477 __ And(v0, scratch1, Operand(~kSmiTagMask));
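// Sketch of the SAR-on-tagged-value trick (assuming tagged == 2*x and
// kSmiTagMask == 1): shifting the tagged value arithmetically and then
// clearing the tag bit yields the correctly tagged result 2*(x >> shift):
//   int32_t sar_smi(int32_t tagged, int shift) {
//     return (tagged >> shift) & ~1;
//   }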
2481 // Remove tags from operands. We can't do this on a 31 bit number
2482 // because then the 0s get shifted into bit 30 instead of bit 31.
2483 __ SmiUntag(scratch1, left);
2484 __ GetLeastBitsFromSmi(scratch2, right, 5);
2485 __ srlv(v0, scratch1, scratch2);
2486 // Unsigned shift is not allowed to produce a negative number, so
2487 // check the sign bit and the sign bit after Smi tagging.
2488 __ And(scratch1, v0, Operand(0xc0000000));
2489 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
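// Illustrative sketch: the unsigned shift result is a valid smi only if it
// is non-negative both before and after tagging, i.e. bits 31 and 30 are
// both clear:
//   bool shr_result_is_smi(uint32_t v) { return (v & 0xc0000000u) == 0; }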
2495 // Remove tags from operands.
2496 __ SmiUntag(scratch1, left);
2497 __ GetLeastBitsFromSmi(scratch2, right, 5);
2498 __ sllv(scratch1, scratch1, scratch2);
2499 // Check that the signed result fits in a Smi.
2500 __ Addu(scratch2, scratch1, Operand(0x40000000));
2501 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2502 __ SmiTag(v0, scratch1);
2508 __ bind(&not_smi_result);
2512 void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2515 Label* gc_required) {
2517 Register right = a0;
2518 Register scratch1 = t3;
2519 Register scratch2 = t5;
2520 Register scratch3 = t0;
2522 ASSERT(smi_operands || (not_numbers != NULL));
2523 if (smi_operands && FLAG_debug_code) {
2524 __ AbortIfNotSmi(left);
2525 __ AbortIfNotSmi(right);
2528 Register heap_number_map = t2;
2529 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2537 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
2538 // depending on whether FPU is available or not.
2539 FloatingPointHelper::Destination destination =
2540 CpuFeatures::IsSupported(FPU) &&
2542 FloatingPointHelper::kFPURegisters :
2543 FloatingPointHelper::kCoreRegisters;
2545 // Allocate new heap number for result.
2546 Register result = s0;
2547 GenerateHeapResultAllocation(
2548 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2550 // Load the operands.
2552 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2554 FloatingPointHelper::LoadOperands(masm,
2562 // Calculate the result.
2563 if (destination == FloatingPointHelper::kFPURegisters) {
2564 // Using FPU registers:
2566 // f14: Right value.
2567 CpuFeatures::Scope scope(FPU);
2570 __ add_d(f10, f12, f14);
2573 __ sub_d(f10, f12, f14);
2576 __ mul_d(f10, f12, f14);
2579 __ div_d(f10, f12, f14);
2585 // ARM uses a workaround here because of the unaligned HeapNumber
2586 // kValueOffset. On MIPS this workaround is built into sdc1 so
2587 // there's no point in generating even more instructions.
2588 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
2592 // Call the C function to handle the double operation.
2593 FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2597 if (FLAG_debug_code) {
2598 __ stop("Unreachable code.");
2604 case Token::BIT_XOR:
2605 case Token::BIT_AND:
2610 __ SmiUntag(a3, left);
2611 __ SmiUntag(a2, right);
2613 // Convert operands to 32-bit integers. Right in a2 and left in a3.
2614 FloatingPointHelper::ConvertNumberToInt32(masm,
2623 FloatingPointHelper::ConvertNumberToInt32(masm,
2633 Label result_not_a_smi;
2636 __ Or(a2, a3, Operand(a2));
2638 case Token::BIT_XOR:
2639 __ Xor(a2, a3, Operand(a2));
2641 case Token::BIT_AND:
2642 __ And(a2, a3, Operand(a2));
2645 // Use only the 5 least significant bits of the shift count.
2646 __ GetLeastBitsFromInt32(a2, a2, 5);
2647 __ srav(a2, a3, a2);
2650 // Use only the 5 least significant bits of the shift count.
2651 __ GetLeastBitsFromInt32(a2, a2, 5);
2652 __ srlv(a2, a3, a2);
2653 // SHR is special because it is required to produce a positive answer.
2654 // The code below for writing into heap numbers isn't capable of
2655 // writing the register as an unsigned int, so we go to the slow case if we
2657 if (CpuFeatures::IsSupported(FPU)) {
2658 __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
2660 __ Branch(not_numbers, lt, a2, Operand(zero_reg));
2664 // Use only the 5 least significant bits of the shift count.
2665 __ GetLeastBitsFromInt32(a2, a2, 5);
2666 __ sllv(a2, a3, a2);
2671 // Check that the *signed* result fits in a smi.
2672 __ Addu(a3, a2, Operand(0x40000000));
2673 __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
2677 // Allocate new heap number for result.
2678 __ bind(&result_not_a_smi);
2679 Register result = t1;
2681 __ AllocateHeapNumber(
2682 result, scratch1, scratch2, heap_number_map, gc_required);
2684 GenerateHeapResultAllocation(
2685 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2688 // a2: Answer as signed int32.
2689 // t1: Heap number to write answer into.
2691 // Nothing can go wrong now, so move the heap number to v0, which is the
2695 if (CpuFeatures::IsSupported(FPU)) {
2696 // Convert the int32 in a2 to the heap number in a0. As
2697 // mentioned above SHR needs to always produce a positive result.
2698 CpuFeatures::Scope scope(FPU);
2700 if (op_ == Token::SHR) {
2701 __ Cvt_d_uw(f0, f0, f22);
2705 // ARM uses a workaround here because of the unaligned HeapNumber
2706 // kValueOffset. On MIPS this workaround is built into sdc1 so
2707 // there's no point in generating even more instructions.
2708 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2711 // Tail call that writes the int32 in a2 to the heap number in v0, using
2712 // a3 and a0 as scratch. v0 is preserved and returned.
2713 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
2714 __ TailCallStub(&stub);
2724 // Generate the smi code. If the operation on smis is successful, this return is
2725 // generated. If the result is not a smi and heap number allocation is not
2726 // requested, the code falls through. If number allocation is requested but a
2727 // heap number cannot be allocated, the code jumps to the label gc_required.
2728 void BinaryOpStub::GenerateSmiCode(
2729 MacroAssembler* masm,
2732 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2736 Register right = a0;
2737 Register scratch1 = t3;
2738 Register scratch2 = t5;
2740 // Perform combined smi check on both operands.
2741 __ Or(scratch1, left, Operand(right));
2742 STATIC_ASSERT(kSmiTag == 0);
2743 __ JumpIfNotSmi(scratch1, &not_smis);
2745 // If the smi-smi operation results in a smi, a return is generated.
2746 GenerateSmiSmiOperation(masm);
2748 // If heap number results are possible generate the result in an allocated
2750 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2751 GenerateFPOperation(masm, true, use_runtime, gc_required);
2757 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2758 Label not_smis, call_runtime;
2760 if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2761 result_type_ == BinaryOpIC::SMI) {
2762 // Only allow smi results.
2763 GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2765 // Allow heap number result and don't make a transition if a heap number
2766 // cannot be allocated.
2767 GenerateSmiCode(masm,
2770 ALLOW_HEAPNUMBER_RESULTS);
2773 // Code falls through if the result is not returned as either a smi or heap
2775 GenerateTypeTransition(masm);
2777 __ bind(&call_runtime);
2778 GenerateCallRuntime(masm);
2782 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2783 ASSERT(operands_type_ == BinaryOpIC::STRING);
2784 // Try to add arguments as strings, otherwise, transition to the generic
2786 GenerateAddStrings(masm);
2787 GenerateTypeTransition(masm);
2791 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2793 ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2794 ASSERT(op_ == Token::ADD);
2795 // If both arguments are strings, call the string add stub.
2796 // Otherwise, do a transition.
2798 // Registers containing left and right operands respectively.
2800 Register right = a0;
2802 // Test if left operand is a string.
2803 __ JumpIfSmi(left, &call_runtime);
2804 __ GetObjectType(left, a2, a2);
2805 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2807 // Test if right operand is a string.
2808 __ JumpIfSmi(right, &call_runtime);
2809 __ GetObjectType(right, a2, a2);
2810 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2812 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2813 GenerateRegisterArgsPush(masm);
2814 __ TailCallStub(&string_add_stub);
2816 __ bind(&call_runtime);
2817 GenerateTypeTransition(masm);
2821 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2822 ASSERT(operands_type_ == BinaryOpIC::INT32);
2825 Register right = a0;
2826 Register scratch1 = t3;
2827 Register scratch2 = t5;
2828 FPURegister double_scratch = f0;
2829 FPURegister single_scratch = f6;
2831 Register heap_number_result = no_reg;
2832 Register heap_number_map = t2;
2833 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2836 // Labels for type transition, used for wrong input or output types.
2837 // Both labels are currently bound to the same position. We use two
2838 // different labels to differentiate the cause leading to the type transition.
2841 // Smi-smi fast case.
2843 __ Or(scratch1, left, right);
2844 __ JumpIfNotSmi(scratch1, &skip);
2845 GenerateSmiSmiOperation(masm);
2846 // Fall through if the result is not a smi.
2855 // Load both operands and check that they are 32-bit integer.
2856 // Jump to type transition if they are not. The registers a0 and a1 (right
2857 // and left) are preserved for the runtime call.
2858 FloatingPointHelper::Destination destination =
2859 (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
2860 ? FloatingPointHelper::kFPURegisters
2861 : FloatingPointHelper::kCoreRegisters;
2863 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2874 FloatingPointHelper::LoadNumberAsInt32Double(masm,
2886 if (destination == FloatingPointHelper::kFPURegisters) {
2887 CpuFeatures::Scope scope(FPU);
2888 Label return_heap_number;
2891 __ add_d(f10, f12, f14);
2894 __ sub_d(f10, f12, f14);
2897 __ mul_d(f10, f12, f14);
2900 __ div_d(f10, f12, f14);
2906 if (op_ != Token::DIV) {
2907 // These operations produce an integer result.
2908 // Try to return a smi if we can.
2909 // Otherwise return a heap number if allowed, or jump to type
2912 Register except_flag = scratch2;
2913 __ EmitFPUTruncate(kRoundToZero,
2919 if (result_type_ <= BinaryOpIC::INT32) {
2920 // If except_flag != 0, result does not fit in a 32-bit integer.
2921 __ Branch(&transition, ne, except_flag, Operand(zero_reg));
2924 // Check if the result fits in a smi.
2925 __ mfc1(scratch1, single_scratch);
2926 __ Addu(scratch2, scratch1, Operand(0x40000000));
2927 // If not, try to return a heap number.
2928 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
2929 // Check for minus zero. Return heap number for minus zero.
2931 __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
2932 __ mfc1(scratch2, f11);
2933 __ And(scratch2, scratch2, HeapNumber::kSignMask);
2934 __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
2937 // Tag the result and return.
2938 __ SmiTag(v0, scratch1);
2941 // DIV just falls through to allocating a heap number.
2944 __ bind(&return_heap_number);
2945 // Return a heap number, or fall through to type transition or runtime
2946 // call if we can't.
2947 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
2948 : BinaryOpIC::INT32)) {
2949 // We are using FPU registers so s0 is available.
2950 heap_number_result = s0;
2951 GenerateHeapResultAllocation(masm,
2957 __ mov(v0, heap_number_result);
2958 __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
2962 // A DIV operation expecting an integer result falls through
2963 // to type transition.
2966 // We preserved a0 and a1 to be able to call runtime.
2967 // Save the left value on the stack.
2970 Label pop_and_call_runtime;
2972 // Allocate a heap number to store the result.
2973 heap_number_result = s0;
2974 GenerateHeapResultAllocation(masm,
2979 &pop_and_call_runtime);
2981 // Load the left value from the value saved on the stack.
2984 // Call the C function to handle the double operation.
2985 FloatingPointHelper::CallCCodeForDoubleOperation(
2986 masm, op_, heap_number_result, scratch1);
2987 if (FLAG_debug_code) {
2988 __ stop("Unreachable code.");
2991 __ bind(&pop_and_call_runtime);
2993 __ Branch(&call_runtime);
3000 case Token::BIT_XOR:
3001 case Token::BIT_AND:
3005 Label return_heap_number;
3006 Register scratch3 = t1;
3007 // Convert operands to 32-bit integers. Right in a2 and left in a3. The
3008 // registers a0 and a1 (right and left) are preserved for the runtime
3010 FloatingPointHelper::LoadNumberAsInt32(masm,
3019 FloatingPointHelper::LoadNumberAsInt32(masm,
3029 // The ECMA-262 standard specifies that, for shift operations, only the
3030 // 5 least significant bits of the shift value should be used.
3033 __ Or(a2, a3, Operand(a2));
3035 case Token::BIT_XOR:
3036 __ Xor(a2, a3, Operand(a2));
3038 case Token::BIT_AND:
3039 __ And(a2, a3, Operand(a2));
3042 __ And(a2, a2, Operand(0x1f));
3043 __ srav(a2, a3, a2);
3046 __ And(a2, a2, Operand(0x1f));
3047 __ srlv(a2, a3, a2);
3048 // SHR is special because it is required to produce a positive answer.
3049 // We only get a negative result if the shift value (a2) is 0.
3050 // This result cannot be represented as a signed 32-bit integer; try
3051 // to return a heap number if we can.
3052 // The non FPU code does not support this special case, so jump to
3053 // runtime if we don't support it.
3054 if (CpuFeatures::IsSupported(FPU)) {
3055 __ Branch((result_type_ <= BinaryOpIC::INT32)
3057 : &return_heap_number,
3062 __ Branch((result_type_ <= BinaryOpIC::INT32)
3071 __ And(a2, a2, Operand(0x1f));
3072 __ sllv(a2, a3, a2);
3078 // Check if the result fits in a smi.
3079 __ Addu(scratch1, a2, Operand(0x40000000));
3080 // If not, try to return a heap number. (We know the result is an int32.)
3081 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
3082 // Tag the result and return.
3086 __ bind(&return_heap_number);
3087 heap_number_result = t1;
3088 GenerateHeapResultAllocation(masm,
3095 if (CpuFeatures::IsSupported(FPU)) {
3096 CpuFeatures::Scope scope(FPU);
3098 if (op_ != Token::SHR) {
3099 // Convert the result to a floating point value.
3100 __ mtc1(a2, double_scratch);
3101 __ cvt_d_w(double_scratch, double_scratch);
3103 // The result must be interpreted as an unsigned 32-bit integer.
3104 __ mtc1(a2, double_scratch);
3105 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
3108 // Store the result.
3109 __ mov(v0, heap_number_result);
3110 __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
3113 // Tail call that writes the int32 in a2 to the heap number in v0, using
3114 // a3 and a0 as scratch. v0 is preserved and returned.
3116 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
3117 __ TailCallStub(&stub);
3127 // We never expect DIV to yield an integer result, so we always generate
3128 // type transition code for DIV operations expecting an integer result: the
3129 // code will fall through to this type transition.
3130 if (transition.is_linked() ||
3131 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
3132 __ bind(&transition);
3133 GenerateTypeTransition(masm);
3136 __ bind(&call_runtime);
3137 GenerateCallRuntime(masm);
3141 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3144 if (op_ == Token::ADD) {
3145 // Handle string addition here, because it is the only operation
3146 // that does not do a ToNumber conversion on the operands.
3147 GenerateAddStrings(masm);
3150 // Convert oddball arguments to numbers.
3152 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3153 __ Branch(&check, ne, a1, Operand(t0));
3154 if (Token::IsBitOp(op_)) {
3155 __ li(a1, Operand(Smi::FromInt(0)));
3157 __ LoadRoot(a1, Heap::kNanValueRootIndex);
3161 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3162 __ Branch(&done, ne, a0, Operand(t0));
3163 if (Token::IsBitOp(op_)) {
3164 __ li(a0, Operand(Smi::FromInt(0)));
3166 __ LoadRoot(a0, Heap::kNanValueRootIndex);
3170 GenerateHeapNumberStub(masm);
3174 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3176 GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3178 __ bind(&call_runtime);
3179 GenerateCallRuntime(masm);
3183 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3184 Label call_runtime, call_string_add_or_runtime;
3186 GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3188 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3190 __ bind(&call_string_add_or_runtime);
3191 if (op_ == Token::ADD) {
3192 GenerateAddStrings(masm);
3195 __ bind(&call_runtime);
3196 GenerateCallRuntime(masm);
3200 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3201 ASSERT(op_ == Token::ADD);
3202 Label left_not_string, call_runtime;
3205 Register right = a0;
3207 // Check if left argument is a string.
3208 __ JumpIfSmi(left, &left_not_string);
3209 __ GetObjectType(left, a2, a2);
3210 __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3212 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3213 GenerateRegisterArgsPush(masm);
3214 __ TailCallStub(&string_add_left_stub);
3216 // Left operand is not a string, test right.
3217 __ bind(&left_not_string);
3218 __ JumpIfSmi(right, &call_runtime);
3219 __ GetObjectType(right, a2, a2);
3220 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3222 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3223 GenerateRegisterArgsPush(masm);
3224 __ TailCallStub(&string_add_right_stub);
3226 // At least one argument is not a string.
3227 __ bind(&call_runtime);
3231 void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3232 GenerateRegisterArgsPush(masm);
3235 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3238 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3241 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3244 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3247 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3250 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3252 case Token::BIT_AND:
3253 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3255 case Token::BIT_XOR:
3256 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3259 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3262 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3265 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3273 void BinaryOpStub::GenerateHeapResultAllocation(
3274 MacroAssembler* masm,
3276 Register heap_number_map,
3279 Label* gc_required) {
3281 // Code below will scratch result if allocation fails. To keep both arguments
3282 // intact for the runtime call result cannot be one of these.
3283 ASSERT(!result.is(a0) && !result.is(a1));
3285 if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3286 Label skip_allocation, allocated;
3287 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
3288 // If the overwritable operand is already an object, we skip the
3289 // allocation of a heap number.
3290 __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3291 // Allocate a heap number for the result.
3292 __ AllocateHeapNumber(
3293 result, scratch1, scratch2, heap_number_map, gc_required);
3294 __ Branch(&allocated);
3295 __ bind(&skip_allocation);
3296 // Use object holding the overwritable operand for result.
3297 __ mov(result, overwritable_operand);
3298 __ bind(&allocated);
3300 ASSERT(mode_ == NO_OVERWRITE);
3301 __ AllocateHeapNumber(
3302 result, scratch1, scratch2, heap_number_map, gc_required);
3307 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3313 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3314 // Untagged case: double input in f4, double result goes
3316 // Tagged case: tagged input on top of stack and in a0,
3317 // tagged result (heap number) goes into v0.
3319 Label input_not_smi;
3322 Label invalid_cache;
3323 const Register scratch0 = t5;
3324 const Register scratch1 = t3;
3325 const Register cache_entry = a0;
3326 const bool tagged = (argument_type_ == TAGGED);
3328 if (CpuFeatures::IsSupported(FPU)) {
3329 CpuFeatures::Scope scope(FPU);
3332 // Argument is a number and is on stack and in a0.
3333 // Load argument and check if it is a smi.
3334 __ JumpIfNotSmi(a0, &input_not_smi);
3336 // Input is a smi. Convert to double and load the low and high words
3337 // of the double into a2, a3.
3338 __ sra(t0, a0, kSmiTagSize);
3341 __ Move(a2, a3, f4);
3344 __ bind(&input_not_smi);
3345 // Check if input is a HeapNumber.
3348 Heap::kHeapNumberMapRootIndex,
3351 // Input is a HeapNumber. Store the
3352 // low and high words into a2, a3.
3353 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
3354 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
3356 // Input is untagged double in f4. Output goes to f4.
3357 __ Move(a2, a3, f4);
3360 // a2 = low 32 bits of double value.
3361 // a3 = high 32 bits of double value.
3362 // Compute hash (the shifts are arithmetic):
3363 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3369 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3370 __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
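// A C-style restatement of the hash formula above, for readability (a sketch
// only; the shift/xor instructions themselves are elided in this listing):
//   int32_t h = low ^ high;
//   h ^= h >> 16;   // arithmetic shifts, as noted above
//   h ^= h >> 8;
//   h &= TranscendentalCache::SubCache::kCacheSize - 1;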
3372 // a2 = low 32 bits of double value.
3373 // a3 = high 32 bits of double value.
3374 // a1 = TranscendentalCache::hash(double value).
3375 __ li(cache_entry, Operand(
3376 ExternalReference::transcendental_cache_array_address(
3378 // a0 points to cache array.
3379 __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3380 Isolate::Current()->transcendental_cache()->caches_[0])));
3381 // a0 points to the cache for the type type_.
3382 // If NULL, the cache hasn't been initialized yet, so go through runtime.
3383 __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
3386 // Check that the layout of cache elements match expectations.
3387 { TranscendentalCache::SubCache::Element test_elem[2];
3388 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3389 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3390 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3391 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3392 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3393 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
3394 CHECK_EQ(0, elem_in0 - elem_start);
3395 CHECK_EQ(kIntSize, elem_in1 - elem_start);
3396 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3400 // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
3402 __ Addu(a1, a1, t0);
3404 __ Addu(cache_entry, cache_entry, t0);
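// Sketch of the address computation above: each cache Element is 12 bytes
// (two uint32_t inputs plus an output pointer, as the CHECK_EQs verify), so
// the entry address is cache + index * 12, computed here (with the shifts
// elided in this listing) as index * 3, then * 4:
//   Element* entry = &cache[index];   // == cache + index * 12 bytes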
3406 // Check if cache matches: Double value is stored in uint32_t[2] array.
3407 __ lw(t0, MemOperand(cache_entry, 0));
3408 __ lw(t1, MemOperand(cache_entry, 4));
3409 __ lw(t2, MemOperand(cache_entry, 8));
3410 __ Branch(&calculate, ne, a2, Operand(t0));
3411 __ Branch(&calculate, ne, a3, Operand(t1));
3412 // Cache hit. Load result, clean up and return.
3414 // Pop input value from stack and load result into v0.
3418 // Load result into f4.
3419 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3422 } // if (CpuFeatures::IsSupported(FPU))
3424 __ bind(&calculate);
3426 __ bind(&invalid_cache);
3427 __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3432 if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
3433 CpuFeatures::Scope scope(FPU);
3437 const Register heap_number_map = t2;
3439 // Call C function to calculate the result and update the cache.
3440 // Register a0 holds precalculated cache entry address; preserve
3441 // it on the stack and pop it into register cache_entry after the
3443 __ Push(cache_entry, a2, a3);
3444 GenerateCallCFunction(masm, scratch0);
3445 __ GetCFunctionDoubleResult(f4);
3447 // Try to update the cache. If we cannot allocate a
3448 // heap number, we return the result without updating.
3449 __ Pop(cache_entry, a2, a3);
3450 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3451 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3452 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3454 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3455 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3456 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3458 __ mov(v0, cache_entry);
3461 __ bind(&invalid_cache);
3462 // The cache is invalid. Call runtime which will recreate the
3464 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3465 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3466 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
3468 FrameScope scope(masm, StackFrame::INTERNAL);
3470 __ CallRuntime(RuntimeFunction(), 1);
3472 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3475 __ bind(&skip_cache);
3476 // Call C function to calculate the result and answer directly
3477 // without updating the cache.
3478 GenerateCallCFunction(masm, scratch0);
3479 __ GetCFunctionDoubleResult(f4);
3480 __ bind(&no_update);
3482 // We return the value in f4 without adding it to the cache, but
3483 // we cause a scavenging GC so that future allocations will succeed.
3485 FrameScope scope(masm, StackFrame::INTERNAL);
3487 // Allocate an aligned object larger than a HeapNumber.
3488 ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3489 __ li(scratch0, Operand(4 * kPointerSize));
3491 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3498 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3501 __ PrepareCallCFunction(2, scratch);
3502 if (IsMipsSoftFloatABI) {
3503 __ Move(a0, a1, f4);
3507 AllowExternalCallThatCantCauseGC scope(masm);
3509 case TranscendentalCache::SIN:
3511 ExternalReference::math_sin_double_function(masm->isolate()),
3514 case TranscendentalCache::COS:
3516 ExternalReference::math_cos_double_function(masm->isolate()),
3519 case TranscendentalCache::LOG:
3521 ExternalReference::math_log_double_function(masm->isolate()),
3532 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
3534 // Add more cases when necessary.
3535 case TranscendentalCache::SIN: return Runtime::kMath_sin;
3536 case TranscendentalCache::COS: return Runtime::kMath_cos;
3537 case TranscendentalCache::LOG: return Runtime::kMath_log;
3540 return Runtime::kAbort;
3545 void StackCheckStub::Generate(MacroAssembler* masm) {
3546 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3550 void MathPowStub::Generate(MacroAssembler* masm) {
3553 if (CpuFeatures::IsSupported(FPU)) {
3554 CpuFeatures::Scope scope(FPU);
3557 Label exponent_not_smi;
3558 Label convert_exponent;
3560 const Register base = a0;
3561 const Register exponent = a2;
3562 const Register heapnumbermap = t1;
3563 const Register heapnumber = s0; // Callee-saved register.
3564 const Register scratch = t2;
3565 const Register scratch2 = t3;
3567 // Allocate FP values in the ABI-parameter-passing regs.
3568 const DoubleRegister double_base = f12;
3569 const DoubleRegister double_exponent = f14;
3570 const DoubleRegister double_result = f0;
3571 const DoubleRegister double_scratch = f2;
3573 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3574 __ lw(base, MemOperand(sp, 1 * kPointerSize));
3575 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
3577 // Convert base to double value and store it in f0.
3578 __ JumpIfNotSmi(base, &base_not_smi);
3579 // Base is a Smi. Untag and convert it.
3581 __ mtc1(base, double_scratch);
3582 __ cvt_d_w(double_base, double_scratch);
3583 __ Branch(&convert_exponent);
3585 __ bind(&base_not_smi);
3586 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3587 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3588 // Base is a heapnumber. Load it into double register.
3589 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3591 __ bind(&convert_exponent);
3592 __ JumpIfNotSmi(exponent, &exponent_not_smi);
3593 __ SmiUntag(exponent);
3595 // The base is in a double register and the exponent is
3596 // an untagged smi. Allocate a heap number and call a
3597 // C function for integer exponents. The register containing
3598 // the heap number is callee-saved.
3599 __ AllocateHeapNumber(heapnumber,
3605 __ PrepareCallCFunction(1, 1, scratch);
3606 __ SetCallCDoubleArguments(double_base, exponent);
3608 AllowExternalCallThatCantCauseGC scope(masm);
3610 ExternalReference::power_double_int_function(masm->isolate()), 1, 1);
3612 __ GetCFunctionDoubleResult(double_result);
3614 __ sdc1(double_result,
3615 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3616 __ mov(v0, heapnumber);
3617 __ DropAndRet(2 * kPointerSize);
3619 __ bind(&exponent_not_smi);
3620 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3621 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3622 // Exponent is a heapnumber. Load it into double register.
3623 __ ldc1(double_exponent,
3624 FieldMemOperand(exponent, HeapNumber::kValueOffset));
3626 // The base and the exponent are in double registers.
3627 // Allocate a heap number and call a C function for
3628 // double exponents. The register containing
3629 // the heap number is callee-saved.
3630 __ AllocateHeapNumber(heapnumber,
3636 __ PrepareCallCFunction(0, 2, scratch);
3637 // ABI (o32) for func(double a, double b): a in f12, b in f14.
3638 ASSERT(double_base.is(f12));
3639 ASSERT(double_exponent.is(f14));
3640 __ SetCallCDoubleArguments(double_base, double_exponent);
3642 AllowExternalCallThatCantCauseGC scope(masm);
3644 ExternalReference::power_double_double_function(masm->isolate()),
3648 __ GetCFunctionDoubleResult(double_result);
3650 __ sdc1(double_result,
3651 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3652 __ mov(v0, heapnumber);
3653 __ DropAndRet(2 * kPointerSize);
3656 __ bind(&call_runtime);
3657 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3661 bool CEntryStub::NeedsImmovableCode() {
3666 bool CEntryStub::IsPregenerated() {
3667 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
3672 void CodeStub::GenerateStubsAheadOfTime() {
3673 CEntryStub::GenerateAheadOfTime();
3674 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
3675 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
3676 RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
3680 void CodeStub::GenerateFPStubs() {
3681 CEntryStub save_doubles(1, kSaveFPRegs);
3682 Handle<Code> code = save_doubles.GetCode();
3683 code->set_is_pregenerated(true);
3684 StoreBufferOverflowStub stub(kSaveFPRegs);
3685 stub.GetCode()->set_is_pregenerated(true);
3686 code->GetIsolate()->set_fp_stubs_generated(true);
3690 void CEntryStub::GenerateAheadOfTime() {
3691 CEntryStub stub(1, kDontSaveFPRegs);
3692 Handle<Code> code = stub.GetCode();
3693 code->set_is_pregenerated(true);
3697 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
3702 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
3703 UncatchableExceptionType type) {
3704 __ ThrowUncatchable(type, v0);
3708 void CEntryStub::GenerateCore(MacroAssembler* masm,
3709 Label* throw_normal_exception,
3710 Label* throw_termination_exception,
3711 Label* throw_out_of_memory_exception,
3713 bool always_allocate) {
3714 // v0: result parameter for PerformGC, if any
3715 // s0: number of arguments including receiver (C callee-saved)
3716 // s1: pointer to the first argument (C callee-saved)
3717 // s2: pointer to builtin function (C callee-saved)
3719 Isolate* isolate = masm->isolate();
3722 // Move result passed in v0 into a0 to call PerformGC.
3724 __ PrepareCallCFunction(1, 0, a1);
3725 __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
3728 ExternalReference scope_depth =
3729 ExternalReference::heap_always_allocate_scope_depth(isolate);
3730 if (always_allocate) {
3731 __ li(a0, Operand(scope_depth));
3732 __ lw(a1, MemOperand(a0));
3733 __ Addu(a1, a1, Operand(1));
3734 __ sw(a1, MemOperand(a0));
3737 // Prepare arguments for C routine: a0 = argc, a1 = argv
3741 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3742 // also need to reserve the 4 argument slots on the stack.
3744 __ AssertStackIsAligned();
3746 __ li(a2, Operand(ExternalReference::isolate_address()));
3748 // To let the GC traverse the return address of the exit frames, we need to
3749 // know where the return address is. The CEntryStub is unmovable, so
3750 // we can store the address on the stack to be able to find it again and
3751 // we never have to restore it, because it will not change.
3752 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
3753 // This branch-and-link sequence is needed to find the current PC on mips,
3754 // saved to the ra register.
3755 // Use masm-> here instead of the double-underscore macro since extra
3756 // coverage code can interfere with the proper calculation of ra.
3758 masm->bal(&find_ra); // bal exposes branch delay slot.
3759 masm->nop(); // Branch delay slot nop.
3760 masm->bind(&find_ra);
3762 // Adjust the value in ra to point to the correct return location, 2nd
3763 // instruction past the real call into C code (the jalr(t9)), and push it.
3764 // This is the return address of the exit frame.
3765 const int kNumInstructionsToJump = 6;
3766 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3767 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
3768 masm->Subu(sp, sp, kCArgsSlotsSize);
3769 // Stack is still aligned.
3771 // Call the C routine.
3772 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
3774 masm->nop(); // Branch delay slot nop.
3775 // Make sure the stored 'ra' points to this position.
3776 ASSERT_EQ(kNumInstructionsToJump,
3777 masm->InstructionsGeneratedSince(&find_ra));
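// Sketch of the invariant being asserted: the ra stored in the exit frame
// must point exactly kNumInstructionsToJump MIPS instructions (4 bytes each,
// and kPointerSize == 4 here) past &find_ra, i.e. to the instruction that
// follows the jalr's branch delay slot:
//   stored_ra == find_ra_address + kNumInstructionsToJump * kPointerSize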
3780 // Restore stack (remove arg slots).
3781 __ Addu(sp, sp, kCArgsSlotsSize);
3783 if (always_allocate) {
3784 // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3785 __ li(a2, Operand(scope_depth));
3786 __ lw(a3, MemOperand(a2));
3787 __ Subu(a3, a3, Operand(1));
3788 __ sw(a3, MemOperand(a2));
3791 // Check for failure result.
3792 Label failure_returned;
3793 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3794 __ addiu(a2, v0, 1);
3795 __ andi(t0, a2, kFailureTagMask);
3796 __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
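// Illustrative sketch of the failure check above: given the STATIC_ASSERT
// that (kFailureTag + 1) & kFailureTagMask == 0, the addiu/andi pair computes
//   bool looks_like_failure(uint32_t result) {
//     return ((result + 1) & kFailureTagMask) == 0;
//   }
// and the Branch takes the failure_returned path when it holds.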
3798 // Exit C frame and return.
3800 // sp: stack pointer
3801 // fp: frame pointer
3802 __ LeaveExitFrame(save_doubles_, s0);
3805 // Check if we should retry or throw exception.
3807 __ bind(&failure_returned);
3808 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3809 __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
3810 __ Branch(&retry, eq, t0, Operand(zero_reg));
3812 // Special handling of out of memory exceptions.
3813 Failure* out_of_memory = Failure::OutOfMemoryException();
3814 __ Branch(throw_out_of_memory_exception, eq,
3815 v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3817 // Retrieve the pending exception and clear the variable.
3818 __ li(a3, Operand(isolate->factory()->the_hole_value()));
3819 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3821 __ lw(v0, MemOperand(t0));
3822 __ sw(a3, MemOperand(t0));
3824 // Special handling of termination exceptions which are uncatchable
3825 // by javascript code.
3826 __ Branch(throw_termination_exception, eq,
3827 v0, Operand(isolate->factory()->termination_exception()));
3829 // Handle normal exception.
3830 __ jmp(throw_normal_exception);
3833 // Last failure (v0) will be moved to (a0) as the parameter when retrying.
3837 void CEntryStub::Generate(MacroAssembler* masm) {
3838 // Called from JavaScript; parameters are on stack as if calling JS function
3839 // a0: number of arguments including receiver
3840 // a1: pointer to builtin function
3841 // fp: frame pointer (restored after C call)
3842 // sp: stack pointer (restored as callee's sp after C call)
3843 // cp: current context (C callee-saved)
3845 // NOTE: Invocations of builtins may return failure objects
3846 // instead of a proper result. The builtin entry handles
3847 // this by performing a garbage collection and retrying the
3850 // Compute the argv pointer in a callee-saved register.
3851 __ sll(s1, a0, kPointerSizeLog2);
3852 __ Addu(s1, sp, s1);
3853 __ Subu(s1, s1, Operand(kPointerSize));
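// Illustrative sketch of the argv computation above (a0 holds argc,
// including the receiver):
//   s1 = sp + a0 * kPointerSize - kPointerSize;
// so s1 points at the highest argument slot on the stack, matching the
// "s1: pointer to the first argument" convention documented in GenerateCore.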
3855 // Enter the exit frame that transitions from JavaScript to C++.
3856 FrameScope scope(masm, StackFrame::MANUAL);
3857 __ EnterExitFrame(save_doubles_);
3859 // Set up argc and the builtin function in callee-saved registers.
3863 // s0: number of arguments (C callee-saved)
3864 // s1: pointer to first argument (C callee-saved)
3865 // s2: pointer to builtin function (C callee-saved)
3867 Label throw_normal_exception;
3868 Label throw_termination_exception;
3869 Label throw_out_of_memory_exception;
3871 // Call into the runtime system.
3873 &throw_normal_exception,
3874 &throw_termination_exception,
3875 &throw_out_of_memory_exception,
3879 // Do space-specific GC and retry runtime call.
3881 &throw_normal_exception,
3882 &throw_termination_exception,
3883 &throw_out_of_memory_exception,
3887 // Do full GC and retry runtime call one final time.
3888 Failure* failure = Failure::InternalError();
3889 __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
3891 &throw_normal_exception,
3892 &throw_termination_exception,
3893 &throw_out_of_memory_exception,
3897 __ bind(&throw_out_of_memory_exception);
3898 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
3900 __ bind(&throw_termination_exception);
3901 GenerateThrowUncatchable(masm, TERMINATION);
3903 __ bind(&throw_normal_exception);
3904 GenerateThrowTOS(masm);
3908 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3910 Isolate* isolate = masm->isolate();
3913 // a0: entry address
3922 // Save callee saved registers on the stack.
3923 __ MultiPush(kCalleeSaved | ra.bit());
3925 if (CpuFeatures::IsSupported(FPU)) {
3926 CpuFeatures::Scope scope(FPU);
3927 // Save callee-saved FPU registers.
3928 __ MultiPushFPU(kCalleeSavedFPU);
3929 // Set up the reserved register for 0.0.
3930 __ Move(kDoubleRegZero, 0.0);
3934 // Load argv in s0 register.
3935 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
3936 if (CpuFeatures::IsSupported(FPU)) {
3937 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
3940 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
3942 // We build an EntryFrame.
3943 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
3944 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
3945 __ li(t2, Operand(Smi::FromInt(marker)));
3946 __ li(t1, Operand(Smi::FromInt(marker)));
3947 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
3949 __ lw(t0, MemOperand(t0));
3950 __ Push(t3, t2, t1, t0);
3951 // Setup frame pointer for the frame to be pushed.
3952 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
3955 // a0: entry_address
3957 // a2: receiver_pointer
3963 // function slot | entry frame
3965 // bad fp (0xff...f) |
3966 // callee saved registers + ra
3970 // If this is the outermost JS call, set js_entry_sp value.
3971 Label non_outermost_js;
3972 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
3973 __ li(t1, Operand(ExternalReference(js_entry_sp)));
3974 __ lw(t2, MemOperand(t1));
3975 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
3976 __ sw(fp, MemOperand(t1));
3977 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
3980 __ nop(); // Branch delay slot nop.
3981 __ bind(&non_outermost_js);
3982 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
3986 // Call a faked try-block that does the invoke.
3987 __ bal(&invoke); // bal exposes branch delay slot.
3988 __ nop(); // Branch delay slot nop.
3990 // Caught exception: Store result (exception) in the pending
3991 // exception field in the JSEnv and return a failure sentinel.
3992 // Coming in here the fp will be invalid because the PushTryHandler below
3993 // sets it to 0 to signal the existence of the JSEntry frame.
3994 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3996 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
3997 __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
3998 __ b(&exit); // b exposes branch delay slot.
3999 __ nop(); // Branch delay slot nop.
4001 // Invoke: Link this frame into the handler chain.
4003 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
4004 // If an exception not caught by another handler occurs, this handler
4005 // returns control to the code after the bal(&invoke) above, which
4006 // restores all kCalleeSaved registers (including cp and fp) to their
4007 // saved values before returning a failure to C.
4009 // Clear any pending exceptions.
4010 __ li(t1, Operand(isolate->factory()->the_hole_value()));
4011 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4013 __ sw(t1, MemOperand(t0));
4015 // Invoke the function by calling through JS entry trampoline builtin.
4016 // Notice that we cannot store a reference to the trampoline code directly in
4017 // this stub, because runtime stubs are not traversed when doing GC.
4020 // a0: entry_address
4022 // a2: receiver_pointer
4029 // callee saved registers + ra
4034 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4036 __ li(t0, Operand(construct_entry));
4038 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
4039 __ li(t0, Operand(entry));
4041 __ lw(t9, MemOperand(t0)); // Deref address.
4043 // Call JSEntryTrampoline.
4044 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
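// t9 held a tagged pointer to the Code object; adding Code::kHeaderSize and
// subtracting kHeapObjectTag yields the address of the first instruction.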
4047 // Unlink this frame from the handler chain.
4050 __ bind(&exit); // v0 holds result
4051 // Check if the current stack frame is marked as the outermost JS frame.
4052 Label non_outermost_js_2;
4054 __ Branch(&non_outermost_js_2, ne, t1,
4055 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4056 __ li(t1, Operand(ExternalReference(js_entry_sp)));
4057 __ sw(zero_reg, MemOperand(t1));
4058 __ bind(&non_outermost_js_2);
4060 // Restore the top frame descriptors from the stack.
4062 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4064 __ sw(t1, MemOperand(t0));
4066 // Reset the stack to the callee saved registers.
4067 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
4069 if (CpuFeatures::IsSupported(FPU)) {
4070 CpuFeatures::Scope scope(FPU);
4071 // Restore callee-saved fpu registers.
4072 __ MultiPopFPU(kCalleeSavedFPU);
4075 // Restore callee saved registers from the stack.
4076 __ MultiPop(kCalleeSaved | ra.bit());
4082 // Uses registers a0 to t0.
4083 // Expected input (depending on whether args are in registers or on the stack):
4084 // * object: a0 or at sp + 1 * kPointerSize.
4085 // * function: a1 or at sp.
4087 // An inlined call site may have been generated before calling this stub.
4088 // In this case the offset to the inline site to patch is passed on the stack,
4089 // in the safepoint slot for register t0.
4090 void InstanceofStub::Generate(MacroAssembler* masm) {
4091 // Call site inlining and patching implies arguments in registers.
4092 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4093 // ReturnTrueFalse is only implemented for inlined call sites.
4094 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4096 // Fixed register usage throughout the stub:
4097 const Register object = a0; // Object (lhs).
4098 Register map = a3; // Map of the object.
4099 const Register function = a1; // Function (rhs).
4100 const Register prototype = t0; // Prototype of the function.
4101 const Register inline_site = t5;
4102 const Register scratch = a2;
4104 const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
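// kDeltaToLoadBoolResult is presumably the byte distance from the patched
// map-load site to the instruction that materializes the boolean result;
// with 4-byte MIPS instructions, 4 * kPointerSize spans four instructions.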
4106 Label slow, loop, is_instance, is_not_instance, not_js_object;
4108 if (!HasArgsInRegisters()) {
4109 __ lw(object, MemOperand(sp, 1 * kPointerSize));
4110 __ lw(function, MemOperand(sp, 0));
4113 // Check that the left-hand side is a JS object and load its map.
4114 __ JumpIfSmi(object, ¬_js_object);
4115 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
4117 // If there is a call site cache don't look in the global cache, but do the
4118 // real lookup and update the call site cache.
4119 if (!HasCallSiteInlineCheck()) {
4121 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
4122 __ Branch(&miss, ne, function, Operand(at));
4123 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
4124 __ Branch(&miss, ne, map, Operand(at));
4125 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4126 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4131 // Get the prototype of the function.
4132 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
4134 // Check that the function prototype is a JS object.
4135 __ JumpIfSmi(prototype, &slow);
4136 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4138 // Update the global instanceof or call site inlined cache with the current
4139 // map and function. The cached answer will be set when it is known below.
4140 if (!HasCallSiteInlineCheck()) {
4141 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4142 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4144 ASSERT(HasArgsInRegisters());
4145 // Patch the (relocated) inlined map check.
4147 // The offset was stored in t0 safepoint slot.
4148 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
4149 __ LoadFromSafepointRegisterSlot(scratch, t0);
4150 __ Subu(inline_site, ra, scratch);
4151 // Patch the relocated value to map.
4152 __ PatchRelocatedValue(inline_site, scratch, map);
4155 // Register mapping: a3 is object map and t0 is function prototype.
4156 // Get prototype of object into a2.
4157 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
4159 // We don't need map any more. Use it as a scratch register.
4160 Register scratch2 = map;
4163 // Loop through the prototype chain looking for the function prototype.
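// In effect (a sketch of the loop below, not generated code):
//   while (true) {
//     if (object_prototype == function_prototype) goto is_instance;
//     if (object_prototype == null) goto is_not_instance;
//     object_prototype = object_prototype->map()->prototype();
//   }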
4164 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4166 __ Branch(&is_instance, eq, scratch, Operand(prototype));
4167 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
4168 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
4169 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
4172 __ bind(&is_instance);
4173 ASSERT(Smi::FromInt(0) == 0);
4174 if (!HasCallSiteInlineCheck()) {
4175 __ mov(v0, zero_reg);
4176 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4178 // Patch the call site to return true.
4179 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4180 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4181 // Get the boolean result location in scratch and patch it.
4182 __ PatchRelocatedValue(inline_site, scratch, v0);
4184 if (!ReturnTrueFalseObject()) {
4185 ASSERT_EQ(Smi::FromInt(0), 0);
4186 __ mov(v0, zero_reg);
4189 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4191 __ bind(&is_not_instance);
4192 if (!HasCallSiteInlineCheck()) {
4193 __ li(v0, Operand(Smi::FromInt(1)));
4194 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4196 // Patch the call site to return false.
4197 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4198 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4199 // Get the boolean result location in scratch and patch it.
4200 __ PatchRelocatedValue(inline_site, scratch, v0);
4202 if (!ReturnTrueFalseObject()) {
4203 __ li(v0, Operand(Smi::FromInt(1)));
4207 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4209 Label object_not_null, object_not_null_or_smi;
4210 __ bind(¬_js_object);
4211 // Before the null, smi and string value checks, check that the rhs is a
4212 // function, as an exception needs to be thrown for a non-function rhs.
4213 __ JumpIfSmi(function, &slow);
4214 __ GetObjectType(function, scratch2, scratch);
4215 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
4217 // Null is not an instance of anything.
4218 __ Branch(&object_not_null, ne, scratch,
4219 Operand(masm->isolate()->factory()->null_value()));
4220 __ li(v0, Operand(Smi::FromInt(1)));
4221 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4223 __ bind(&object_not_null);
4224 // Smi values are not instances of anything.
4225 __ JumpIfNotSmi(object, &object_not_null_or_smi);
4226 __ li(v0, Operand(Smi::FromInt(1)));
4227 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4229 __ bind(&object_not_null_or_smi);
4230 // String values are not instances of anything.
4231 __ IsObjectJSStringType(object, scratch, &slow);
4232 __ li(v0, Operand(Smi::FromInt(1)));
4233 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4235 // Slow-case. Tail call builtin.
4237 if (!ReturnTrueFalseObject()) {
4238 if (HasArgsInRegisters()) {
4241 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4244 FrameScope scope(masm, StackFrame::INTERNAL);
4246 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4249 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4250 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
4251 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4252 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4257 Register InstanceofStub::left() { return a0; }
4260 Register InstanceofStub::right() { return a1; }
4263 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
4264 // The displacement is the offset of the last parameter (if any)
4265 // relative to the frame pointer.
4266 static const int kDisplacement =
4267 StandardFrameConstants::kCallerSPOffset - kPointerSize;
4269 // Check that the key is a smi.
4271 __ JumpIfNotSmi(a1, &slow);
4273 // Check if the calling frame is an arguments adaptor frame.
4275 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4276 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4280 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4282 // Check index (a1) against formal parameters count limit passed in
4283 // through register a0. Use unsigned comparison to get negative check for free.
4285 __ Branch(&slow, hs, a1, Operand(a0));
4287 // Read the argument from the stack and return it.
4288 __ subu(a3, a0, a1);
4289 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4290 __ Addu(a3, fp, Operand(t3));
4291 __ lw(v0, MemOperand(a3, kDisplacement));
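// Note on the sll above: a3 holds a smi-tagged slot count (value << 1), and
// kPointerSizeLog2 - kSmiTagSize == 1 on 32-bit MIPS, so the shift turns it
// directly into a byte offset (value * kPointerSize).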
4294 // Arguments adaptor case: Check index (a1) against actual arguments
4295 // limit found in the arguments adaptor frame. Use unsigned
4296 // comparison to get negative check for free.
4298 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4299 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
4301 // Read the argument from the adaptor frame and return it.
4302 __ subu(a3, a0, a1);
4303 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4304 __ Addu(a3, a2, Operand(t3));
4305 __ lw(v0, MemOperand(a3, kDisplacement));
4308 // Slow-case: Handle non-smi or out-of-bounds access to arguments
4309 // by calling the runtime system.
4312 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4316 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
4317 // sp[0] : number of parameters
4318 // sp[4] : receiver displacement
4320 // Check if the calling frame is an arguments adaptor frame.
4322 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4323 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4324 __ Branch(&runtime, ne,
4325 a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4327 // Patch the arguments.length and the parameters pointer in the current frame.
4328 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4329 __ sw(a2, MemOperand(sp, 0 * kPointerSize));
4331 __ Addu(a3, a3, Operand(t3));
4332 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
4333 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4336 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4340 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4342 // sp[0] : number of parameters (tagged)
4343 // sp[4] : address of receiver argument
4345 // Registers used over whole function:
4346 // t2 : allocated object (tagged)
4347 // t5 : mapped parameter count (tagged)
4349 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4350 // a1 = parameter count (tagged)
4352 // Check if the calling frame is an arguments adaptor frame.
4354 Label adaptor_frame, try_allocate;
4355 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4356 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4357 __ Branch(&adaptor_frame, eq, a2,
4358 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4360 // No adaptor, parameter count = argument count.
4362 __ b(&try_allocate);
4363 __ nop(); // Branch delay slot nop.
4365 // We have an adaptor frame. Patch the parameters pointer.
4366 __ bind(&adaptor_frame);
4367 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4369 __ Addu(a3, a3, Operand(t6));
4370 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4371 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4373 // a1 = parameter count (tagged)
4374 // a2 = argument count (tagged)
4375 // Compute the mapped parameter count = min(a1, a2) in a1.
4377 __ Branch(&skip_min, lt, a1, Operand(a2));
4381 __ bind(&try_allocate);
4383 // Compute the sizes of backing store, parameter map, and arguments object.
4384 // 1. Parameter map, has 2 extra words containing context and backing store.
4385 const int kParameterMapHeaderSize =
4386 FixedArray::kHeaderSize + 2 * kPointerSize;
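// Assumed layout, consistent with the stores further below:
// [FixedArray map][length][context][backing store][slot 0] ... [slot n-1];
// the two extra words hold the context and the backing store pointer.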
4387 // If there are no mapped parameters, we do not need the parameter_map.
4388 Label param_map_size;
4389 ASSERT_EQ(0, Smi::FromInt(0));
4390 __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg));
4391 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
4393 __ addiu(t5, t5, kParameterMapHeaderSize);
4394 __ bind(¶m_map_size);
4396 // 2. Backing store.
4398 __ Addu(t5, t5, Operand(t6));
4399 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
4401 // 3. Arguments object.
4402 __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
4404 // Do the allocation of all three objects in one go.
4405 __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
4407 // v0 = address of new object(s) (tagged)
4408 // a2 = argument count (tagged)
4409 // Get the arguments boilerplate from the current (global) context into t0.
4410 const int kNormalOffset =
4411 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
4412 const int kAliasedOffset =
4413 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
4415 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4416 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4417 Label skip2_ne, skip2_eq;
4418 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
4419 __ lw(t0, MemOperand(t0, kNormalOffset));
4422 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
4423 __ lw(t0, MemOperand(t0, kAliasedOffset));
4426 // v0 = address of new object (tagged)
4427 // a1 = mapped parameter count (tagged)
4428 // a2 = argument count (tagged)
4429 // t0 = address of boilerplate object (tagged)
4430 // Copy the JS object part.
4431 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
4432 __ lw(a3, FieldMemOperand(t0, i));
4433 __ sw(a3, FieldMemOperand(v0, i));
4436 // Setup the callee in-object property.
4437 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
4438 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
4439 const int kCalleeOffset = JSObject::kHeaderSize +
4440 Heap::kArgumentsCalleeIndex * kPointerSize;
4441 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
4443 // Use the length (smi tagged) and set that as an in-object property too.
4444 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4445 const int kLengthOffset = JSObject::kHeaderSize +
4446 Heap::kArgumentsLengthIndex * kPointerSize;
4447 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
4449 // Setup the elements pointer in the allocated arguments object.
4450 // If we allocated a parameter map, t0 will point there, otherwise
4451 // it will point to the backing store.
4452 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
4453 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4455 // v0 = address of new object (tagged)
4456 // a1 = mapped parameter count (tagged)
4457 // a2 = argument count (tagged)
4458 // t0 = address of parameter map or backing store (tagged)
4459 // Initialize parameter map. If there are no mapped arguments, we're done.
4460 Label skip_parameter_map;
4462 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
4463 // Move backing store address to a3, because it is
4464 // expected there when filling in the unmapped arguments.
4468 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
4470 __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
4471 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
4472 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
4473 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
4474 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
4476 __ Addu(t2, t0, Operand(t6));
4477 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
4478 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
4480 // Copy the parameter slots and the holes in the arguments.
4481 // We need to fill in mapped_parameter_count slots. They index the context,
4482 // where parameters are stored in reverse order, at
4483 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4484 // The mapped parameters thus need to get indices
4485 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
4486 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4487 // We loop from right to left.
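// Illustration only: with parameter_count == 3 and mapped_parameter_count == 2,
// the two mapped slots receive context indices MIN_CONTEXT_SLOTS + 2 and
// MIN_CONTEXT_SLOTS + 1, in that order.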
4488 Label parameters_loop, parameters_test;
4490 __ lw(t5, MemOperand(sp, 0 * kPointerSize));
4491 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4492 __ Subu(t5, t5, Operand(a1));
4493 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
4495 __ Addu(a3, t0, Operand(t6));
4496 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
4498 // t2 = loop variable (tagged)
4499 // a1 = mapping index (tagged)
4500 // a3 = address of backing store (tagged)
4501 // t0 = address of parameter map (tagged)
4502 // t1 = temporary scratch (a.o., for address calculation)
4503 // t3 = the hole value
4504 __ jmp(¶meters_test);
4506 __ bind(¶meters_loop);
4507 __ Subu(t2, t2, Operand(Smi::FromInt(1)));
4509 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4510 __ Addu(t6, t0, t1);
4511 __ sw(t5, MemOperand(t6));
4512 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4513 __ Addu(t6, a3, t1);
4514 __ sw(t3, MemOperand(t6));
4515 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4516 __ bind(¶meters_test);
4517 __ Branch(¶meters_loop, ne, t2, Operand(Smi::FromInt(0)));
4519 __ bind(&skip_parameter_map);
4520 // a2 = argument count (tagged)
4521 // a3 = address of backing store (tagged)
4523 // Copy arguments header and remaining slots (if there are any).
4524 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4525 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
4526 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4528 Label arguments_loop, arguments_test;
4530 __ lw(t0, MemOperand(sp, 1 * kPointerSize));
4532 __ Subu(t0, t0, Operand(t6));
4533 __ jmp(&arguments_test);
4535 __ bind(&arguments_loop);
4536 __ Subu(t0, t0, Operand(kPointerSize));
4537 __ lw(t2, MemOperand(t0, 0));
4539 __ Addu(t1, a3, Operand(t6));
4540 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
4541 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4543 __ bind(&arguments_test);
4544 __ Branch(&arguments_loop, lt, t5, Operand(a2));
4546 // Return and remove the on-stack parameters.
4547 __ Addu(sp, sp, Operand(3 * kPointerSize));
4550 // Do the runtime call to allocate the arguments object.
4551 // a2 = argument count (tagged)
4553 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
4554 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4558 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4559 // sp[0] : number of parameters
4560 // sp[4] : receiver displacement
4562 // Check if the calling frame is an arguments adaptor frame.
4563 Label adaptor_frame, try_allocate, runtime;
4564 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4565 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4566 __ Branch(&adaptor_frame,
4569 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4571 // Get the length from the frame.
4572 __ lw(a1, MemOperand(sp, 0));
4573 __ Branch(&try_allocate);
4575 // Patch the arguments.length and the parameters pointer.
4576 __ bind(&adaptor_frame);
4577 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4578 __ sw(a1, MemOperand(sp, 0));
4579 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
4580 __ Addu(a3, a2, Operand(at));
4582 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4583 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4585 // Try the new space allocation. Start out with computing the size
4586 // of the arguments object and the elements array in words.
4587 Label add_arguments_object;
4588 __ bind(&try_allocate);
4589 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
4590 __ srl(a1, a1, kSmiTagSize);
4592 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
4593 __ bind(&add_arguments_object);
4594 __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
4596 // Do the allocation of both objects in one go.
4597 __ AllocateInNewSpace(a1,
4602 static_cast<AllocationFlags>(TAG_OBJECT |
4605 // Get the arguments boilerplate from the current (global) context.
4606 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4607 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4608 __ lw(t0, MemOperand(t0, Context::SlotOffset(
4609 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
4611 // Copy the JS object part.
4612 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4614 // Get the length (smi tagged) and set that as an in-object property too.
4615 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4616 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4617 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
4618 Heap::kArgumentsLengthIndex * kPointerSize));
4621 __ Branch(&done, eq, a1, Operand(zero_reg));
4623 // Get the parameters pointer from the stack.
4624 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
4626 // Setup the elements pointer in the allocated arguments object and
4627 // initialize the header in the elements fixed array.
4628 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
4629 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4630 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4631 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
4632 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
4633 // Untag the length for the loop.
4634 __ srl(a1, a1, kSmiTagSize);
4636 // Copy the fixed array slots.
4638 // Setup t0 to point to the first array slot.
4639 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4641 // Pre-decrement a2 with kPointerSize on each iteration.
4642 // Pre-decrement in order to skip receiver.
4643 __ Addu(a2, a2, Operand(-kPointerSize));
4644 __ lw(a3, MemOperand(a2));
4645 // Post-increment t0 with kPointerSize on each iteration.
4646 __ sw(a3, MemOperand(t0));
4647 __ Addu(t0, t0, Operand(kPointerSize));
4648 __ Subu(a1, a1, Operand(1));
4649 __ Branch(&loop, ne, a1, Operand(zero_reg));
4651 // Return and remove the on-stack parameters.
4653 __ Addu(sp, sp, Operand(3 * kPointerSize));
4656 // Do the runtime call to allocate the arguments object.
4658 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
4662 void RegExpExecStub::Generate(MacroAssembler* masm) {
4663 // Just jump directly to runtime if native RegExp is not selected at compile
4664 // time, or if the regexp entry in generated code is turned off by a runtime switch or at compilation.
4666 #ifdef V8_INTERPRETED_REGEXP
4667 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4668 #else // V8_INTERPRETED_REGEXP
4670 // Stack frame on entry.
4671 // sp[0]: last_match_info (expected JSArray)
4672 // sp[4]: previous index
4673 // sp[8]: subject string
4674 // sp[12]: JSRegExp object
4676 static const int kLastMatchInfoOffset = 0 * kPointerSize;
4677 static const int kPreviousIndexOffset = 1 * kPointerSize;
4678 static const int kSubjectOffset = 2 * kPointerSize;
4679 static const int kJSRegExpOffset = 3 * kPointerSize;
4681 Isolate* isolate = masm->isolate();
4683 Label runtime, invoke_regexp;
4685 // Allocation of registers for this function. These are in callee save
4686 // registers and will be preserved by the call to the native RegExp code, as
4687 // this code is called using the normal C calling convention. When calling
4688 // directly from generated code the native RegExp code will not do a GC and
4689 // therefore the contents of these registers are safe to use after the call.
4690 // MIPS - using s0..s2, since we are not using CEntry Stub.
4691 Register subject = s0;
4692 Register regexp_data = s1;
4693 Register last_match_info_elements = s2;
4695 // Ensure that a RegExp stack is allocated.
4696 ExternalReference address_of_regexp_stack_memory_address =
4697 ExternalReference::address_of_regexp_stack_memory_address(
4699 ExternalReference address_of_regexp_stack_memory_size =
4700 ExternalReference::address_of_regexp_stack_memory_size(isolate);
4701 __ li(a0, Operand(address_of_regexp_stack_memory_size));
4702 __ lw(a0, MemOperand(a0, 0));
4703 __ Branch(&runtime, eq, a0, Operand(zero_reg));
4705 // Check that the first argument is a JSRegExp object.
4706 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
4707 STATIC_ASSERT(kSmiTag == 0);
4708 __ JumpIfSmi(a0, &runtime);
4709 __ GetObjectType(a0, a1, a1);
4710 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
4712 // Check that the RegExp has been compiled (data contains a fixed array).
4713 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
4714 if (FLAG_debug_code) {
4715 __ And(t0, regexp_data, Operand(kSmiTagMask));
4717 "Unexpected type for RegExp data, FixedArray expected",
4720 __ GetObjectType(regexp_data, a0, a0);
4722 "Unexpected type for RegExp data, FixedArray expected",
4724 Operand(FIXED_ARRAY_TYPE));
4727 // regexp_data: RegExp data (FixedArray)
4728 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4729 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4730 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4732 // regexp_data: RegExp data (FixedArray)
4733 // Check that the number of captures fit in the static offsets vector buffer.
4735 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4736 // Calculate number of capture registers (number_of_captures + 1) * 2. This
4737 // uses the assumption that smis are 2 * their untagged value.
4738 STATIC_ASSERT(kSmiTag == 0);
4739 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4740 __ Addu(a2, a2, Operand(2)); // a2 was a smi.
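// E.g. number_of_captures == 3 arrives as the smi 6; 6 + 2 == 8 == (3 + 1) * 2,
// the required number of capture registers.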
4741 // Check that the static offsets vector buffer is large enough.
4742 __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
4744 // a2: Number of capture registers
4745 // regexp_data: RegExp data (FixedArray)
4746 // Check that the second argument is a string.
4747 __ lw(subject, MemOperand(sp, kSubjectOffset));
4748 __ JumpIfSmi(subject, &runtime);
4749 __ GetObjectType(subject, a0, a0);
4750 __ And(a0, a0, Operand(kIsNotStringMask));
4751 STATIC_ASSERT(kStringTag == 0);
4752 __ Branch(&runtime, ne, a0, Operand(zero_reg));
4754 // Get the length of the string into a3.
4755 __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
4757 // a2: Number of capture registers
4758 // a3: Length of subject string as a smi
4759 // subject: Subject string
4760 // regexp_data: RegExp data (FixedArray)
4761 // Check that the third argument is a positive smi less than the subject
4762 // string length. A negative value will be greater (unsigned comparison).
4763 __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
4764 __ And(at, a0, Operand(kSmiTagMask));
4765 __ Branch(&runtime, ne, at, Operand(zero_reg));
4766 __ Branch(&runtime, ls, a3, Operand(a0));
4768 // a2: Number of capture registers
4769 // subject: Subject string
4770 // regexp_data: RegExp data (FixedArray)
4771 // Check that the fourth object is a JSArray object.
4772 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
4773 __ JumpIfSmi(a0, &runtime);
4774 __ GetObjectType(a0, a1, a1);
4775 __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
4776 // Check that the JSArray is in fast case.
4777 __ lw(last_match_info_elements,
4778 FieldMemOperand(a0, JSArray::kElementsOffset));
4779 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4780 __ Branch(&runtime, ne, a0, Operand(
4781 isolate->factory()->fixed_array_map()));
4782 // Check that the last match info has space for the capture registers and the
4783 // additional information.
4785 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4786 __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
4787 __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
4788 __ Branch(&runtime, gt, a2, Operand(at));
4790 // Reset offset for possibly sliced string.
4791 __ mov(t0, zero_reg);
4792 // subject: Subject string
4793 // regexp_data: RegExp data (FixedArray)
4794 // Check the representation and encoding of the subject string.
4796 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4797 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
4798 // First check for flat string.
4799 __ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
4800 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
4801 __ Branch(&seq_string, eq, a1, Operand(zero_reg));
4803 // subject: Subject string
4804 // a0: instance type of Subject string
4805 // regexp_data: RegExp data (FixedArray)
4806 // Check for flat cons string or sliced string.
4807 // A flat cons string is a cons string where the second part is the empty
4808 // string. In that case the subject string is just the first part of the cons
4809 // string. Also in this case the first part of the cons string is known to be
4810 // a sequential string or an external string.
4811 // In the case of a sliced string its offset has to be taken into account.
4812 Label cons_string, check_encoding;
4813 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
4814 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
4815 __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
4816 __ Branch(&runtime, eq, a1, Operand(kExternalStringTag));
4818 // String is sliced.
4819 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
4820 __ sra(t0, t0, kSmiTagSize);
4821 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
4822 // t0: offset of sliced string, untagged.
4823 __ jmp(&check_encoding);
4824 // String is a cons string, check whether it is flat.
4825 __ bind(&cons_string);
4826 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
4827 __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
4828 __ Branch(&runtime, ne, a0, Operand(a1));
4829 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
4830 // Is first part of cons or parent of slice a flat string?
4831 __ bind(&check_encoding);
4832 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4833 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
4834 STATIC_ASSERT(kSeqStringTag == 0);
4835 __ And(at, a0, Operand(kStringRepresentationMask));
4836 __ Branch(&runtime, ne, at, Operand(zero_reg));
4838 __ bind(&seq_string);
4839 // subject: Subject string
4840 // regexp_data: RegExp data (FixedArray)
4841 // a0: Instance type of subject string
4842 STATIC_ASSERT(kStringEncodingMask == 4);
4843 STATIC_ASSERT(kAsciiStringTag == 4);
4844 STATIC_ASSERT(kTwoByteStringTag == 0);
4845 // Find the code object based on the assumptions above.
4846 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ascii.
4847 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
4848 __ sra(a3, a0, 2); // a3 is 1 for ascii, 0 for UC16 (used below).
4849 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
4850 __ movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
4852 // Check that the irregexp code has been generated for the actual string
4853 // encoding. If it has, the field contains a code object; otherwise it contains
4854 // a smi (code flushing support).
4855 __ JumpIfSmi(t9, &runtime);
4857 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
4859 // subject: Subject string
4860 // regexp_data: RegExp data (FixedArray)
4861 // Load used arguments before starting to push arguments for call to native
4862 // RegExp code to avoid handling changing stack height.
4863 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
4864 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
4866 // a1: previous index
4867 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
4869 // subject: Subject string
4870 // regexp_data: RegExp data (FixedArray)
4871 // All checks done. Now push arguments for native regexp code.
4872 __ IncrementCounter(isolate->counters()->regexp_entry_native(),
4875 // Isolates: note we add an additional parameter here (isolate pointer).
4876 static const int kRegExpExecuteArguments = 8;
4877 static const int kParameterRegisters = 4;
4878 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
4880 // Stack pointer now points to cell where return address is to be written.
4881 // Arguments are before that on the stack or in registers, meaning we
4882 // treat the return address as argument 5. Thus every argument after that
4883 // needs to be shifted back by 1. Since DirectCEntryStub will handle
4884 // allocating space for the c argument slots, we don't need to calculate
4885 // that into the argument positions on the stack. This is how the stack will
4886 // look (sp meaning the value of sp at this moment):
4887 // [sp + 4] - Argument 8
4888 // [sp + 3] - Argument 7
4889 // [sp + 2] - Argument 6
4890 // [sp + 1] - Argument 5
4891 // [sp + 0] - saved ra
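// (The offsets above appear to be in words, i.e. multiples of kPointerSize,
// matching the sw offsets used below.)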
4893 // Argument 8: Pass current isolate address.
4894 // CFunctionArgumentOperand handles MIPS stack argument slots.
4895 __ li(a0, Operand(ExternalReference::isolate_address()));
4896 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
4898 // Argument 7: Indicate that this is a direct call from JavaScript.
4899 __ li(a0, Operand(1));
4900 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
4902 // Argument 6: Start (high end) of backtracking stack memory area.
4903 __ li(a0, Operand(address_of_regexp_stack_memory_address));
4904 __ lw(a0, MemOperand(a0, 0));
4905 __ li(a2, Operand(address_of_regexp_stack_memory_size));
4906 __ lw(a2, MemOperand(a2, 0));
4907 __ addu(a0, a0, a2);
4908 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
4910 // Argument 5: static offsets vector buffer.
4912 ExternalReference::address_of_static_offsets_vector(isolate)));
4913 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
4915 // For arguments 4 and 3 get string length, calculate start of string data
4916 // and calculate the shift of the index (0 for ASCII and 1 for two byte).
4917 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
4918 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
4919 // Load the length from the original subject string from the previous stack
4920 // frame. Therefore we have to use fp, which points exactly to two pointer
4921 // sizes below the previous sp. (Because creating a new stack frame pushes
4922 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
4923 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
4924 // If slice offset is not 0, load the length from the original sliced string.
4925 // Argument 4, a3: End of string data
4926 // Argument 3, a2: Start of string data
4927 // Prepare start and end index of the input.
4928 __ sllv(t1, t0, a3);
4929 __ addu(t0, t2, t1);
4930 __ sllv(t1, a1, a3);
4931 __ addu(a2, t0, t1);
4933 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
4934 __ sra(t2, t2, kSmiTagSize);
4935 __ sllv(t1, t2, a3);
4936 __ addu(a3, t0, t1);
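// A reading of the address arithmetic above: a3 held log2 of the character
// size (0 for ASCII, 1 for two-byte) before being overwritten, t0 is the
// address of the first character of the subject (parent data plus slice
// offset), a2 (argument 3) is t0 + previous_index * char_size, and a3
// (argument 4) is t0 + subject_length * char_size.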
4937 // Argument 2 (a1): Previous index.
4940 // Argument 1 (a0): Subject string.
4941 __ mov(a0, subject);
4943 // Locate the code entry and call it.
4944 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
4945 DirectCEntryStub stub;
4946 stub.GenerateCall(masm, t9);
4948 __ LeaveExitFrame(false, no_reg);
4951 // subject: subject string (callee saved)
4952 // regexp_data: RegExp data (callee saved)
4953 // last_match_info_elements: Last match info elements (callee saved)
4955 // Check the result.
4958 __ Branch(&success, eq,
4959 v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
4961 __ Branch(&failure, eq,
4962 v0, Operand(NativeRegExpMacroAssembler::FAILURE));
4963 // If not exception it can only be retry. Handle that in the runtime system.
4964 __ Branch(&runtime, ne,
4965 v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
4966 // Result must now be exception. If there is no pending exception already, a
4967 // stack overflow (on the backtrack stack) was detected in RegExp code but
4968 // the exception has not yet been created. Handle that in the runtime system.
4969 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
4970 __ li(a1, Operand(isolate->factory()->the_hole_value()));
4971 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4973 __ lw(v0, MemOperand(a2, 0));
4974 __ Branch(&runtime, eq, v0, Operand(a1));
4976 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
4978 // Check if the exception is a termination. If so, throw as uncatchable.
4979 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
4980 Label termination_exception;
4981 __ Branch(&termination_exception, eq, v0, Operand(a0));
4983 __ Throw(v0); // Expects thrown value in v0.
4985 __ bind(&termination_exception);
4986 __ ThrowUncatchable(TERMINATION, v0); // Expects thrown value in v0.
4989 // For failure and exception return null.
4990 __ li(v0, Operand(isolate->factory()->null_value()));
4991 __ Addu(sp, sp, Operand(4 * kPointerSize));
4994 // Process the result from the native regexp code.
4997 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4998 // Calculate number of capture registers (number_of_captures + 1) * 2.
4999 STATIC_ASSERT(kSmiTag == 0);
5000 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5001 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
5003 // a1: number of capture registers
5004 // subject: subject string
5005 // Store the capture count.
5006 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
5007 __ sw(a2, FieldMemOperand(last_match_info_elements,
5008 RegExpImpl::kLastCaptureCountOffset));
5009 // Store last subject and last input.
5011 FieldMemOperand(last_match_info_elements,
5012 RegExpImpl::kLastSubjectOffset));
5013 __ mov(a2, subject);
5014 __ RecordWriteField(last_match_info_elements,
5015 RegExpImpl::kLastSubjectOffset,
5021 FieldMemOperand(last_match_info_elements,
5022 RegExpImpl::kLastInputOffset));
5023 __ RecordWriteField(last_match_info_elements,
5024 RegExpImpl::kLastInputOffset,
5030 // Get the static offsets vector filled by the native regexp code.
5031 ExternalReference address_of_static_offsets_vector =
5032 ExternalReference::address_of_static_offsets_vector(isolate);
5033 __ li(a2, Operand(address_of_static_offsets_vector));
5035 // a1: number of capture registers
5036 // a2: offsets vector
5037 Label next_capture, done;
5038 // Capture register counter starts from number of capture registers and
5039 // counts down until wrapping after zero.
5041 last_match_info_elements,
5042 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
5043 __ bind(&next_capture);
5044 __ Subu(a1, a1, Operand(1));
5045 __ Branch(&done, lt, a1, Operand(zero_reg));
5046 // Read the value from the static offsets vector buffer.
5047 __ lw(a3, MemOperand(a2, 0));
5048 __ addiu(a2, a2, kPointerSize);
5049 // Store the smi value in the last match info.
5050 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
5051 __ sw(a3, MemOperand(a0, 0));
5052 __ Branch(&next_capture, USE_DELAY_SLOT);
5053 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
5057 // Return last match info.
5058 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
5059 __ Addu(sp, sp, Operand(4 * kPointerSize));
5062 // Do the runtime call to execute the regexp.
5064 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5065 #endif // V8_INTERPRETED_REGEXP
5069 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
5070 const int kMaxInlineLength = 100;
5073 __ lw(a1, MemOperand(sp, kPointerSize * 2));
5074 STATIC_ASSERT(kSmiTag == 0);
5075 STATIC_ASSERT(kSmiTagSize == 1);
5076 __ JumpIfNotSmi(a1, &slowcase);
5077 __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
5078 // Smi-tagging is equivalent to multiplying by 2.
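// (Hence the unsigned 'hi' comparison above on tagged values rejects lengths
// greater than kMaxInlineLength and, being unsigned, negative smis as well.)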
5079 // Allocate RegExpResult followed by FixedArray with size in a2.
5080 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
5081 // Elements: [Map][Length][..elements..]
5082 // Size of JSArray with two in-object properties and the header of a
5083 // FixedArray.
5084 int objects_size =
5085 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
5086 __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
5087 __ Addu(a2, t1, Operand(objects_size));
5088 __ AllocateInNewSpace(
5089 a2, // In: Size, in words.
5090 v0, // Out: Start of allocation (tagged).
5091 a3, // Scratch register.
5092 t0, // Scratch register.
5094 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
5095 // v0: Start of allocated area, object-tagged.
5096 // a1: Number of elements in array, as smi.
5097 // t1: Number of elements, untagged.
5099 // Set JSArray map to global.regexp_result_map().
5100 // Set empty properties FixedArray.
5101 // Set elements to point to FixedArray allocated right after the JSArray.
5102 // Interleave operations for better latency.
5103 __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
5104 __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
5105 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
5106 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
5107 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
5108 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
5109 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
5110 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
5112 // Set input, index and length fields from arguments.
5113 __ lw(a1, MemOperand(sp, kPointerSize * 0));
5114 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
5115 __ lw(a1, MemOperand(sp, kPointerSize * 1));
5116 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
5117 __ lw(a1, MemOperand(sp, kPointerSize * 2));
5118 __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
5120 // Fill out the elements FixedArray.
5121 // v0: JSArray, tagged.
5122 // a3: FixedArray, tagged.
5123 // t1: Number of elements in array, untagged.
5126 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
5127 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
5128 // Set FixedArray length.
5129 __ sll(t2, t1, kSmiTagSize);
5130 __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
5131 // Fill contents of fixed-array with the-hole.
5132 __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
5133 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5134 // Fill fixed array elements with hole.
5135 // v0: JSArray, tagged.
5137 // a3: Start of elements in FixedArray.
5138 // t1: Number of elements to fill.
5140 __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
5141 __ addu(t1, t1, a3); // Point past last element to store.
5143 __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
5144 __ sw(a2, MemOperand(a3));
5145 __ Branch(&loop, USE_DELAY_SLOT);
5146 __ addiu(a3, a3, kPointerSize); // In branch delay slot.
5149 __ Addu(sp, sp, Operand(3 * kPointerSize));
5153 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
5157 void CallFunctionStub::FinishCode(Code* code) {
5158 code->set_has_function_cache(false);
5162 void CallFunctionStub::Clear(Heap* heap, Address address) {
5167 Object* CallFunctionStub::GetCachedValue(Address address) {
5173 void CallFunctionStub::Generate(MacroAssembler* masm) {
5174 Label slow, non_function;
5176 // The receiver might implicitly be the global object. This is
5177 // indicated by passing the hole as the receiver to the call
5179 if (ReceiverMightBeImplicit()) {
5181 // Get the receiver from the stack.
5182 // function, receiver [, arguments]
5183 __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
5184 // Call as function is indicated with the hole.
5185 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5186 __ Branch(&call, ne, t0, Operand(at));
5187 // Patch the receiver on the stack with the global receiver object.
5188 __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
5189 __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
5190 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
5194 // Get the function to call from the stack.
5195 // function, receiver [, arguments]
5196 __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
5198 // Check that the function is really a JavaScript function.
5199 // a1: pushed function (to be verified)
5200 __ JumpIfSmi(a1, &non_function);
5201 // Get the map of the function object.
5202 __ GetObjectType(a1, a2, a2);
5203 __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
5205 // Fast-case: Invoke the function now.
5206 // a1: pushed function
5207 ParameterCount actual(argc_);
5209 if (ReceiverMightBeImplicit()) {
5210 Label call_as_function;
5211 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5212 __ Branch(&call_as_function, eq, t0, Operand(at));
5213 __ InvokeFunction(a1,
5218 __ bind(&call_as_function);
5220 __ InvokeFunction(a1,
5226 // Slow-case: Non-function called.
5228 // Check for function proxy.
5229 __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
5230 __ push(a1); // Put proxy as additional argument.
5231 __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
5232 __ li(a2, Operand(0, RelocInfo::NONE));
5233 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
5234 __ SetCallKind(t1, CALL_AS_FUNCTION);
5236 Handle<Code> adaptor =
5237 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5238 __ Jump(adaptor, RelocInfo::CODE_TARGET);
5241 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5242 // of the original receiver from the call site).
5243 __ bind(&non_function);
5244 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
5245 __ li(a0, Operand(argc_)); // Setup the number of arguments.
5246 __ mov(a2, zero_reg);
5247 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
5248 __ SetCallKind(t1, CALL_AS_METHOD);
5249 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5250 RelocInfo::CODE_TARGET);
5254 // Unfortunately you have to run without snapshots to see most of these
5255 // names in the profile since most compare stubs end up in the snapshot.
5256 void CompareStub::PrintName(StringStream* stream) {
5257 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5258 (lhs_.is(a1) && rhs_.is(a0)));
5259 const char* cc_name;
5261 case lt: cc_name = "LT"; break;
5262 case gt: cc_name = "GT"; break;
5263 case le: cc_name = "LE"; break;
5264 case ge: cc_name = "GE"; break;
5265 case eq: cc_name = "EQ"; break;
5266 case ne: cc_name = "NE"; break;
5267 default: cc_name = "UnknownCondition"; break;
5269 bool is_equality = cc_ == eq || cc_ == ne;
5270 stream->Add("CompareStub_%s", cc_name);
5271 stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
5272 stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
5273 if (strict_ && is_equality) stream->Add("_STRICT");
5274 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5275 if (!include_number_compare_) stream->Add("_NO_NUMBER");
5276 if (!include_smi_compare_) stream->Add("_NO_SMI");
5280 int CompareStub::MinorKey() {
5281 // Encode the two parameters in a unique 16 bit value.
5282 ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
5283 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5284 (lhs_.is(a1) && rhs_.is(a0)));
5285 return ConditionField::encode(static_cast<unsigned>(cc_))
5286 | RegisterField::encode(lhs_.is(a0))
5287 | StrictField::encode(strict_)
5288 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5289 | IncludeSmiCompareField::encode(include_smi_compare_);
5293 // StringCharCodeAtGenerator.
5294 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5297 Label got_char_code;
5298 Label sliced_string;
5300 ASSERT(!t0.is(scratch_));
5301 ASSERT(!t0.is(index_));
5302 ASSERT(!t0.is(result_));
5303 ASSERT(!t0.is(object_));
5305 // If the receiver is a smi trigger the non-string case.
5306 __ JumpIfSmi(object_, receiver_not_string_);
5308 // Fetch the instance type of the receiver into result register.
5309 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5310 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5311 // If the receiver is not a string trigger the non-string case.
5312 __ And(t0, result_, Operand(kIsNotStringMask));
5313 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
5315 // If the index is non-smi trigger the non-smi case.
5316 __ JumpIfNotSmi(index_, &index_not_smi_);
5318 // Put smi-tagged index into scratch register.
5319 __ mov(scratch_, index_);
5320 __ bind(&got_smi_index_);
5322 // Check for index out of range.
5323 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
5324 __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
5326 // We need special handling for non-flat strings.
5327 STATIC_ASSERT(kSeqStringTag == 0);
5328 __ And(t0, result_, Operand(kStringRepresentationMask));
5329 __ Branch(&flat_string, eq, t0, Operand(zero_reg));
5331 // Handle non-flat strings.
5332 __ And(result_, result_, Operand(kStringRepresentationMask));
5333 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
5334 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
5335 __ Branch(&sliced_string, gt, result_, Operand(kExternalStringTag));
5336 __ Branch(&call_runtime_, eq, result_, Operand(kExternalStringTag));
5339 // Check whether the right hand side is the empty string (i.e. if
5340 // this is really a flat string in a cons string). If that is not
5341 // the case we would rather go to the runtime system now to flatten the string.
5343 Label assure_seq_string;
5344 __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
5345 __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
5346 __ Branch(&call_runtime_, ne, result_, Operand(t0));
5348 // Get the first of the two strings and load its instance type.
5349 __ lw(result_, FieldMemOperand(object_, ConsString::kFirstOffset));
5350 __ jmp(&assure_seq_string);
5352 // SlicedString, unpack and add offset.
5353 __ bind(&sliced_string);
5354 __ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
5355 __ addu(scratch_, scratch_, result_);
5356 __ lw(result_, FieldMemOperand(object_, SlicedString::kParentOffset));
5358 // Assure that we are dealing with a sequential string. Go to runtime if not.
5359 __ bind(&assure_seq_string);
5360 __ lw(result_, FieldMemOperand(result_, HeapObject::kMapOffset));
5361 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5362 // Check that parent is not an external string. Go to runtime otherwise.
5363 STATIC_ASSERT(kSeqStringTag == 0);
5365 __ And(t0, result_, Operand(kStringRepresentationMask));
5366 __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
5367 // Actually fetch the parent string if it is confirmed to be sequential.
5368 STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset);
5369 __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
5371 // Check for 1-byte or 2-byte string.
5372 __ bind(&flat_string);
5373 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
5374 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
5375 __ And(t0, result_, Operand(kStringEncodingMask));
5376 __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
5379 // Load the 2-byte character code into the result register. We can
5380 // add without shifting since the smi tag size is the log2 of the
5381 // number of bytes in a two-byte character.
5382 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
5383 __ Addu(scratch_, object_, Operand(scratch_));
5384 __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
5385 __ Branch(&got_char_code);
5388 // Load the byte into the result register.
5389 __ bind(&ascii_string);
5391 __ srl(t0, scratch_, kSmiTagSize);
5392 __ Addu(scratch_, object_, t0);
5394 __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
5396 __ bind(&got_char_code);
5397 __ sll(result_, result_, kSmiTagSize);
5402 void StringCharCodeAtGenerator::GenerateSlow(
5403 MacroAssembler* masm,
5404 const RuntimeCallHelper& call_helper) {
5405 __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5407 // Index is not a smi.
5408 __ bind(&index_not_smi_);
5409 // If index is a heap number, try converting it to an integer.
5412 Heap::kHeapNumberMapRootIndex,
5415 call_helper.BeforeCall(masm);
5416 // Consumed by runtime conversion function:
5417 __ Push(object_, index_, index_);
5418 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5419 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5421 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5422 // NumberToSmi discards numbers that are not exact integers.
5423 __ CallRuntime(Runtime::kNumberToSmi, 1);
5426 // Save the conversion result before the pop instructions below
5427 // have a chance to overwrite it.
5429 __ Move(scratch_, v0);
5433 // Reload the instance type.
5434 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5435 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5436 call_helper.AfterCall(masm);
5437 // If index is still not a smi, it must be out of range.
5438 __ JumpIfNotSmi(scratch_, index_out_of_range_);
5439 // Otherwise, return to the fast path.
5440 __ Branch(&got_smi_index_);
5442 // Call runtime. We get here when the receiver is a string and the
5443 // index is a number, but the code of getting the actual character
5444 // is too complex (e.g., when the string needs to be flattened).
5445 __ bind(&call_runtime_);
5446 call_helper.BeforeCall(masm);
5447 __ Push(object_, index_);
5448 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5450 __ Move(result_, v0);
5452 call_helper.AfterCall(masm);
5455 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5459 // -------------------------------------------------------------------------
5460 // StringCharFromCodeGenerator
5462 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5463 // Fast case of Heap::LookupSingleCharacterStringFromCode.
5465 ASSERT(!t0.is(result_));
5466 ASSERT(!t0.is(code_));
5468 STATIC_ASSERT(kSmiTag == 0);
5469 STATIC_ASSERT(kSmiShiftSize == 0);
5470 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5473 Operand(kSmiTagMask |
5474 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5475 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
5477 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5478 // At this point code register contains smi tagged ASCII char code.
5479 STATIC_ASSERT(kSmiTag == 0);
5480 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
5481 __ Addu(result_, result_, t0);
5482 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5483 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
5484 __ Branch(&slow_case_, eq, result_, Operand(t0));
5489 void StringCharFromCodeGenerator::GenerateSlow(
5490 MacroAssembler* masm,
5491 const RuntimeCallHelper& call_helper) {
5492 __ Abort("Unexpected fallthrough to CharFromCode slow case");
5494 __ bind(&slow_case_);
5495 call_helper.BeforeCall(masm);
5497 __ CallRuntime(Runtime::kCharFromCode, 1);
5498 __ Move(result_, v0);
5500 call_helper.AfterCall(masm);
5503 __ Abort("Unexpected fallthrough from CharFromCode slow case");
5507 // -------------------------------------------------------------------------
5508 // StringCharAtGenerator
5510 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5511 char_code_at_generator_.GenerateFast(masm);
5512 char_from_code_generator_.GenerateFast(masm);
5516 void StringCharAtGenerator::GenerateSlow(
5517 MacroAssembler* masm,
5518 const RuntimeCallHelper& call_helper) {
5519 char_code_at_generator_.GenerateSlow(masm, call_helper);
5520 char_from_code_generator_.GenerateSlow(masm, call_helper);
5524 class StringHelper : public AllStatic {
5526 // Generate code for copying characters using a simple loop. This should only
5527 // be used in places where the number of characters is small and the
5528 // additional setup and checking in GenerateCopyCharactersLong adds too much
5529 // overhead. Copying of overlapping regions is not supported.
5530 // Dest register ends at the position after the last character written.
5531 static void GenerateCopyCharacters(MacroAssembler* masm,
5538 // Generate code for copying a large number of characters. This function
5539 // is allowed to spend extra time setting up conditions to make copying
5540 // faster. Copying of overlapping regions is not supported.
5541 // Dest register ends at the position after the last character written.
5542 static void GenerateCopyCharactersLong(MacroAssembler* masm,
5554 // Probe the symbol table for a two character string. If the string is
5555 // not found by probing, a jump to the label not_found is performed. This jump
5556 // does not guarantee that the string is not in the symbol table. If the
5557 // string is found, the code falls through with the string in register v0.
5558 // Contents of both the c1 and c2 registers are modified. At the exit c1 is
5559 // guaranteed to contain a halfword with its low and high bytes equal to the
5560 // initial contents of c1 and c2 respectively.
5561 static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5571 // Generate string hash.
5572 static void GenerateHashInit(MacroAssembler* masm,
5574 Register character);
5576 static void GenerateHashAddCharacter(MacroAssembler* masm,
5578 Register character);
5580 static void GenerateHashGetHash(MacroAssembler* masm,
5584 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
5588 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5596 // This loop just copies one character at a time, as it is only used for
5597 // very short strings.
5599 __ addu(count, count, count);
5601 __ Branch(&done, eq, count, Operand(zero_reg));
5602 __ addu(count, dest, count); // Count now points just past the last dest byte.
5605 __ lbu(scratch, MemOperand(src));
5606 __ addiu(src, src, 1);
5607 __ sb(scratch, MemOperand(dest));
5608 __ addiu(dest, dest, 1);
5609 __ Branch(&loop, lt, dest, Operand(count));
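// [Illustrative sketch] The emitted loop is a plain byte-wise copy with no
// overlap handling, roughly:
//
//   while (dest < limit) {   // limit == original dest + number of bytes
//     *dest++ = *src++;
//   }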
5615 enum CopyCharactersFlags {
5617 DEST_ALWAYS_ALIGNED = 2
5621 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5631 bool ascii = (flags & COPY_ASCII) != 0;
5632 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5634 if (dest_always_aligned && FLAG_debug_code) {
5635 // Check that the destination is actually word aligned if the flag says it is.
5637 __ And(scratch4, dest, Operand(kPointerAlignmentMask));
5639 "Destination of copy not aligned.",
5644 const int kReadAlignment = 4;
5645 const int kReadAlignmentMask = kReadAlignment - 1;
5646 // Ensure that reading an entire aligned word containing the last character
5647 // of a string will not read outside the allocated area (because we pad up
5648 // to kObjectAlignment).
5649 STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5650 // Assumes word reads and writes are little endian.
5651 // Nothing to do for zero characters.
5655 __ addu(count, count, count);
5657 __ Branch(&done, eq, count, Operand(zero_reg));
5660 // Must copy at least eight bytes, otherwise just do it one byte at a time.
5661 __ Subu(scratch1, count, Operand(8));
5662 __ Addu(count, dest, Operand(count));
5663 Register limit = count; // Copy until dest reaches this.
5664 __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
5666 if (!dest_always_aligned) {
5667 // Align dest by byte copying. Copies between zero and three bytes.
5668 __ And(scratch4, dest, Operand(kReadAlignmentMask));
5670 __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
5672 __ bind(&aligned_loop);
5673 __ lbu(scratch1, MemOperand(src));
5674 __ addiu(src, src, 1);
5675 __ sb(scratch1, MemOperand(dest));
5676 __ addiu(dest, dest, 1);
5677 __ addiu(scratch4, scratch4, 1);
5678 __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
5679 __ bind(&dest_aligned);
5684 __ And(scratch4, src, Operand(kReadAlignmentMask));
5685 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
5687 // Loop for src/dst that are not aligned the same way.
5688 // This loop uses lwl and lwr instructions. These instructions
5689 // depend on the endianness, and the implementation assumes little-endian.
5693 __ lwr(scratch1, MemOperand(src));
5694 __ Addu(src, src, Operand(kReadAlignment));
5695 __ lwl(scratch1, MemOperand(src, -1));
5696 __ sw(scratch1, MemOperand(dest));
5697 __ Addu(dest, dest, Operand(kReadAlignment));
5698 __ Subu(scratch2, limit, dest);
5699 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5702 __ Branch(&byte_loop);
5705 // Copy words from src to dest, until less than four bytes left.
5706 // Both src and dest are word aligned.
5707 __ bind(&simple_loop);
5711 __ lw(scratch1, MemOperand(src));
5712 __ Addu(src, src, Operand(kReadAlignment));
5713 __ sw(scratch1, MemOperand(dest));
5714 __ Addu(dest, dest, Operand(kReadAlignment));
5715 __ Subu(scratch2, limit, dest);
5716 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5719 // Copy bytes from src to dest until dest hits limit.
5720 __ bind(&byte_loop);
5721 // Test if dest has already reached the limit.
5722 __ Branch(&done, ge, dest, Operand(limit));
5723 __ lbu(scratch1, MemOperand(src));
5724 __ addiu(src, src, 1);
5725 __ sb(scratch1, MemOperand(dest));
5726 __ addiu(dest, dest, 1);
5727 __ Branch(&byte_loop);
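// [Illustrative sketch] In outline, the long-copy path above runs in three
// phases (pseudo-C; aligned(), load_word() and store_word() are placeholders):
//
//   while (!aligned(dest)) *dest++ = *src++;   // skipped if DEST_ALWAYS_ALIGNED
//   while (limit - dest >= 4) {                // word loop: lw/sw, or lwr/lwl
//     store_word(dest, load_word(src));        //   pairs when src is misaligned
//     src += 4; dest += 4;
//   }
//   while (dest < limit) *dest++ = *src++;     // byte tail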
5733 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5742 // Register scratch3 is the general scratch register in this function.
5743 Register scratch = scratch3;
5745 // Make sure that both characters are not digits, as such strings have a
5746 // different hash algorithm. Don't try to look for these in the symbol table.
5747 Label not_array_index;
5748 __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5749 __ Branch(&not_array_index,
5752 Operand(static_cast<int>('9' - '0')));
5753 __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5755 // If the check failed, combine both characters into a single halfword.
5756 // This is required by the contract of the method: code at the
5757 // not_found branch expects this combination in the c1 register.
5759 __ sll(scratch1, c2, kBitsPerByte);
5760 __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5761 __ Or(c1, c1, scratch1);
5763 __ Branch(not_found,
5766 Operand(static_cast<int>('9' - '0')));
5768 __ bind(&not_array_index);
5769 // Calculate the two character string hash.
5770 Register hash = scratch1;
5771 StringHelper::GenerateHashInit(masm, hash, c1);
5772 StringHelper::GenerateHashAddCharacter(masm, hash, c2);
5773 StringHelper::GenerateHashGetHash(masm, hash);
5775 // Collect the two characters in a register.
5776 Register chars = c1;
5777 __ sll(scratch, c2, kBitsPerByte);
5778 __ Or(chars, chars, scratch);
5780 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5781 // hash: hash of two character string.
5783 // Load symbol table.
5784 // Load address of first element of the symbol table.
5785 Register symbol_table = c2;
5786 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5788 Register undefined = scratch4;
5789 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5791 // Calculate capacity mask from the symbol table capacity.
5792 Register mask = scratch2;
5793 __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5794 __ sra(mask, mask, 1);
5795 __ Addu(mask, mask, -1);
5797 // Calculate untagged address of the first element of the symbol table.
5798 Register first_symbol_table_element = symbol_table;
5799 __ Addu(first_symbol_table_element, symbol_table,
5800 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5803 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5804 // hash: hash of two character string
5805 // mask: capacity mask
5806 // first_symbol_table_element: address of the first element of the symbol table
5808 // undefined: the undefined object
5811 // Perform a number of probes in the symbol table.
5812 static const int kProbes = 4;
5813 Label found_in_symbol_table;
5814 Label next_probe[kProbes];
5815 Register candidate = scratch5; // Scratch register contains candidate.
5816 for (int i = 0; i < kProbes; i++) {
5817 // Calculate entry in symbol table.
5819 __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5821 __ mov(candidate, hash);
5824 __ And(candidate, candidate, Operand(mask));
5826 // Load the entry from the symbol table.
5827 STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5828 __ sll(scratch, candidate, kPointerSizeLog2);
5829 __ Addu(scratch, scratch, first_symbol_table_element);
5830 __ lw(candidate, MemOperand(scratch));
5832 // If the entry is undefined, no string with this hash can be found.
5834 __ GetObjectType(candidate, scratch, scratch);
5835 __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
5837 __ Branch(not_found, eq, undefined, Operand(candidate));
5838 // Must be null (deleted entry).
5839 if (FLAG_debug_code) {
5840 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
5841 __ Assert(eq, "oddball in symbol table is not undefined or null",
5842 scratch, Operand(candidate));
5844 __ jmp(&next_probe[i]);
5846 __ bind(&is_string);
5848 // Check that the candidate is a non-external ASCII string. The instance
5849 // type is still in the scratch register from the GetObjectType call above.
5851 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5853 // If length is not 2 the string is not a candidate.
5854 __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5855 __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
5857 // Check if the two characters match.
5858 // Assumes that word load is little endian.
5859 __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5860 __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
5861 __ bind(&next_probe[i]);
5864 // No matching 2 character string found by probing.
5867 // Scratch register contains result when we fall through to here.
5868 Register result = candidate;
5869 __ bind(&found_in_symbol_table);
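// [Illustrative sketch] The probe sequence above is a bounded open-addressing
// lookup; probe_offset() and the accessors below are placeholders:
//
//   for (int i = 0; i < kProbes; i++) {
//     entry = (hash + probe_offset(i)) & mask;
//     candidate = symbol_table[entry];
//     if (candidate == undefined) goto not_found;            // chain ends here
//     if (is_sequential_ascii(candidate) && length(candidate) == 2 &&
//         first_two_chars(candidate) == chars) goto found_in_symbol_table;
//   }
//   goto not_found;   // probes exhausted; the string may still be in the table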
5874 void StringHelper::GenerateHashInit(MacroAssembler* masm,
5876 Register character) {
5877 // hash = character + (character << 10);
5878 __ sll(hash, character, 10);
5879 __ addu(hash, hash, character);
5880 // hash ^= hash >> 6;
5881 __ sra(at, hash, 6);
5882 __ xor_(hash, hash, at);
5886 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5888 Register character) {
5889 // hash += character;
5890 __ addu(hash, hash, character);
5891 // hash += hash << 10;
5892 __ sll(at, hash, 10);
5893 __ addu(hash, hash, at);
5894 // hash ^= hash >> 6;
5895 __ sra(at, hash, 6);
5896 __ xor_(hash, hash, at);
5900 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5902 // hash += hash << 3;
5903 __ sll(at, hash, 3);
5904 __ addu(hash, hash, at);
5905 // hash ^= hash >> 11;
5906 __ sra(at, hash, 11);
5907 __ xor_(hash, hash, at);
5908 // hash += hash << 15;
5909 __ sll(at, hash, 15);
5910 __ addu(hash, hash, at);
5912 // if (hash == 0) hash = 27;
5913 __ ori(at, zero_reg, 27);
5914 __ movz(hash, at, hash);
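// [Illustrative sketch] GenerateHashInit, GenerateHashAddCharacter and
// GenerateHashGetHash together emit the string hash used above; collecting the
// per-step comments into plain C (modulo-2^32 arithmetic; note the emitted
// code uses arithmetic right shifts, sra):
//
//   hash = 0;
//   for (i = 0; i < length; i++) {
//     hash += chars[i];
//     hash += hash << 10;
//     hash ^= hash >> 6;
//   }
//   hash += hash << 3;
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   if (hash == 0) hash = 27;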
5918 void SubStringStub::Generate(MacroAssembler* masm) {
5919 Label sub_string_runtime;
5920 // Stack frame on entry.
5921 // ra: return address
5926 // This stub is called from the native-call %_SubString(...), so
5927 // nothing can be assumed about the arguments. It is tested that:
5928 // "string" is a sequential string,
5929 // both "from" and "to" are smis, and
5930 // 0 <= from <= to <= string.length.
5931 // If any of these assumptions fail, we call the runtime system.
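// [Illustrative sketch] Written out as a guard, the fast path only proceeds
// when all of the following hold; otherwise it tail-calls the runtime:
//
//   IsSmi(from) && IsSmi(to) &&
//   0 <= from && from <= to && to <= string.length &&
//   (to - from) >= 2        // lengths 0 and 1 are handled in the runtime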
5933 static const int kToOffset = 0 * kPointerSize;
5934 static const int kFromOffset = 1 * kPointerSize;
5935 static const int kStringOffset = 2 * kPointerSize;
5940 // Check bounds and smi-ness.
5941 __ lw(to, MemOperand(sp, kToOffset));
5942 __ lw(from, MemOperand(sp, kFromOffset));
5943 STATIC_ASSERT(kFromOffset == kToOffset + 4);
5944 STATIC_ASSERT(kSmiTag == 0);
5945 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5947 __ JumpIfNotSmi(from, &sub_string_runtime);
5948 __ JumpIfNotSmi(to, &sub_string_runtime);
5950 __ sra(a3, from, kSmiTagSize); // Remove smi tag.
5951 __ sra(t5, to, kSmiTagSize); // Remove smi tag.
5953 // a3: from index (untagged smi)
5954 // t5: to index (untagged smi)
5956 __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg)); // From < 0.
5958 __ subu(a2, t5, a3);
5959 __ Branch(&sub_string_runtime, gt, a3, Operand(t5)); // Fail if from > to.
5961 // Special handling of sub-strings of length 1 and 2. One character strings
5962 // are handled in the runtime system (looked up in the single character
5963 // cache). Two character strings are looked for in the symbol cache in generated code.
5965 __ Branch(&sub_string_runtime, lt, a2, Operand(2));
5967 // Both to and from are smis.
5969 // a2: result string length
5970 // a3: from index (untagged smi)
5971 // t2: (a.k.a. to): to (smi)
5972 // t3: (a.k.a. from): from offset (smi)
5973 // t5: to index (untagged smi)
5975 // Make sure first argument is a sequential (or flat) string.
5976 __ lw(v0, MemOperand(sp, kStringOffset));
5977 __ Branch(&sub_string_runtime, eq, v0, Operand(kSmiTagMask));
5979 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
5980 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
5981 __ And(t4, v0, Operand(kIsNotStringMask));
5983 __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
5985 // Shortcut for the case of a trivial substring.
5987 // v0: original string
5988 // a2: result string length
5989 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
5991 __ Branch(&return_v0, eq, a2, Operand(t0));
5994 if (FLAG_string_slices) {
5995 __ Branch(&create_slice, ge, a2, Operand(SlicedString::kMinLength));
5998 // v0: original string
5999 // a1: instance type
6000 // a2: result string length
6001 // a3: from index (untagged smi)
6002 // t2: (a.k.a. to): to (smi)
6003 // t3: (a.k.a. from): from offset (smi)
6004 // t5: to index (untagged smi)
6007 __ And(t0, a1, Operand(kStringRepresentationMask));
6008 STATIC_ASSERT(kSeqStringTag < kConsStringTag);
6009 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
6010 STATIC_ASSERT(kConsStringTag < kSlicedStringTag);
6012 // Slices and external strings go to runtime.
6013 __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
6015 // Sequential strings are handled directly.
6016 __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
6018 // Cons string. Try to recurse (once) on the first substring.
6019 // (This adds a little more generality than necessary to handle flattened
6020 // cons strings, but not much).
6021 __ lw(v0, FieldMemOperand(v0, ConsString::kFirstOffset));
6022 __ lw(t0, FieldMemOperand(v0, HeapObject::kMapOffset));
6023 __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6024 STATIC_ASSERT(kSeqStringTag == 0);
6025 // Cons, slices and external strings go to runtime.
6026 __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
6028 // Definitely a sequential string.
6029 __ bind(&seq_string);
6031 // v0: original string
6032 // a1: instance type
6033 // a2: result string length
6034 // a3: from index (untagged smi)
6035 // t2: (a.k.a. to): to (smi)
6036 // t3: (a.k.a. from): from offset (smi)
6037 // t5: to index (untagged smi)
6039 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
6040 __ Branch(&sub_string_runtime, lt, t0, Operand(to)); // Fail if to > length.
6043 // v0: original string or left hand side of the original cons string.
6044 // a1: instance type
6045 // a2: result string length
6046 // a3: from index (untagged smi)
6047 // t3: (a.k.a. from): from offset (smi)
6048 // t5: to index (untagged smi)
6050 // Check for flat ASCII string.
6051 Label non_ascii_flat;
6052 STATIC_ASSERT(kTwoByteStringTag == 0);
6054 __ And(t4, a1, Operand(kStringEncodingMask));
6055 __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
6057 Label result_longer_than_two;
6058 __ Branch(&result_longer_than_two, gt, a2, Operand(2));
6060 // Sub string of length 2 requested.
6061 // Get the two characters forming the sub string.
6062 __ Addu(v0, v0, Operand(a3));
6063 __ lbu(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6064 __ lbu(t0, FieldMemOperand(v0, SeqAsciiString::kHeaderSize + 1));
6066 // Try to look up the two character string in the symbol table.
6067 Label make_two_character_string;
6068 StringHelper::GenerateTwoCharacterSymbolTableProbe(
6069 masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
6070 Counters* counters = masm->isolate()->counters();
6073 // a2: result string length.
6074 // a3: two characters combined into halfword in little endian byte order.
6075 __ bind(&make_two_character_string);
6076 __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
6077 __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6080 __ bind(&result_longer_than_two);
6082 // Locate 'from' character of string.
6083 __ Addu(t1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6084 __ sra(t4, from, 1);
6085 __ Addu(t1, t1, t4);
6087 // Allocate the result.
6088 __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
6090 // v0: result string
6091 // a2: result string length
6092 // a3: from index (untagged smi)
6093 // t1: first character of substring to copy
6094 // t3: (a.k.a. from): from offset (smi)
6095 // Locate first character of result.
6096 __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6098 // v0: result string
6099 // a1: first character of result string
6100 // a2: result string length
6101 // t1: first character of substring to copy
6102 STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
6103 StringHelper::GenerateCopyCharactersLong(
6104 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
6107 __ bind(&non_ascii_flat);
6108 // a2: result string length
6110 // t3: (a.k.a. from): from offset (smi)
6111 // Check for flat two byte string.
6113 // Locate 'from' character of string.
6114 __ Addu(t1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6115 // As "from" is a smi it is 2 times the value which matches the size of a two
6117 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
6118 __ Addu(t1, t1, Operand(from));
6120 // Allocate the result.
6121 __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
6123 // v0: result string
6124 // a2: result string length
6125 // t1: first character of substring to copy
6126 // Locate first character of result.
6127 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6131 // v0: result string.
6132 // a1: first character of result.
6133 // a2: result length.
6134 // t1: first character of substring to copy.
6135 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
6136 StringHelper::GenerateCopyCharactersLong(
6137 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
6140 if (FLAG_string_slices) {
6141 __ bind(&create_slice);
6142 // v0: original string
6143 // a1: instance type
6145 // a3: from index (untagged smi)
6146 // t2 (a.k.a. to): to (smi)
6147 // t3 (a.k.a. from): from offset (smi)
6148 Label allocate_slice, sliced_string, seq_string;
6149 STATIC_ASSERT(kSeqStringTag == 0);
6150 __ And(t4, a1, Operand(kStringRepresentationMask));
6151 __ Branch(&seq_string, eq, t4, Operand(zero_reg));
6152 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
6153 STATIC_ASSERT(kIsIndirectStringMask != 0);
6154 __ And(t4, a1, Operand(kIsIndirectStringMask));
6155 // External string. Jump to runtime.
6156 __ Branch(&sub_string_runtime, eq, t4, Operand(zero_reg));
6158 __ And(t4, a1, Operand(kSlicedNotConsMask));
6159 __ Branch(&sliced_string, ne, t4, Operand(zero_reg));
6160 // Cons string. Check whether it is flat, then fetch first part.
6161 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
6162 __ LoadRoot(t5, Heap::kEmptyStringRootIndex);
6163 __ Branch(&sub_string_runtime, ne, t1, Operand(t5));
6164 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
6165 __ jmp(&allocate_slice);
6167 __ bind(&sliced_string);
6168 // Sliced string. Fetch parent and correct start index by offset.
6169 __ lw(t1, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6170 __ addu(t3, t3, t1);
6171 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6172 __ jmp(&allocate_slice);
6174 __ bind(&seq_string);
6175 // Sequential string. Just move string to the right register.
6178 __ bind(&allocate_slice);
6179 // a1: instance type of original string
6181 // t1: underlying subject string
6182 // t3 (a.k.a. from): from offset (smi)
6183 // Allocate new sliced string. At this point we do not reload the instance
6184 // type including the string encoding because we simply rely on the info
6185 // provided by the original string. It does not matter if the original
6186 // string's encoding is wrong, because we always have to recheck the encoding
6187 // of the newly created string's parent anyway due to externalized strings.
6188 Label two_byte_slice, set_slice_header;
6189 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
6190 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
6191 __ And(t4, a1, Operand(kStringEncodingMask));
6192 __ Branch(&two_byte_slice, eq, t4, Operand(zero_reg));
6193 __ AllocateAsciiSlicedString(v0, a2, a3, t0, &sub_string_runtime);
6194 __ jmp(&set_slice_header);
6195 __ bind(&two_byte_slice);
6196 __ AllocateTwoByteSlicedString(v0, a2, a3, t0, &sub_string_runtime);
6197 __ bind(&set_slice_header);
6198 __ sw(t3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6199 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6202 __ bind(&return_v0);
6203 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
6204 __ Addu(sp, sp, Operand(3 * kPointerSize));
6207 // Just jump to runtime to create the sub string.
6208 __ bind(&sub_string_runtime);
6209 __ TailCallRuntime(Runtime::kSubString, 3, 1);
6213 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6218 Register scratch3) {
6219 Register length = scratch1;
6222 Label strings_not_equal, check_zero_length;
6223 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
6224 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6225 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
6226 __ bind(&strings_not_equal);
6227 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
6230 // Check if the length is zero.
6231 Label compare_chars;
6232 __ bind(&check_zero_length);
6233 STATIC_ASSERT(kSmiTag == 0);
6234 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
6235 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6238 // Compare characters.
6239 __ bind(&compare_chars);
6241 GenerateAsciiCharsCompareLoop(masm,
6242 left, right, length, scratch2, scratch3, v0,
6243 &strings_not_equal);
6245 // Characters are equal.
6246 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6251 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6257 Register scratch4) {
6258 Label result_not_equal, compare_lengths;
6259 // Find minimum length and length difference.
6260 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
6261 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6262 __ Subu(scratch3, scratch1, Operand(scratch2));
6263 Register length_delta = scratch3;
6264 __ slt(scratch4, scratch2, scratch1);
6265 __ movn(scratch1, scratch2, scratch4);
6266 Register min_length = scratch1;
6267 STATIC_ASSERT(kSmiTag == 0);
6268 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
6271 GenerateAsciiCharsCompareLoop(masm,
6272 left, right, min_length, scratch2, scratch4, v0,
6275 // Compare lengths - strings up to min-length are equal.
6276 __ bind(&compare_lengths);
6277 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6278 // Use length_delta as result if it's zero.
6279 __ mov(scratch2, length_delta);
6280 __ mov(scratch4, zero_reg);
6281 __ mov(v0, zero_reg);
6283 __ bind(&result_not_equal);
6284 // Conditionally update the result based either on length_delta or
6285 // the last comparison performed in the loop above.
6287 __ Branch(&ret, eq, scratch2, Operand(scratch4));
6288 __ li(v0, Operand(Smi::FromInt(GREATER)));
6289 __ Branch(&ret, gt, scratch2, Operand(scratch4));
6290 __ li(v0, Operand(Smi::FromInt(LESS)));
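// [Illustrative sketch] The selection above amounts to comparing two values a
// and b, where (a, b) is either (length_delta, 0) or the first pair of
// differing characters:
//
//   v0 = (a == b) ? EQUAL : (a > b) ? GREATER : LESS;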
6296 void StringCompareStub::GenerateAsciiCharsCompareLoop(
6297 MacroAssembler* masm,
6304 Label* chars_not_equal) {
6305 // Change index to run from -length to -1 by adding length to string
6306 // start. This means that the loop ends when index reaches zero, which
6307 // doesn't need an additional compare.
6308 __ SmiUntag(length);
6309 __ Addu(scratch1, length,
6310 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6311 __ Addu(left, left, Operand(scratch1));
6312 __ Addu(right, right, Operand(scratch1));
6313 __ Subu(length, zero_reg, length);
6314 Register index = length; // index = -length;
6320 __ Addu(scratch3, left, index);
6321 __ lbu(scratch1, MemOperand(scratch3));
6322 __ Addu(scratch3, right, index);
6323 __ lbu(scratch2, MemOperand(scratch3));
6324 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
6325 __ Addu(index, index, 1);
6326 __ Branch(&loop, ne, index, Operand(zero_reg));
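// [Illustrative sketch] The negative-index trick above avoids an extra
// end-of-loop compare:
//
//   left += length; right += length;        // point just past the characters
//   for (index = -length; index != 0; index++) {
//     if (left[index] != right[index]) goto chars_not_equal;
//   }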
6330 void StringCompareStub::Generate(MacroAssembler* masm) {
6333 Counters* counters = masm->isolate()->counters();
6335 // Stack frame on entry.
6336 // sp[0]: right string
6337 // sp[4]: left string
6338 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
6339 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
6342 __ Branch(&not_same, ne, a0, Operand(a1));
6343 STATIC_ASSERT(EQUAL == 0);
6344 STATIC_ASSERT(kSmiTag == 0);
6345 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6346 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
6347 __ Addu(sp, sp, Operand(2 * kPointerSize));
6352 // Check that both objects are sequential ASCII strings.
6353 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
6355 // Compare flat ASCII strings natively. Remove arguments from stack first.
6356 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
6357 __ Addu(sp, sp, Operand(2 * kPointerSize));
6358 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
6361 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6365 void StringAddStub::Generate(MacroAssembler* masm) {
6366 Label string_add_runtime, call_builtin;
6367 Builtins::JavaScript builtin_id = Builtins::ADD;
6369 Counters* counters = masm->isolate()->counters();
6372 // sp[0]: second argument (right).
6373 // sp[4]: first argument (left).
6375 // Load the two arguments.
6376 __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
6377 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
6379 // Make sure that both arguments are strings if not known in advance.
6380 if (flags_ == NO_STRING_ADD_FLAGS) {
6381 __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
6382 // Load instance types.
6383 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6384 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6385 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6386 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6387 STATIC_ASSERT(kStringTag == 0);
6388 // If either is not a string, go to runtime.
6389 __ Or(t4, t0, Operand(t1));
6390 __ And(t4, t4, Operand(kIsNotStringMask));
6391 __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
6393 // Here at least one of the arguments is definitely a string.
6394 // We convert the one that is not known to be a string.
6395 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6396 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6397 GenerateConvertArgument(
6398 masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
6399 builtin_id = Builtins::STRING_ADD_RIGHT;
6400 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6401 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6402 GenerateConvertArgument(
6403 masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
6404 builtin_id = Builtins::STRING_ADD_LEFT;
6408 // Both arguments are strings.
6410 // a1: second string
6411 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6412 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6414 Label strings_not_empty;
6415 // Check if either of the strings is empty. In that case return the other.
6416 // These tests use a zero-length check on the string length, which is a Smi.
6417 // Assert that Smi::FromInt(0) is really 0.
6418 STATIC_ASSERT(kSmiTag == 0);
6419 ASSERT(Smi::FromInt(0) == 0);
6420 __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
6421 __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
6422 __ mov(v0, a0); // Assume we'll return first string (from a0).
6423 __ movz(v0, a1, a2); // If first is empty, return second (from a1).
6424 __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
6425 __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
6426 __ and_(t4, t4, t5); // Branch if both strings were non-empty.
6427 __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
6429 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6430 __ Addu(sp, sp, Operand(2 * kPointerSize));
6433 __ bind(&strings_not_empty);
6436 // Untag both string-lengths.
6437 __ sra(a2, a2, kSmiTagSize);
6438 __ sra(a3, a3, kSmiTagSize);
6440 // Both strings are non-empty.
6442 // a1: second string
6443 // a2: length of first string
6444 // a3: length of second string
6445 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6446 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6447 // Look at the length of the result of adding the two strings.
6448 Label string_add_flat_result, longer_than_two;
6449 // Adding two lengths can't overflow.
6450 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
6451 __ Addu(t2, a2, Operand(a3));
6452 // Use the symbol table when adding two one character strings, as it
6453 // helps later optimizations to return a symbol here.
6454 __ Branch(&longer_than_two, ne, t2, Operand(2));
6456 // Check that both strings are non-external ASCII strings.
6457 if (flags_ != NO_STRING_ADD_FLAGS) {
6458 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6459 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6460 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6461 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6463 __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
6464 &string_add_runtime);
6466 // Get the two characters forming the sub string.
6467 __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
6468 __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
6470 // Try to lookup two character string in symbol table. If it is not found
6471 // just allocate a new one.
6472 Label make_two_character_string;
6473 StringHelper::GenerateTwoCharacterSymbolTableProbe(
6474 masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
6475 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6476 __ Addu(sp, sp, Operand(2 * kPointerSize));
6479 __ bind(&make_two_character_string);
6480 // The resulting string has length 2, and the first chars of the two strings
6481 // are combined into a single halfword in the a2 register.
6482 // So we can fill the resulting string without two loops, using a single
6483 // halfword store instruction (which assumes that the processor is
6484 // in little-endian mode).
6485 __ li(t2, Operand(2));
6486 __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
6487 __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6488 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6489 __ Addu(sp, sp, Operand(2 * kPointerSize));
6492 __ bind(&longer_than_two);
6493 // Check if resulting string will be flat.
6494 __ Branch(&string_add_flat_result, lt, t2,
6495 Operand(String::kMinNonFlatLength));
6496 // Handle exceptionally long strings in the runtime system.
6497 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6498 ASSERT(IsPowerOf2(String::kMaxLength + 1));
6499 // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
6500 __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
6502 // If result is not supposed to be flat, allocate a cons string object.
6503 // If both strings are ASCII the result is an ASCII cons string.
6504 if (flags_ != NO_STRING_ADD_FLAGS) {
6505 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6506 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6507 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6508 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6510 Label non_ascii, allocated, ascii_data;
6511 STATIC_ASSERT(kTwoByteStringTag == 0);
6512 // Branch to non_ascii if either string-encoding field is zero (non-ascii).
6513 __ And(t4, t0, Operand(t1));
6514 __ And(t4, t4, Operand(kStringEncodingMask));
6515 __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
6517 // Allocate an ASCII cons string.
6518 __ bind(&ascii_data);
6519 __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
6520 __ bind(&allocated);
6521 // Fill the fields of the cons string.
6522 __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
6523 __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
6525 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6526 __ Addu(sp, sp, Operand(2 * kPointerSize));
6529 __ bind(&non_ascii);
6530 // At least one of the strings is two-byte. Check whether it happens
6531 // to contain only ASCII characters.
6532 // t0: first instance type.
6533 // t1: second instance type.
6534 // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
6535 __ And(at, t0, Operand(kAsciiDataHintMask));
6536 __ and_(at, at, t1);
6537 __ Branch(&ascii_data, ne, at, Operand(zero_reg));
6539 __ xor_(t0, t0, t1);
6540 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
6541 __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6542 __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6544 // Allocate a two byte cons string.
6545 __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
6546 __ Branch(&allocated);
6548 // Handle creating a flat result. First check that both strings are
6549 // sequential and that they have the same encoding.
6551 // a1: second string
6552 // a2: length of first string
6553 // a3: length of second string
6554 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6555 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6556 // t2: sum of lengths.
6557 __ bind(&string_add_flat_result);
6558 if (flags_ != NO_STRING_ADD_FLAGS) {
6559 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6560 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6561 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6562 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6564 // Check that both strings are sequential, meaning that we
6565 // branch to runtime if either string tag is non-zero.
6566 STATIC_ASSERT(kSeqStringTag == 0);
6567 __ Or(t4, t0, Operand(t1));
6568 __ And(t4, t4, Operand(kStringRepresentationMask));
6569 __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
6571 // Now check if both strings have the same encoding (ASCII/Two-byte).
6573 // a1: second string
6574 // a2: length of first string
6575 // a3: length of second string
6576 // t0: first string instance type
6577 // t1: second string instance type
6578 // t2: sum of lengths.
6579 Label non_ascii_string_add_flat_result;
6580 ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
6581 __ xor_(t3, t1, t0);
6582 __ And(t3, t3, Operand(kStringEncodingMask));
6583 __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
6584 // And see if it's ASCII (non-zero) or two-byte (zero).
6585 __ And(t3, t0, Operand(kStringEncodingMask));
6586 __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
6588 // Both strings are sequential ASCII strings. We also know that they are
6589 // short (since the sum of the lengths is less than kMinNonFlatLength).
6590 // t2: length of resulting flat string
6591 __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
6592 // Locate first character of result.
6593 __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6594 // Locate first character of first argument.
6595 __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6596 // a0: first character of first string.
6597 // a1: second string.
6598 // a2: length of first string.
6599 // a3: length of second string.
6600 // t2: first character of result.
6601 // t3: result string.
6602 StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
6604 // Load second argument and locate first character.
6605 __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6606 // a1: first character of second string.
6607 // a3: length of second string.
6608 // t2: next character of result.
6609 // t3: result string.
6610 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
6612 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6613 __ Addu(sp, sp, Operand(2 * kPointerSize));
6616 __ bind(&non_ascii_string_add_flat_result);
6617 // Both strings are sequential two byte strings.
6618 // a0: first string.
6619 // a1: second string.
6620 // a2: length of first string.
6621 // a3: length of second string.
6622 // t2: sum of length of strings.
6623 __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
6624 // a0: first string.
6625 // a1: second string.
6626 // a2: length of first string.
6627 // a3: length of second string.
6628 // t3: result string.
6630 // Locate first character of result.
6631 __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6632 // Locate first character of first argument.
6633 __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6635 // a0: first character of first string.
6636 // a1: second string.
6637 // a2: length of first string.
6638 // a3: length of second string.
6639 // t2: first character of result.
6640 // t3: result string.
6641 StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
6643 // Locate first character of second argument.
6644 __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6646 // a1: first character of second string.
6647 // a3: length of second string.
6648 // t2: next character of result (after copy of first string).
6649 // t3: result string.
6650 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
6653 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6654 __ Addu(sp, sp, Operand(2 * kPointerSize));
6657 // Just jump to runtime to add the two strings.
6658 __ bind(&string_add_runtime);
6659 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6661 if (call_builtin.is_linked()) {
6662 __ bind(&call_builtin);
6663 __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
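// [Illustrative summary of the fast paths generated above]
//
//   if (left.length == 0)  return right;
//   if (right.length == 0) return left;
//   sum = left.length + right.length;
//   if (sum == 2)                  return two-character symbol table lookup
//                                  (or a freshly allocated 2-char string);
//   if (sum < kMinNonFlatLength)   return new flat string, copying both parts;
//   if (sum > String::kMaxLength)  return runtime;
//   otherwise                      return new cons string (left, right);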
6668 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6676 // First check if the argument is already a string.
6677 Label not_string, done;
6678 __ JumpIfSmi(arg, &not_string);
6679 __ GetObjectType(arg, scratch1, scratch1);
6680 __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
6682 // Check the number to string cache.
6684 __ bind(&not_string);
6685 // Puts the cached result into scratch1.
6686 NumberToStringStub::GenerateLookupNumberStringCache(masm,
6694 __ mov(arg, scratch1);
6695 __ sw(arg, MemOperand(sp, stack_offset));
6698 // Check if the argument is a safe string wrapper.
6699 __ bind(&not_cached);
6700 __ JumpIfSmi(arg, slow);
6701 __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
6702 __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
6703 __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6704 __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
6705 __ And(scratch2, scratch2, scratch4);
6706 __ Branch(slow, ne, scratch2, Operand(scratch4));
6707 __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6708 __ sw(arg, MemOperand(sp, stack_offset));
6714 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6715 ASSERT(state_ == CompareIC::SMIS);
6718 __ JumpIfNotSmi(a2, &miss);
6720 if (GetCondition() == eq) {
6721 // For equality we do not care about the sign of the result.
6722 __ Subu(v0, a0, a1);
6724 // Untag before subtracting to avoid handling overflow.
6727 __ Subu(v0, a1, a0);
6736 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6737 ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6742 __ And(a2, a1, Operand(a0));
6743 __ JumpIfSmi(a2, &generic_stub);
6745 __ GetObjectType(a0, a2, a2);
6746 __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
6747 __ GetObjectType(a1, a2, a2);
6748 __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
6750 // Inlining the double comparison and falling back to the general compare
6751 // stub if NaN is involved or FPU is unsupported.
6752 if (CpuFeatures::IsSupported(FPU)) {
6753 CpuFeatures::Scope scope(FPU);
6755 // Load left and right operand.
6756 __ Subu(a2, a1, Operand(kHeapObjectTag));
6757 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
6758 __ Subu(a2, a0, Operand(kHeapObjectTag));
6759 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
6761 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
6762 Label fpu_eq, fpu_lt;
6763 // Test if equal, and also handle the unordered/NaN case.
6764 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
6766 // Test if less (unordered case is already handled).
6767 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
6769 // Otherwise it's greater, so just fall through, and return.
6770 __ Ret(USE_DELAY_SLOT);
6771 __ li(v0, Operand(GREATER)); // In delay slot.
6774 __ Ret(USE_DELAY_SLOT);
6775 __ li(v0, Operand(EQUAL)); // In delay slot.
6778 __ Ret(USE_DELAY_SLOT);
6779 __ li(v0, Operand(LESS)); // In delay slot.
6781 __ bind(&unordered);
6784 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
6785 __ bind(&generic_stub);
6786 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
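// [Illustrative sketch] The FPU fast path above reduces to comparing the value
// loaded from a1 (x) with the value loaded from a0 (y):
//
//   if (isnan(x) || isnan(y)) goto unordered;   // handled by the generic stub
//   v0 = (x == y) ? EQUAL : (x < y) ? LESS : GREATER;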
6793 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6794 ASSERT(state_ == CompareIC::SYMBOLS);
6797 // Registers containing left and right operands respectively.
6799 Register right = a0;
6803 // Check that both operands are heap objects.
6804 __ JumpIfEitherSmi(left, right, &miss);
6806 // Check that both operands are symbols.
6807 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6808 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6809 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6810 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6811 STATIC_ASSERT(kSymbolTag != 0);
6812 __ And(tmp1, tmp1, Operand(tmp2));
6813 __ And(tmp1, tmp1, kIsSymbolMask);
6814 __ Branch(&miss, eq, tmp1, Operand(zero_reg));
6815 // Make sure a0 is non-zero. At this point input operands are
6816 // guaranteed to be non-zero.
6817 ASSERT(right.is(a0));
6818 STATIC_ASSERT(EQUAL == 0);
6819 STATIC_ASSERT(kSmiTag == 0);
6821 // Symbols are compared by identity.
6822 __ Ret(ne, left, Operand(right));
6823 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6831 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6832 ASSERT(state_ == CompareIC::STRINGS);
6835 // Registers containing left and right operands respectively.
6837 Register right = a0;
6844 // Check that both operands are heap objects.
6845 __ JumpIfEitherSmi(left, right, &miss);
6847 // Check that both operands are strings. This leaves the instance
6848 // types loaded in tmp1 and tmp2.
6849 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6850 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6851 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6852 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6853 STATIC_ASSERT(kNotStringTag != 0);
6854 __ Or(tmp3, tmp1, tmp2);
6855 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
6856 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
6858 // Fast check for identical strings.
6859 Label left_ne_right;
6860 STATIC_ASSERT(EQUAL == 0);
6861 STATIC_ASSERT(kSmiTag == 0);
6862 __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
6863 __ mov(v0, zero_reg); // In the delay slot.
6865 __ bind(&left_ne_right);
6867 // Handle not identical strings.
6869 // Check that both strings are symbols. If they are, we're done
6870 // because we already know they are not identical.
6871 ASSERT(GetCondition() == eq);
6872 STATIC_ASSERT(kSymbolTag != 0);
6873 __ And(tmp3, tmp1, Operand(tmp2));
6874 __ And(tmp5, tmp3, Operand(kIsSymbolMask));
6876 __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
6877 __ mov(v0, a0); // In the delay slot.
6878 // Make sure a0 is non-zero. At this point input operands are
6879 // guaranteed to be non-zero.
6880 ASSERT(right.is(a0));
6882 __ bind(&is_symbol);
6884 // Check that both strings are sequential ASCII.
6886 __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
6889 // Compare flat ASCII strings. Returns when done.
6890 StringCompareStub::GenerateFlatAsciiStringEquals(
6891 masm, left, right, tmp1, tmp2, tmp3);
6893 // Handle more complex cases in runtime.
6895 __ Push(left, right);
6896 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6903 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6904 ASSERT(state_ == CompareIC::OBJECTS);
6906 __ And(a2, a1, Operand(a0));
6907 __ JumpIfSmi(a2, &miss);
6909 // Compare lhs, a2 holds the map, a3 holds the type_reg
6910 __ GetObjectType(a0, a2, a3);
6911 __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
6912 __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
6913 __ And(a2, a2, Operand(1 << Map::kUseUserObjectComparison));
6914 __ Branch(&miss, eq, a2, Operand(1 << Map::kUseUserObjectComparison));
6917 // Compare rhs, a2 holds the map, a3 holds the type_reg
6918 __ GetObjectType(a1, a2, a3);
6919 __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
6920 __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
6921 __ And(a2, a2, Operand(1 << Map::kUseUserObjectComparison));
6922 __ Branch(&miss, eq, a2, Operand(1 << Map::kUseUserObjectComparison));
6924 ASSERT(GetCondition() == eq);
6925 __ Subu(v0, a0, Operand(a1));
6933 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6937 // Call the runtime system in a fresh internal frame.
6938 ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
6941 FrameScope scope(masm, StackFrame::INTERNAL);
6943 __ li(t0, Operand(Smi::FromInt(op_)));
6945 __ CallExternalReference(miss, 3);
6947 // Compute the entry point of the rewritten stub.
6948 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
6949 // Restore registers.
6957 void DirectCEntryStub::Generate(MacroAssembler* masm) {
6958 // No need to pop or drop anything, LeaveExitFrame will restore the old
6959 // stack, thus dropping the allocated space for the return value.
6960 // The saved ra is after the reserved stack space for the 4 args.
6961 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
6963 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
6964 // In case of an error the return address may point to a memory area
6965 // filled with kZapValue by the GC.
6966 // Dereference the address and check for this.
6967 __ lw(t0, MemOperand(t9));
6968 __ Assert(ne, "Received invalid return address.", t0,
6969 Operand(reinterpret_cast<uint32_t>(kZapValue)));
6975 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6976 ExternalReference function) {
6977 __ li(t9, Operand(function));
6978 this->GenerateCall(masm, t9);
6982 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6984 __ Move(t9, target);
6985 __ AssertStackIsAligned();
6986 // Allocate space for arg slots.
6987 __ Subu(sp, sp, kCArgsSlotsSize);
6989 // Block the trampoline pool through the whole function to make sure the
6990 // number of generated instructions is constant.
6991 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
6993 // We need to get the current 'pc' value, which is not available on MIPS.
6995 masm->bal(&find_ra); // ra = pc + 8.
6996 masm->nop(); // Branch delay slot nop.
6997 masm->bind(&find_ra);
6999 const int kNumInstructionsToJump = 6;
7000 masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
7001 // Push return address (accessible to GC through exit frame pc).
7002 // This spot for ra was reserved in EnterExitFrame.
7003 masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
7004 masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
7005 RelocInfo::CODE_TARGET), true);
7006 // Call the function.
7008 // Make sure the stored 'ra' points to this position.
7009 ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
7013 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
7017 Register properties,
7018 Handle<String> name,
7019 Register scratch0) {
7020 // If the names of the slots in the range from 1 to kProbes - 1 for the hash
7021 // value are not equal to the name, and the kProbes-th slot is not used (its
7022 // name is the undefined value), then the hash table is guaranteed not to
7023 // contain the property. This holds even if some slots represent deleted
7024 // properties (their names are the null value).
7025 for (int i = 0; i < kInlinedProbes; i++) {
7026 // scratch0 points to properties hash.
7027 // Compute the masked index: (hash + i + i * i) & mask.
7028 Register index = scratch0;
7029 // Capacity is smi 2^n.
7030 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
7031 __ Subu(index, index, Operand(1));
7032 __ And(index, index, Operand(
7033 Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
7035 // Scale the index by multiplying by the entry size.
7036 ASSERT(StringDictionary::kEntrySize == 3);
7037 __ sll(at, index, 1);
7038 __ Addu(index, index, at);
7040 Register entity_name = scratch0;
7041 // Having undefined at this place means the name is not contained.
7042 ASSERT_EQ(kSmiTagSize, 1);
7043 Register tmp = properties;
7044 __ sll(scratch0, index, 1);
7045 __ Addu(tmp, properties, scratch0);
7046 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
7048 ASSERT(!tmp.is(entity_name));
7049 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
7050 __ Branch(done, eq, entity_name, Operand(tmp));
7052 if (i != kInlinedProbes - 1) {
7053 // Stop if we found the property.
7054 __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
7056 // Check if the entry name is not a symbol.
7057 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
7059 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
7060 __ And(scratch0, entity_name, Operand(kIsSymbolMask));
7061 __ Branch(miss, eq, scratch0, Operand(zero_reg));
7063 // Restore the properties.
7065 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7069 const int spill_mask =
7070 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
7071 a2.bit() | a1.bit() | a0.bit() | v0.bit());
7073 __ MultiPush(spill_mask);
7074 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7075 __ li(a1, Operand(Handle<String>(name)));
7076 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
7079 __ MultiPop(spill_mask);
7081 __ Branch(done, eq, at, Operand(zero_reg));
7082 __ Branch(miss, ne, at, Operand(zero_reg));
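// [Illustrative sketch] The inline probes above implement (placeholder names):
//
//   for (int i = 0; i < kInlinedProbes; i++) {
//     entry = (hash(name) + probe_offset(i)) & capacity_mask;
//     slot = dictionary[entry];
//     if (slot == undefined) goto done;        // name is definitely absent
//     if (i != kInlinedProbes - 1) {
//       if (slot == name) goto miss;           // name is present
//       if (!is_symbol(slot)) goto miss;       // cannot rule it out inline
//     }
//   }
//   // Otherwise fall back to the full StringDictionaryLookupStub probe.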
7086 // Probe the string dictionary in the |elements| register. Jump to the
7087 // |done| label if a property with the given name is found. Jump to
7088 // the |miss| label otherwise.
7089 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
7090 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
7096 Register scratch2) {
7097 ASSERT(!elements.is(scratch1));
7098 ASSERT(!elements.is(scratch2));
7099 ASSERT(!name.is(scratch1));
7100 ASSERT(!name.is(scratch2));
7102 // Assert that name contains a string.
7103 if (FLAG_debug_code) __ AbortIfNotString(name);
7105 // Compute the capacity mask.
7106 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
7107 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
7108 __ Subu(scratch1, scratch1, Operand(1));
7110 // Generate an unrolled loop that performs a few probes before
7111 // giving up. Measurements done on Gmail indicate that 2 probes
7112 // cover ~93% of loads from dictionaries.
7113 for (int i = 0; i < kInlinedProbes; i++) {
7114 // Compute the masked index: (hash + i + i * i) & mask.
7115 __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
7117 // Add the probe offset (i + i * i) left shifted to avoid right shifting
7118 // the hash in a separate instruction. The value hash + i + i * i is right
7119 // shifted and masked in the following instructions.
7120 ASSERT(StringDictionary::GetProbeOffset(i) <
7121 1 << (32 - String::kHashFieldOffset));
7122 __ Addu(scratch2, scratch2, Operand(
7123 StringDictionary::GetProbeOffset(i) << String::kHashShift));
7125 __ srl(scratch2, scratch2, String::kHashShift);
7126 __ And(scratch2, scratch1, scratch2);
7128 // Scale the index by multiplying by the element size.
7129 ASSERT(StringDictionary::kEntrySize == 3);
7130 // scratch2 = scratch2 * 3.
7132 __ sll(at, scratch2, 1);
7133 __ Addu(scratch2, scratch2, at);
7135 // Check if the key is identical to the name.
7136 __ sll(at, scratch2, 2);
7137 __ Addu(scratch2, elements, at);
7138 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
7139 __ Branch(done, eq, name, Operand(at));
7142 const int spill_mask =
7143 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
7144 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
7145 ~(scratch1.bit() | scratch2.bit());
7147 __ MultiPush(spill_mask);
7149 ASSERT(!elements.is(a1));
7151 __ Move(a0, elements);
7153 __ Move(a0, elements);
7156 StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
7158 __ mov(scratch2, a2);
7160 __ MultiPop(spill_mask);
7162 __ Branch(done, ne, at, Operand(zero_reg));
7163 __ Branch(miss, eq, at, Operand(zero_reg));
7167 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
7168 // This stub overrides SometimesSetsUpAFrame() to return false. That means
7169 // we cannot call anything that could cause a GC from this stub.
7171 //  result: holds the result of the lookup.
7173 //  dictionary: StringDictionary to probe.
7174 //  index: will hold an index of the entry if lookup is successful;
7175 //         might alias with result.
7177 //  result is zero if lookup failed, non zero otherwise.
7179 Register result = v0;
7180 Register dictionary = a0;
Register key = a1;
7182 Register index = a2;
Register mask = a3;
Register hash = t0;
7185 Register undefined = t1;
7186 Register entry_key = t2;
7188 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
7190 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
7191 __ sra(mask, mask, kSmiTagSize);
7192 __ Subu(mask, mask, Operand(1));
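// The capacity of a StringDictionary is always a power of two (see the
// "Capacity is smi 2^n" note below), so capacity - 1 is a dense bit mask and
// "& mask" implements the modulo step of the probe sequence.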
7194 __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
7196 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
7198 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
7199 // Compute the masked index: (hash + i + i * i) & mask.
7200 // Capacity is smi 2^n.
7202 // Add the probe offset (i + i * i) left shifted to avoid right shifting
7203 // the hash in a separate instruction. The value hash + i + i * i is right
7204 // shifted in the And instruction that follows.
7205 ASSERT(StringDictionary::GetProbeOffset(i) <
7206 1 << (32 - String::kHashFieldOffset));
if (i > 0) {
7207 __ Addu(index, hash, Operand(
7208 StringDictionary::GetProbeOffset(i) << String::kHashShift));
} else {
7210 __ mov(index, hash);
}
7212 __ srl(index, index, String::kHashShift);
7213 __ And(index, mask, index);
7215 // Scale the index by multiplying by the entry size.
7216 ASSERT(StringDictionary::kEntrySize == 3);
__ mov(at, index);  // Save index; index * 3 == (index << 1) + index.
7219 __ sll(index, index, 1);
7220 __ Addu(index, index, at);
7223 ASSERT_EQ(kSmiTagSize, 1);
7224 __ sll(index, index, 2);
7225 __ Addu(index, index, dictionary);
7226 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
7228 // Having undefined at this place means the name is not contained.
7229 __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
7231 // Stop if found the property.
7232 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
7234 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
7235 // Check if the entry name is not a symbol.
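// The idea: symbols are unique, so a symbol key that failed the identity
// check above cannot possibly be |key|. A non-symbol key could still be
// equal to |key| as a string, so a negative lookup cannot prove absence and
// must bail out to |maybe_in_dictionary|.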
7236 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ lbu(entry_key,
7238 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
7239 __ And(result, entry_key, Operand(kIsSymbolMask));
7240 __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
}
}
7244 __ bind(&maybe_in_dictionary);
7245 // If we are doing negative lookup then probing failure should be
7246 // treated as a lookup success. For positive lookup probing failure
7247 // should be treated as lookup failure.
7248 if (mode_ == POSITIVE_LOOKUP) {
7249 __ mov(result, zero_reg);
__ Ret();
}
7253 __ bind(&in_dictionary);
__ li(result, 1);
__ Ret();
7257 __ bind(&not_in_dictionary);
7258 __ mov(result, zero_reg);
__ Ret();
}
7263 struct AheadOfTimeWriteBarrierStubList {
7264 Register object, value, address;
7265 RememberedSetAction action;
};
7269 struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7270 // Used in RegExpExecStub.
7271 { s2, s0, t3, EMIT_REMEMBERED_SET },
7272 { s2, a2, t3, EMIT_REMEMBERED_SET },
7273 // Used in CompileArrayPushCall.
7274 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
7275 // Also used in KeyedStoreIC::GenerateGeneric.
7276 { a3, t0, t1, EMIT_REMEMBERED_SET },
7277 // Used in CompileStoreGlobal.
7278 { t0, a1, a2, OMIT_REMEMBERED_SET },
7279 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
7280 { a1, a2, a3, EMIT_REMEMBERED_SET },
7281 { a3, a2, a1, EMIT_REMEMBERED_SET },
7282 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7283 { a2, a1, a3, EMIT_REMEMBERED_SET },
7284 { a3, a1, a2, EMIT_REMEMBERED_SET },
7285 // KeyedStoreStubCompiler::GenerateStoreFastElement.
7286 { t0, a2, a3, EMIT_REMEMBERED_SET },
7287 // ElementsTransitionGenerator::GenerateSmiOnlyToObject
7288 // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
7289 // and ElementsTransitionGenerator::GenerateDoubleToObject
7290 { a2, a3, t5, EMIT_REMEMBERED_SET },
7291 // ElementsTransitionGenerator::GenerateDoubleToObject
7292 { t2, a2, a0, EMIT_REMEMBERED_SET },
7293 { a2, t2, t5, EMIT_REMEMBERED_SET },
7294 // Null termination.
7295 { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
};
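// The intent of this table: RecordWriteStub instances for the register
// triples above are created ahead of time (see
// GenerateFixedRegStubsAheadOfTime below), so the write-barrier code at
// those call sites never has to compile a new stub, and hence never risks a
// GC, while a store is in progress.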
7299 bool RecordWriteStub::IsPregenerated() {
7300 for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7301 !entry->object.is(no_reg);
entry++) {
7303 if (object_.is(entry->object) &&
7304 value_.is(entry->value) &&
7305 address_.is(entry->address) &&
7306 remembered_set_action_ == entry->action &&
7307 save_fp_regs_mode_ == kDontSaveFPRegs) {
return true;
}
}
return false;
}
7315 bool StoreBufferOverflowStub::IsPregenerated() {
7316 return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
}
7320 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
7321 StoreBufferOverflowStub stub1(kDontSaveFPRegs);
7322 stub1.GetCode()->set_is_pregenerated(true);
}
7326 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
7327 for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7328 !entry->object.is(no_reg);
entry++) {
7330 RecordWriteStub stub(entry->object,
entry->value,
entry->address,
entry->action,
kDontSaveFPRegs);
7335 stub.GetCode()->set_is_pregenerated(true);
}
}
7340 // Takes the input in 3 registers: address_, value_, and object_.  A pointer
7341 // to the value has just been written into the object; this stub now makes
7342 // sure the GC is kept informed.  The word in the object where the value was
7343 // written is in the address register.
7344 void RecordWriteStub::Generate(MacroAssembler* masm) {
7345 Label skip_to_incremental_noncompacting;
7346 Label skip_to_incremental_compacting;
7348 // The first two branch+nop instructions are generated with labels so as to
7349 // get the offset fixed up correctly by the bind(Label*) call. We patch it
7350 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
7351 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
7352 // incremental heap marking.
7353 // See RecordWriteStub::Patch for details.
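// Rough layout of the code emitted below (offsets in instructions, each
// branch followed by its delay-slot nop):
//   +0: beq/bne zero_reg, zero_reg -> skip_to_incremental_noncompacting
//   +2: beq/bne zero_reg, zero_reg -> skip_to_incremental_compacting
// With both patched into "bne" (never taken) the stub falls through to the
// STORE_BUFFER_ONLY path that follows them.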
7354 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
__ nop();  // Branch delay slot.
7356 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
__ nop();  // Branch delay slot.
7359 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7360 __ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
7364 MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
7368 __ bind(&skip_to_incremental_noncompacting);
7369 GenerateIncremental(masm, INCREMENTAL);
7371 __ bind(&skip_to_incremental_compacting);
7372 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7374 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7375 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
7377 PatchBranchIntoNop(masm, 0);
7378 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
}
7382 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
7385 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7386 Label dont_need_remembered_set;
7388 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7389 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
7391 &dont_need_remembered_set);
7393 __ CheckPageFlag(regs_.object(),
7395 1 << MemoryChunk::SCAN_ON_SCAVENGE,
7397 &dont_need_remembered_set);
7399 // First notify the incremental marker if necessary, then update the
// remembered set.
7401 CheckNeedsToInformIncrementalMarker(
7402 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
7403 InformIncrementalMarker(masm, mode);
7404 regs_.Restore(masm);
7405 __ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
7409 MacroAssembler::kReturnAtEnd);
7411 __ bind(&dont_need_remembered_set);
}
7414 CheckNeedsToInformIncrementalMarker(
7415 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
7416 InformIncrementalMarker(masm, mode);
7417 regs_.Restore(masm);
__ Ret();
}
7422 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7423 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7424 int argument_count = 3;
7425 __ PrepareCallCFunction(argument_count, regs_.scratch0());
Register address =
7427 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
7428 ASSERT(!address.is(regs_.object()));
7429 ASSERT(!address.is(a0));
7430 __ Move(address, regs_.address());
7431 __ Move(a0, regs_.object());
7432 if (mode == INCREMENTAL_COMPACTION) {
7433 __ Move(a1, address);
} else {
7435 ASSERT(mode == INCREMENTAL);
7436 __ lw(a1, MemOperand(address, 0));
}
7438 __ li(a2, Operand(ExternalReference::isolate_address()));
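// At this point the three C arguments are set up as: a0 = object,
// a1 = slot address (compaction, so the evacuator can update the slot) or
// the value itself (plain incremental marking), a2 = isolate.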
7440 AllowExternalCallThatCantCauseGC scope(masm);
7441 if (mode == INCREMENTAL_COMPACTION) {
__ CallCFunction(
7443 ExternalReference::incremental_evacuation_record_write_function(
masm->isolate()),
argument_count);
} else {
7447 ASSERT(mode == INCREMENTAL);
__ CallCFunction(
7449 ExternalReference::incremental_marking_record_write_function(
masm->isolate()),
argument_count);
}
7453 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
7457 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7458 MacroAssembler* masm,
7459 OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
Label on_black;
7462 Label need_incremental;
7463 Label need_incremental_pop_scratch;
7465 // Let's look at the color of the object: If it is not black we don't have
7466 // to inform the incremental marker.
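// (In the tri-color scheme a non-black object has not finished being
// scanned, so the marker will still visit this slot on its own; only a
// write into an already-black object can hide the new value from it.)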
7467 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
7469 regs_.Restore(masm);
7470 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7471 __ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
7475 MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
__ bind(&on_black);
7482 // Get the value from the slot.
7483 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7485 if (mode == INCREMENTAL_COMPACTION) {
7486 Label ensure_not_white;
7488 __ CheckPageFlag(regs_.scratch0(), // Contains value.
7489 regs_.scratch1(), // Scratch.
7490 MemoryChunk::kEvacuationCandidateMask,
eq,
&ensure_not_white);
7494 __ CheckPageFlag(regs_.object(),
7495 regs_.scratch1(), // Scratch.
7496 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
eq,
&need_incremental);
7500 __ bind(&ensure_not_white);
7503 // We need extra registers for this, so we push the object and the address
7504 // register temporarily.
7505 __ Push(regs_.object(), regs_.address());
7506 __ EnsureNotWhite(regs_.scratch0(), // The value.
7507 regs_.scratch1(), // Scratch.
7508 regs_.object(), // Scratch.
7509 regs_.address(), // Scratch.
7510 &need_incremental_pop_scratch);
7511 __ Pop(regs_.object(), regs_.address());
7513 regs_.Restore(masm);
7514 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7515 __ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
7519 MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
7524 __ bind(&need_incremental_pop_scratch);
7525 __ Pop(regs_.object(), regs_.address());
7527 __ bind(&need_incremental);
7529 // Fall through when we need to inform the incremental marker.
}

#undef __
7535 } } // namespace v8::internal
7537 #endif // V8_TARGET_ARCH_MIPS