1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #if V8_TARGET_ARCH_MIPS
9 #include "src/base/bits.h"
10 #include "src/bootstrapper.h"
11 #include "src/code-stubs.h"
12 #include "src/codegen.h"
13 #include "src/ic/handler-compiler.h"
14 #include "src/ic/ic.h"
15 #include "src/isolate.h"
16 #include "src/jsregexp.h"
17 #include "src/regexp-macro-assembler.h"
18 #include "src/runtime/runtime.h"
24 static void InitializeArrayConstructorDescriptor(
25 Isolate* isolate, CodeStubDescriptor* descriptor,
26 int constant_stack_parameter_count) {
27 Address deopt_handler = Runtime::FunctionForId(
28 Runtime::kArrayConstructor)->entry;
30 if (constant_stack_parameter_count == 0) {
31 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
32 JS_FUNCTION_STUB_MODE);
34 descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
35 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
40 static void InitializeInternalArrayConstructorDescriptor(
41 Isolate* isolate, CodeStubDescriptor* descriptor,
42 int constant_stack_parameter_count) {
43 Address deopt_handler = Runtime::FunctionForId(
44 Runtime::kInternalArrayConstructor)->entry;
46 if (constant_stack_parameter_count == 0) {
47 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
48 JS_FUNCTION_STUB_MODE);
50 descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
51 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
56 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
57 CodeStubDescriptor* descriptor) {
58 InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
62 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
63 CodeStubDescriptor* descriptor) {
64 InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
68 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
69 CodeStubDescriptor* descriptor) {
70 InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
74 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
75 CodeStubDescriptor* descriptor) {
76 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
80 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
81 CodeStubDescriptor* descriptor) {
82 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
86 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
87 CodeStubDescriptor* descriptor) {
88 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
92 #define __ ACCESS_MASM(masm)
95 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
98 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
104 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
109 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
110 ExternalReference miss) {
111 // Update the static counter each time a new code stub is generated.
112 isolate()->counters()->code_stubs()->Increment();
114 CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
115 int param_count = descriptor.GetEnvironmentParameterCount();
117 // Call the runtime system in a fresh internal frame.
118 FrameScope scope(masm, StackFrame::INTERNAL);
119 DCHECK(param_count == 0 ||
120 a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
121 // Push arguments, adjust sp.
122 __ Subu(sp, sp, Operand(param_count * kPointerSize));
123 for (int i = 0; i < param_count; ++i) {
124 // Store argument to stack.
125 __ sw(descriptor.GetEnvironmentParameterRegister(i),
126 MemOperand(sp, (param_count - 1 - i) * kPointerSize));
128 __ CallExternalReference(miss, param_count);
135 void DoubleToIStub::Generate(MacroAssembler* masm) {
136 Label out_of_range, only_low, negate, done;
137 Register input_reg = source();
138 Register result_reg = destination();
140 int double_offset = offset();
141 // Account for saved regs if input is sp.
142 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
145 GetRegisterThatIsNotOneOf(input_reg, result_reg);
147 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
149 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
150 DoubleRegister double_scratch = kLithiumScratchDouble;
152 __ Push(scratch, scratch2, scratch3);
154 if (!skip_fastpath()) {
155 // Load double input.
156 __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
158 // Clear cumulative exception flags and save the FCSR.
159 __ cfc1(scratch2, FCSR);
160 __ ctc1(zero_reg, FCSR);
162 // Try a conversion to a signed integer.
163 __ Trunc_w_d(double_scratch, double_scratch);
164 // Move the converted value into the result register.
165 __ mfc1(scratch3, double_scratch);
167 // Retrieve and restore the FCSR.
168 __ cfc1(scratch, FCSR);
169 __ ctc1(scratch2, FCSR);
171 // Check for overflow and NaNs.
174 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
175 | kFCSRInvalidOpFlagMask);
176 // If we had no exceptions then set result_reg and we are done.
178 __ Branch(&error, ne, scratch, Operand(zero_reg));
179 __ Move(result_reg, scratch3);
184 // Load the double value and perform a manual truncation.
185 Register input_high = scratch2;
186 Register input_low = scratch3;
189 MemOperand(input_reg, double_offset + Register::kMantissaOffset));
191 MemOperand(input_reg, double_offset + Register::kExponentOffset));
193 Label normal_exponent, restore_sign;
194 // Extract the biased exponent in result.
197 HeapNumber::kExponentShift,
198 HeapNumber::kExponentBits);
200 // Check for Infinity and NaNs, which should return 0.
201 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
202 __ Movz(result_reg, zero_reg, scratch);
203 __ Branch(&done, eq, scratch, Operand(zero_reg));
205 // Express exponent as delta to (number of mantissa bits + 31).
208 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
210 // If the delta is strictly positive, all bits would be shifted away,
211 // which means that we can return 0.
212 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
213 __ mov(result_reg, zero_reg);
216 __ bind(&normal_exponent);
217 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
219 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
222 Register sign = result_reg;
224 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
226 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
227 // to check for this specific case.
228 Label high_shift_needed, high_shift_done;
229 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
230 __ mov(input_high, zero_reg);
231 __ Branch(&high_shift_done);
232 __ bind(&high_shift_needed);
234 // Set the implicit 1 before the mantissa part in input_high.
237 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
238 // Shift the mantissa bits to the correct position.
239 // We don't need to clear non-mantissa bits as they will be shifted away.
240 // If they weren't, it would mean that the answer is in the 32bit range.
241 __ sllv(input_high, input_high, scratch);
243 __ bind(&high_shift_done);
245 // Replace the shifted bits with bits from the lower mantissa word.
246 Label pos_shift, shift_done;
248 __ subu(scratch, at, scratch);
249 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
252 __ Subu(scratch, zero_reg, scratch);
253 __ sllv(input_low, input_low, scratch);
254 __ Branch(&shift_done);
257 __ srlv(input_low, input_low, scratch);
259 __ bind(&shift_done);
260 __ Or(input_high, input_high, Operand(input_low));
261 // Restore sign if necessary.
262 __ mov(scratch, sign);
265 __ Subu(result_reg, zero_reg, input_high);
266 __ Movz(result_reg, input_high, scratch);
270 __ Pop(scratch, scratch2, scratch3);
275 // Handle the case where the lhs and rhs are the same object.
276 // Equality is almost reflexive (everything but NaN), so this is a test
277 // for "identity and not NaN".
278 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
282 Label heap_number, return_equal;
283 Register exp_mask_reg = t5;
285 __ Branch(¬_identical, ne, a0, Operand(a1));
287 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
289 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
290 // so we do the second best thing - test it ourselves.
291 // They are both equal and they are not both Smis so both of them are not
292 // Smis. If it's not a heap number, then return equal.
293 if (cc == less || cc == greater) {
294 __ GetObjectType(a0, t4, t4);
295 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
297 __ GetObjectType(a0, t4, t4);
298 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
299 // Comparing JS objects with <=, >= is complicated.
301 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
302 // Normally here we fall through to return_equal, but undefined is
303 // special: (undefined == undefined) == true, but
304 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
305 if (cc == less_equal || cc == greater_equal) {
306 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
307 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
308 __ Branch(&return_equal, ne, a0, Operand(t2));
309 DCHECK(is_int16(GREATER) && is_int16(LESS));
310 __ Ret(USE_DELAY_SLOT);
312 // undefined <= undefined should fail.
313 __ li(v0, Operand(GREATER));
315 // undefined >= undefined should fail.
316 __ li(v0, Operand(LESS));
322 __ bind(&return_equal);
323 DCHECK(is_int16(GREATER) && is_int16(LESS));
324 __ Ret(USE_DELAY_SLOT);
326 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
327 } else if (cc == greater) {
328 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
330 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
333 // For less and greater we don't have to check for NaN since the result of
334 // x < x is false regardless. For the others here is some code to check
336 if (cc != lt && cc != gt) {
337 __ bind(&heap_number);
338 // It is a heap number, so return non-equal if it's NaN and equal if it's
341 // The representation of NaN values has all exponent bits (52..62) set,
342 // and not all mantissa bits (0..51) clear.
343 // Read top bits of double representation (second word of value).
344 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
345 // Test that exponent bits are all set.
346 __ And(t3, t2, Operand(exp_mask_reg));
347 // If all bits not set (ne cond), then not a NaN, objects are equal.
348 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
350 // Shift out flag and all exponent bits, retaining only mantissa.
351 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
352 // Or with all low-bits of mantissa.
353 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
354 __ Or(v0, t3, Operand(t2));
355 // For equal we already have the right value in v0: Return zero (equal)
356 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
357 // not (it's a NaN). For <= and >= we need to load v0 with the failing
358 // value if it's a NaN.
360 // All-zero means Infinity means equal.
361 __ Ret(eq, v0, Operand(zero_reg));
362 DCHECK(is_int16(GREATER) && is_int16(LESS));
363 __ Ret(USE_DELAY_SLOT);
365 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
367 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
371 // No fall through here.
373 __ bind(¬_identical);
377 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
380 Label* both_loaded_as_doubles,
383 DCHECK((lhs.is(a0) && rhs.is(a1)) ||
384 (lhs.is(a1) && rhs.is(a0)));
387 __ JumpIfSmi(lhs, &lhs_is_smi);
389 // Check whether the non-smi is a heap number.
390 __ GetObjectType(lhs, t4, t4);
392 // If lhs was not a number and rhs was a Smi then strict equality cannot
393 // succeed. Return non-equal (lhs is already not zero).
394 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
397 // Smi compared non-strictly with a non-Smi non-heap-number. Call
399 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
402 // Rhs is a smi, lhs is a number.
403 // Convert smi rhs to double.
404 __ sra(at, rhs, kSmiTagSize);
406 __ cvt_d_w(f14, f14);
407 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
409 // We now have both loaded as doubles.
410 __ jmp(both_loaded_as_doubles);
412 __ bind(&lhs_is_smi);
413 // Lhs is a Smi. Check whether the non-smi is a heap number.
414 __ GetObjectType(rhs, t4, t4);
416 // If lhs was not a number and rhs was a Smi then strict equality cannot
417 // succeed. Return non-equal.
418 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
419 __ li(v0, Operand(1));
421 // Smi compared non-strictly with a non-Smi non-heap-number. Call
423 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
426 // Lhs is a smi, rhs is a number.
427 // Convert smi lhs to double.
428 __ sra(at, lhs, kSmiTagSize);
430 __ cvt_d_w(f12, f12);
431 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
432 // Fall through to both_loaded_as_doubles.
436 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
439 // If either operand is a JS object or an oddball value, then they are
440 // not equal since their pointers are different.
441 // There is no test for undetectability in strict equality.
442 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
443 Label first_non_object;
444 // Get the type of the first operand into a2 and compare it with
445 // FIRST_SPEC_OBJECT_TYPE.
446 __ GetObjectType(lhs, a2, a2);
447 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
450 Label return_not_equal;
451 __ bind(&return_not_equal);
452 __ Ret(USE_DELAY_SLOT);
453 __ li(v0, Operand(1));
455 __ bind(&first_non_object);
456 // Check for oddballs: true, false, null, undefined.
457 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
459 __ GetObjectType(rhs, a3, a3);
460 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
462 // Check for oddballs: true, false, null, undefined.
463 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
465 // Now that we have the types we might as well check for
466 // internalized-internalized.
467 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
468 __ Or(a2, a2, Operand(a3));
469 __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
470 __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
474 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
477 Label* both_loaded_as_doubles,
478 Label* not_heap_numbers,
480 __ GetObjectType(lhs, a3, a2);
481 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
482 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
483 // If first was a heap number & second wasn't, go to slow case.
484 __ Branch(slow, ne, a3, Operand(a2));
486 // Both are heap numbers. Load them up then jump to the code we have
488 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
489 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
491 __ jmp(both_loaded_as_doubles);
495 // Fast negative check for internalized-to-internalized equality.
496 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
499 Label* possible_strings,
500 Label* not_both_strings) {
501 DCHECK((lhs.is(a0) && rhs.is(a1)) ||
502 (lhs.is(a1) && rhs.is(a0)));
504 // a2 is object type of rhs.
506 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
507 __ And(at, a2, Operand(kIsNotStringMask));
508 __ Branch(&object_test, ne, at, Operand(zero_reg));
509 __ And(at, a2, Operand(kIsNotInternalizedMask));
510 __ Branch(possible_strings, ne, at, Operand(zero_reg));
511 __ GetObjectType(rhs, a3, a3);
512 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
513 __ And(at, a3, Operand(kIsNotInternalizedMask));
514 __ Branch(possible_strings, ne, at, Operand(zero_reg));
516 // Both are internalized strings. We already checked they weren't the same
517 // pointer so they are not equal.
518 __ Ret(USE_DELAY_SLOT);
519 __ li(v0, Operand(1)); // Non-zero indicates not equal.
521 __ bind(&object_test);
522 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
523 __ GetObjectType(rhs, a2, a3);
524 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
526 // If both objects are undetectable, they are equal. Otherwise, they
527 // are not equal, since they are different objects and an object is not
528 // equal to undefined.
529 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
530 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
531 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
533 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
534 __ Ret(USE_DELAY_SLOT);
535 __ xori(v0, a0, 1 << Map::kIsUndetectable);
539 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
541 CompareICState::State expected,
544 if (expected == CompareICState::SMI) {
545 __ JumpIfNotSmi(input, fail);
546 } else if (expected == CompareICState::NUMBER) {
547 __ JumpIfSmi(input, &ok);
548 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
551 // We could be strict about internalized/string here, but as long as
552 // hydrogen doesn't care, the stub doesn't have to care either.
557 // On entry a1 and a2 are the values to be compared.
558 // On exit a0 is 0, positive or negative to indicate the result of
560 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
563 Condition cc = GetCondition();
566 CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
567 CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
569 Label slow; // Call builtin.
570 Label not_smis, both_loaded_as_doubles;
572 Label not_two_smis, smi_done;
574 __ JumpIfNotSmi(a2, ¬_two_smis);
577 __ Ret(USE_DELAY_SLOT);
579 __ bind(¬_two_smis);
581 // NOTICE! This code is only reached after a smi-fast-case check, so
582 // it is certain that at least one operand isn't a smi.
584 // Handle the case where the objects are identical. Either returns the answer
585 // or goes to slow. Only falls through if the objects were not identical.
586 EmitIdenticalObjectComparison(masm, &slow, cc);
588 // If either is a Smi (we know that not both are), then they can only
589 // be strictly equal if the other is a HeapNumber.
590 STATIC_ASSERT(kSmiTag == 0);
591 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
592 __ And(t2, lhs, Operand(rhs));
593 __ JumpIfNotSmi(t2, ¬_smis, t0);
594 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
595 // 1) Return the answer.
597 // 3) Fall through to both_loaded_as_doubles.
598 // 4) Jump to rhs_not_nan.
599 // In cases 3 and 4 we have found out we were dealing with a number-number
600 // comparison and the numbers have been loaded into f12 and f14 as doubles,
601 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
602 EmitSmiNonsmiComparison(masm, lhs, rhs,
603 &both_loaded_as_doubles, &slow, strict());
605 __ bind(&both_loaded_as_doubles);
606 // f12, f14 are the double representations of the left hand side
607 // and the right hand side if we have FPU. Otherwise a2, a3 represent
608 // left hand side and a0, a1 represent right hand side.
610 __ li(t0, Operand(LESS));
611 __ li(t1, Operand(GREATER));
612 __ li(t2, Operand(EQUAL));
614 // Check if either rhs or lhs is NaN.
615 __ BranchF(NULL, &nan, eq, f12, f14);
617 // Check if LESS condition is satisfied. If true, move conditionally
619 if (!IsMipsArchVariant(kMips32r6)) {
620 __ c(OLT, D, f12, f14);
622 // Use previous check to store conditionally to v0 oposite condition
623 // (GREATER). If rhs is equal to lhs, this will be corrected in next
626 // Check if EQUAL condition is satisfied. If true, move conditionally
628 __ c(EQ, D, f12, f14);
632 __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
633 __ mov(v0, t0); // Return LESS as result.
635 __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
636 __ mov(v0, t2); // Return EQUAL as result.
638 __ mov(v0, t1); // Return GREATER as result.
645 // NaN comparisons always fail.
646 // Load whatever we need in v0 to make the comparison fail.
647 DCHECK(is_int16(GREATER) && is_int16(LESS));
648 __ Ret(USE_DELAY_SLOT);
649 if (cc == lt || cc == le) {
650 __ li(v0, Operand(GREATER));
652 __ li(v0, Operand(LESS));
657 // At this point we know we are dealing with two different objects,
658 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
660 // This returns non-equal for some object types, or falls through if it
662 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
665 Label check_for_internalized_strings;
666 Label flat_string_check;
667 // Check for heap-number-heap-number comparison. Can jump to slow case,
668 // or load both doubles and jump to the code that handles
669 // that case. If the inputs are not doubles then jumps to
670 // check_for_internalized_strings.
671 // In this case a2 will contain the type of lhs_.
672 EmitCheckForTwoHeapNumbers(masm,
675 &both_loaded_as_doubles,
676 &check_for_internalized_strings,
679 __ bind(&check_for_internalized_strings);
680 if (cc == eq && !strict()) {
681 // Returns an answer for two internalized strings or two
682 // detectable objects.
683 // Otherwise jumps to string case or not both strings case.
684 // Assumes that a2 is the type of lhs_ on entry.
685 EmitCheckForInternalizedStringsOrObjects(
686 masm, lhs, rhs, &flat_string_check, &slow);
689 // Check for both being sequential one-byte strings,
690 // and inline if that is the case.
691 __ bind(&flat_string_check);
693 __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
695 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
698 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
700 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
703 // Never falls through to here.
706 // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
709 // Figure out which native to call and setup the arguments.
710 Builtins::JavaScript native;
712 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
714 native = Builtins::COMPARE;
715 int ncr; // NaN compare result.
716 if (cc == lt || cc == le) {
719 DCHECK(cc == gt || cc == ge); // Remaining cases.
722 __ li(a0, Operand(Smi::FromInt(ncr)));
726 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
727 // tagged as a small integer.
728 __ InvokeBuiltin(native, JUMP_FUNCTION);
735 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
738 __ PushSafepointRegisters();
743 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
746 __ PopSafepointRegisters();
751 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
752 // We don't allow a GC during a store buffer overflow so there is no need to
753 // store the registers in any particular way, but we do have to store and
755 __ MultiPush(kJSCallerSaved | ra.bit());
756 if (save_doubles()) {
757 __ MultiPushFPU(kCallerSavedFPU);
759 const int argument_count = 1;
760 const int fp_argument_count = 0;
761 const Register scratch = a1;
763 AllowExternalCallThatCantCauseGC scope(masm);
764 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
765 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
767 ExternalReference::store_buffer_overflow_function(isolate()),
769 if (save_doubles()) {
770 __ MultiPopFPU(kCallerSavedFPU);
773 __ MultiPop(kJSCallerSaved | ra.bit());
778 void MathPowStub::Generate(MacroAssembler* masm) {
779 const Register base = a1;
780 const Register exponent = MathPowTaggedDescriptor::exponent();
781 DCHECK(exponent.is(a2));
782 const Register heapnumbermap = t1;
783 const Register heapnumber = v0;
784 const DoubleRegister double_base = f2;
785 const DoubleRegister double_exponent = f4;
786 const DoubleRegister double_result = f0;
787 const DoubleRegister double_scratch = f6;
788 const FPURegister single_scratch = f8;
789 const Register scratch = t5;
790 const Register scratch2 = t3;
792 Label call_runtime, done, int_exponent;
793 if (exponent_type() == ON_STACK) {
794 Label base_is_smi, unpack_exponent;
795 // The exponent and base are supplied as arguments on the stack.
796 // This can only happen if the stub is called from non-optimized code.
797 // Load input parameters from stack to double registers.
798 __ lw(base, MemOperand(sp, 1 * kPointerSize));
799 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
801 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
803 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
804 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
805 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
807 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
808 __ jmp(&unpack_exponent);
810 __ bind(&base_is_smi);
811 __ mtc1(scratch, single_scratch);
812 __ cvt_d_w(double_base, single_scratch);
813 __ bind(&unpack_exponent);
815 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
817 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
818 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
819 __ ldc1(double_exponent,
820 FieldMemOperand(exponent, HeapNumber::kValueOffset));
821 } else if (exponent_type() == TAGGED) {
822 // Base is already in double_base.
823 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
825 __ ldc1(double_exponent,
826 FieldMemOperand(exponent, HeapNumber::kValueOffset));
829 if (exponent_type() != INTEGER) {
830 Label int_exponent_convert;
831 // Detect integer exponents stored as double.
832 __ EmitFPUTruncate(kRoundToMinusInf,
838 kCheckForInexactConversion);
839 // scratch2 == 0 means there was no conversion error.
840 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
842 if (exponent_type() == ON_STACK) {
843 // Detect square root case. Crankshaft detects constant +/-0.5 at
844 // compile time and uses DoMathPowHalf instead. We then skip this check
845 // for non-constant cases of +/-0.5 as these hardly occur.
848 __ Move(double_scratch, 0.5);
849 __ BranchF(USE_DELAY_SLOT,
855 // double_scratch can be overwritten in the delay slot.
856 // Calculates square root of base. Check for the special case of
857 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
858 __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
859 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
860 __ neg_d(double_result, double_scratch);
862 // Add +0 to convert -0 to +0.
863 __ add_d(double_scratch, double_base, kDoubleRegZero);
864 __ sqrt_d(double_result, double_scratch);
867 __ bind(¬_plus_half);
868 __ Move(double_scratch, -0.5);
869 __ BranchF(USE_DELAY_SLOT,
875 // double_scratch can be overwritten in the delay slot.
876 // Calculates square root of base. Check for the special case of
877 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
878 __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
879 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
880 __ Move(double_result, kDoubleRegZero);
882 // Add +0 to convert -0 to +0.
883 __ add_d(double_scratch, double_base, kDoubleRegZero);
884 __ Move(double_result, 1.);
885 __ sqrt_d(double_scratch, double_scratch);
886 __ div_d(double_result, double_result, double_scratch);
892 AllowExternalCallThatCantCauseGC scope(masm);
893 __ PrepareCallCFunction(0, 2, scratch2);
894 __ MovToFloatParameters(double_base, double_exponent);
896 ExternalReference::power_double_double_function(isolate()),
900 __ MovFromFloatResult(double_result);
903 __ bind(&int_exponent_convert);
906 // Calculate power with integer exponent.
907 __ bind(&int_exponent);
909 // Get two copies of exponent in the registers scratch and exponent.
910 if (exponent_type() == INTEGER) {
911 __ mov(scratch, exponent);
913 // Exponent has previously been stored into scratch as untagged integer.
914 __ mov(exponent, scratch);
917 __ mov_d(double_scratch, double_base); // Back up base.
918 __ Move(double_result, 1.0);
920 // Get absolute value of exponent.
921 Label positive_exponent;
922 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
923 __ Subu(scratch, zero_reg, scratch);
924 __ bind(&positive_exponent);
926 Label while_true, no_carry, loop_end;
927 __ bind(&while_true);
929 __ And(scratch2, scratch, 1);
931 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
932 __ mul_d(double_result, double_result, double_scratch);
935 __ sra(scratch, scratch, 1);
937 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
938 __ mul_d(double_scratch, double_scratch, double_scratch);
940 __ Branch(&while_true);
944 __ Branch(&done, ge, exponent, Operand(zero_reg));
945 __ Move(double_scratch, 1.0);
946 __ div_d(double_result, double_scratch, double_result);
947 // Test whether result is zero. Bail out to check for subnormal result.
948 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
949 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
951 // double_exponent may not contain the exponent value if the input was a
952 // smi. We set it with exponent value before bailing out.
953 __ mtc1(exponent, single_scratch);
954 __ cvt_d_w(double_exponent, single_scratch);
956 // Returning or bailing out.
957 Counters* counters = isolate()->counters();
958 if (exponent_type() == ON_STACK) {
959 // The arguments are still on the stack.
960 __ bind(&call_runtime);
961 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
963 // The stub is called from non-optimized code, which expects the result
964 // as heap number in exponent.
966 __ AllocateHeapNumber(
967 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
968 __ sdc1(double_result,
969 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
970 DCHECK(heapnumber.is(v0));
971 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
976 AllowExternalCallThatCantCauseGC scope(masm);
977 __ PrepareCallCFunction(0, 2, scratch);
978 __ MovToFloatParameters(double_base, double_exponent);
980 ExternalReference::power_double_double_function(isolate()),
984 __ MovFromFloatResult(double_result);
987 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
993 bool CEntryStub::NeedsImmovableCode() {
998 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
999 CEntryStub::GenerateAheadOfTime(isolate);
1000 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1001 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1002 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1003 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1004 CreateWeakCellStub::GenerateAheadOfTime(isolate);
1005 BinaryOpICStub::GenerateAheadOfTime(isolate);
1006 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
1007 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
1008 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1012 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1013 StoreRegistersStateStub stub(isolate);
1018 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1019 RestoreRegistersStateStub stub(isolate);
1024 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1025 // Generate if not already in cache.
1026 SaveFPRegsMode mode = kSaveFPRegs;
1027 CEntryStub(isolate, 1, mode).GetCode();
1028 StoreBufferOverflowStub(isolate, mode).GetCode();
1029 isolate->set_fp_stubs_generated(true);
1033 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1034 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1039 void CEntryStub::Generate(MacroAssembler* masm) {
1040 // Called from JavaScript; parameters are on stack as if calling JS function
1041 // a0: number of arguments including receiver
1042 // a1: pointer to builtin function
1043 // fp: frame pointer (restored after C call)
1044 // sp: stack pointer (restored as callee's sp after C call)
1045 // cp: current context (C callee-saved)
1047 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1049 // Compute the argv pointer in a callee-saved register.
1050 __ sll(s1, a0, kPointerSizeLog2);
1051 __ Addu(s1, sp, s1);
1052 __ Subu(s1, s1, kPointerSize);
1054 // Enter the exit frame that transitions from JavaScript to C++.
1055 FrameScope scope(masm, StackFrame::MANUAL);
1056 __ EnterExitFrame(save_doubles());
1058 // s0: number of arguments including receiver (C callee-saved)
1059 // s1: pointer to first argument (C callee-saved)
1060 // s2: pointer to builtin function (C callee-saved)
1062 // Prepare arguments for C routine.
1066 // a1 = argv (set in the delay slot after find_ra below).
1068 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1069 // also need to reserve the 4 argument slots on the stack.
1071 __ AssertStackIsAligned();
1073 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1075 // To let the GC traverse the return address of the exit frames, we need to
1076 // know where the return address is. The CEntryStub is unmovable, so
1077 // we can store the address on the stack to be able to find it again and
1078 // we never have to restore it, because it will not change.
1079 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1080 // This branch-and-link sequence is needed to find the current PC on mips,
1081 // saved to the ra register.
1082 // Use masm-> here instead of the double-underscore macro since extra
1083 // coverage code can interfere with the proper calculation of ra.
1085 masm->bal(&find_ra); // bal exposes branch delay slot.
1087 masm->bind(&find_ra);
1089 // Adjust the value in ra to point to the correct return location, 2nd
1090 // instruction past the real call into C code (the jalr(t9)), and push it.
1091 // This is the return address of the exit frame.
1092 const int kNumInstructionsToJump = 5;
1093 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
1094 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
1095 // Stack space reservation moved to the branch delay slot below.
1096 // Stack is still aligned.
1098 // Call the C routine.
1099 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
1101 // Set up sp in the delay slot.
1102 masm->addiu(sp, sp, -kCArgsSlotsSize);
1103 // Make sure the stored 'ra' points to this position.
1104 DCHECK_EQ(kNumInstructionsToJump,
1105 masm->InstructionsGeneratedSince(&find_ra));
1109 // Runtime functions should not return 'the hole'. Allowing it to escape may
1110 // lead to crashes in the IC code later.
1111 if (FLAG_debug_code) {
1113 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1114 __ Branch(&okay, ne, v0, Operand(t0));
1115 __ stop("The hole escaped");
1119 // Check result for exception sentinel.
1120 Label exception_returned;
1121 __ LoadRoot(t0, Heap::kExceptionRootIndex);
1122 __ Branch(&exception_returned, eq, t0, Operand(v0));
1124 ExternalReference pending_exception_address(
1125 Isolate::kPendingExceptionAddress, isolate());
1127 // Check that there is no pending exception, otherwise we
1128 // should have returned the exception sentinel.
1129 if (FLAG_debug_code) {
1131 __ li(a2, Operand(pending_exception_address));
1132 __ lw(a2, MemOperand(a2));
1133 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1134 // Cannot use check here as it attempts to generate call into runtime.
1135 __ Branch(&okay, eq, t0, Operand(a2));
1136 __ stop("Unexpected pending exception");
1140 // Exit C frame and return.
1142 // sp: stack pointer
1143 // fp: frame pointer
1144 // s0: still holds argc (callee-saved).
1145 __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
1147 // Handling of exception.
1148 __ bind(&exception_returned);
1150 // Retrieve the pending exception.
1151 __ li(a2, Operand(pending_exception_address));
1152 __ lw(v0, MemOperand(a2));
1154 // Clear the pending exception.
1155 __ li(a3, Operand(isolate()->factory()->the_hole_value()));
1156 __ sw(a3, MemOperand(a2));
1158 // Special handling of termination exceptions which are uncatchable
1159 // by javascript code.
1160 Label throw_termination_exception;
1161 __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
1162 __ Branch(&throw_termination_exception, eq, v0, Operand(t0));
1164 // Handle normal exception.
1167 __ bind(&throw_termination_exception);
1168 __ ThrowUncatchable(v0);
1172 void JSEntryStub::Generate(MacroAssembler* masm) {
1173 Label invoke, handler_entry, exit;
1174 Isolate* isolate = masm->isolate();
1177 // a0: entry address
1186 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1188 // Save callee saved registers on the stack.
1189 __ MultiPush(kCalleeSaved | ra.bit());
1191 // Save callee-saved FPU registers.
1192 __ MultiPushFPU(kCalleeSavedFPU);
1193 // Set up the reserved register for 0.0.
1194 __ Move(kDoubleRegZero, 0.0);
1197 // Load argv in s0 register.
1198 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1199 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
1201 __ InitializeRootRegister();
1202 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
1204 // We build an EntryFrame.
1205 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1206 int marker = type();
1207 __ li(t2, Operand(Smi::FromInt(marker)));
1208 __ li(t1, Operand(Smi::FromInt(marker)));
1209 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1211 __ lw(t0, MemOperand(t0));
1212 __ Push(t3, t2, t1, t0);
1213 // Set up frame pointer for the frame to be pushed.
1214 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
1217 // a0: entry_address
1219 // a2: receiver_pointer
1225 // function slot | entry frame
1227 // bad fp (0xff...f) |
1228 // callee saved registers + ra
1232 // If this is the outermost JS call, set js_entry_sp value.
1233 Label non_outermost_js;
1234 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1235 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1236 __ lw(t2, MemOperand(t1));
1237 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
1238 __ sw(fp, MemOperand(t1));
1239 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1242 __ nop(); // Branch delay slot nop.
1243 __ bind(&non_outermost_js);
1244 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1248 // Jump to a faked try block that does the invoke, with a faked catch
1249 // block that sets the pending exception.
1251 __ bind(&handler_entry);
1252 handler_offset_ = handler_entry.pos();
1253 // Caught exception: Store result (exception) in the pending exception
1254 // field in the JSEnv and return a failure sentinel. Coming in here the
1255 // fp will be invalid because the PushTryHandler below sets it to 0 to
1256 // signal the existence of the JSEntry frame.
1257 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1259 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
1260 __ LoadRoot(v0, Heap::kExceptionRootIndex);
1261 __ b(&exit); // b exposes branch delay slot.
1262 __ nop(); // Branch delay slot nop.
1264 // Invoke: Link this frame into the handler chain. There's only one
1265 // handler block in this code object, so its index is 0.
1267 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1268 // If an exception not caught by another handler occurs, this handler
1269 // returns control to the code after the bal(&invoke) above, which
1270 // restores all kCalleeSaved registers (including cp and fp) to their
1271 // saved values before returning a failure to C.
1273 // Clear any pending exceptions.
1274 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
1275 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1277 __ sw(t1, MemOperand(t0));
1279 // Invoke the function by calling through JS entry trampoline builtin.
1280 // Notice that we cannot store a reference to the trampoline code directly in
1281 // this stub, because runtime stubs are not traversed when doing GC.
1284 // a0: entry_address
1286 // a2: receiver_pointer
1293 // callee saved registers + ra
1297 if (type() == StackFrame::ENTRY_CONSTRUCT) {
1298 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1300 __ li(t0, Operand(construct_entry));
1302 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1303 __ li(t0, Operand(entry));
1305 __ lw(t9, MemOperand(t0)); // Deref address.
1307 // Call JSEntryTrampoline.
1308 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1311 // Unlink this frame from the handler chain.
1314 __ bind(&exit); // v0 holds result
1315 // Check if the current stack frame is marked as the outermost JS frame.
1316 Label non_outermost_js_2;
1318 __ Branch(&non_outermost_js_2,
1321 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1322 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1323 __ sw(zero_reg, MemOperand(t1));
1324 __ bind(&non_outermost_js_2);
1326 // Restore the top frame descriptors from the stack.
1328 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1330 __ sw(t1, MemOperand(t0));
1332 // Reset the stack to the callee saved registers.
1333 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
1335 // Restore callee-saved fpu registers.
1336 __ MultiPopFPU(kCalleeSavedFPU);
1338 // Restore callee saved registers from the stack.
1339 __ MultiPop(kCalleeSaved | ra.bit());
1345 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
1346 // Return address is in ra.
1349 Register receiver = LoadDescriptor::ReceiverRegister();
1350 Register index = LoadDescriptor::NameRegister();
1351 Register scratch = t1;
1352 Register result = v0;
1353 DCHECK(!scratch.is(receiver) && !scratch.is(index));
1354 DCHECK(!FLAG_vector_ics ||
1355 (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
1356 result.is(VectorLoadICDescriptor::SlotRegister())));
1358 // StringCharAtGenerator doesn't use the result register until it's passed
1359 // the different miss possibilities. If it did, we would have a conflict
1360 // when FLAG_vector_ics is true.
1361 StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
1362 &miss, // When not a string.
1363 &miss, // When not a number.
1364 &miss, // When index out of range.
1365 STRING_INDEX_IS_ARRAY_INDEX,
1366 RECEIVER_IS_STRING);
1367 char_at_generator.GenerateFast(masm);
1370 StubRuntimeCallHelper call_helper;
1371 char_at_generator.GenerateSlow(masm, call_helper);
1374 PropertyAccessCompiler::TailCallBuiltin(
1375 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1379 // Uses registers a0 to t0.
1380 // Expected input (depending on whether args are in registers or on the stack):
1381 // * object: a0 or at sp + 1 * kPointerSize.
1382 // * function: a1 or at sp.
1384 // An inlined call site may have been generated before calling this stub.
1385 // In this case the offset to the inline site to patch is passed on the stack,
1386 // in the safepoint slot for register t0.
1387 void InstanceofStub::Generate(MacroAssembler* masm) {
1388 // Call site inlining and patching implies arguments in registers.
1389 DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1391 // Fixed register usage throughout the stub:
1392 const Register object = a0; // Object (lhs).
1393 Register map = a3; // Map of the object.
1394 const Register function = a1; // Function (rhs).
1395 const Register prototype = t0; // Prototype of the function.
1396 const Register inline_site = t5;
1397 const Register scratch = a2;
1399 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
1401 Label slow, loop, is_instance, is_not_instance, not_js_object;
1403 if (!HasArgsInRegisters()) {
1404 __ lw(object, MemOperand(sp, 1 * kPointerSize));
1405 __ lw(function, MemOperand(sp, 0));
1408 // Check that the left hand is a JS object and load map.
1409 __ JumpIfSmi(object, ¬_js_object);
1410 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
1412 // If there is a call site cache don't look in the global cache, but do the
1413 // real lookup and update the call site cache.
1414 if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
1416 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
1417 __ Branch(&miss, ne, function, Operand(at));
1418 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
1419 __ Branch(&miss, ne, map, Operand(at));
1420 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1421 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1426 // Get the prototype of the function.
1427 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1429 // Check that the function prototype is a JS object.
1430 __ JumpIfSmi(prototype, &slow);
1431 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1433 // Update the global instanceof or call site inlined cache with the current
1434 // map and function. The cached answer will be set when it is known below.
1435 if (!HasCallSiteInlineCheck()) {
1436 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1437 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1439 DCHECK(HasArgsInRegisters());
1440 // Patch the (relocated) inlined map check.
1442 // The offset was stored in t0 safepoint slot.
1443 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1444 __ LoadFromSafepointRegisterSlot(scratch, t0);
1445 __ Subu(inline_site, ra, scratch);
1446 // Get the map location in scratch and patch it.
1447 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
1448 __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
1451 // Register mapping: a3 is object map and t0 is function prototype.
1452 // Get prototype of object into a2.
1453 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1455 // We don't need map any more. Use it as a scratch register.
1456 Register scratch2 = map;
1459 // Loop through the prototype chain looking for the function prototype.
1460 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1462 __ Branch(&is_instance, eq, scratch, Operand(prototype));
1463 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
1464 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1465 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1468 __ bind(&is_instance);
1469 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
1470 if (!HasCallSiteInlineCheck()) {
1471 __ mov(v0, zero_reg);
1472 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1473 if (ReturnTrueFalseObject()) {
1474 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1477 // Patch the call site to return true.
1478 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1479 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1480 // Get the boolean result location in scratch and patch it.
1481 __ PatchRelocatedValue(inline_site, scratch, v0);
1483 if (!ReturnTrueFalseObject()) {
1484 __ mov(v0, zero_reg);
1487 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1489 __ bind(&is_not_instance);
1490 if (!HasCallSiteInlineCheck()) {
1491 __ li(v0, Operand(Smi::FromInt(1)));
1492 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1493 if (ReturnTrueFalseObject()) {
1494 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1497 // Patch the call site to return false.
1498 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1499 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1500 // Get the boolean result location in scratch and patch it.
1501 __ PatchRelocatedValue(inline_site, scratch, v0);
1503 if (!ReturnTrueFalseObject()) {
1504 __ li(v0, Operand(Smi::FromInt(1)));
1508 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1510 Label object_not_null, object_not_null_or_smi;
1511 __ bind(¬_js_object);
1512 // Before null, smi and string value checks, check that the rhs is a function
1513 // as for a non-function rhs an exception needs to be thrown.
1514 __ JumpIfSmi(function, &slow);
1515 __ GetObjectType(function, scratch2, scratch);
1516 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
1518 // Null is not instance of anything.
1519 __ Branch(&object_not_null, ne, object,
1520 Operand(isolate()->factory()->null_value()));
1521 if (ReturnTrueFalseObject()) {
1522 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1524 __ li(v0, Operand(Smi::FromInt(1)));
1526 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1528 __ bind(&object_not_null);
1529 // Smi values are not instances of anything.
1530 __ JumpIfNotSmi(object, &object_not_null_or_smi);
1531 if (ReturnTrueFalseObject()) {
1532 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1534 __ li(v0, Operand(Smi::FromInt(1)));
1536 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1538 __ bind(&object_not_null_or_smi);
1539 // String values are not instances of anything.
1540 __ IsObjectJSStringType(object, scratch, &slow);
1541 if (ReturnTrueFalseObject()) {
1542 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1544 __ li(v0, Operand(Smi::FromInt(1)));
1546 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1548 // Slow-case. Tail call builtin.
1550 if (!ReturnTrueFalseObject()) {
1551 if (HasArgsInRegisters()) {
1554 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
1557 FrameScope scope(masm, StackFrame::INTERNAL);
1559 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1562 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1563 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
1564 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1565 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1570 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1572 Register receiver = LoadDescriptor::ReceiverRegister();
1573 // Ensure that the vector and slot registers won't be clobbered before
1574 // calling the miss handler.
1575 DCHECK(!FLAG_vector_ics ||
1576 !AreAliased(t0, t1, VectorLoadICDescriptor::VectorRegister(),
1577 VectorLoadICDescriptor::SlotRegister()));
1579 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, t0,
1582 PropertyAccessCompiler::TailCallBuiltin(
1583 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1587 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1588 CHECK(!has_new_target());
1589 // The displacement is the offset of the last parameter (if any)
1590 // relative to the frame pointer.
1591 const int kDisplacement =
1592 StandardFrameConstants::kCallerSPOffset - kPointerSize;
1593 DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
1594 DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
1596 // Check that the key is a smiGenerateReadElement.
1598 __ JumpIfNotSmi(a1, &slow);
1600 // Check if the calling frame is an arguments adaptor frame.
1602 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1603 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1607 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1609 // Check index (a1) against formal parameters count limit passed in
1610 // through register a0. Use unsigned comparison to get negative
1612 __ Branch(&slow, hs, a1, Operand(a0));
1614 // Read the argument from the stack and return it.
1615 __ subu(a3, a0, a1);
1616 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
1617 __ Addu(a3, fp, Operand(t3));
1618 __ Ret(USE_DELAY_SLOT);
1619 __ lw(v0, MemOperand(a3, kDisplacement));
1621 // Arguments adaptor case: Check index (a1) against actual arguments
1622 // limit found in the arguments adaptor frame. Use unsigned
1623 // comparison to get negative check for free.
1625 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1626 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
1628 // Read the argument from the adaptor frame and return it.
1629 __ subu(a3, a0, a1);
1630 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
1631 __ Addu(a3, a2, Operand(t3));
1632 __ Ret(USE_DELAY_SLOT);
1633 __ lw(v0, MemOperand(a3, kDisplacement));
1635 // Slow-case: Handle non-smi or out-of-bounds access to arguments
1636 // by calling the runtime system.
1639 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1643 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1644 // sp[0] : number of parameters
1645 // sp[4] : receiver displacement
1648 CHECK(!has_new_target());
1650 // Check if the calling frame is an arguments adaptor frame.
1652 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1653 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
1657 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1659 // Patch the arguments.length and the parameters pointer in the current frame.
1660 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1661 __ sw(a2, MemOperand(sp, 0 * kPointerSize));
1663 __ Addu(a3, a3, Operand(t3));
1664 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
1665 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1668 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1672 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1674 // sp[0] : number of parameters (tagged)
1675 // sp[4] : address of receiver argument
1677 // Registers used over whole function:
1678 // t2 : allocated object (tagged)
1679 // t5 : mapped parameter count (tagged)
1681 CHECK(!has_new_target());
1683 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
1684 // a1 = parameter count (tagged)
1686 // Check if the calling frame is an arguments adaptor frame.
1688 Label adaptor_frame, try_allocate;
1689 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1690 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
1691 __ Branch(&adaptor_frame,
1694 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1696 // No adaptor, parameter count = argument count.
1698 __ b(&try_allocate);
1699 __ nop(); // Branch delay slot nop.
1701 // We have an adaptor frame. Patch the parameters pointer.
1702 __ bind(&adaptor_frame);
1703 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1705 __ Addu(a3, a3, Operand(t6));
1706 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1707 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1709 // a1 = parameter count (tagged)
1710 // a2 = argument count (tagged)
1711 // Compute the mapped parameter count = min(a1, a2) in a1.
1713 __ Branch(&skip_min, lt, a1, Operand(a2));
1717 __ bind(&try_allocate);
1719 // Compute the sizes of backing store, parameter map, and arguments object.
1720 // 1. Parameter map, has 2 extra words containing context and backing store.
1721 const int kParameterMapHeaderSize =
1722 FixedArray::kHeaderSize + 2 * kPointerSize;
1723 // If there are no mapped parameters, we do not need the parameter_map.
1724 Label param_map_size;
1725 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
1726 __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg));
1727 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
1729 __ addiu(t5, t5, kParameterMapHeaderSize);
1730 __ bind(¶m_map_size);
1732 // 2. Backing store.
1734 __ Addu(t5, t5, Operand(t6));
1735 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
1737 // 3. Arguments object.
1738 __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
1740 // Do the allocation of all three objects in one go.
1741 __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
1743 // v0 = address of new object(s) (tagged)
1744 // a2 = argument count (smi-tagged)
1745 // Get the arguments boilerplate from the current native context into t0.
1746 const int kNormalOffset =
1747 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
1748 const int kAliasedOffset =
1749 Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
1751 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1752 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
1753 Label skip2_ne, skip2_eq;
1754 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
1755 __ lw(t0, MemOperand(t0, kNormalOffset));
1758 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
1759 __ lw(t0, MemOperand(t0, kAliasedOffset));
1762 // v0 = address of new object (tagged)
1763 // a1 = mapped parameter count (tagged)
1764 // a2 = argument count (smi-tagged)
1765 // t0 = address of arguments map (tagged)
1766 __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
1767 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
1768 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
1769 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
1771 // Set up the callee in-object property.
1772 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1773 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
1774 __ AssertNotSmi(a3);
1775 const int kCalleeOffset = JSObject::kHeaderSize +
1776 Heap::kArgumentsCalleeIndex * kPointerSize;
1777 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
1779 // Use the length (smi tagged) and set that as an in-object property too.
1781 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1782 const int kLengthOffset = JSObject::kHeaderSize +
1783 Heap::kArgumentsLengthIndex * kPointerSize;
1784 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
1786 // Set up the elements pointer in the allocated arguments object.
1787 // If we allocated a parameter map, t0 will point there, otherwise
1788 // it will point to the backing store.
1789 __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
1790 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
1792 // v0 = address of new object (tagged)
1793 // a1 = mapped parameter count (tagged)
1794 // a2 = argument count (tagged)
1795 // t0 = address of parameter map or backing store (tagged)
1796 // Initialize parameter map. If there are no mapped arguments, we're done.
1797 Label skip_parameter_map;
1799 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
1800 // Move backing store address to a3, because it is
1801 // expected there when filling in the unmapped arguments.
1805 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
1807 __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
1808 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
1809 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
1810 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
1811 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
1813 __ Addu(t2, t0, Operand(t6));
1814 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
1815 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
1817 // Copy the parameter slots and the holes in the arguments.
1818 // We need to fill in mapped_parameter_count slots. They index the context,
1819 // where parameters are stored in reverse order, at
1820 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
1821 // The mapped parameter thus need to get indices
1822 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
1823 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
1824 // We loop from right to left.
1825 Label parameters_loop, parameters_test;
1827 __ lw(t5, MemOperand(sp, 0 * kPointerSize));
1828 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
1829 __ Subu(t5, t5, Operand(a1));
1830 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
1832 __ Addu(a3, t0, Operand(t6));
1833 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
1835 // t2 = loop variable (tagged)
1836 // a1 = mapping index (tagged)
1837 // a3 = address of backing store (tagged)
1838 // t0 = address of parameter map (tagged)
1839 // t1 = temporary scratch (a.o., for address calculation)
1840 // t3 = the hole value
1841 __ jmp(¶meters_test);
1843 __ bind(¶meters_loop);
1844 __ Subu(t2, t2, Operand(Smi::FromInt(1)));
1846 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
1847 __ Addu(t6, t0, t1);
1848 __ sw(t5, MemOperand(t6));
1849 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
1850 __ Addu(t6, a3, t1);
1851 __ sw(t3, MemOperand(t6));
1852 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
1853 __ bind(¶meters_test);
1854 __ Branch(¶meters_loop, ne, t2, Operand(Smi::FromInt(0)));
1856 __ bind(&skip_parameter_map);
1857 // a2 = argument count (tagged)
1858 // a3 = address of backing store (tagged)
1860 // Copy arguments header and remaining slots (if there are any).
1861 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
1862 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
1863 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
1865 Label arguments_loop, arguments_test;
1867 __ lw(t0, MemOperand(sp, 1 * kPointerSize));
1869 __ Subu(t0, t0, Operand(t6));
1870 __ jmp(&arguments_test);
1872 __ bind(&arguments_loop);
1873 __ Subu(t0, t0, Operand(kPointerSize));
1874 __ lw(t2, MemOperand(t0, 0));
1876 __ Addu(t1, a3, Operand(t6));
1877 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
1878 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
1880 __ bind(&arguments_test);
1881 __ Branch(&arguments_loop, lt, t5, Operand(a2));
1883 // Return and remove the on-stack parameters.
1886 // Do the runtime call to allocate the arguments object.
1887 // a2 = argument count (tagged)
1889 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
1890 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1894 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1895 // Return address is in ra.
1898 Register receiver = LoadDescriptor::ReceiverRegister();
1899 Register key = LoadDescriptor::NameRegister();
1901 // Check that the key is an array index, that is Uint32.
1902 __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
1903 __ Branch(&slow, ne, t0, Operand(zero_reg));
1905 // Everything is fine, call runtime.
1906 __ Push(receiver, key); // Receiver, key.
1908 // Perform tail call to the entry.
1909 __ TailCallExternalReference(
1910 ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
1915 PropertyAccessCompiler::TailCallBuiltin(
1916 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1920 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1921 // sp[0] : number of parameters
1922 // sp[4] : receiver displacement
1924 // Check if the calling frame is an arguments adaptor frame.
1925 Label adaptor_frame, try_allocate, runtime;
1926 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1927 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1928 __ Branch(&adaptor_frame,
1931 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1933 // Get the length from the frame.
1934 __ lw(a1, MemOperand(sp, 0));
1935 __ Branch(&try_allocate);
1937 // Patch the arguments.length and the parameters pointer.
1938 __ bind(&adaptor_frame);
1939 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1940 if (has_new_target()) {
1941 // Subtract 1 from smi-tagged arguments count.
1942 __ Subu(a1, a1, Operand(2));
1944 __ sw(a1, MemOperand(sp, 0));
1945 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
1946 __ Addu(a3, a2, Operand(at));
1948 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1949 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1951 // Try the new space allocation. Start out with computing the size
1952 // of the arguments object and the elements array in words.
1953 Label add_arguments_object;
1954 __ bind(&try_allocate);
1955 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
1956 __ srl(a1, a1, kSmiTagSize);
1958 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
1959 __ bind(&add_arguments_object);
1960 __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
1962 // Do the allocation of both objects in one go.
1963 __ Allocate(a1, v0, a2, a3, &runtime,
1964 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
1966 // Get the arguments boilerplate from the current native context.
1967 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1968 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
1969 __ lw(t0, MemOperand(
1970 t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
1972 __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
1973 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
1974 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
1975 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
1977 // Get the length (smi tagged) and set that as an in-object property too.
1978 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1979 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
1981 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
1982 Heap::kArgumentsLengthIndex * kPointerSize));
1985 __ Branch(&done, eq, a1, Operand(zero_reg));
1987 // Get the parameters pointer from the stack.
1988 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
1990 // Set up the elements pointer in the allocated arguments object and
1991 // initialize the header in the elements fixed array.
1992 __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
1993 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
1994 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
1995 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
1996 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
1997 // Untag the length for the loop.
1998 __ srl(a1, a1, kSmiTagSize);
2000 // Copy the fixed array slots.
2002 // Set up t0 to point to the first array slot.
2003 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2005 // Pre-decrement a2 with kPointerSize on each iteration.
2006 // Pre-decrement in order to skip receiver.
2007 __ Addu(a2, a2, Operand(-kPointerSize));
2008 __ lw(a3, MemOperand(a2));
2009 // Post-increment t0 with kPointerSize on each iteration.
2010 __ sw(a3, MemOperand(t0));
2011 __ Addu(t0, t0, Operand(kPointerSize));
2012 __ Subu(a1, a1, Operand(1));
2013 __ Branch(&loop, ne, a1, Operand(zero_reg));
2015 // Return and remove the on-stack parameters.
2019 // Do the runtime call to allocate the arguments object.
2021 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
2025 void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
2026 // sp[0] : index of rest parameter
2027 // sp[4] : number of parameters
2028 // sp[8] : receiver displacement
2029 // Check if the calling frame is an arguments adaptor frame.
2032 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2033 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
2034 __ Branch(&runtime, ne, a3,
2035 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2037 // Patch the arguments.length and the parameters pointer.
2038 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2039 __ sw(a1, MemOperand(sp, 1 * kPointerSize));
2040 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
2041 __ Addu(a3, a2, Operand(at));
2043 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2044 __ sw(a3, MemOperand(sp, 2 * kPointerSize));
2046 // Do the runtime call to allocate the arguments object.
2048 __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
2052 void RegExpExecStub::Generate(MacroAssembler* masm) {
2053 // Just jump directly to runtime if native RegExp is not selected at compile
2054 // time or if regexp entry in generated code is turned off runtime switch or
2056 #ifdef V8_INTERPRETED_REGEXP
2057 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2058 #else // V8_INTERPRETED_REGEXP
2060 // Stack frame on entry.
2061 // sp[0]: last_match_info (expected JSArray)
2062 // sp[4]: previous index
2063 // sp[8]: subject string
2064 // sp[12]: JSRegExp object
2066 const int kLastMatchInfoOffset = 0 * kPointerSize;
2067 const int kPreviousIndexOffset = 1 * kPointerSize;
2068 const int kSubjectOffset = 2 * kPointerSize;
2069 const int kJSRegExpOffset = 3 * kPointerSize;
2072 // Allocation of registers for this function. These are in callee save
2073 // registers and will be preserved by the call to the native RegExp code, as
2074 // this code is called using the normal C calling convention. When calling
2075 // directly from generated code the native RegExp code will not do a GC and
2076 // therefore the content of these registers are safe to use after the call.
2077 // MIPS - using s0..s2, since we are not using CEntry Stub.
2078 Register subject = s0;
2079 Register regexp_data = s1;
2080 Register last_match_info_elements = s2;
2082 // Ensure that a RegExp stack is allocated.
2083 ExternalReference address_of_regexp_stack_memory_address =
2084 ExternalReference::address_of_regexp_stack_memory_address(
2086 ExternalReference address_of_regexp_stack_memory_size =
2087 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2088 __ li(a0, Operand(address_of_regexp_stack_memory_size));
2089 __ lw(a0, MemOperand(a0, 0));
2090 __ Branch(&runtime, eq, a0, Operand(zero_reg));
2092 // Check that the first argument is a JSRegExp object.
2093 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
2094 STATIC_ASSERT(kSmiTag == 0);
2095 __ JumpIfSmi(a0, &runtime);
2096 __ GetObjectType(a0, a1, a1);
2097 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2099 // Check that the RegExp has been compiled (data contains a fixed array).
2100 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2101 if (FLAG_debug_code) {
2102 __ SmiTst(regexp_data, t0);
2104 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2107 __ GetObjectType(regexp_data, a0, a0);
2109 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2111 Operand(FIXED_ARRAY_TYPE));
2114 // regexp_data: RegExp data (FixedArray)
2115 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2116 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2117 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2119 // regexp_data: RegExp data (FixedArray)
2120 // Check that the number of captures fit in the static offsets vector buffer.
2122 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2123 // Check (number_of_captures + 1) * 2 <= offsets vector size
2124 // Or number_of_captures * 2 <= offsets vector size - 2
2125 // Multiplying by 2 comes for free since a2 is smi-tagged.
2126 STATIC_ASSERT(kSmiTag == 0);
2127 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2128 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2130 &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2132 // Reset offset for possibly sliced string.
2133 __ mov(t0, zero_reg);
2134 __ lw(subject, MemOperand(sp, kSubjectOffset));
2135 __ JumpIfSmi(subject, &runtime);
2136 __ mov(a3, subject); // Make a copy of the original subject string.
2137 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2138 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2139 // subject: subject string
2140 // a3: subject string
2141 // a0: subject string instance type
2142 // regexp_data: RegExp data (FixedArray)
2143 // Handle subject string according to its encoding and representation:
2144 // (1) Sequential string? If yes, go to (5).
2145 // (2) Anything but sequential or cons? If yes, go to (6).
2146 // (3) Cons string. If the string is flat, replace subject with first string.
2147 // Otherwise bailout.
2148 // (4) Is subject external? If yes, go to (7).
2149 // (5) Sequential string. Load regexp code according to encoding.
2153 // Deferred code at the end of the stub:
2154 // (6) Not a long external string? If yes, go to (8).
2155 // (7) External string. Make it, offset-wise, look like a sequential string.
2157 // (8) Short external string or not a string? If yes, bail out to runtime.
2158 // (9) Sliced string. Replace subject with parent. Go to (4).
2160 Label seq_string /* 5 */, external_string /* 7 */,
2161 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2162 not_long_external /* 8 */;
2164 // (1) Sequential string? If yes, go to (5).
2167 Operand(kIsNotStringMask |
2168 kStringRepresentationMask |
2169 kShortExternalStringMask));
2170 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2171 __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
2173 // (2) Anything but sequential or cons? If yes, go to (6).
2174 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2175 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2176 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2177 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2179 __ Branch(¬_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2181 // (3) Cons string. Check that it's flat.
2182 // Replace subject with first string and reload instance type.
2183 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2184 __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2185 __ Branch(&runtime, ne, a0, Operand(a1));
2186 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2188 // (4) Is subject external? If yes, go to (7).
2189 __ bind(&check_underlying);
2190 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2191 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2192 STATIC_ASSERT(kSeqStringTag == 0);
2193 __ And(at, a0, Operand(kStringRepresentationMask));
2194 // The underlying external string is never a short external string.
2195 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2196 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2197 __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
2199 // (5) Sequential string. Load regexp code according to encoding.
2200 __ bind(&seq_string);
2201 // subject: sequential subject string (or look-alike, external string)
2202 // a3: original subject string
2203 // Load previous index and check range before a3 is overwritten. We have to
2204 // use a3 instead of subject here because subject might have been only made
2205 // to look like a sequential string when it actually is an external string.
2206 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
2207 __ JumpIfNotSmi(a1, &runtime);
2208 __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
2209 __ Branch(&runtime, ls, a3, Operand(a1));
2210 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
2212 STATIC_ASSERT(kStringEncodingMask == 4);
2213 STATIC_ASSERT(kOneByteStringTag == 4);
2214 STATIC_ASSERT(kTwoByteStringTag == 0);
2215 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one-byte.
2216 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
2217 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
2218 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2219 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
2221 // (E) Carry on. String handling is done.
2222 // t9: irregexp code
2223 // Check that the irregexp code has been generated for the actual string
2224 // encoding. If it has, the field contains a code object otherwise it contains
2225 // a smi (code flushing support).
2226 __ JumpIfSmi(t9, &runtime);
2228 // a1: previous index
2229 // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
2231 // subject: Subject string
2232 // regexp_data: RegExp data (FixedArray)
2233 // All checks done. Now push arguments for native regexp code.
2234 __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
2237 // Isolates: note we add an additional parameter here (isolate pointer).
2238 const int kRegExpExecuteArguments = 9;
2239 const int kParameterRegisters = 4;
2240 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2242 // Stack pointer now points to cell where return address is to be written.
2243 // Arguments are before that on the stack or in registers, meaning we
2244 // treat the return address as argument 5. Thus every argument after that
2245 // needs to be shifted back by 1. Since DirectCEntryStub will handle
2246 // allocating space for the c argument slots, we don't need to calculate
2247 // that into the argument positions on the stack. This is how the stack will
2248 // look (sp meaning the value of sp at this moment):
2249 // [sp + 5] - Argument 9
2250 // [sp + 4] - Argument 8
2251 // [sp + 3] - Argument 7
2252 // [sp + 2] - Argument 6
2253 // [sp + 1] - Argument 5
2254 // [sp + 0] - saved ra
2256 // Argument 9: Pass current isolate address.
2257 // CFunctionArgumentOperand handles MIPS stack argument slots.
2258 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2259 __ sw(a0, MemOperand(sp, 5 * kPointerSize));
2261 // Argument 8: Indicate that this is a direct call from JavaScript.
2262 __ li(a0, Operand(1));
2263 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
2265 // Argument 7: Start (high end) of backtracking stack memory area.
2266 __ li(a0, Operand(address_of_regexp_stack_memory_address));
2267 __ lw(a0, MemOperand(a0, 0));
2268 __ li(a2, Operand(address_of_regexp_stack_memory_size));
2269 __ lw(a2, MemOperand(a2, 0));
2270 __ addu(a0, a0, a2);
2271 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
2273 // Argument 6: Set the number of capture registers to zero to force global
2274 // regexps to behave as non-global. This does not affect non-global regexps.
2275 __ mov(a0, zero_reg);
2276 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
2278 // Argument 5: static offsets vector buffer.
2280 ExternalReference::address_of_static_offsets_vector(isolate())));
2281 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
2283 // For arguments 4 and 3 get string length, calculate start of string data
2284 // calculate the shift of the index (0 for one-byte and 1 for two-byte).
2285 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2286 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
2287 // Load the length from the original subject string from the previous stack
2288 // frame. Therefore we have to use fp, which points exactly to two pointer
2289 // sizes below the previous sp. (Because creating a new stack frame pushes
2290 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2291 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2292 // If slice offset is not 0, load the length from the original sliced string.
2293 // Argument 4, a3: End of string data
2294 // Argument 3, a2: Start of string data
2295 // Prepare start and end index of the input.
2296 __ sllv(t1, t0, a3);
2297 __ addu(t0, t2, t1);
2298 __ sllv(t1, a1, a3);
2299 __ addu(a2, t0, t1);
2301 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
2302 __ sra(t2, t2, kSmiTagSize);
2303 __ sllv(t1, t2, a3);
2304 __ addu(a3, t0, t1);
2305 // Argument 2 (a1): Previous index.
2308 // Argument 1 (a0): Subject string.
2309 __ mov(a0, subject);
2311 // Locate the code entry and call it.
2312 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
2313 DirectCEntryStub stub(isolate());
2314 stub.GenerateCall(masm, t9);
2316 __ LeaveExitFrame(false, no_reg, true);
2319 // subject: subject string (callee saved)
2320 // regexp_data: RegExp data (callee saved)
2321 // last_match_info_elements: Last match info elements (callee saved)
2322 // Check the result.
2324 __ Branch(&success, eq, v0, Operand(1));
2325 // We expect exactly one result since we force the called regexp to behave
2328 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
2329 // If not exception it can only be retry. Handle that in the runtime system.
2330 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2331 // Result must now be exception. If there is no pending exception already a
2332 // stack overflow (on the backtrack stack) was detected in RegExp code but
2333 // haven't created the exception yet. Handle that in the runtime system.
2334 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2335 __ li(a1, Operand(isolate()->factory()->the_hole_value()));
2336 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2338 __ lw(v0, MemOperand(a2, 0));
2339 __ Branch(&runtime, eq, v0, Operand(a1));
2341 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
2343 // Check if the exception is a termination. If so, throw as uncatchable.
2344 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
2345 Label termination_exception;
2346 __ Branch(&termination_exception, eq, v0, Operand(a0));
2350 __ bind(&termination_exception);
2351 __ ThrowUncatchable(v0);
2354 // For failure and exception return null.
2355 __ li(v0, Operand(isolate()->factory()->null_value()));
2358 // Process the result from the native regexp code.
2361 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2362 // Calculate number of capture registers (number_of_captures + 1) * 2.
2363 // Multiplying by 2 comes for free since r1 is smi-tagged.
2364 STATIC_ASSERT(kSmiTag == 0);
2365 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2366 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
2368 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
2369 __ JumpIfSmi(a0, &runtime);
2370 __ GetObjectType(a0, a2, a2);
2371 __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
2372 // Check that the JSArray is in fast case.
2373 __ lw(last_match_info_elements,
2374 FieldMemOperand(a0, JSArray::kElementsOffset));
2375 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2376 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2377 __ Branch(&runtime, ne, a0, Operand(at));
2378 // Check that the last match info has space for the capture registers and the
2379 // additional information.
2381 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2382 __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
2383 __ sra(at, a0, kSmiTagSize);
2384 __ Branch(&runtime, gt, a2, Operand(at));
2386 // a1: number of capture registers
2387 // subject: subject string
2388 // Store the capture count.
2389 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
2390 __ sw(a2, FieldMemOperand(last_match_info_elements,
2391 RegExpImpl::kLastCaptureCountOffset));
2392 // Store last subject and last input.
2394 FieldMemOperand(last_match_info_elements,
2395 RegExpImpl::kLastSubjectOffset));
2396 __ mov(a2, subject);
2397 __ RecordWriteField(last_match_info_elements,
2398 RegExpImpl::kLastSubjectOffset,
2403 __ mov(subject, a2);
2405 FieldMemOperand(last_match_info_elements,
2406 RegExpImpl::kLastInputOffset));
2407 __ RecordWriteField(last_match_info_elements,
2408 RegExpImpl::kLastInputOffset,
2414 // Get the static offsets vector filled by the native regexp code.
2415 ExternalReference address_of_static_offsets_vector =
2416 ExternalReference::address_of_static_offsets_vector(isolate());
2417 __ li(a2, Operand(address_of_static_offsets_vector));
2419 // a1: number of capture registers
2420 // a2: offsets vector
2421 Label next_capture, done;
2422 // Capture register counter starts from number of capture registers and
2423 // counts down until wrapping after zero.
2425 last_match_info_elements,
2426 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2427 __ bind(&next_capture);
2428 __ Subu(a1, a1, Operand(1));
2429 __ Branch(&done, lt, a1, Operand(zero_reg));
2430 // Read the value from the static offsets vector buffer.
2431 __ lw(a3, MemOperand(a2, 0));
2432 __ addiu(a2, a2, kPointerSize);
2433 // Store the smi value in the last match info.
2434 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
2435 __ sw(a3, MemOperand(a0, 0));
2436 __ Branch(&next_capture, USE_DELAY_SLOT);
2437 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
2441 // Return last match info.
2442 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
2445 // Do the runtime call to execute the regexp.
2447 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2449 // Deferred code for string handling.
2450 // (6) Not a long external string? If yes, go to (8).
2451 __ bind(¬_seq_nor_cons);
2453 __ Branch(¬_long_external, gt, a1, Operand(kExternalStringTag));
2455 // (7) External string. Make it, offset-wise, look like a sequential string.
2456 __ bind(&external_string);
2457 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2458 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2459 if (FLAG_debug_code) {
2460 // Assert that we do not have a cons or slice (indirect strings) here.
2461 // Sequential strings have already been ruled out.
2462 __ And(at, a0, Operand(kIsIndirectStringMask));
2464 kExternalStringExpectedButNotFound,
2469 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2470 // Move the pointer so that offset-wise, it looks like a sequential string.
2471 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2474 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
2475 __ jmp(&seq_string); // Go to (5).
2477 // (8) Short external string or not a string? If yes, bail out to runtime.
2478 __ bind(¬_long_external);
2479 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
2480 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
2481 __ Branch(&runtime, ne, at, Operand(zero_reg));
2483 // (9) Sliced string. Replace subject with parent. Go to (4).
2484 // Load offset into t0 and replace subject string with parent.
2485 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2486 __ sra(t0, t0, kSmiTagSize);
2487 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2488 __ jmp(&check_underlying); // Go to (4).
2489 #endif // V8_INTERPRETED_REGEXP
2493 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2494 // Cache the called function in a feedback vector slot. Cache states
2495 // are uninitialized, monomorphic (indicated by a JSFunction), and
2497 // a0 : number of arguments to the construct function
2498 // a1 : the function to call
2499 // a2 : Feedback vector
2500 // a3 : slot in feedback vector (Smi)
2501 Label initialize, done, miss, megamorphic, not_array_function;
2503 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2504 masm->isolate()->heap()->megamorphic_symbol());
2505 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2506 masm->isolate()->heap()->uninitialized_symbol());
2508 // Load the cache state into t0.
2509 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2510 __ Addu(t0, a2, Operand(t0));
2511 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
2513 // A monomorphic cache hit or an already megamorphic state: invoke the
2514 // function without changing the state.
2515 __ Branch(&done, eq, t0, Operand(a1));
2517 if (!FLAG_pretenuring_call_new) {
2518 // If we came here, we need to see if we are the array function.
2519 // If we didn't have a matching function, and we didn't find the megamorph
2520 // sentinel, then we have in the slot either some other function or an
2521 // AllocationSite. Do a map check on the object in a3.
2522 __ lw(t1, FieldMemOperand(t0, 0));
2523 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2524 __ Branch(&miss, ne, t1, Operand(at));
2526 // Make sure the function is the Array() function
2527 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2528 __ Branch(&megamorphic, ne, a1, Operand(t0));
2534 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
2536 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2537 __ Branch(&initialize, eq, t0, Operand(at));
2538 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2539 // write-barrier is needed.
2540 __ bind(&megamorphic);
2541 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2542 __ Addu(t0, a2, Operand(t0));
2543 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2544 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
2547 // An uninitialized cache is patched with the function.
2548 __ bind(&initialize);
2549 if (!FLAG_pretenuring_call_new) {
2550 // Make sure the function is the Array() function.
2551 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2552 __ Branch(¬_array_function, ne, a1, Operand(t0));
2554 // The target function is the Array constructor,
2555 // Create an AllocationSite if we don't already have it, store it in the
2558 FrameScope scope(masm, StackFrame::INTERNAL);
2559 const RegList kSavedRegs =
2565 // Arguments register must be smi-tagged to call out.
2567 __ MultiPush(kSavedRegs);
2569 CreateAllocationSiteStub create_stub(masm->isolate());
2570 __ CallStub(&create_stub);
2572 __ MultiPop(kSavedRegs);
2577 __ bind(¬_array_function);
2580 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2581 __ Addu(t0, a2, Operand(t0));
2582 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2583 __ sw(a1, MemOperand(t0, 0));
2585 __ Push(t0, a2, a1);
2586 __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
2587 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
2594 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2595 __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2596 __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
2598 // Do not transform the receiver for strict mode functions.
2599 int32_t strict_mode_function_mask =
2600 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
2601 // Do not transform the receiver for native (Compilerhints already in a3).
2602 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
2603 __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
2604 __ Branch(cont, ne, at, Operand(zero_reg));
2608 static void EmitSlowCase(MacroAssembler* masm,
2610 Label* non_function) {
2611 // Check for function proxy.
2612 __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
2613 __ push(a1); // put proxy as additional argument
2614 __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
2615 __ mov(a2, zero_reg);
2616 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
2618 Handle<Code> adaptor =
2619 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2620 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2623 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2624 // of the original receiver from the call site).
2625 __ bind(non_function);
2626 __ sw(a1, MemOperand(sp, argc * kPointerSize));
2627 __ li(a0, Operand(argc)); // Set up the number of arguments.
2628 __ mov(a2, zero_reg);
2629 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
2630 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2631 RelocInfo::CODE_TARGET);
2635 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2636 // Wrap the receiver and patch it back onto the stack.
2637 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
2639 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2642 __ Branch(USE_DELAY_SLOT, cont);
2643 __ sw(v0, MemOperand(sp, argc * kPointerSize));
2647 static void CallFunctionNoFeedback(MacroAssembler* masm,
2648 int argc, bool needs_checks,
2649 bool call_as_method) {
2650 // a1 : the function to call
2651 Label slow, non_function, wrap, cont;
2654 // Check that the function is really a JavaScript function.
2655 // a1: pushed function (to be verified)
2656 __ JumpIfSmi(a1, &non_function);
2658 // Goto slow case if we do not have a function.
2659 __ GetObjectType(a1, t0, t0);
2660 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2663 // Fast-case: Invoke the function now.
2664 // a1: pushed function
2665 ParameterCount actual(argc);
2667 if (call_as_method) {
2669 EmitContinueIfStrictOrNative(masm, &cont);
2672 // Compute the receiver in sloppy mode.
2673 __ lw(a3, MemOperand(sp, argc * kPointerSize));
2676 __ JumpIfSmi(a3, &wrap);
2677 __ GetObjectType(a3, t0, t0);
2678 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
2686 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2689 // Slow-case: Non-function called.
2691 EmitSlowCase(masm, argc, &non_function);
2694 if (call_as_method) {
2696 // Wrap the receiver and patch it back onto the stack.
2697 EmitWrapCase(masm, argc, &cont);
2702 void CallFunctionStub::Generate(MacroAssembler* masm) {
2703 CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2707 void CallConstructStub::Generate(MacroAssembler* masm) {
2708 // a0 : number of arguments
2709 // a1 : the function to call
2710 // a2 : feedback vector
2711 // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
2712 Label slow, non_function_call;
2714 // Check that the function is not a smi.
2715 __ JumpIfSmi(a1, &non_function_call);
2716 // Check that the function is a JSFunction.
2717 __ GetObjectType(a1, t0, t0);
2718 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2720 if (RecordCallTarget()) {
2721 GenerateRecordCallTarget(masm);
2723 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2724 __ Addu(t1, a2, at);
2725 if (FLAG_pretenuring_call_new) {
2726 // Put the AllocationSite from the feedback vector into a2.
2727 // By adding kPointerSize we encode that we know the AllocationSite
2728 // entry is at the feedback vector slot given by a3 + 1.
2729 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
2731 Label feedback_register_initialized;
2732 // Put the AllocationSite from the feedback vector into a2, or undefined.
2733 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
2734 __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
2735 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2736 __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
2737 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
2738 __ bind(&feedback_register_initialized);
2741 __ AssertUndefinedOrAllocationSite(a2, t1);
2744 // Pass function as original constructor.
2745 if (IsSuperConstructorCall()) {
2746 __ li(t0, Operand(1 * kPointerSize));
2747 __ sll(at, a0, kPointerSizeLog2);
2748 __ Addu(t0, t0, Operand(at));
2749 __ Addu(at, sp, Operand(t0));
2750 __ lw(a3, MemOperand(at, 0));
2755 // Jump to the function-specific construct stub.
2756 Register jmp_reg = t0;
2757 __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2758 __ lw(jmp_reg, FieldMemOperand(jmp_reg,
2759 SharedFunctionInfo::kConstructStubOffset));
2760 __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
2763 // a0: number of arguments
2764 // a1: called object
2768 __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
2769 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2772 __ bind(&non_function_call);
2773 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2775 // Set expected number of arguments to zero (not changing r0).
2776 __ li(a2, Operand(0, RelocInfo::NONE32));
2777 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2778 RelocInfo::CODE_TARGET);
2782 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2783 __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2784 __ lw(vector, FieldMemOperand(vector,
2785 JSFunction::kSharedFunctionInfoOffset));
2786 __ lw(vector, FieldMemOperand(vector,
2787 SharedFunctionInfo::kFeedbackVectorOffset));
2791 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2797 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
2798 __ Branch(&miss, ne, a1, Operand(at));
2800 __ li(a0, Operand(arg_count()));
2801 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2802 __ Addu(at, a2, Operand(at));
2803 __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize));
2805 // Verify that t0 contains an AllocationSite
2806 __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
2807 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2808 __ Branch(&miss, ne, t1, Operand(at));
2812 ArrayConstructorStub stub(masm->isolate(), arg_count());
2813 __ TailCallStub(&stub);
2818 // The slow case, we need this no matter what to complete a call after a miss.
2819 CallFunctionNoFeedback(masm,
2825 __ stop("Unexpected code address");
2829 void CallICStub::Generate(MacroAssembler* masm) {
2831 // a3 - slot id (Smi)
2833 const int with_types_offset =
2834 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
2835 const int generic_offset =
2836 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
2837 Label extra_checks_or_miss, slow_start;
2838 Label slow, non_function, wrap, cont;
2839 Label have_js_function;
2840 int argc = arg_count();
2841 ParameterCount actual(argc);
2843 // The checks. First, does r1 match the recorded monomorphic target?
2844 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2845 __ Addu(t0, a2, Operand(t0));
2846 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
2848 // We don't know that we have a weak cell. We might have a private symbol
2849 // or an AllocationSite, but the memory is safe to examine.
2850 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
2852 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
2853 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
2854 // computed, meaning that it can't appear to be a pointer. If the low bit is
2855 // 0, then hash is computed, but the 0 bit prevents the field from appearing
2857 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
2858 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
2859 WeakCell::kValueOffset &&
2860 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
2862 __ lw(t1, FieldMemOperand(t0, WeakCell::kValueOffset));
2863 __ Branch(&extra_checks_or_miss, ne, a1, Operand(t1));
2865 // The compare above could have been a SMI/SMI comparison. Guard against this
2866 // convincing us that we have a monomorphic JSFunction.
2867 __ JumpIfSmi(a1, &extra_checks_or_miss);
2869 __ bind(&have_js_function);
2870 if (CallAsMethod()) {
2871 EmitContinueIfStrictOrNative(masm, &cont);
2872 // Compute the receiver in sloppy mode.
2873 __ lw(a3, MemOperand(sp, argc * kPointerSize));
2875 __ JumpIfSmi(a3, &wrap);
2876 __ GetObjectType(a3, t0, t0);
2877 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
2882 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2885 EmitSlowCase(masm, argc, &non_function);
2887 if (CallAsMethod()) {
2889 EmitWrapCase(masm, argc, &cont);
2892 __ bind(&extra_checks_or_miss);
2893 Label uninitialized, miss;
2895 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2896 __ Branch(&slow_start, eq, t0, Operand(at));
2898 // The following cases attempt to handle MISS cases without going to the
2900 if (FLAG_trace_ic) {
2904 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2905 __ Branch(&uninitialized, eq, t0, Operand(at));
2907 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2908 // to handle it here. More complex cases are dealt with in the runtime.
2909 __ AssertNotSmi(t0);
2910 __ GetObjectType(t0, t1, t1);
2911 __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
2912 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2913 __ Addu(t0, a2, Operand(t0));
2914 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2915 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
2916 // We have to update statistics for runtime profiling.
2917 __ lw(t0, FieldMemOperand(a2, with_types_offset));
2918 __ Subu(t0, t0, Operand(Smi::FromInt(1)));
2919 __ sw(t0, FieldMemOperand(a2, with_types_offset));
2920 __ lw(t0, FieldMemOperand(a2, generic_offset));
2921 __ Addu(t0, t0, Operand(Smi::FromInt(1)));
2922 __ Branch(USE_DELAY_SLOT, &slow_start);
2923 __ sw(t0, FieldMemOperand(a2, generic_offset)); // In delay slot.
2925 __ bind(&uninitialized);
2927 // We are going monomorphic, provided we actually have a JSFunction.
2928 __ JumpIfSmi(a1, &miss);
2930 // Goto miss case if we do not have a function.
2931 __ GetObjectType(a1, t0, t0);
2932 __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE));
2934 // Make sure the function is not the Array() function, which requires special
2935 // behavior on MISS.
2936 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2937 __ Branch(&miss, eq, a1, Operand(t0));
2940 __ lw(t0, FieldMemOperand(a2, with_types_offset));
2941 __ Addu(t0, t0, Operand(Smi::FromInt(1)));
2942 __ sw(t0, FieldMemOperand(a2, with_types_offset));
2944 // Store the function. Use a stub since we need a frame for allocation.
2949 FrameScope scope(masm, StackFrame::INTERNAL);
2950 CreateWeakCellStub create_stub(masm->isolate());
2952 __ CallStub(&create_stub);
2956 __ Branch(&have_js_function);
2958 // We are here because tracing is on or we encountered a MISS case we can't
2964 __ bind(&slow_start);
2965 // Check that the function is really a JavaScript function.
2966 // r1: pushed function (to be verified)
2967 __ JumpIfSmi(a1, &non_function);
2969 // Goto slow case if we do not have a function.
2970 __ GetObjectType(a1, t0, t0);
2971 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2972 __ Branch(&have_js_function);
2976 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2977 FrameScope scope(masm, StackFrame::INTERNAL);
2979 // Push the receiver and the function and feedback info.
2980 __ Push(a1, a2, a3);
2983 IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
2984 : IC::kCallIC_Customization_Miss;
2986 ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
2987 __ CallExternalReference(miss, 3);
2989 // Move result to a1 and exit the internal frame.
2994 // StringCharCodeAtGenerator.
2995 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2996 DCHECK(!t0.is(index_));
2997 DCHECK(!t0.is(result_));
2998 DCHECK(!t0.is(object_));
2999 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
3000 // If the receiver is a smi trigger the non-string case.
3001 __ JumpIfSmi(object_, receiver_not_string_);
3003 // Fetch the instance type of the receiver into result register.
3004 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3005 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3006 // If the receiver is not a string trigger the non-string case.
3007 __ And(t0, result_, Operand(kIsNotStringMask));
3008 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
3011 // If the index is non-smi trigger the non-smi case.
3012 __ JumpIfNotSmi(index_, &index_not_smi_);
3014 __ bind(&got_smi_index_);
3016 // Check for index out of range.
3017 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
3018 __ Branch(index_out_of_range_, ls, t0, Operand(index_));
3020 __ sra(index_, index_, kSmiTagSize);
3022 StringCharLoadGenerator::Generate(masm,
3028 __ sll(result_, result_, kSmiTagSize);
3033 void StringCharCodeAtGenerator::GenerateSlow(
3034 MacroAssembler* masm,
3035 const RuntimeCallHelper& call_helper) {
3036 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3038 // Index is not a smi.
3039 __ bind(&index_not_smi_);
3040 // If index is a heap number, try converting it to an integer.
3043 Heap::kHeapNumberMapRootIndex,
3046 call_helper.BeforeCall(masm);
3047 // Consumed by runtime conversion function:
3048 __ Push(object_, index_);
3049 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3050 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3052 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3053 // NumberToSmi discards numbers that are not exact integers.
3054 __ CallRuntime(Runtime::kNumberToSmi, 1);
3057 // Save the conversion result before the pop instructions below
3058 // have a chance to overwrite it.
3060 __ Move(index_, v0);
3062 // Reload the instance type.
3063 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3064 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3065 call_helper.AfterCall(masm);
3066 // If index is still not a smi, it must be out of range.
3067 __ JumpIfNotSmi(index_, index_out_of_range_);
3068 // Otherwise, return to the fast path.
3069 __ Branch(&got_smi_index_);
3071 // Call runtime. We get here when the receiver is a string and the
3072 // index is a number, but the code of getting the actual character
3073 // is too complex (e.g., when the string needs to be flattened).
3074 __ bind(&call_runtime_);
3075 call_helper.BeforeCall(masm);
3076 __ sll(index_, index_, kSmiTagSize);
3077 __ Push(object_, index_);
3078 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
3080 __ Move(result_, v0);
3082 call_helper.AfterCall(masm);
3085 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3089 // -------------------------------------------------------------------------
3090 // StringCharFromCodeGenerator
3092 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3093 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3095 DCHECK(!t0.is(result_));
3096 DCHECK(!t0.is(code_));
3098 STATIC_ASSERT(kSmiTag == 0);
3099 STATIC_ASSERT(kSmiShiftSize == 0);
3100 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
3103 Operand(kSmiTagMask |
3104 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3105 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
3107 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3108 // At this point code register contains smi tagged one-byte char code.
3109 STATIC_ASSERT(kSmiTag == 0);
3110 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
3111 __ Addu(result_, result_, t0);
3112 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3113 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3114 __ Branch(&slow_case_, eq, result_, Operand(t0));
3119 void StringCharFromCodeGenerator::GenerateSlow(
3120 MacroAssembler* masm,
3121 const RuntimeCallHelper& call_helper) {
3122 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3124 __ bind(&slow_case_);
3125 call_helper.BeforeCall(masm);
3127 __ CallRuntime(Runtime::kCharFromCode, 1);
3128 __ Move(result_, v0);
3130 call_helper.AfterCall(masm);
3133 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3137 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
3140 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3145 String::Encoding encoding) {
3146 if (FLAG_debug_code) {
3147 // Check that destination is word aligned.
3148 __ And(scratch, dest, Operand(kPointerAlignmentMask));
3150 kDestinationOfCopyNotAligned,
3155 // Assumes word reads and writes are little endian.
3156 // Nothing to do for zero characters.
3159 if (encoding == String::TWO_BYTE_ENCODING) {
3160 __ Addu(count, count, count);
3163 Register limit = count; // Read until dest equals this.
3164 __ Addu(limit, dest, Operand(count));
3166 Label loop_entry, loop;
3167 // Copy bytes from src to dest until dest hits limit.
3168 __ Branch(&loop_entry);
3170 __ lbu(scratch, MemOperand(src));
3171 __ Addu(src, src, Operand(1));
3172 __ sb(scratch, MemOperand(dest));
3173 __ Addu(dest, dest, Operand(1));
3174 __ bind(&loop_entry);
3175 __ Branch(&loop, lt, dest, Operand(limit));
3181 void SubStringStub::Generate(MacroAssembler* masm) {
3183 // Stack frame on entry.
3184 // ra: return address
3189 // This stub is called from the native-call %_SubString(...), so
3190 // nothing can be assumed about the arguments. It is tested that:
3191 // "string" is a sequential string,
3192 // both "from" and "to" are smis, and
3193 // 0 <= from <= to <= string.length.
3194 // If any of these assumptions fail, we call the runtime system.
3196 const int kToOffset = 0 * kPointerSize;
3197 const int kFromOffset = 1 * kPointerSize;
3198 const int kStringOffset = 2 * kPointerSize;
3200 __ lw(a2, MemOperand(sp, kToOffset));
3201 __ lw(a3, MemOperand(sp, kFromOffset));
3202 STATIC_ASSERT(kFromOffset == kToOffset + 4);
3203 STATIC_ASSERT(kSmiTag == 0);
3204 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3206 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
3207 // safe in this case.
3208 __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
3209 __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
3210 // Both a2 and a3 are untagged integers.
3212 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
3214 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
3215 __ Subu(a2, a2, a3);
3217 // Make sure first argument is a string.
3218 __ lw(v0, MemOperand(sp, kStringOffset));
3219 __ JumpIfSmi(v0, &runtime);
3220 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
3221 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3222 __ And(t0, a1, Operand(kIsNotStringMask));
3224 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3227 __ Branch(&single_char, eq, a2, Operand(1));
3229 // Short-cut for the case of trivial substring.
3231 // v0: original string
3232 // a2: result string length
3233 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
3235 // Return original string.
3236 __ Branch(&return_v0, eq, a2, Operand(t0));
3237 // Longer than original string's length or negative: unsafe arguments.
3238 __ Branch(&runtime, hi, a2, Operand(t0));
3239 // Shorter than original string's length: an actual substring.
3241 // Deal with different string types: update the index if necessary
3242 // and put the underlying string into t1.
3243 // v0: original string
3244 // a1: instance type
3246 // a3: from index (untagged)
3247 Label underlying_unpacked, sliced_string, seq_or_external_string;
3248 // If the string is not indirect, it can only be sequential or external.
3249 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3250 STATIC_ASSERT(kIsIndirectStringMask != 0);
3251 __ And(t0, a1, Operand(kIsIndirectStringMask));
3252 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
3253 // t0 is used as a scratch register and can be overwritten in either case.
3254 __ And(t0, a1, Operand(kSlicedNotConsMask));
3255 __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
3256 // Cons string. Check whether it is flat, then fetch first part.
3257 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
3258 __ LoadRoot(t0, Heap::kempty_stringRootIndex);
3259 __ Branch(&runtime, ne, t1, Operand(t0));
3260 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
3261 // Update instance type.
3262 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3263 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3264 __ jmp(&underlying_unpacked);
3266 __ bind(&sliced_string);
3267 // Sliced string. Fetch parent and correct start index by offset.
3268 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3269 __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3270 __ sra(t0, t0, 1); // Add offset to index.
3271 __ Addu(a3, a3, t0);
3272 // Update instance type.
3273 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3274 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3275 __ jmp(&underlying_unpacked);
3277 __ bind(&seq_or_external_string);
3278 // Sequential or external string. Just move string to the expected register.
3281 __ bind(&underlying_unpacked);
3283 if (FLAG_string_slices) {
3285 // t1: underlying subject string
3286 // a1: instance type of underlying subject string
3288 // a3: adjusted start index (untagged)
3289 // Short slice. Copy instead of slicing.
3290 __ Branch(©_routine, lt, a2, Operand(SlicedString::kMinLength));
3291 // Allocate new sliced string. At this point we do not reload the instance
3292 // type including the string encoding because we simply rely on the info
3293 // provided by the original string. It does not matter if the original
3294 // string's encoding is wrong because we always have to recheck encoding of
3295 // the newly created string's parent anyways due to externalized strings.
3296 Label two_byte_slice, set_slice_header;
3297 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3298 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3299 __ And(t0, a1, Operand(kStringEncodingMask));
3300 __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
3301 __ AllocateOneByteSlicedString(v0, a2, t2, t3, &runtime);
3302 __ jmp(&set_slice_header);
3303 __ bind(&two_byte_slice);
3304 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
3305 __ bind(&set_slice_header);
3307 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3308 __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3311 __ bind(©_routine);
3314 // t1: underlying subject string
3315 // a1: instance type of underlying subject string
3317 // a3: adjusted start index (untagged)
3318 Label two_byte_sequential, sequential_string, allocate_result;
3319 STATIC_ASSERT(kExternalStringTag != 0);
3320 STATIC_ASSERT(kSeqStringTag == 0);
3321 __ And(t0, a1, Operand(kExternalStringTag));
3322 __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
3324 // Handle external string.
3325 // Rule out short external strings.
3326 STATIC_ASSERT(kShortExternalStringTag != 0);
3327 __ And(t0, a1, Operand(kShortExternalStringTag));
3328 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3329 __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
3330 // t1 already points to the first character of underlying string.
3331 __ jmp(&allocate_result);
3333 __ bind(&sequential_string);
3334 // Locate first character of underlying subject string.
3335 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3336 __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3338 __ bind(&allocate_result);
3339 // Sequential acii string. Allocate the result.
3340 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3341 __ And(t0, a1, Operand(kStringEncodingMask));
3342 __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
3344 // Allocate and copy the resulting ASCII string.
3345 __ AllocateOneByteString(v0, a2, t0, t2, t3, &runtime);
3347 // Locate first character of substring to copy.
3348 __ Addu(t1, t1, a3);
3350 // Locate first character of result.
3351 __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3353 // v0: result string
3354 // a1: first character of result string
3355 // a2: result string length
3356 // t1: first character of substring to copy
3357 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3358 StringHelper::GenerateCopyCharacters(
3359 masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
3362 // Allocate and copy the resulting two-byte string.
3363 __ bind(&two_byte_sequential);
3364 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
3366 // Locate first character of substring to copy.
3367 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3369 __ Addu(t1, t1, t0);
3370 // Locate first character of result.
3371 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3373 // v0: result string.
3374 // a1: first character of result.
3375 // a2: result length.
3376 // t1: first character of substring to copy.
3377 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3378 StringHelper::GenerateCopyCharacters(
3379 masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
3381 __ bind(&return_v0);
3382 Counters* counters = isolate()->counters();
3383 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
3386 // Just jump to runtime to create the sub string.
3388 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3390 __ bind(&single_char);
3391 // v0: original string
3392 // a1: instance type
3394 // a3: from index (untagged)
3396 StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
3397 STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
3398 generator.GenerateFast(masm);
3400 generator.SkipSlow(masm, &runtime);
3404 void ToNumberStub::Generate(MacroAssembler* masm) {
3405 // The ToNumber stub takes one argument in a0.
3407 __ JumpIfNotSmi(a0, ¬_smi);
3408 __ Ret(USE_DELAY_SLOT);
3412 Label not_heap_number;
3413 __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
3414 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3416 // a1: instance type.
3417 __ Branch(¬_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
3418 __ Ret(USE_DELAY_SLOT);
3420 __ bind(¬_heap_number);
3422 Label not_string, slow_string;
3423 __ Branch(¬_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
3424 // Check if string has a cached array index.
3425 __ lw(a2, FieldMemOperand(a0, String::kHashFieldOffset));
3426 __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
3427 __ Branch(&slow_string, ne, at, Operand(zero_reg));
3428 __ IndexFromHash(a2, a0);
3429 __ Ret(USE_DELAY_SLOT);
3431 __ bind(&slow_string);
3432 __ push(a0); // Push argument.
3433 __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
3434 __ bind(¬_string);
3437 __ Branch(¬_oddball, ne, a1, Operand(ODDBALL_TYPE));
3438 __ Ret(USE_DELAY_SLOT);
3439 __ lw(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));
3440 __ bind(¬_oddball);
3442 __ push(a0); // Push argument.
3443 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
3447 void StringHelper::GenerateFlatOneByteStringEquals(
3448 MacroAssembler* masm, Register left, Register right, Register scratch1,
3449 Register scratch2, Register scratch3) {
3450 Register length = scratch1;
3453 Label strings_not_equal, check_zero_length;
3454 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
3455 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3456 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
3457 __ bind(&strings_not_equal);
3458 DCHECK(is_int16(NOT_EQUAL));
3459 __ Ret(USE_DELAY_SLOT);
3460 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
3462 // Check if the length is zero.
3463 Label compare_chars;
3464 __ bind(&check_zero_length);
3465 STATIC_ASSERT(kSmiTag == 0);
3466 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
3467 DCHECK(is_int16(EQUAL));
3468 __ Ret(USE_DELAY_SLOT);
3469 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3471 // Compare characters.
3472 __ bind(&compare_chars);
3474 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
3475 v0, &strings_not_equal);
3477 // Characters are equal.
3478 __ Ret(USE_DELAY_SLOT);
3479 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3483 void StringHelper::GenerateCompareFlatOneByteStrings(
3484 MacroAssembler* masm, Register left, Register right, Register scratch1,
3485 Register scratch2, Register scratch3, Register scratch4) {
3486 Label result_not_equal, compare_lengths;
3487 // Find minimum length and length difference.
3488 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
3489 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3490 __ Subu(scratch3, scratch1, Operand(scratch2));
3491 Register length_delta = scratch3;
3492 __ slt(scratch4, scratch2, scratch1);
3493 __ Movn(scratch1, scratch2, scratch4);
3494 Register min_length = scratch1;
3495 STATIC_ASSERT(kSmiTag == 0);
3496 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
3499 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3500 scratch4, v0, &result_not_equal);
3502 // Compare lengths - strings up to min-length are equal.
3503 __ bind(&compare_lengths);
3504 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3505 // Use length_delta as result if it's zero.
3506 __ mov(scratch2, length_delta);
3507 __ mov(scratch4, zero_reg);
3508 __ mov(v0, zero_reg);
3510 __ bind(&result_not_equal);
3511 // Conditionally update the result based either on length_delta or
3512 // the last comparion performed in the loop above.
3514 __ Branch(&ret, eq, scratch2, Operand(scratch4));
3515 __ li(v0, Operand(Smi::FromInt(GREATER)));
3516 __ Branch(&ret, gt, scratch2, Operand(scratch4));
3517 __ li(v0, Operand(Smi::FromInt(LESS)));
3523 void StringHelper::GenerateOneByteCharsCompareLoop(
3524 MacroAssembler* masm, Register left, Register right, Register length,
3525 Register scratch1, Register scratch2, Register scratch3,
3526 Label* chars_not_equal) {
3527 // Change index to run from -length to -1 by adding length to string
3528 // start. This means that loop ends when index reaches zero, which
3529 // doesn't need an additional compare.
3530 __ SmiUntag(length);
3531 __ Addu(scratch1, length,
3532 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3533 __ Addu(left, left, Operand(scratch1));
3534 __ Addu(right, right, Operand(scratch1));
3535 __ Subu(length, zero_reg, length);
3536 Register index = length; // index = -length;
3542 __ Addu(scratch3, left, index);
3543 __ lbu(scratch1, MemOperand(scratch3));
3544 __ Addu(scratch3, right, index);
3545 __ lbu(scratch2, MemOperand(scratch3));
3546 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
3547 __ Addu(index, index, 1);
3548 __ Branch(&loop, ne, index, Operand(zero_reg));
3552 void StringCompareStub::Generate(MacroAssembler* masm) {
3555 Counters* counters = isolate()->counters();
3557 // Stack frame on entry.
3558 // sp[0]: right string
3559 // sp[4]: left string
3560 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
3561 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
3564 __ Branch(¬_same, ne, a0, Operand(a1));
3565 STATIC_ASSERT(EQUAL == 0);
3566 STATIC_ASSERT(kSmiTag == 0);
3567 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3568 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
3573 // Check that both objects are sequential one-byte strings.
3574 __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
3576 // Compare flat ASCII strings natively. Remove arguments from stack first.
3577 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
3578 __ Addu(sp, sp, Operand(2 * kPointerSize));
3579 StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
3582 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3586 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3587 // ----------- S t a t e -------------
3590 // -- ra : return address
3591 // -----------------------------------
3593 // Load a2 with the allocation site. We stick an undefined dummy value here
3594 // and replace it with the real allocation site later when we instantiate this
3595 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3596 __ li(a2, handle(isolate()->heap()->undefined_value()));
3598 // Make sure that we actually patched the allocation site.
3599 if (FLAG_debug_code) {
3600 __ And(at, a2, Operand(kSmiTagMask));
3601 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
3602 __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
3603 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3604 __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
3607 // Tail call into the stub that handles binary operations with allocation
3609 BinaryOpWithAllocationSiteStub stub(isolate(), state());
3610 __ TailCallStub(&stub);
3614 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3615 DCHECK(state() == CompareICState::SMI);
3618 __ JumpIfNotSmi(a2, &miss);
3620 if (GetCondition() == eq) {
3621 // For equality we do not care about the sign of the result.
3622 __ Ret(USE_DELAY_SLOT);
3623 __ Subu(v0, a0, a1);
3625 // Untag before subtracting to avoid handling overflow.
3628 __ Ret(USE_DELAY_SLOT);
3629 __ Subu(v0, a1, a0);
3637 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3638 DCHECK(state() == CompareICState::NUMBER);
3641 Label unordered, maybe_undefined1, maybe_undefined2;
3644 if (left() == CompareICState::SMI) {
3645 __ JumpIfNotSmi(a1, &miss);
3647 if (right() == CompareICState::SMI) {
3648 __ JumpIfNotSmi(a0, &miss);
3651 // Inlining the double comparison and falling back to the general compare
3652 // stub if NaN is involved.
3653 // Load left and right operand.
3654 Label done, left, left_smi, right_smi;
3655 __ JumpIfSmi(a0, &right_smi);
3656 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3658 __ Subu(a2, a0, Operand(kHeapObjectTag));
3659 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
3661 __ bind(&right_smi);
3662 __ SmiUntag(a2, a0); // Can't clobber a0 yet.
3663 FPURegister single_scratch = f6;
3664 __ mtc1(a2, single_scratch);
3665 __ cvt_d_w(f2, single_scratch);
3668 __ JumpIfSmi(a1, &left_smi);
3669 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3671 __ Subu(a2, a1, Operand(kHeapObjectTag));
3672 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
3675 __ SmiUntag(a2, a1); // Can't clobber a1 yet.
3676 single_scratch = f8;
3677 __ mtc1(a2, single_scratch);
3678 __ cvt_d_w(f0, single_scratch);
3682 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
3683 Label fpu_eq, fpu_lt;
3684 // Test if equal, and also handle the unordered/NaN case.
3685 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
3687 // Test if less (unordered case is already handled).
3688 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
3690 // Otherwise it's greater, so just fall thru, and return.
3691 DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
3692 __ Ret(USE_DELAY_SLOT);
3693 __ li(v0, Operand(GREATER));
3696 __ Ret(USE_DELAY_SLOT);
3697 __ li(v0, Operand(EQUAL));
3700 __ Ret(USE_DELAY_SLOT);
3701 __ li(v0, Operand(LESS));
3703 __ bind(&unordered);
3704 __ bind(&generic_stub);
3705 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3706 CompareICState::GENERIC, CompareICState::GENERIC);
3707 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3709 __ bind(&maybe_undefined1);
3710 if (Token::IsOrderedRelationalCompareOp(op())) {
3711 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3712 __ Branch(&miss, ne, a0, Operand(at));
3713 __ JumpIfSmi(a1, &unordered);
3714 __ GetObjectType(a1, a2, a2);
3715 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
3719 __ bind(&maybe_undefined2);
3720 if (Token::IsOrderedRelationalCompareOp(op())) {
3721 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3722 __ Branch(&unordered, eq, a1, Operand(at));
3730 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3731 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3734 // Registers containing left and right operands respectively.
3736 Register right = a0;
3740 // Check that both operands are heap objects.
3741 __ JumpIfEitherSmi(left, right, &miss);
3743 // Check that both operands are internalized strings.
3744 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3745 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3746 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3747 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3748 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3749 __ Or(tmp1, tmp1, Operand(tmp2));
3750 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3751 __ Branch(&miss, ne, at, Operand(zero_reg));
3753 // Make sure a0 is non-zero. At this point input operands are
3754 // guaranteed to be non-zero.
3755 DCHECK(right.is(a0));
3756 STATIC_ASSERT(EQUAL == 0);
3757 STATIC_ASSERT(kSmiTag == 0);
3759 // Internalized strings are compared by identity.
3760 __ Ret(ne, left, Operand(right));
3761 DCHECK(is_int16(EQUAL));
3762 __ Ret(USE_DELAY_SLOT);
3763 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3770 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3771 DCHECK(state() == CompareICState::UNIQUE_NAME);
3772 DCHECK(GetCondition() == eq);
3775 // Registers containing left and right operands respectively.
3777 Register right = a0;
3781 // Check that both operands are heap objects.
3782 __ JumpIfEitherSmi(left, right, &miss);
3784 // Check that both operands are unique names. This leaves the instance
3785 // types loaded in tmp1 and tmp2.
3786 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3787 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3788 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3789 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3791 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
3792 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
3797 // Unique names are compared by identity.
3799 __ Branch(&done, ne, left, Operand(right));
3800 // Make sure a0 is non-zero. At this point input operands are
3801 // guaranteed to be non-zero.
3802 DCHECK(right.is(a0));
3803 STATIC_ASSERT(EQUAL == 0);
3804 STATIC_ASSERT(kSmiTag == 0);
3805 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3814 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3815 DCHECK(state() == CompareICState::STRING);
3818 bool equality = Token::IsEqualityOp(op());
3820 // Registers containing left and right operands respectively.
3822 Register right = a0;
3829 // Check that both operands are heap objects.
3830 __ JumpIfEitherSmi(left, right, &miss);
3832 // Check that both operands are strings. This leaves the instance
3833 // types loaded in tmp1 and tmp2.
3834 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3835 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3836 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3837 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3838 STATIC_ASSERT(kNotStringTag != 0);
3839 __ Or(tmp3, tmp1, tmp2);
3840 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
3841 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
3843 // Fast check for identical strings.
3844 Label left_ne_right;
3845 STATIC_ASSERT(EQUAL == 0);
3846 STATIC_ASSERT(kSmiTag == 0);
3847 __ Branch(&left_ne_right, ne, left, Operand(right));
3848 __ Ret(USE_DELAY_SLOT);
3849 __ mov(v0, zero_reg); // In the delay slot.
3850 __ bind(&left_ne_right);
3852 // Handle not identical strings.
3854 // Check that both strings are internalized strings. If they are, we're done
3855 // because we already know they are not identical. We know they are both
3858 DCHECK(GetCondition() == eq);
3859 STATIC_ASSERT(kInternalizedTag == 0);
3860 __ Or(tmp3, tmp1, Operand(tmp2));
3861 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
3863 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
3864 // Make sure a0 is non-zero. At this point input operands are
3865 // guaranteed to be non-zero.
3866 DCHECK(right.is(a0));
3867 __ Ret(USE_DELAY_SLOT);
3868 __ mov(v0, a0); // In the delay slot.
3869 __ bind(&is_symbol);
3872 // Check that both strings are sequential one-byte.
3874 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
3877 // Compare flat one-byte strings. Returns when done.
3879 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
3882 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
3886 // Handle more complex cases in runtime.
3888 __ Push(left, right);
3890 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3892 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3900 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3901 DCHECK(state() == CompareICState::OBJECT);
3903 __ And(a2, a1, Operand(a0));
3904 __ JumpIfSmi(a2, &miss);
3906 __ GetObjectType(a0, a2, a2);
3907 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3908 __ GetObjectType(a1, a2, a2);
3909 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3911 DCHECK(GetCondition() == eq);
3912 __ Ret(USE_DELAY_SLOT);
3913 __ subu(v0, a0, a1);
3920 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3922 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
3924 __ JumpIfSmi(a2, &miss);
3925 __ GetWeakValue(t0, cell);
3926 __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
3927 __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
3928 __ Branch(&miss, ne, a2, Operand(t0));
3929 __ Branch(&miss, ne, a3, Operand(t0));
3931 __ Ret(USE_DELAY_SLOT);
3932 __ subu(v0, a0, a1);
3939 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3941 // Call the runtime system in a fresh internal frame.
3942 ExternalReference miss =
3943 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3944 FrameScope scope(masm, StackFrame::INTERNAL);
3946 __ Push(ra, a1, a0);
3947 __ li(t0, Operand(Smi::FromInt(op())));
3948 __ addiu(sp, sp, -kPointerSize);
3949 __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
3950 __ sw(t0, MemOperand(sp)); // In the delay slot.
3951 // Compute the entry point of the rewritten stub.
3952 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
3953 // Restore registers.
3960 void DirectCEntryStub::Generate(MacroAssembler* masm) {
3961 // Make place for arguments to fit C calling convention. Most of the callers
3962 // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
3963 // so they handle stack restoring and we don't have to do that here.
3964 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
3965 // kCArgsSlotsSize stack space after the call.
3966 __ Subu(sp, sp, Operand(kCArgsSlotsSize));
3967 // Place the return address on the stack, making the call
3968 // GC safe. The RegExp backend also relies on this.
3969 __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
3970 __ Call(t9); // Call the C++ function.
3971 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
3973 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3974 // In case of an error the return address may point to a memory area
3975 // filled with kZapValue by the GC.
3976 // Dereference the address and check for this.
3977 __ lw(t0, MemOperand(t9));
3978 __ Assert(ne, kReceivedInvalidReturnAddress, t0,
3979 Operand(reinterpret_cast<uint32_t>(kZapValue)));
3985 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
3988 reinterpret_cast<intptr_t>(GetCode().location());
3989 __ Move(t9, target);
3990 __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
3995 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3999 Register properties,
4001 Register scratch0) {
4002 DCHECK(name->IsUniqueName());
4003 // If names of slots in range from 1 to kProbes - 1 for the hash value are
4004 // not equal to the name and kProbes-th slot is not used (its name is the
4005 // undefined value), it guarantees the hash table doesn't contain the
4006 // property. It's true even if some slots represent deleted properties
4007 // (their names are the hole value).
4008 for (int i = 0; i < kInlinedProbes; i++) {
4009 // scratch0 points to properties hash.
4010 // Compute the masked index: (hash + i + i * i) & mask.
4011 Register index = scratch0;
4012 // Capacity is smi 2^n.
4013 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
4014 __ Subu(index, index, Operand(1));
4015 __ And(index, index, Operand(
4016 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
4018 // Scale the index by multiplying by the entry size.
4019 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
4020 __ sll(at, index, 1);
4021 __ Addu(index, index, at);
4023 Register entity_name = scratch0;
4024 // Having undefined at this place means the name is not contained.
4025 DCHECK_EQ(kSmiTagSize, 1);
4026 Register tmp = properties;
4027 __ sll(scratch0, index, 1);
4028 __ Addu(tmp, properties, scratch0);
4029 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4031 DCHECK(!tmp.is(entity_name));
4032 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
4033 __ Branch(done, eq, entity_name, Operand(tmp));
4035 // Load the hole ready for use below:
4036 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
4038 // Stop if found the property.
4039 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
4042 __ Branch(&good, eq, entity_name, Operand(tmp));
4044 // Check if the entry name is not a unique name.
4045 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4047 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4048 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
4051 // Restore the properties.
4053 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4056 const int spill_mask =
4057 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
4058 a2.bit() | a1.bit() | a0.bit() | v0.bit());
4060 __ MultiPush(spill_mask);
4061 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4062 __ li(a1, Operand(Handle<Name>(name)));
4063 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
4066 __ MultiPop(spill_mask);
4068 __ Branch(done, eq, at, Operand(zero_reg));
4069 __ Branch(miss, ne, at, Operand(zero_reg));
4073 // Probe the name dictionary in the |elements| register. Jump to the
4074 // |done| label if a property with the given name is found. Jump to
4075 // the |miss| label otherwise.
4076 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
4077 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4083 Register scratch2) {
4084 DCHECK(!elements.is(scratch1));
4085 DCHECK(!elements.is(scratch2));
4086 DCHECK(!name.is(scratch1));
4087 DCHECK(!name.is(scratch2));
4089 __ AssertName(name);
4091 // Compute the capacity mask.
4092 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
4093 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
4094 __ Subu(scratch1, scratch1, Operand(1));
4096 // Generate an unrolled loop that performs a few probes before
4097 // giving up. Measurements done on Gmail indicate that 2 probes
4098 // cover ~93% of loads from dictionaries.
4099 for (int i = 0; i < kInlinedProbes; i++) {
4100 // Compute the masked index: (hash + i + i * i) & mask.
4101 __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4103 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4104 // the hash in a separate instruction. The value hash + i + i * i is right
4105 // shifted in the following and instruction.
4106 DCHECK(NameDictionary::GetProbeOffset(i) <
4107 1 << (32 - Name::kHashFieldOffset));
4108 __ Addu(scratch2, scratch2, Operand(
4109 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4111 __ srl(scratch2, scratch2, Name::kHashShift);
4112 __ And(scratch2, scratch1, scratch2);
4114 // Scale the index by multiplying by the element size.
4115 DCHECK(NameDictionary::kEntrySize == 3);
4116 // scratch2 = scratch2 * 3.
4118 __ sll(at, scratch2, 1);
4119 __ Addu(scratch2, scratch2, at);
4121 // Check if the key is identical to the name.
4122 __ sll(at, scratch2, 2);
4123 __ Addu(scratch2, elements, at);
4124 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
4125 __ Branch(done, eq, name, Operand(at));
4128 const int spill_mask =
4129 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
4130 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
4131 ~(scratch1.bit() | scratch2.bit());
4133 __ MultiPush(spill_mask);
4135 DCHECK(!elements.is(a1));
4137 __ Move(a0, elements);
4139 __ Move(a0, elements);
4142 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4144 __ mov(scratch2, a2);
4146 __ MultiPop(spill_mask);
4148 __ Branch(done, ne, at, Operand(zero_reg));
4149 __ Branch(miss, eq, at, Operand(zero_reg));
4153 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4154 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4155 // we cannot call anything that could cause a GC from this stub.
4157 // result: NameDictionary to probe
4159 // dictionary: NameDictionary to probe.
4160 // index: will hold an index of entry if lookup is successful.
4161 // might alias with result_.
4163 // result_ is zero if lookup failed, non zero otherwise.
4165 Register result = v0;
4166 Register dictionary = a0;
4168 Register index = a2;
4171 Register undefined = t1;
4172 Register entry_key = t2;
4174 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4176 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
4177 __ sra(mask, mask, kSmiTagSize);
4178 __ Subu(mask, mask, Operand(1));
4180 __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4182 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4184 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4185 // Compute the masked index: (hash + i + i * i) & mask.
4186 // Capacity is smi 2^n.
4188 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4189 // the hash in a separate instruction. The value hash + i + i * i is right
4190 // shifted in the following and instruction.
4191 DCHECK(NameDictionary::GetProbeOffset(i) <
4192 1 << (32 - Name::kHashFieldOffset));
4193 __ Addu(index, hash, Operand(
4194 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4196 __ mov(index, hash);
4198 __ srl(index, index, Name::kHashShift);
4199 __ And(index, mask, index);
4201 // Scale the index by multiplying by the entry size.
4202 DCHECK(NameDictionary::kEntrySize == 3);
4205 __ sll(index, index, 1);
4206 __ Addu(index, index, at);
4209 DCHECK_EQ(kSmiTagSize, 1);
4210 __ sll(index, index, 2);
4211 __ Addu(index, index, dictionary);
4212 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
4214 // Having undefined at this place means the name is not contained.
4215 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined));
4217 // Stop if found the property.
4218 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
4220 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
4221 // Check if the entry name is not a unique name.
4222 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4224 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4225 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
4229 __ bind(&maybe_in_dictionary);
4230 // If we are doing negative lookup then probing failure should be
4231 // treated as a lookup success. For positive lookup probing failure
4232 // should be treated as lookup failure.
4233 if (mode() == POSITIVE_LOOKUP) {
4234 __ Ret(USE_DELAY_SLOT);
4235 __ mov(result, zero_reg);
4238 __ bind(&in_dictionary);
4239 __ Ret(USE_DELAY_SLOT);
4242 __ bind(¬_in_dictionary);
4243 __ Ret(USE_DELAY_SLOT);
4244 __ mov(result, zero_reg);
4248 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4250 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4252 // Hydrogen code stubs need stub2 at snapshot time.
4253 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4258 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
4259 // the value has just been written into the object, now this stub makes sure
4260 // we keep the GC informed. The word in the object where the value has been
4261 // written is in the address register.
4262 void RecordWriteStub::Generate(MacroAssembler* masm) {
4263 Label skip_to_incremental_noncompacting;
4264 Label skip_to_incremental_compacting;
4266 // The first two branch+nop instructions are generated with labels so as to
4267 // get the offset fixed up correctly by the bind(Label*) call. We patch it
4268 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
4269 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
4270 // incremental heap marking.
4271 // See RecordWriteStub::Patch for details.
4272 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
4274 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
4277 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4278 __ RememberedSetHelper(object(),
4281 save_fp_regs_mode(),
4282 MacroAssembler::kReturnAtEnd);
4286 __ bind(&skip_to_incremental_noncompacting);
4287 GenerateIncremental(masm, INCREMENTAL);
4289 __ bind(&skip_to_incremental_compacting);
4290 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4292 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4293 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4295 PatchBranchIntoNop(masm, 0);
4296 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
4300 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4303 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4304 Label dont_need_remembered_set;
4306 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4307 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4309 &dont_need_remembered_set);
4311 __ CheckPageFlag(regs_.object(),
4313 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4315 &dont_need_remembered_set);
4317 // First notify the incremental marker if necessary, then update the
4319 CheckNeedsToInformIncrementalMarker(
4320 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4321 InformIncrementalMarker(masm);
4322 regs_.Restore(masm);
4323 __ RememberedSetHelper(object(),
4326 save_fp_regs_mode(),
4327 MacroAssembler::kReturnAtEnd);
4329 __ bind(&dont_need_remembered_set);
4332 CheckNeedsToInformIncrementalMarker(
4333 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4334 InformIncrementalMarker(masm);
4335 regs_.Restore(masm);
4340 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4341 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
4342 int argument_count = 3;
4343 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4345 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4346 DCHECK(!address.is(regs_.object()));
4347 DCHECK(!address.is(a0));
4348 __ Move(address, regs_.address());
4349 __ Move(a0, regs_.object());
4350 __ Move(a1, address);
4351 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4353 AllowExternalCallThatCantCauseGC scope(masm);
4355 ExternalReference::incremental_marking_record_write_function(isolate()),
4357 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
4361 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4362 MacroAssembler* masm,
4363 OnNoNeedToInformIncrementalMarker on_no_need,
4366 Label need_incremental;
4367 Label need_incremental_pop_scratch;
4369 __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4370 __ lw(regs_.scratch1(),
4371 MemOperand(regs_.scratch0(),
4372 MemoryChunk::kWriteBarrierCounterOffset));
4373 __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
4374 __ sw(regs_.scratch1(),
4375 MemOperand(regs_.scratch0(),
4376 MemoryChunk::kWriteBarrierCounterOffset));
4377 __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
4379 // Let's look at the color of the object: If it is not black we don't have
4380 // to inform the incremental marker.
4381 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4383 regs_.Restore(masm);
4384 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4385 __ RememberedSetHelper(object(),
4388 save_fp_regs_mode(),
4389 MacroAssembler::kReturnAtEnd);
4396 // Get the value from the slot.
4397 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4399 if (mode == INCREMENTAL_COMPACTION) {
4400 Label ensure_not_white;
4402 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4403 regs_.scratch1(), // Scratch.
4404 MemoryChunk::kEvacuationCandidateMask,
4408 __ CheckPageFlag(regs_.object(),
4409 regs_.scratch1(), // Scratch.
4410 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4414 __ bind(&ensure_not_white);
4417 // We need extra registers for this, so we push the object and the address
4418 // register temporarily.
4419 __ Push(regs_.object(), regs_.address());
4420 __ EnsureNotWhite(regs_.scratch0(), // The value.
4421 regs_.scratch1(), // Scratch.
4422 regs_.object(), // Scratch.
4423 regs_.address(), // Scratch.
4424 &need_incremental_pop_scratch);
4425 __ Pop(regs_.object(), regs_.address());
4427 regs_.Restore(masm);
4428 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4429 __ RememberedSetHelper(object(),
4432 save_fp_regs_mode(),
4433 MacroAssembler::kReturnAtEnd);
4438 __ bind(&need_incremental_pop_scratch);
4439 __ Pop(regs_.object(), regs_.address());
4441 __ bind(&need_incremental);
4443 // Fall through when we need to inform the incremental marker.
4447 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4448 // ----------- S t a t e -------------
4449 // -- a0 : element value to store
4450 // -- a3 : element index as smi
4451 // -- sp[0] : array literal index in function as smi
4452 // -- sp[4] : array literal
4453 // clobbers a1, a2, t0
4454 // -----------------------------------
4457 Label double_elements;
4459 Label slow_elements;
4460 Label fast_elements;
4462 // Get array literal index, array literal and its map.
4463 __ lw(t0, MemOperand(sp, 0 * kPointerSize));
4464 __ lw(a1, MemOperand(sp, 1 * kPointerSize));
4465 __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
4467 __ CheckFastElements(a2, t1, &double_elements);
4468 // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
4469 __ JumpIfSmi(a0, &smi_element);
4470 __ CheckFastSmiElements(a2, t1, &fast_elements);
4472 // Store into the array literal requires a elements transition. Call into
4474 __ bind(&slow_elements);
4476 __ Push(a1, a3, a0);
4477 __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4478 __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
4480 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4482 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4483 __ bind(&fast_elements);
4484 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4485 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4486 __ Addu(t2, t1, t2);
4487 __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4488 __ sw(a0, MemOperand(t2, 0));
4489 // Update the write barrier for the array store.
4490 __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
4491 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4492 __ Ret(USE_DELAY_SLOT);
4495 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4496 // and value is Smi.
4497 __ bind(&smi_element);
4498 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4499 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4500 __ Addu(t2, t1, t2);
4501 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
4502 __ Ret(USE_DELAY_SLOT);
4505 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
4506 __ bind(&double_elements);
4507 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4508 __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
4509 __ Ret(USE_DELAY_SLOT);
4514 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4515 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4516 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4517 int parameter_count_offset =
4518 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4519 __ lw(a1, MemOperand(fp, parameter_count_offset));
4520 if (function_mode() == JS_FUNCTION_STUB_MODE) {
4521 __ Addu(a1, a1, Operand(1));
4523 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4524 __ sll(a1, a1, kPointerSizeLog2);
4525 __ Ret(USE_DELAY_SLOT);
4526 __ Addu(sp, sp, a1);
4530 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4531 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4532 VectorLoadStub stub(isolate(), state());
4533 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4537 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4538 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4539 VectorKeyedLoadStub stub(isolate());
4540 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4544 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
4545 EmitLoadTypeFeedbackVector(masm, a2);
4546 CallICStub stub(isolate(), state());
4547 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4551 void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
4552 EmitLoadTypeFeedbackVector(masm, a2);
4553 CallIC_ArrayStub stub(isolate(), state());
4554 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4558 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4559 if (masm->isolate()->function_entry_hook() != NULL) {
4560 ProfileEntryHookStub stub(masm->isolate());
4568 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4569 // The entry hook is a "push ra" instruction, followed by a call.
4570 // Note: on MIPS "push" is 2 instruction
4571 const int32_t kReturnAddressDistanceFromFunctionStart =
4572 Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
4574 // This should contain all kJSCallerSaved registers.
4575 const RegList kSavedRegs =
4576 kJSCallerSaved | // Caller saved registers.
4577 s5.bit(); // Saved stack pointer.
4579 // We also save ra, so the count here is one higher than the mask indicates.
4580 const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
4582 // Save all caller-save registers as this may be called from anywhere.
4583 __ MultiPush(kSavedRegs | ra.bit());
4585 // Compute the function's address for the first argument.
4586 __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
4588 // The caller's return address is above the saved temporaries.
4589 // Grab that for the second argument to the hook.
4590 __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
4592 // Align the stack if necessary.
4593 int frame_alignment = masm->ActivationFrameAlignment();
4594 if (frame_alignment > kPointerSize) {
4596 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4597 __ And(sp, sp, Operand(-frame_alignment));
4599 __ Subu(sp, sp, kCArgsSlotsSize);
4600 #if defined(V8_HOST_ARCH_MIPS)
4601 int32_t entry_hook =
4602 reinterpret_cast<int32_t>(isolate()->function_entry_hook());
4603 __ li(t9, Operand(entry_hook));
4605 // Under the simulator we need to indirect the entry hook through a
4606 // trampoline function at a known address.
4607 // It additionally takes an isolate as a third parameter.
4608 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4610 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4611 __ li(t9, Operand(ExternalReference(&dispatcher,
4612 ExternalReference::BUILTIN_CALL,
4615 // Call C function through t9 to conform ABI for PIC.
4618 // Restore the stack pointer if needed.
4619 if (frame_alignment > kPointerSize) {
4622 __ Addu(sp, sp, kCArgsSlotsSize);
4625 // Also pop ra to get Ret(0).
4626 __ MultiPop(kSavedRegs | ra.bit());
4632 static void CreateArrayDispatch(MacroAssembler* masm,
4633 AllocationSiteOverrideMode mode) {
4634 if (mode == DISABLE_ALLOCATION_SITES) {
4635 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4636 __ TailCallStub(&stub);
4637 } else if (mode == DONT_OVERRIDE) {
4638 int last_index = GetSequenceIndexFromFastElementsKind(
4639 TERMINAL_FAST_ELEMENTS_KIND);
4640 for (int i = 0; i <= last_index; ++i) {
4641 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4642 T stub(masm->isolate(), kind);
4643 __ TailCallStub(&stub, eq, a3, Operand(kind));
4646 // If we reached this point there is a problem.
4647 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4654 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4655 AllocationSiteOverrideMode mode) {
4656 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4657 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4658 // a0 - number of arguments
4659 // a1 - constructor?
4660 // sp[0] - last argument
4661 Label normal_sequence;
4662 if (mode == DONT_OVERRIDE) {
4663 DCHECK(FAST_SMI_ELEMENTS == 0);
4664 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
4665 DCHECK(FAST_ELEMENTS == 2);
4666 DCHECK(FAST_HOLEY_ELEMENTS == 3);
4667 DCHECK(FAST_DOUBLE_ELEMENTS == 4);
4668 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4670 // is the low bit set? If so, we are holey and that is good.
4671 __ And(at, a3, Operand(1));
4672 __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
4675 // look at the first argument
4676 __ lw(t1, MemOperand(sp, 0));
4677 __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
4679 if (mode == DISABLE_ALLOCATION_SITES) {
4680 ElementsKind initial = GetInitialFastElementsKind();
4681 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4683 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4685 DISABLE_ALLOCATION_SITES);
4686 __ TailCallStub(&stub_holey);
4688 __ bind(&normal_sequence);
4689 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4691 DISABLE_ALLOCATION_SITES);
4692 __ TailCallStub(&stub);
4693 } else if (mode == DONT_OVERRIDE) {
4694 // We are going to create a holey array, but our kind is non-holey.
4695 // Fix kind and retry (only if we have an allocation site in the slot).
4696 __ Addu(a3, a3, Operand(1));
4698 if (FLAG_debug_code) {
4699 __ lw(t1, FieldMemOperand(a2, 0));
4700 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
4701 __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
4704 // Save the resulting elements kind in type info. We can't just store a3
4705 // in the AllocationSite::transition_info field because elements kind is
4706 // restricted to a portion of the field...upper bits need to be left alone.
4707 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4708 __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4709 __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
4710 __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4713 __ bind(&normal_sequence);
4714 int last_index = GetSequenceIndexFromFastElementsKind(
4715 TERMINAL_FAST_ELEMENTS_KIND);
4716 for (int i = 0; i <= last_index; ++i) {
4717 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4718 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4719 __ TailCallStub(&stub, eq, a3, Operand(kind));
4722 // If we reached this point there is a problem.
4723 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4731 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4732 int to_index = GetSequenceIndexFromFastElementsKind(
4733 TERMINAL_FAST_ELEMENTS_KIND);
4734 for (int i = 0; i <= to_index; ++i) {
4735 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4736 T stub(isolate, kind);
4738 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4739 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4746 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4747 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4749 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4751 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4756 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4758 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4759 for (int i = 0; i < 2; i++) {
4760 // For internal arrays we only need a few things.
4761 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4763 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4765 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4771 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4772 MacroAssembler* masm,
4773 AllocationSiteOverrideMode mode) {
4774 if (argument_count() == ANY) {
4775 Label not_zero_case, not_one_case;
4777 __ Branch(¬_zero_case, ne, at, Operand(zero_reg));
4778 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4780 __ bind(¬_zero_case);
4781 __ Branch(¬_one_case, gt, a0, Operand(1));
4782 CreateArrayDispatchOneArgument(masm, mode);
4784 __ bind(¬_one_case);
4785 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4786 } else if (argument_count() == NONE) {
4787 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4788 } else if (argument_count() == ONE) {
4789 CreateArrayDispatchOneArgument(masm, mode);
4790 } else if (argument_count() == MORE_THAN_ONE) {
4791 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4798 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4799 // ----------- S t a t e -------------
4800 // -- a0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
4801 // -- a1 : constructor
4802 // -- a2 : AllocationSite or undefined
4803 // -- a3 : Original constructor
4804 // -- sp[0] : last argument
4805 // -----------------------------------
4807 if (FLAG_debug_code) {
4808 // The array construct code is only set for the global and natives
4809 // builtin Array functions which always have maps.
4811 // Initial map for the builtin Array function should be a map.
4812 __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
4813 // Will both indicate a NULL and a Smi.
4815 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
4816 at, Operand(zero_reg));
4817 __ GetObjectType(t0, t0, t1);
4818 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
4819 t1, Operand(MAP_TYPE));
4821 // We should either have undefined in a2 or a valid AllocationSite
4822 __ AssertUndefinedOrAllocationSite(a2, t0);
4826 __ Branch(&subclassing, ne, a1, Operand(a3));
4829 // Get the elements kind and case on that.
4830 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4831 __ Branch(&no_info, eq, a2, Operand(at));
4833 __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4835 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4836 __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
4837 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4840 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4843 __ bind(&subclassing);
4848 switch (argument_count()) {
4851 __ li(at, Operand(2));
4852 __ addu(a0, a0, at);
4855 __ li(a0, Operand(2));
4858 __ li(a0, Operand(3));
4862 __ JumpToExternalReference(
4863 ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
4867 void InternalArrayConstructorStub::GenerateCase(
4868 MacroAssembler* masm, ElementsKind kind) {
4870 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4871 __ TailCallStub(&stub0, lo, a0, Operand(1));
4873 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4874 __ TailCallStub(&stubN, hi, a0, Operand(1));
4876 if (IsFastPackedElementsKind(kind)) {
4877 // We might need to create a holey array
4878 // look at the first argument.
4879 __ lw(at, MemOperand(sp, 0));
4881 InternalArraySingleArgumentConstructorStub
4882 stub1_holey(isolate(), GetHoleyElementsKind(kind));
4883 __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
4886 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4887 __ TailCallStub(&stub1);
4891 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4892 // ----------- S t a t e -------------
4894 // -- a1 : constructor
4895 // -- sp[0] : return address
4896 // -- sp[4] : last argument
4897 // -----------------------------------
4899 if (FLAG_debug_code) {
4900 // The array construct code is only set for the global and natives
4901 // builtin Array functions which always have maps.
4903 // Initial map for the builtin Array function should be a map.
4904 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
4905 // Will both indicate a NULL and a Smi.
4907 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
4908 at, Operand(zero_reg));
4909 __ GetObjectType(a3, a3, t0);
4910 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
4911 t0, Operand(MAP_TYPE));
4914 // Figure out the right elements kind.
4915 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
4917 // Load the map's "bit field 2" into a3. We only need the first byte,
4918 // but the following bit field extraction takes care of that anyway.
4919 __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
4920 // Retrieve elements_kind from bit field 2.
4921 __ DecodeField<Map::ElementsKindBits>(a3);
4923 if (FLAG_debug_code) {
4925 __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
4927 eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
4928 a3, Operand(FAST_HOLEY_ELEMENTS));
4932 Label fast_elements_case;
4933 __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
4934 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4936 __ bind(&fast_elements_case);
4937 GenerateCase(masm, FAST_ELEMENTS);
4941 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
4942 return ref0.address() - ref1.address();
4946 // Calls an API function. Allocates HandleScope, extracts returned value
4947 // from handle and propagates exceptions. Restores context. stack_space
4948 // - space to be unwound on exit (includes the call JS arguments space and
4949 // the additional space allocated for the fast call).
4950 static void CallApiFunctionAndReturn(
4951 MacroAssembler* masm, Register function_address,
4952 ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
4953 MemOperand return_value_operand, MemOperand* context_restore_operand) {
4954 Isolate* isolate = masm->isolate();
4955 ExternalReference next_address =
4956 ExternalReference::handle_scope_next_address(isolate);
4957 const int kNextOffset = 0;
4958 const int kLimitOffset = AddressOffset(
4959 ExternalReference::handle_scope_limit_address(isolate), next_address);
4960 const int kLevelOffset = AddressOffset(
4961 ExternalReference::handle_scope_level_address(isolate), next_address);
4963 DCHECK(function_address.is(a1) || function_address.is(a2));
4965 Label profiler_disabled;
4966 Label end_profiler_check;
4967 __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
4968 __ lb(t9, MemOperand(t9, 0));
4969 __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
4971 // Additional parameter is the address of the actual callback.
4972 __ li(t9, Operand(thunk_ref));
4973 __ jmp(&end_profiler_check);
4975 __ bind(&profiler_disabled);
4976 __ mov(t9, function_address);
4977 __ bind(&end_profiler_check);
4979 // Allocate HandleScope in callee-save registers.
4980 __ li(s3, Operand(next_address));
4981 __ lw(s0, MemOperand(s3, kNextOffset));
4982 __ lw(s1, MemOperand(s3, kLimitOffset));
4983 __ lw(s2, MemOperand(s3, kLevelOffset));
4984 __ Addu(s2, s2, Operand(1));
4985 __ sw(s2, MemOperand(s3, kLevelOffset));
4987 if (FLAG_log_timer_events) {
4988 FrameScope frame(masm, StackFrame::MANUAL);
4989 __ PushSafepointRegisters();
4990 __ PrepareCallCFunction(1, a0);
4991 __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
4992 __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
4994 __ PopSafepointRegisters();
4997 // Native call returns to the DirectCEntry stub which redirects to the
4998 // return address pushed on stack (could have moved after GC).
4999 // DirectCEntry stub itself is generated early and never moves.
5000 DirectCEntryStub stub(isolate);
5001 stub.GenerateCall(masm, t9);
5003 if (FLAG_log_timer_events) {
5004 FrameScope frame(masm, StackFrame::MANUAL);
5005 __ PushSafepointRegisters();
5006 __ PrepareCallCFunction(1, a0);
5007 __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
5008 __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
5010 __ PopSafepointRegisters();
5013 Label promote_scheduled_exception;
5014 Label exception_handled;
5015 Label delete_allocated_handles;
5016 Label leave_exit_frame;
5017 Label return_value_loaded;
5019 // Load value from ReturnValue.
5020 __ lw(v0, return_value_operand);
5021 __ bind(&return_value_loaded);
5023 // No more valid handles (the result handle was the last one). Restore
5024 // previous handle scope.
5025 __ sw(s0, MemOperand(s3, kNextOffset));
5026 if (__ emit_debug_code()) {
5027 __ lw(a1, MemOperand(s3, kLevelOffset));
5028 __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
5030 __ Subu(s2, s2, Operand(1));
5031 __ sw(s2, MemOperand(s3, kLevelOffset));
5032 __ lw(at, MemOperand(s3, kLimitOffset));
5033 __ Branch(&delete_allocated_handles, ne, s1, Operand(at));
5035 // Check if the function scheduled an exception.
5036 __ bind(&leave_exit_frame);
5037 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
5038 __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
5039 __ lw(t1, MemOperand(at));
5040 __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
5041 __ bind(&exception_handled);
5043 bool restore_context = context_restore_operand != NULL;
5044 if (restore_context) {
5045 __ lw(cp, *context_restore_operand);
5047 if (stack_space_offset != kInvalidStackOffset) {
5048 // ExitFrame contains four MIPS argument slots after DirectCEntryStub call
5049 // so this must be accounted for.
5050 __ lw(s0, MemOperand(sp, stack_space_offset + kCArgsSlotsSize));
5052 __ li(s0, Operand(stack_space));
5054 __ LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN,
5055 stack_space_offset != kInvalidStackOffset);
5057 __ bind(&promote_scheduled_exception);
5059 FrameScope frame(masm, StackFrame::INTERNAL);
5060 __ CallExternalReference(
5061 ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
5063 __ jmp(&exception_handled);
5065 // HandleScope limit has changed. Delete allocated extensions.
5066 __ bind(&delete_allocated_handles);
5067 __ sw(s1, MemOperand(s3, kLimitOffset));
5070 __ PrepareCallCFunction(1, s1);
5071 __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
5072 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
5075 __ jmp(&leave_exit_frame);
5079 static void CallApiFunctionStubHelper(MacroAssembler* masm,
5080 const ParameterCount& argc,
5081 bool return_first_arg,
5082 bool call_data_undefined) {
5083 // ----------- S t a t e -------------
5085 // -- t0 : call_data
5087 // -- a1 : api_function_address
5088 // -- a3 : number of arguments if argc is a register
5091 // -- sp[0] : last argument
5093 // -- sp[(argc - 1)* 4] : first argument
5094 // -- sp[argc * 4] : receiver
5095 // -----------------------------------
5097 Register callee = a0;
5098 Register call_data = t0;
5099 Register holder = a2;
5100 Register api_function_address = a1;
5101 Register context = cp;
5103 typedef FunctionCallbackArguments FCA;
5105 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5106 STATIC_ASSERT(FCA::kCalleeIndex == 5);
5107 STATIC_ASSERT(FCA::kDataIndex == 4);
5108 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5109 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5110 STATIC_ASSERT(FCA::kIsolateIndex == 1);
5111 STATIC_ASSERT(FCA::kHolderIndex == 0);
5112 STATIC_ASSERT(FCA::kArgsLength == 7);
5114 DCHECK(argc.is_immediate() || a3.is(argc.reg()));
5116 // Save context, callee and call data.
5117 __ Push(context, callee, call_data);
5118 // Load context from callee.
5119 __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5121 Register scratch = call_data;
5122 if (!call_data_undefined) {
5123 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5125 // Push return value and default return value.
5126 __ Push(scratch, scratch);
5127 __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
5128 // Push isolate and holder.
5129 __ Push(scratch, holder);
5131 // Prepare arguments.
5132 __ mov(scratch, sp);
5134 // Allocate the v8::Arguments structure in the arguments' space since
5135 // it's not controlled by GC.
5136 const int kApiStackSpace = 4;
5138 FrameScope frame_scope(masm, StackFrame::MANUAL);
5139 __ EnterExitFrame(false, kApiStackSpace);
5141 DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
5142 // a0 = FunctionCallbackInfo&
5143 // Arguments is after the return address.
5144 __ Addu(a0, sp, Operand(1 * kPointerSize));
5145 // FunctionCallbackInfo::implicit_args_
5146 __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
5147 if (argc.is_immediate()) {
5148 // FunctionCallbackInfo::values_
5149 __ Addu(at, scratch,
5150 Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
5151 __ sw(at, MemOperand(a0, 1 * kPointerSize));
5152 // FunctionCallbackInfo::length_ = argc
5153 __ li(at, Operand(argc.immediate()));
5154 __ sw(at, MemOperand(a0, 2 * kPointerSize));
5155 // FunctionCallbackInfo::is_construct_call_ = 0
5156 __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
5158 // FunctionCallbackInfo::values_
5159 __ sll(at, argc.reg(), kPointerSizeLog2);
5160 __ Addu(at, at, scratch);
5161 __ Addu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
5162 __ sw(at, MemOperand(a0, 1 * kPointerSize));
5163 // FunctionCallbackInfo::length_ = argc
5164 __ sw(argc.reg(), MemOperand(a0, 2 * kPointerSize));
5165 // FunctionCallbackInfo::is_construct_call_
5166 __ Addu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
5167 __ sll(at, argc.reg(), kPointerSizeLog2);
5168 __ sw(at, MemOperand(a0, 3 * kPointerSize));
5171 ExternalReference thunk_ref =
5172 ExternalReference::invoke_function_callback(masm->isolate());
5174 AllowExternalCallThatCantCauseGC scope(masm);
5175 MemOperand context_restore_operand(
5176 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5177 // Stores return the first js argument.
5178 int return_value_offset = 0;
5179 if (return_first_arg) {
5180 return_value_offset = 2 + FCA::kArgsLength;
5182 return_value_offset = 2 + FCA::kReturnValueOffset;
5184 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5185 int stack_space = 0;
5186 int32_t stack_space_offset = 4 * kPointerSize;
5187 if (argc.is_immediate()) {
5188 stack_space = argc.immediate() + FCA::kArgsLength + 1;
5189 stack_space_offset = kInvalidStackOffset;
5191 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
5192 stack_space_offset, return_value_operand,
5193 &context_restore_operand);
5197 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5198 bool call_data_undefined = this->call_data_undefined();
5199 CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
5200 call_data_undefined);
5204 void CallApiAccessorStub::Generate(MacroAssembler* masm) {
5205 bool is_store = this->is_store();
5206 int argc = this->argc();
5207 bool call_data_undefined = this->call_data_undefined();
5208 CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
5209 call_data_undefined);
5213 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5214 // ----------- S t a t e -------------
5216 // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
5218 // -- a2 : api_function_address
5219 // -----------------------------------
5221 Register api_function_address = ApiGetterDescriptor::function_address();
5222 DCHECK(api_function_address.is(a2));
5224 __ mov(a0, sp); // a0 = Handle<Name>
5225 __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
5227 const int kApiStackSpace = 1;
5228 FrameScope frame_scope(masm, StackFrame::MANUAL);
5229 __ EnterExitFrame(false, kApiStackSpace);
5231 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5232 // a1 (internal::Object** args_) as the data.
5233 __ sw(a1, MemOperand(sp, 1 * kPointerSize));
5234 __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
5236 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5238 ExternalReference thunk_ref =
5239 ExternalReference::invoke_accessor_getter_callback(isolate());
5240 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
5241 kStackUnwindSpace, kInvalidStackOffset,
5242 MemOperand(fp, 6 * kPointerSize), NULL);
5248 } } // namespace v8::internal
5250 #endif // V8_TARGET_ARCH_MIPS