1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #if V8_TARGET_ARCH_MIPS
9 #include "src/base/bits.h"
10 #include "src/bootstrapper.h"
11 #include "src/code-stubs.h"
12 #include "src/codegen.h"
13 #include "src/ic/handler-compiler.h"
14 #include "src/ic/ic.h"
15 #include "src/ic/stub-cache.h"
16 #include "src/isolate.h"
17 #include "src/jsregexp.h"
18 #include "src/regexp-macro-assembler.h"
19 #include "src/runtime/runtime.h"
25 static void InitializeArrayConstructorDescriptor(
26 Isolate* isolate, CodeStubDescriptor* descriptor,
27 int constant_stack_parameter_count) {
28 Address deopt_handler = Runtime::FunctionForId(
29 Runtime::kArrayConstructor)->entry;
31 if (constant_stack_parameter_count == 0) {
32 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
33 JS_FUNCTION_STUB_MODE);
34 } else {
35 descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
36 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
41 static void InitializeInternalArrayConstructorDescriptor(
42 Isolate* isolate, CodeStubDescriptor* descriptor,
43 int constant_stack_parameter_count) {
44 Address deopt_handler = Runtime::FunctionForId(
45 Runtime::kInternalArrayConstructor)->entry;
47 if (constant_stack_parameter_count == 0) {
48 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
49 JS_FUNCTION_STUB_MODE);
50 } else {
51 descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
52 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
57 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
58 CodeStubDescriptor* descriptor) {
59 InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
63 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
64 CodeStubDescriptor* descriptor) {
65 InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
69 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
70 CodeStubDescriptor* descriptor) {
71 InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
75 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
76 CodeStubDescriptor* descriptor) {
77 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
81 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
82 CodeStubDescriptor* descriptor) {
83 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
87 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
88 CodeStubDescriptor* descriptor) {
89 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
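// Note (added commentary): the NArguments variants above pass -1 as
// constant_stack_parameter_count, i.e. a variable number of arguments. That
// is presumably why the else-branch of the initializers uses the overload
// that takes a0 (the dynamic argument count) and PASS_ARGUMENTS, leaving the
// arguments themselves on the stack.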
93 #define __ ACCESS_MASM(masm)
96 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
99 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
105 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
110 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
111 ExternalReference miss) {
112 // Update the static counter each time a new code stub is generated.
113 isolate()->counters()->code_stubs()->Increment();
115 CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
116 int param_count = descriptor.GetEnvironmentParameterCount();
118 // Call the runtime system in a fresh internal frame.
119 FrameScope scope(masm, StackFrame::INTERNAL);
120 DCHECK(param_count == 0 ||
121 a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
122 // Push arguments, adjust sp.
123 __ Subu(sp, sp, Operand(param_count * kPointerSize));
124 for (int i = 0; i < param_count; ++i) {
125 // Store argument to stack.
126 __ sw(descriptor.GetEnvironmentParameterRegister(i),
127 MemOperand(sp, (param_count - 1 - i) * kPointerSize));
129 __ CallExternalReference(miss, param_count);
136 void DoubleToIStub::Generate(MacroAssembler* masm) {
137 Label out_of_range, only_low, negate, done;
138 Register input_reg = source();
139 Register result_reg = destination();
141 int double_offset = offset();
142 // Account for saved regs if input is sp.
143 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
146 GetRegisterThatIsNotOneOf(input_reg, result_reg);
148 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
150 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
151 DoubleRegister double_scratch = kLithiumScratchDouble;
153 __ Push(scratch, scratch2, scratch3);
155 if (!skip_fastpath()) {
156 // Load double input.
157 __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
159 // Clear cumulative exception flags and save the FCSR.
160 __ cfc1(scratch2, FCSR);
161 __ ctc1(zero_reg, FCSR);
163 // Try a conversion to a signed integer.
164 __ Trunc_w_d(double_scratch, double_scratch);
165 // Move the converted value into the result register.
166 __ mfc1(scratch3, double_scratch);
168 // Retrieve and restore the FCSR.
169 __ cfc1(scratch, FCSR);
170 __ ctc1(scratch2, FCSR);
172 // Check for overflow and NaNs.
173 __ And(scratch,
174        scratch,
175        kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
176           | kFCSRInvalidOpFlagMask);
177 // If we had no exceptions then set result_reg and we are done.
179 __ Branch(&error, ne, scratch, Operand(zero_reg));
180 __ Move(result_reg, scratch3);
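// Note (added commentary): the fallback below decodes the IEEE 754 double by
// hand. Bit layout of the 64-bit value: bit 63 is the sign, bits 62..52 hold
// the exponent (biased by 1023), and bits 51..0 hold the mantissa, which has
// an implicit leading 1 for normal numbers.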
185 // Load the double value and perform a manual truncation.
186 Register input_high = scratch2;
187 Register input_low = scratch3;
190 MemOperand(input_reg, double_offset + Register::kMantissaOffset));
192 MemOperand(input_reg, double_offset + Register::kExponentOffset));
194 Label normal_exponent, restore_sign;
195 // Extract the biased exponent in result.
198 HeapNumber::kExponentShift,
199 HeapNumber::kExponentBits);
201 // Check for Infinity and NaNs, which should return 0.
202 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
203 __ Movz(result_reg, zero_reg, scratch);
204 __ Branch(&done, eq, scratch, Operand(zero_reg));
206 // Express exponent as delta to (number of mantissa bits + 31).
209 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
211 // If the delta is strictly positive, all bits would be shifted away,
212 // which means that we can return 0.
213 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
214 __ mov(result_reg, zero_reg);
217 __ bind(&normal_exponent);
218 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
220 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
223 Register sign = result_reg;
225 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
227 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
228 // to check for this specific case.
229 Label high_shift_needed, high_shift_done;
230 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
231 __ mov(input_high, zero_reg);
232 __ Branch(&high_shift_done);
233 __ bind(&high_shift_needed);
235 // Set the implicit 1 before the mantissa part in input_high.
238 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
239 // Shift the mantissa bits to the correct position.
240 // We don't need to clear non-mantissa bits as they will be shifted away.
241 // If they weren't, it would mean that the answer is in the 32bit range.
242 __ sllv(input_high, input_high, scratch);
244 __ bind(&high_shift_done);
246 // Replace the shifted bits with bits from the lower mantissa word.
247 Label pos_shift, shift_done;
249 __ subu(scratch, at, scratch);
250 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
253 __ Subu(scratch, zero_reg, scratch);
254 __ sllv(input_low, input_low, scratch);
255 __ Branch(&shift_done);
258 __ srlv(input_low, input_low, scratch);
260 __ bind(&shift_done);
261 __ Or(input_high, input_high, Operand(input_low));
262 // Restore sign if necessary.
263 __ mov(scratch, sign);
266 __ Subu(result_reg, zero_reg, input_high);
267 __ Movz(result_reg, input_high, scratch);
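// Note (added commentary): Movz(rd, rs, rt) copies rs into rd only when rt is
// zero, so input_high (the positive magnitude) is kept when the saved sign
// bit in scratch is clear, and the negated value computed above is kept
// otherwise.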
271 __ Pop(scratch, scratch2, scratch3);
276 // Handle the case where the lhs and rhs are the same object.
277 // Equality is almost reflexive (everything but NaN), so this is a test
278 // for "identity and not NaN".
279 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
283 Label heap_number, return_equal;
284 Register exp_mask_reg = t5;
286 __ Branch(&not_identical, ne, a0, Operand(a1));
288 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
290 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
291 // so we do the second best thing - test it ourselves.
292 // They are both equal and they are not both Smis, so neither of them is a
293 // Smi. If it's not a heap number, then return equal.
294 if (cc == less || cc == greater) {
295 __ GetObjectType(a0, t4, t4);
296 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
298 __ GetObjectType(a0, t4, t4);
299 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
300 // Comparing JS objects with <=, >= is complicated.
302 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
303 // Normally here we fall through to return_equal, but undefined is
304 // special: (undefined == undefined) == true, but
305 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
306 if (cc == less_equal || cc == greater_equal) {
307 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
308 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
309 __ Branch(&return_equal, ne, a0, Operand(t2));
310 DCHECK(is_int16(GREATER) && is_int16(LESS));
311 __ Ret(USE_DELAY_SLOT);
312 if (cc == le) {
313 // undefined <= undefined should fail.
314 __ li(v0, Operand(GREATER));
315 } else {
316 // undefined >= undefined should fail.
317 __ li(v0, Operand(LESS));
323 __ bind(&return_equal);
324 DCHECK(is_int16(GREATER) && is_int16(LESS));
325 __ Ret(USE_DELAY_SLOT);
326 if (cc == less) {
327 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
328 } else if (cc == greater) {
329 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
330 } else {
331 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
334 // For less and greater we don't have to check for NaN since the result of
335 // x < x is false regardless. For the other conditions we check for NaN below.
337 if (cc != lt && cc != gt) {
338 __ bind(&heap_number);
339 // It is a heap number, so return non-equal if it's NaN and equal otherwise.
342 // The representation of NaN values has all exponent bits (52..62) set,
343 // and not all mantissa bits (0..51) clear.
344 // Read top bits of double representation (second word of value).
345 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
346 // Test that exponent bits are all set.
347 __ And(t3, t2, Operand(exp_mask_reg));
348 // If all bits not set (ne cond), then not a NaN, objects are equal.
349 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
351 // Shift out flag and all exponent bits, retaining only mantissa.
352 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
353 // Or with all low-bits of mantissa.
354 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
355 __ Or(v0, t3, Operand(t2));
356 // For equal we already have the right value in v0: Return zero (equal)
357 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
358 // not (it's a NaN). For <= and >= we need to load v0 with the failing
359 // value if it's a NaN.
361 // All-zero means Infinity means equal.
362 __ Ret(eq, v0, Operand(zero_reg));
363 DCHECK(is_int16(GREATER) && is_int16(LESS));
364 __ Ret(USE_DELAY_SLOT);
365 if (cc == le) {
366 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
367 } else {
368 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
372 // No fall through here.
374 __ bind(&not_identical);
378 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
381 Label* both_loaded_as_doubles,
384 DCHECK((lhs.is(a0) && rhs.is(a1)) ||
385 (lhs.is(a1) && rhs.is(a0)));
388 __ JumpIfSmi(lhs, &lhs_is_smi);
390 // Check whether the non-smi is a heap number.
391 __ GetObjectType(lhs, t4, t4);
393 // If lhs was not a number and rhs was a Smi then strict equality cannot
394 // succeed. Return non-equal (lhs is already not zero).
395 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
398 // Smi compared non-strictly with a non-Smi non-heap-number. Call
400 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
403 // Rhs is a smi, lhs is a number.
404 // Convert smi rhs to double.
405 __ sra(at, rhs, kSmiTagSize);
407 __ cvt_d_w(f14, f14);
408 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
410 // We now have both loaded as doubles.
411 __ jmp(both_loaded_as_doubles);
413 __ bind(&lhs_is_smi);
414 // Lhs is a Smi. Check whether the non-smi is a heap number.
415 __ GetObjectType(rhs, t4, t4);
417 // If lhs was not a number and rhs was a Smi then strict equality cannot
418 // succeed. Return non-equal.
419 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
420 __ li(v0, Operand(1));
422 // Smi compared non-strictly with a non-Smi non-heap-number. Call
424 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
427 // Lhs is a smi, rhs is a number.
428 // Convert smi lhs to double.
429 __ sra(at, lhs, kSmiTagSize);
431 __ cvt_d_w(f12, f12);
432 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
433 // Fall through to both_loaded_as_doubles.
437 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
440 // If either operand is a JS object or an oddball value, then they are
441 // not equal since their pointers are different.
442 // There is no test for undetectability in strict equality.
443 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
444 Label first_non_object;
445 // Get the type of the first operand into a2 and compare it with
446 // FIRST_SPEC_OBJECT_TYPE.
447 __ GetObjectType(lhs, a2, a2);
448 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
451 Label return_not_equal;
452 __ bind(&return_not_equal);
453 __ Ret(USE_DELAY_SLOT);
454 __ li(v0, Operand(1));
456 __ bind(&first_non_object);
457 // Check for oddballs: true, false, null, undefined.
458 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
460 __ GetObjectType(rhs, a3, a3);
461 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
463 // Check for oddballs: true, false, null, undefined.
464 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
466 // Now that we have the types we might as well check for
467 // internalized-internalized.
468 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
469 __ Or(a2, a2, Operand(a3));
470 __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
471 __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
475 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
478 Label* both_loaded_as_doubles,
479 Label* not_heap_numbers,
481 __ GetObjectType(lhs, a3, a2);
482 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
483 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
484 // If first was a heap number & second wasn't, go to slow case.
485 __ Branch(slow, ne, a3, Operand(a2));
487 // Both are heap numbers. Load them up then jump to the code we have
489 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
490 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
492 __ jmp(both_loaded_as_doubles);
496 // Fast negative check for internalized-to-internalized equality.
497 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
500 Label* possible_strings,
501 Label* not_both_strings) {
502 DCHECK((lhs.is(a0) && rhs.is(a1)) ||
503 (lhs.is(a1) && rhs.is(a0)));
505 // a2 is object type of rhs.
507 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
508 __ And(at, a2, Operand(kIsNotStringMask));
509 __ Branch(&object_test, ne, at, Operand(zero_reg));
510 __ And(at, a2, Operand(kIsNotInternalizedMask));
511 __ Branch(possible_strings, ne, at, Operand(zero_reg));
512 __ GetObjectType(rhs, a3, a3);
513 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
514 __ And(at, a3, Operand(kIsNotInternalizedMask));
515 __ Branch(possible_strings, ne, at, Operand(zero_reg));
517 // Both are internalized strings. We already checked they weren't the same
518 // pointer so they are not equal.
519 __ Ret(USE_DELAY_SLOT);
520 __ li(v0, Operand(1)); // Non-zero indicates not equal.
522 __ bind(&object_test);
523 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
524 __ GetObjectType(rhs, a2, a3);
525 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
527 // If both objects are undetectable, they are equal. Otherwise, they
528 // are not equal, since they are different objects and an object is not
529 // equal to undefined.
530 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
531 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
532 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
534 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
535 __ Ret(USE_DELAY_SLOT);
536 __ xori(v0, a0, 1 << Map::kIsUndetectable);
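// Note (added commentary): at this point a0 holds the kIsUndetectable bit
// common to both maps, so the xori above yields 0 (meaning "equal") exactly
// when both objects are undetectable, and a non-zero value otherwise.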
540 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
542 CompareICState::State expected,
545 if (expected == CompareICState::SMI) {
546 __ JumpIfNotSmi(input, fail);
547 } else if (expected == CompareICState::NUMBER) {
548 __ JumpIfSmi(input, &ok);
549 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
552 // We could be strict about internalized/string here, but as long as
553 // hydrogen doesn't care, the stub doesn't have to care either.
558 // On entry a1 and a2 are the values to be compared.
559 // On exit v0 is 0, positive or negative to indicate the result of the comparison.
561 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
564 Condition cc = GetCondition();
567 CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
568 CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
570 Label slow; // Call builtin.
571 Label not_smis, both_loaded_as_doubles;
573 Label not_two_smis, smi_done;
575 __ JumpIfNotSmi(a2, &not_two_smis);
578 __ Ret(USE_DELAY_SLOT);
580 __ bind(&not_two_smis);
582 // NOTICE! This code is only reached after a smi-fast-case check, so
583 // it is certain that at least one operand isn't a smi.
585 // Handle the case where the objects are identical. Either returns the answer
586 // or goes to slow. Only falls through if the objects were not identical.
587 EmitIdenticalObjectComparison(masm, &slow, cc);
589 // If either is a Smi (we know that not both are), then they can only
590 // be strictly equal if the other is a HeapNumber.
591 STATIC_ASSERT(kSmiTag == 0);
592 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
593 __ And(t2, lhs, Operand(rhs));
594 __ JumpIfNotSmi(t2, &not_smis, t0);
595 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
596 // 1) Return the answer.
597 // 2) Go to slow.
598 // 3) Fall through to both_loaded_as_doubles.
599 // 4) Jump to rhs_not_nan.
600 // In cases 3 and 4 we have found out we were dealing with a number-number
601 // comparison and the numbers have been loaded into f12 and f14 as doubles,
602 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
603 EmitSmiNonsmiComparison(masm, lhs, rhs,
604 &both_loaded_as_doubles, &slow, strict());
606 __ bind(&both_loaded_as_doubles);
607 // f12, f14 are the double representations of the left hand side
608 // and the right hand side if we have FPU. Otherwise a2, a3 represent
609 // left hand side and a0, a1 represent right hand side.
611 __ li(t0, Operand(LESS));
612 __ li(t1, Operand(GREATER));
613 __ li(t2, Operand(EQUAL));
615 // Check if either rhs or lhs is NaN.
616 __ BranchF(NULL, &nan, eq, f12, f14);
618 // Check if LESS condition is satisfied. If true, move conditionally
620 if (!IsMipsArchVariant(kMips32r6)) {
621 __ c(OLT, D, f12, f14);
623 // Use the previous check to conditionally store the opposite condition
624 // (GREATER) in v0; if rhs equals lhs, the EQUAL check below corrects this.
627 // Check if EQUAL condition is satisfied. If true, move conditionally
629 __ c(EQ, D, f12, f14);
633 __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
634 __ mov(v0, t0); // Return LESS as result.
636 __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
637 __ mov(v0, t2); // Return EQUAL as result.
639 __ mov(v0, t1); // Return GREATER as result.
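// Note (added commentary): pre-r6 MIPS FPU compares (c.cond.fmt above) set a
// condition flag in the FCSR that conditional-move instructions then consume,
// while MIPS32r6 removed that condition-flag model, which is presumably why
// the r6 path uses BranchF-based compare-and-branch sequences instead.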
646 // NaN comparisons always fail.
647 // Load whatever we need in v0 to make the comparison fail.
648 DCHECK(is_int16(GREATER) && is_int16(LESS));
649 __ Ret(USE_DELAY_SLOT);
650 if (cc == lt || cc == le) {
651 __ li(v0, Operand(GREATER));
652 } else {
653 __ li(v0, Operand(LESS));
658 // At this point we know we are dealing with two different objects,
659 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
661 // This returns non-equal for some object types, or falls through if it
663 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
666 Label check_for_internalized_strings;
667 Label flat_string_check;
668 // Check for heap-number-heap-number comparison. Can jump to slow case,
669 // or load both doubles and jump to the code that handles
670 // that case. If the inputs are not doubles then jumps to
671 // check_for_internalized_strings.
672 // In this case a2 will contain the type of lhs_.
673 EmitCheckForTwoHeapNumbers(masm,
676 &both_loaded_as_doubles,
677 &check_for_internalized_strings,
680 __ bind(&check_for_internalized_strings);
681 if (cc == eq && !strict()) {
682 // Returns an answer for two internalized strings or two
683 // detectable objects.
684 // Otherwise jumps to string case or not both strings case.
685 // Assumes that a2 is the type of lhs_ on entry.
686 EmitCheckForInternalizedStringsOrObjects(
687 masm, lhs, rhs, &flat_string_check, &slow);
690 // Check for both being sequential one-byte strings,
691 // and inline if that is the case.
692 __ bind(&flat_string_check);
694 __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
696 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
699 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
701 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
704 // Never falls through to here.
707 // Prepare for call to builtin. Push object pointers, a0 (lhs) first, then a1 (rhs).
710 // Figure out which native to call and set up the arguments.
711 Builtins::JavaScript native;
712 if (cc == eq) {
713 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
714 } else {
715 native = Builtins::COMPARE;
716 int ncr; // NaN compare result.
717 if (cc == lt || cc == le) {
718 ncr = GREATER;
719 } else {
720 DCHECK(cc == gt || cc == ge); // Remaining cases.
721 ncr = LESS;
722 }
723 __ li(a0, Operand(Smi::FromInt(ncr)));
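// Note (added commentary): ncr is handed to the COMPARE builtin as the result
// to use for unordered (NaN) comparisons; passing GREATER for < and <= and
// LESS for > and >= guarantees that any relational comparison involving NaN
// evaluates to false.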
727 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
728 // tagged as a small integer.
729 __ InvokeBuiltin(native, JUMP_FUNCTION);
736 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
739 __ PushSafepointRegisters();
744 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
747 __ PopSafepointRegisters();
752 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
753 // We don't allow a GC during a store buffer overflow so there is no need to
754 // store the registers in any particular way, but we do have to store and
756 __ MultiPush(kJSCallerSaved | ra.bit());
757 if (save_doubles()) {
758 __ MultiPushFPU(kCallerSavedFPU);
760 const int argument_count = 1;
761 const int fp_argument_count = 0;
762 const Register scratch = a1;
764 AllowExternalCallThatCantCauseGC scope(masm);
765 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
766 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
768 ExternalReference::store_buffer_overflow_function(isolate()),
770 if (save_doubles()) {
771 __ MultiPopFPU(kCallerSavedFPU);
774 __ MultiPop(kJSCallerSaved | ra.bit());
779 void MathPowStub::Generate(MacroAssembler* masm) {
780 const Register base = a1;
781 const Register exponent = MathPowTaggedDescriptor::exponent();
782 DCHECK(exponent.is(a2));
783 const Register heapnumbermap = t1;
784 const Register heapnumber = v0;
785 const DoubleRegister double_base = f2;
786 const DoubleRegister double_exponent = f4;
787 const DoubleRegister double_result = f0;
788 const DoubleRegister double_scratch = f6;
789 const FPURegister single_scratch = f8;
790 const Register scratch = t5;
791 const Register scratch2 = t3;
793 Label call_runtime, done, int_exponent;
794 if (exponent_type() == ON_STACK) {
795 Label base_is_smi, unpack_exponent;
796 // The exponent and base are supplied as arguments on the stack.
797 // This can only happen if the stub is called from non-optimized code.
798 // Load input parameters from stack to double registers.
799 __ lw(base, MemOperand(sp, 1 * kPointerSize));
800 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
802 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
804 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
805 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
806 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
808 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
809 __ jmp(&unpack_exponent);
811 __ bind(&base_is_smi);
812 __ mtc1(scratch, single_scratch);
813 __ cvt_d_w(double_base, single_scratch);
814 __ bind(&unpack_exponent);
816 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
818 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
819 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
820 __ ldc1(double_exponent,
821 FieldMemOperand(exponent, HeapNumber::kValueOffset));
822 } else if (exponent_type() == TAGGED) {
823 // Base is already in double_base.
824 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
826 __ ldc1(double_exponent,
827 FieldMemOperand(exponent, HeapNumber::kValueOffset));
830 if (exponent_type() != INTEGER) {
831 Label int_exponent_convert;
832 // Detect integer exponents stored as double.
833 __ EmitFPUTruncate(kRoundToMinusInf,
839 kCheckForInexactConversion);
840 // scratch2 == 0 means there was no conversion error.
841 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
843 if (exponent_type() == ON_STACK) {
844 // Detect square root case. Crankshaft detects constant +/-0.5 at
845 // compile time and uses DoMathPowHalf instead. We then skip this check
846 // for non-constant cases of +/-0.5 as these hardly occur.
849 __ Move(double_scratch, 0.5);
850 __ BranchF(USE_DELAY_SLOT,
856 // double_scratch can be overwritten in the delay slot.
857 // Calculates square root of base. Check for the special case of
858 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
859 __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
860 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
861 __ neg_d(double_result, double_scratch);
863 // Add +0 to convert -0 to +0.
864 __ add_d(double_scratch, double_base, kDoubleRegZero);
865 __ sqrt_d(double_result, double_scratch);
868 __ bind(&not_plus_half);
869 __ Move(double_scratch, -0.5);
870 __ BranchF(USE_DELAY_SLOT,
876 // double_scratch can be overwritten in the delay slot.
877 // Calculates square root of base. Check for the special case of
878 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
879 __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
880 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
881 __ Move(double_result, kDoubleRegZero);
883 // Add +0 to convert -0 to +0.
884 __ add_d(double_scratch, double_base, kDoubleRegZero);
885 __ Move(double_result, 1.);
886 __ sqrt_d(double_scratch, double_scratch);
887 __ div_d(double_result, double_result, double_scratch);
893 AllowExternalCallThatCantCauseGC scope(masm);
894 __ PrepareCallCFunction(0, 2, scratch2);
895 __ MovToFloatParameters(double_base, double_exponent);
897 ExternalReference::power_double_double_function(isolate()),
901 __ MovFromFloatResult(double_result);
904 __ bind(&int_exponent_convert);
907 // Calculate power with integer exponent.
908 __ bind(&int_exponent);
910 // Get two copies of exponent in the registers scratch and exponent.
911 if (exponent_type() == INTEGER) {
912 __ mov(scratch, exponent);
914 // Exponent has previously been stored into scratch as untagged integer.
915 __ mov(exponent, scratch);
918 __ mov_d(double_scratch, double_base); // Back up base.
919 __ Move(double_result, 1.0);
921 // Get absolute value of exponent.
922 Label positive_exponent;
923 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
924 __ Subu(scratch, zero_reg, scratch);
925 __ bind(&positive_exponent);
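// Note (added commentary): the loop below is binary exponentiation
// (square-and-multiply): whenever the low bit of the exponent is set the
// result is multiplied by the current base, then the base is squared and the
// exponent shifted right, so base^|exponent| takes O(log exponent) multiplies.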
927 Label while_true, no_carry, loop_end;
928 __ bind(&while_true);
930 __ And(scratch2, scratch, 1);
932 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
933 __ mul_d(double_result, double_result, double_scratch);
936 __ sra(scratch, scratch, 1);
938 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
939 __ mul_d(double_scratch, double_scratch, double_scratch);
941 __ Branch(&while_true);
945 __ Branch(&done, ge, exponent, Operand(zero_reg));
946 __ Move(double_scratch, 1.0);
947 __ div_d(double_result, double_scratch, double_result);
948 // Test whether result is zero. Bail out to check for subnormal result.
949 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
950 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
952 // double_exponent may not contain the exponent value if the input was a
953 // smi. We set it to the exponent value before bailing out.
954 __ mtc1(exponent, single_scratch);
955 __ cvt_d_w(double_exponent, single_scratch);
957 // Returning or bailing out.
958 Counters* counters = isolate()->counters();
959 if (exponent_type() == ON_STACK) {
960 // The arguments are still on the stack.
961 __ bind(&call_runtime);
962 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
964 // The stub is called from non-optimized code, which expects the result
965 // as heap number in exponent.
967 __ AllocateHeapNumber(
968 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
969 __ sdc1(double_result,
970 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
971 DCHECK(heapnumber.is(v0));
972 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
977 AllowExternalCallThatCantCauseGC scope(masm);
978 __ PrepareCallCFunction(0, 2, scratch);
979 __ MovToFloatParameters(double_base, double_exponent);
981 ExternalReference::power_double_double_function(isolate()),
985 __ MovFromFloatResult(double_result);
988 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
994 bool CEntryStub::NeedsImmovableCode() {
999 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1000 CEntryStub::GenerateAheadOfTime(isolate);
1001 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1002 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1003 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1004 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1005 CreateWeakCellStub::GenerateAheadOfTime(isolate);
1006 BinaryOpICStub::GenerateAheadOfTime(isolate);
1007 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
1008 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
1009 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1013 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1014 StoreRegistersStateStub stub(isolate);
1019 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1020 RestoreRegistersStateStub stub(isolate);
1025 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1026 // Generate if not already in cache.
1027 SaveFPRegsMode mode = kSaveFPRegs;
1028 CEntryStub(isolate, 1, mode).GetCode();
1029 StoreBufferOverflowStub(isolate, mode).GetCode();
1030 isolate->set_fp_stubs_generated(true);
1034 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1035 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1040 void CEntryStub::Generate(MacroAssembler* masm) {
1041 // Called from JavaScript; parameters are on stack as if calling JS function
1042 // a0: number of arguments including receiver
1043 // a1: pointer to builtin function
1044 // fp: frame pointer (restored after C call)
1045 // sp: stack pointer (restored as callee's sp after C call)
1046 // cp: current context (C callee-saved)
1048 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1050 // Compute the argv pointer in a callee-saved register.
1051 __ sll(s1, a0, kPointerSizeLog2);
1052 __ Addu(s1, sp, s1);
1053 __ Subu(s1, s1, kPointerSize);
1055 // Enter the exit frame that transitions from JavaScript to C++.
1056 FrameScope scope(masm, StackFrame::MANUAL);
1057 __ EnterExitFrame(save_doubles());
1059 // s0: number of arguments including receiver (C callee-saved)
1060 // s1: pointer to first argument (C callee-saved)
1061 // s2: pointer to builtin function (C callee-saved)
1063 // Prepare arguments for C routine.
1067 // a1 = argv (set in the delay slot after find_ra below).
1069 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1070 // also need to reserve the 4 argument slots on the stack.
1072 __ AssertStackIsAligned();
1074 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1076 // To let the GC traverse the return address of the exit frames, we need to
1077 // know where the return address is. The CEntryStub is unmovable, so
1078 // we can store the address on the stack to be able to find it again and
1079 // we never have to restore it, because it will not change.
1080 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1081 // This branch-and-link sequence is needed to find the current PC on mips,
1082 // saved to the ra register.
1083 // Use masm-> here instead of the double-underscore macro since extra
1084 // coverage code can interfere with the proper calculation of ra.
1086 masm->bal(&find_ra); // bal exposes branch delay slot.
1088 masm->bind(&find_ra);
1090 // Adjust the value in ra to point to the correct return location, 2nd
1091 // instruction past the real call into C code (the jalr(t9)), and push it.
1092 // This is the return address of the exit frame.
1093 const int kNumInstructionsToJump = 5;
1094 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
1095 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
1096 // Stack space reservation moved to the branch delay slot below.
1097 // Stack is still aligned.
1099 // Call the C routine.
1100 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
1102 // Set up sp in the delay slot.
1103 masm->addiu(sp, sp, -kCArgsSlotsSize);
1104 // Make sure the stored 'ra' points to this position.
1105 DCHECK_EQ(kNumInstructionsToJump,
1106 masm->InstructionsGeneratedSince(&find_ra));
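// Note (added commentary): MIPS has no direct way to read the PC, so the bal
// above deposits the address of find_ra in ra; advancing ra by
// kNumInstructionsToJump instruction words makes the value stored on the
// stack equal to the real return address of the jalr, which is what the GC
// expects to find in the exit frame.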
1110 // Runtime functions should not return 'the hole'. Allowing it to escape may
1111 // lead to crashes in the IC code later.
1112 if (FLAG_debug_code) {
1114 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1115 __ Branch(&okay, ne, v0, Operand(t0));
1116 __ stop("The hole escaped");
1120 // Check result for exception sentinel.
1121 Label exception_returned;
1122 __ LoadRoot(t0, Heap::kExceptionRootIndex);
1123 __ Branch(&exception_returned, eq, t0, Operand(v0));
1125 // Check that there is no pending exception, otherwise we
1126 // should have returned the exception sentinel.
1127 if (FLAG_debug_code) {
1129 ExternalReference pending_exception_address(
1130 Isolate::kPendingExceptionAddress, isolate());
1131 __ li(a2, Operand(pending_exception_address));
1132 __ lw(a2, MemOperand(a2));
1133 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1134 // Cannot use check here as it attempts to generate call into runtime.
1135 __ Branch(&okay, eq, t0, Operand(a2));
1136 __ stop("Unexpected pending exception");
1140 // Exit C frame and return.
1142 // sp: stack pointer
1143 // fp: frame pointer
1144 // s0: still holds argc (callee-saved).
1145 __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
1147 // Handling of exception.
1148 __ bind(&exception_returned);
1150 ExternalReference pending_handler_context_address(
1151 Isolate::kPendingHandlerContextAddress, isolate());
1152 ExternalReference pending_handler_code_address(
1153 Isolate::kPendingHandlerCodeAddress, isolate());
1154 ExternalReference pending_handler_offset_address(
1155 Isolate::kPendingHandlerOffsetAddress, isolate());
1156 ExternalReference pending_handler_fp_address(
1157 Isolate::kPendingHandlerFPAddress, isolate());
1158 ExternalReference pending_handler_sp_address(
1159 Isolate::kPendingHandlerSPAddress, isolate());
1161 // Ask the runtime for help to determine the handler. This will set v0 to
1162 // contain the current pending exception; don't clobber it.
1163 ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
1165 FrameScope scope(masm, StackFrame::MANUAL);
1166 __ PrepareCallCFunction(3, 0, a0);
1167 __ mov(a0, zero_reg);
1168 __ mov(a1, zero_reg);
1169 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1170 __ CallCFunction(find_handler, 3);
1173 // Retrieve the handler context, SP and FP.
1174 __ li(cp, Operand(pending_handler_context_address));
1175 __ lw(cp, MemOperand(cp));
1176 __ li(sp, Operand(pending_handler_sp_address));
1177 __ lw(sp, MemOperand(sp));
1178 __ li(fp, Operand(pending_handler_fp_address));
1179 __ lw(fp, MemOperand(fp));
1181 // If the handler is a JS frame, restore the context to the frame. Note that
1182 // the context will be set to (cp == 0) for non-JS frames.
1184 __ Branch(&zero, eq, cp, Operand(zero_reg));
1185 __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1188 // Compute the handler entry address and jump to it.
1189 __ li(a1, Operand(pending_handler_code_address));
1190 __ lw(a1, MemOperand(a1));
1191 __ li(a2, Operand(pending_handler_offset_address));
1192 __ lw(a2, MemOperand(a2));
1193 __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
1194 __ Addu(t9, a1, a2);
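// Note (added commentary): a1 was advanced past the Code object header, so it
// points at the first instruction of the handler's code; adding the handler
// offset in a2 gives the absolute entry address in t9, the register V8 and
// the MIPS PIC convention use for indirect jump/call targets.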
1199 void JSEntryStub::Generate(MacroAssembler* masm) {
1200 Label invoke, handler_entry, exit;
1201 Isolate* isolate = masm->isolate();
1204 // a0: entry address
1213 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1215 // Save callee saved registers on the stack.
1216 __ MultiPush(kCalleeSaved | ra.bit());
1218 // Save callee-saved FPU registers.
1219 __ MultiPushFPU(kCalleeSavedFPU);
1220 // Set up the reserved register for 0.0.
1221 __ Move(kDoubleRegZero, 0.0);
1224 // Load argv in s0 register.
1225 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1226 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
1228 __ InitializeRootRegister();
1229 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
1231 // We build an EntryFrame.
1232 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1233 int marker = type();
1234 __ li(t2, Operand(Smi::FromInt(marker)));
1235 __ li(t1, Operand(Smi::FromInt(marker)));
1236 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1238 __ lw(t0, MemOperand(t0));
1239 __ Push(t3, t2, t1, t0);
1240 // Set up frame pointer for the frame to be pushed.
1241 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
1244 // a0: entry_address
1246 // a2: receiver_pointer
1252 // function slot | entry frame
1254 // bad fp (0xff...f) |
1255 // callee saved registers + ra
1259 // If this is the outermost JS call, set js_entry_sp value.
1260 Label non_outermost_js;
1261 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1262 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1263 __ lw(t2, MemOperand(t1));
1264 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
1265 __ sw(fp, MemOperand(t1));
1266 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1269 __ nop(); // Branch delay slot nop.
1270 __ bind(&non_outermost_js);
1271 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1275 // Jump to a faked try block that does the invoke, with a faked catch
1276 // block that sets the pending exception.
1278 __ bind(&handler_entry);
1279 handler_offset_ = handler_entry.pos();
1280 // Caught exception: Store result (exception) in the pending exception
1281 // field in the JSEnv and return a failure sentinel. Coming in here the
1282 // fp will be invalid because the PushStackHandler below sets it to 0 to
1283 // signal the existence of the JSEntry frame.
1284 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1286 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
1287 __ LoadRoot(v0, Heap::kExceptionRootIndex);
1288 __ b(&exit); // b exposes branch delay slot.
1289 __ nop(); // Branch delay slot nop.
1291 // Invoke: Link this frame into the handler chain.
1293 __ PushStackHandler();
1294 // If an exception not caught by another handler occurs, this handler
1295 // returns control to the code after the bal(&invoke) above, which
1296 // restores all kCalleeSaved registers (including cp and fp) to their
1297 // saved values before returning a failure to C.
1299 // Clear any pending exceptions.
1300 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
1301 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1303 __ sw(t1, MemOperand(t0));
1305 // Invoke the function by calling through JS entry trampoline builtin.
1306 // Notice that we cannot store a reference to the trampoline code directly in
1307 // this stub, because runtime stubs are not traversed when doing GC.
1310 // a0: entry_address
1312 // a2: receiver_pointer
1319 // callee saved registers + ra
1323 if (type() == StackFrame::ENTRY_CONSTRUCT) {
1324 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1326 __ li(t0, Operand(construct_entry));
1328 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1329 __ li(t0, Operand(entry));
1331 __ lw(t9, MemOperand(t0)); // Deref address.
1333 // Call JSEntryTrampoline.
1334 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1337 // Unlink this frame from the handler chain.
1338 __ PopStackHandler();
1340 __ bind(&exit); // v0 holds result
1341 // Check if the current stack frame is marked as the outermost JS frame.
1342 Label non_outermost_js_2;
1344 __ Branch(&non_outermost_js_2,
1347 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1348 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1349 __ sw(zero_reg, MemOperand(t1));
1350 __ bind(&non_outermost_js_2);
1352 // Restore the top frame descriptors from the stack.
1354 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1356 __ sw(t1, MemOperand(t0));
1358 // Reset the stack to the callee saved registers.
1359 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
1361 // Restore callee-saved fpu registers.
1362 __ MultiPopFPU(kCalleeSavedFPU);
1364 // Restore callee saved registers from the stack.
1365 __ MultiPop(kCalleeSaved | ra.bit());
1371 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
1372 // Return address is in ra.
1375 Register receiver = LoadDescriptor::ReceiverRegister();
1376 Register index = LoadDescriptor::NameRegister();
1377 Register scratch = t1;
1378 Register result = v0;
1379 DCHECK(!scratch.is(receiver) && !scratch.is(index));
1380 DCHECK(!FLAG_vector_ics ||
1381 !scratch.is(VectorLoadICDescriptor::VectorRegister()));
1383 StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
1384 &miss, // When not a string.
1385 &miss, // When not a number.
1386 &miss, // When index out of range.
1387 STRING_INDEX_IS_ARRAY_INDEX,
1388 RECEIVER_IS_STRING);
1389 char_at_generator.GenerateFast(masm);
1392 StubRuntimeCallHelper call_helper;
1393 char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
1396 PropertyAccessCompiler::TailCallBuiltin(
1397 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1401 // Uses registers a0 to t0.
1402 // Expected input (depending on whether args are in registers or on the stack):
1403 // * object: a0 or at sp + 1 * kPointerSize.
1404 // * function: a1 or at sp.
1406 // An inlined call site may have been generated before calling this stub.
1407 // In this case the offset to the inline site to patch is passed on the stack,
1408 // in the safepoint slot for register t0.
1409 void InstanceofStub::Generate(MacroAssembler* masm) {
1410 // Call site inlining and patching implies arguments in registers.
1411 DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1413 // Fixed register usage throughout the stub:
1414 const Register object = a0; // Object (lhs).
1415 Register map = a3; // Map of the object.
1416 const Register function = a1; // Function (rhs).
1417 const Register prototype = t0; // Prototype of the function.
1418 const Register inline_site = t5;
1419 const Register scratch = a2;
1421 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
1423 Label slow, loop, is_instance, is_not_instance, not_js_object;
1425 if (!HasArgsInRegisters()) {
1426 __ lw(object, MemOperand(sp, 1 * kPointerSize));
1427 __ lw(function, MemOperand(sp, 0));
1430 // Check that the left hand is a JS object and load map.
1431 __ JumpIfSmi(object, &not_js_object);
1432 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
1434 // If there is a call site cache, don't look in the global cache, but do the
1435 // real lookup and update the call site cache.
1436 if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
1438 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
1439 __ Branch(&miss, ne, function, Operand(at));
1440 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
1441 __ Branch(&miss, ne, map, Operand(at));
1442 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1443 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1448 // Get the prototype of the function.
1449 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1451 // Check that the function prototype is a JS object.
1452 __ JumpIfSmi(prototype, &slow);
1453 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1455 // Update the global instanceof or call site inlined cache with the current
1456 // map and function. The cached answer will be set when it is known below.
1457 if (!HasCallSiteInlineCheck()) {
1458 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1459 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1461 DCHECK(HasArgsInRegisters());
1462 // Patch the (relocated) inlined map check.
1464 // The offset was stored in t0 safepoint slot.
1465 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1466 __ LoadFromSafepointRegisterSlot(scratch, t0);
1467 __ Subu(inline_site, ra, scratch);
1468 // Get the map location in scratch and patch it.
1469 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
1470 __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
1473 // Register mapping: a3 is object map and t0 is function prototype.
1474 // Get prototype of object into a2.
1475 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1477 // We don't need map any more. Use it as a scratch register.
1478 Register scratch2 = map;
1481 // Loop through the prototype chain looking for the function prototype.
1482 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1484 __ Branch(&is_instance, eq, scratch, Operand(prototype));
1485 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
1486 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1487 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1490 __ bind(&is_instance);
1491 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
1492 if (!HasCallSiteInlineCheck()) {
1493 __ mov(v0, zero_reg);
1494 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1495 if (ReturnTrueFalseObject()) {
1496 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1499 // Patch the call site to return true.
1500 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1501 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1502 // Get the boolean result location in scratch and patch it.
1503 __ PatchRelocatedValue(inline_site, scratch, v0);
1505 if (!ReturnTrueFalseObject()) {
1506 __ mov(v0, zero_reg);
1509 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1511 __ bind(&is_not_instance);
1512 if (!HasCallSiteInlineCheck()) {
1513 __ li(v0, Operand(Smi::FromInt(1)));
1514 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1515 if (ReturnTrueFalseObject()) {
1516 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1519 // Patch the call site to return false.
1520 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1521 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1522 // Get the boolean result location in scratch and patch it.
1523 __ PatchRelocatedValue(inline_site, scratch, v0);
1525 if (!ReturnTrueFalseObject()) {
1526 __ li(v0, Operand(Smi::FromInt(1)));
1530 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1532 Label object_not_null, object_not_null_or_smi;
1533 __ bind(&not_js_object);
1534 // Before the null, smi and string value checks, check that the rhs is a
1535 // function, since a non-function rhs must throw an exception.
1536 __ JumpIfSmi(function, &slow);
1537 __ GetObjectType(function, scratch2, scratch);
1538 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
1540 // Null is not instance of anything.
1541 __ Branch(&object_not_null, ne, object,
1542 Operand(isolate()->factory()->null_value()));
1543 if (ReturnTrueFalseObject()) {
1544 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1546 __ li(v0, Operand(Smi::FromInt(1)));
1548 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1550 __ bind(&object_not_null);
1551 // Smi values are not instances of anything.
1552 __ JumpIfNotSmi(object, &object_not_null_or_smi);
1553 if (ReturnTrueFalseObject()) {
1554 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1556 __ li(v0, Operand(Smi::FromInt(1)));
1558 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1560 __ bind(&object_not_null_or_smi);
1561 // String values are not instances of anything.
1562 __ IsObjectJSStringType(object, scratch, &slow);
1563 if (ReturnTrueFalseObject()) {
1564 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1566 __ li(v0, Operand(Smi::FromInt(1)));
1568 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1570 // Slow-case. Tail call builtin.
1572 if (!ReturnTrueFalseObject()) {
1573 if (HasArgsInRegisters()) {
1576 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
1579 FrameScope scope(masm, StackFrame::INTERNAL);
1581 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1584 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1585 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
1586 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1587 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1592 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1594 Register receiver = LoadDescriptor::ReceiverRegister();
1595 // Ensure that the vector and slot registers won't be clobbered before
1596 // calling the miss handler.
1597 DCHECK(!FLAG_vector_ics ||
1598 !AreAliased(t0, t1, VectorLoadICDescriptor::VectorRegister(),
1599 VectorLoadICDescriptor::SlotRegister()));
1601 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, t0,
1604 PropertyAccessCompiler::TailCallBuiltin(
1605 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1609 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1610 CHECK(!has_new_target());
1611 // The displacement is the offset of the last parameter (if any)
1612 // relative to the frame pointer.
1613 const int kDisplacement =
1614 StandardFrameConstants::kCallerSPOffset - kPointerSize;
1615 DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
1616 DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
1618 // Check that the key is a smi.
1620 __ JumpIfNotSmi(a1, &slow);
1622 // Check if the calling frame is an arguments adaptor frame.
1624 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1625 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1629 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1631 // Check index (a1) against the formal parameter count limit passed in
1632 // register a0. Use an unsigned comparison to get the negative check for free.
1634 __ Branch(&slow, hs, a1, Operand(a0));
1636 // Read the argument from the stack and return it.
1637 __ subu(a3, a0, a1);
1638 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
1639 __ Addu(a3, fp, Operand(t3));
1640 __ Ret(USE_DELAY_SLOT);
1641 __ lw(v0, MemOperand(a3, kDisplacement));
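// Note (added commentary): with USE_DELAY_SLOT the instruction emitted right
// after Ret lands in the branch delay slot of the jr ra, so the lw above
// still executes and v0 carries the loaded argument back to the caller.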
1643 // Arguments adaptor case: Check index (a1) against actual arguments
1644 // limit found in the arguments adaptor frame. Use unsigned
1645 // comparison to get negative check for free.
1647 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1648 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
1650 // Read the argument from the adaptor frame and return it.
1651 __ subu(a3, a0, a1);
1652 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
1653 __ Addu(a3, a2, Operand(t3));
1654 __ Ret(USE_DELAY_SLOT);
1655 __ lw(v0, MemOperand(a3, kDisplacement));
1657 // Slow-case: Handle non-smi or out-of-bounds access to arguments
1658 // by calling the runtime system.
1661 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1665 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1666 // sp[0] : number of parameters
1667 // sp[4] : receiver displacement
1670 CHECK(!has_new_target());
1672 // Check if the calling frame is an arguments adaptor frame.
1674 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1675 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
1679 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1681 // Patch the arguments.length and the parameters pointer in the current frame.
1682 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1683 __ sw(a2, MemOperand(sp, 0 * kPointerSize));
1685 __ Addu(a3, a3, Operand(t3));
1686 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
1687 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1690 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1694 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1696 // sp[0] : number of parameters (tagged)
1697 // sp[4] : address of receiver argument
1699 // Registers used over whole function:
1700 // t2 : allocated object (tagged)
1701 // t5 : mapped parameter count (tagged)
1703 CHECK(!has_new_target());
1705 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
1706 // a1 = parameter count (tagged)
1708 // Check if the calling frame is an arguments adaptor frame.
1710 Label adaptor_frame, try_allocate;
1711 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1712 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
1713 __ Branch(&adaptor_frame,
1716 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1718 // No adaptor, parameter count = argument count.
1720 __ b(&try_allocate);
1721 __ nop(); // Branch delay slot nop.
1723 // We have an adaptor frame. Patch the parameters pointer.
1724 __ bind(&adaptor_frame);
1725 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1727 __ Addu(a3, a3, Operand(t6));
1728 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1729 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1731 // a1 = parameter count (tagged)
1732 // a2 = argument count (tagged)
1733 // Compute the mapped parameter count = min(a1, a2) in a1.
1735 __ Branch(&skip_min, lt, a1, Operand(a2));
1739 __ bind(&try_allocate);
1741 // Compute the sizes of backing store, parameter map, and arguments object.
1742 // 1. Parameter map, has 2 extra words containing context and backing store.
1743 const int kParameterMapHeaderSize =
1744 FixedArray::kHeaderSize + 2 * kPointerSize;
1745 // If there are no mapped parameters, we do not need the parameter_map.
1746 Label param_map_size;
1747 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
1748 __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
1749 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
1751 __ addiu(t5, t5, kParameterMapHeaderSize);
1752 __ bind(&param_map_size);
1754 // 2. Backing store.
1756 __ Addu(t5, t5, Operand(t6));
1757 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
1759 // 3. Arguments object.
1760 __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
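// Note (added commentary): t5 now holds the combined byte size of the
// (possibly empty) parameter map, the backing-store FixedArray and the
// arguments JSObject, so the single allocation below reserves space for all
// three at once.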
1762 // Do the allocation of all three objects in one go.
1763 __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
1765 // v0 = address of new object(s) (tagged)
1766 // a2 = argument count (smi-tagged)
1767 // Get the arguments boilerplate from the current native context into t0.
1768 const int kNormalOffset =
1769 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
1770 const int kAliasedOffset =
1771 Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
1773 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1774 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
1775 Label skip2_ne, skip2_eq;
1776 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
1777 __ lw(t0, MemOperand(t0, kNormalOffset));
1780 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
1781 __ lw(t0, MemOperand(t0, kAliasedOffset));
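// a1 == 0 means there are no mapped parameters, so the plain sloppy arguments
// map is used; otherwise the aliased (mapped) arguments map is loaded instead.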
1784 // v0 = address of new object (tagged)
1785 // a1 = mapped parameter count (tagged)
1786 // a2 = argument count (smi-tagged)
1787 // t0 = address of arguments map (tagged)
1788 __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
1789 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
1790 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
1791 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
1793 // Set up the callee in-object property.
1794 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1795 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
1796 __ AssertNotSmi(a3);
1797 const int kCalleeOffset = JSObject::kHeaderSize +
1798 Heap::kArgumentsCalleeIndex * kPointerSize;
1799 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
1801 // Use the length (smi tagged) and set that as an in-object property too.
1803 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1804 const int kLengthOffset = JSObject::kHeaderSize +
1805 Heap::kArgumentsLengthIndex * kPointerSize;
1806 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
1808 // Set up the elements pointer in the allocated arguments object.
1809 // If we allocated a parameter map, t0 will point there, otherwise
1810 // it will point to the backing store.
1811 __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
1812 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
1814 // v0 = address of new object (tagged)
1815 // a1 = mapped parameter count (tagged)
1816 // a2 = argument count (tagged)
1817 // t0 = address of parameter map or backing store (tagged)
1818 // Initialize parameter map. If there are no mapped arguments, we're done.
1819 Label skip_parameter_map;
1821 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
1822 // Move backing store address to a3, because it is
1823 // expected there when filling in the unmapped arguments.
1827 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
1829 __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
1830 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
1831 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
1832 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
1833 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
1835 __ Addu(t2, t0, Operand(t6));
1836 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
1837 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
1839 // Copy the parameter slots and the holes in the arguments.
1840 // We need to fill in mapped_parameter_count slots. They index the context,
1841 // where parameters are stored in reverse order, at
1842 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
1843 // The mapped parameters thus need to get indices
1844 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
1845 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
1846 // We loop from right to left.
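// For example, with parameter_count == 3 and mapped_parameter_count == 2 the
// mapped parameters receive the context indices MIN_CONTEXT_SLOTS + 1 and
// MIN_CONTEXT_SLOTS + 2.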
1847 Label parameters_loop, parameters_test;
1849 __ lw(t5, MemOperand(sp, 0 * kPointerSize));
1850 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
1851 __ Subu(t5, t5, Operand(a1));
1852 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
1854 __ Addu(a3, t0, Operand(t6));
1855 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
1857 // t2 = loop variable (tagged)
1858 // a1 = mapping index (tagged)
1859 // a3 = address of backing store (tagged)
1860 // t0 = address of parameter map (tagged)
1861 // t1 = temporary scratch (a.o., for address calculation)
1862 // t3 = the hole value
1863 __ jmp(&parameters_test);
1865 __ bind(&parameters_loop);
1866 __ Subu(t2, t2, Operand(Smi::FromInt(1)));
1868 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
1869 __ Addu(t6, t0, t1);
1870 __ sw(t5, MemOperand(t6));
1871 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
1872 __ Addu(t6, a3, t1);
1873 __ sw(t3, MemOperand(t6));
1874 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
1875 __ bind(&parameters_test);
1876 __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
1878 __ bind(&skip_parameter_map);
1879 // a2 = argument count (tagged)
1880 // a3 = address of backing store (tagged)
1882 // Copy arguments header and remaining slots (if there are any).
1883 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
1884 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
1885 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
1887 Label arguments_loop, arguments_test;
1889 __ lw(t0, MemOperand(sp, 1 * kPointerSize));
1891 __ Subu(t0, t0, Operand(t6));
1892 __ jmp(&arguments_test);
1894 __ bind(&arguments_loop);
1895 __ Subu(t0, t0, Operand(kPointerSize));
1896 __ lw(t2, MemOperand(t0, 0));
1898 __ Addu(t1, a3, Operand(t6));
1899 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
1900 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
1902 __ bind(&arguments_test);
1903 __ Branch(&arguments_loop, lt, t5, Operand(a2));
1905 // Return and remove the on-stack parameters.
1908 // Do the runtime call to allocate the arguments object.
1909 // a2 = argument count (tagged)
1911 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
1912 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1916 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1917 // Return address is in ra.
1920 Register receiver = LoadDescriptor::ReceiverRegister();
1921 Register key = LoadDescriptor::NameRegister();
1923 // Check that the key is an array index, that is Uint32.
1924 __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
1925 __ Branch(&slow, ne, t0, Operand(zero_reg));
1927 // Everything is fine, call runtime.
1928 __ Push(receiver, key); // Receiver, key.
1930 // Perform tail call to the entry.
1931 __ TailCallExternalReference(
1932 ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
1937 PropertyAccessCompiler::TailCallBuiltin(
1938 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1942 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1943 // sp[0] : number of parameters
1944 // sp[4] : receiver displacement
1946 // Check if the calling frame is an arguments adaptor frame.
1947 Label adaptor_frame, try_allocate, runtime;
1948 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1949 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1950 __ Branch(&adaptor_frame,
1953 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1955 // Get the length from the frame.
1956 __ lw(a1, MemOperand(sp, 0));
1957 __ Branch(&try_allocate);
1959 // Patch the arguments.length and the parameters pointer.
1960 __ bind(&adaptor_frame);
1961 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1962 if (has_new_target()) {
1963 Label skip_decrement;
1964 __ Branch(&skip_decrement, eq, a1, Operand(Smi::FromInt(0)));
1965 // Subtract 1 from smi-tagged arguments count.
1966 __ Subu(a1, a1, Operand(2));
1967 __ bind(&skip_decrement);
1969 __ sw(a1, MemOperand(sp, 0));
1970 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
1971 __ Addu(a3, a2, Operand(at));
1973 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1974 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1976 // Try the new space allocation. Start out with computing the size
1977 // of the arguments object and the elements array in words.
1978 Label add_arguments_object;
1979 __ bind(&try_allocate);
1980 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
1981 __ srl(a1, a1, kSmiTagSize);
1983 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
1984 __ bind(&add_arguments_object);
1985 __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
1987 // Do the allocation of both objects in one go.
1988 __ Allocate(a1, v0, a2, a3, &runtime,
1989 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
1991 // Get the arguments boilerplate from the current native context.
1992 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1993 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
1994 __ lw(t0, MemOperand(
1995 t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
1997 __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
1998 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
1999 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
2000 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
2002 // Get the length (smi tagged) and set that as an in-object property too.
2003 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2004 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2006 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
2007 Heap::kArgumentsLengthIndex * kPointerSize));
2010 __ Branch(&done, eq, a1, Operand(zero_reg));
2012 // Get the parameters pointer from the stack.
2013 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
2015 // Set up the elements pointer in the allocated arguments object and
2016 // initialize the header in the elements fixed array.
2017 __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
2018 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
2019 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
2020 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
2021 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
2022 // Untag the length for the loop.
2023 __ srl(a1, a1, kSmiTagSize);
2025 // Copy the fixed array slots.
2027 // Set up t0 to point to the first array slot.
2028 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2030 // Pre-decrement a2 with kPointerSize on each iteration.
2031 // Pre-decrement in order to skip receiver.
2032 __ Addu(a2, a2, Operand(-kPointerSize));
2033 __ lw(a3, MemOperand(a2));
2034 // Post-increment t0 with kPointerSize on each iteration.
2035 __ sw(a3, MemOperand(t0));
2036 __ Addu(t0, t0, Operand(kPointerSize));
2037 __ Subu(a1, a1, Operand(1));
2038 __ Branch(&loop, ne, a1, Operand(zero_reg));
2040 // Return and remove the on-stack parameters.
2044 // Do the runtime call to allocate the arguments object.
2046 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
2050 void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
2051 // sp[0] : index of rest parameter
2052 // sp[4] : number of parameters
2053 // sp[8] : receiver displacement
2054 // Check if the calling frame is an arguments adaptor frame.
2057 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2058 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
2059 __ Branch(&runtime, ne, a3,
2060 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2062 // Patch the arguments.length and the parameters pointer.
2063 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2064 __ sw(a1, MemOperand(sp, 1 * kPointerSize));
2065 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
2066 __ Addu(a3, a2, Operand(at));
2068 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2069 __ sw(a3, MemOperand(sp, 2 * kPointerSize));
2071 // Do the runtime call to allocate the arguments object.
2073 __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
2077 void RegExpExecStub::Generate(MacroAssembler* masm) {
2078 // Just jump directly to runtime if native RegExp is not selected at compile
2079 // time, or if the regexp entry in generated code is turned off by a runtime switch or at compilation.
2081 #ifdef V8_INTERPRETED_REGEXP
2082 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2083 #else // V8_INTERPRETED_REGEXP
2085 // Stack frame on entry.
2086 // sp[0]: last_match_info (expected JSArray)
2087 // sp[4]: previous index
2088 // sp[8]: subject string
2089 // sp[12]: JSRegExp object
2091 const int kLastMatchInfoOffset = 0 * kPointerSize;
2092 const int kPreviousIndexOffset = 1 * kPointerSize;
2093 const int kSubjectOffset = 2 * kPointerSize;
2094 const int kJSRegExpOffset = 3 * kPointerSize;
2097 // Allocation of registers for this function. These are in callee save
2098 // registers and will be preserved by the call to the native RegExp code, as
2099 // this code is called using the normal C calling convention. When calling
2100 // directly from generated code the native RegExp code will not do a GC and
2101 // therefore the contents of these registers are safe to use after the call.
2102 // MIPS - using s0..s2, since we are not using CEntry Stub.
2103 Register subject = s0;
2104 Register regexp_data = s1;
2105 Register last_match_info_elements = s2;
2107 // Ensure that a RegExp stack is allocated.
2108 ExternalReference address_of_regexp_stack_memory_address =
2109 ExternalReference::address_of_regexp_stack_memory_address(
2111 ExternalReference address_of_regexp_stack_memory_size =
2112 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2113 __ li(a0, Operand(address_of_regexp_stack_memory_size));
2114 __ lw(a0, MemOperand(a0, 0));
2115 __ Branch(&runtime, eq, a0, Operand(zero_reg));
2117 // Check that the first argument is a JSRegExp object.
2118 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
2119 STATIC_ASSERT(kSmiTag == 0);
2120 __ JumpIfSmi(a0, &runtime);
2121 __ GetObjectType(a0, a1, a1);
2122 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2124 // Check that the RegExp has been compiled (data contains a fixed array).
2125 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2126 if (FLAG_debug_code) {
2127 __ SmiTst(regexp_data, t0);
2129 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2132 __ GetObjectType(regexp_data, a0, a0);
2134 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2136 Operand(FIXED_ARRAY_TYPE));
2139 // regexp_data: RegExp data (FixedArray)
2140 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2141 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2142 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2144 // regexp_data: RegExp data (FixedArray)
2145 // Check that the number of captures fits in the static offsets vector buffer.
2147 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2148 // Check (number_of_captures + 1) * 2 <= offsets vector size
2149 // Or number_of_captures * 2 <= offsets vector size - 2
2150 // Multiplying by 2 comes for free since a2 is smi-tagged.
2151 STATIC_ASSERT(kSmiTag == 0);
2152 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2153 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2155 &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
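// a2 still holds the capture count as a smi, i.e. number_of_captures * 2, so
// comparing it against (vector size - 2) performs the check above without an
// explicit multiplication.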
2157 // Reset offset for possibly sliced string.
2158 __ mov(t0, zero_reg);
2159 __ lw(subject, MemOperand(sp, kSubjectOffset));
2160 __ JumpIfSmi(subject, &runtime);
2161 __ mov(a3, subject); // Make a copy of the original subject string.
2162 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2163 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2164 // subject: subject string
2165 // a3: subject string
2166 // a0: subject string instance type
2167 // regexp_data: RegExp data (FixedArray)
2168 // Handle subject string according to its encoding and representation:
2169 // (1) Sequential string? If yes, go to (5).
2170 // (2) Anything but sequential or cons? If yes, go to (6).
2171 // (3) Cons string. If the string is flat, replace subject with first string.
2172 // Otherwise bailout.
2173 // (4) Is subject external? If yes, go to (7).
2174 // (5) Sequential string. Load regexp code according to encoding.
2178 // Deferred code at the end of the stub:
2179 // (6) Not a long external string? If yes, go to (8).
2180 // (7) External string. Make it, offset-wise, look like a sequential string.
2182 // (8) Short external string or not a string? If yes, bail out to runtime.
2183 // (9) Sliced string. Replace subject with parent. Go to (4).
2185 Label seq_string /* 5 */, external_string /* 7 */,
2186 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2187 not_long_external /* 8 */;
2189 // (1) Sequential string? If yes, go to (5).
2192 Operand(kIsNotStringMask |
2193 kStringRepresentationMask |
2194 kShortExternalStringMask));
2195 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2196 __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
2198 // (2) Anything but sequential or cons? If yes, go to (6).
2199 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2200 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2201 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2202 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2204 __ Branch(¬_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2206 // (3) Cons string. Check that it's flat.
2207 // Replace subject with first string and reload instance type.
2208 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2209 __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2210 __ Branch(&runtime, ne, a0, Operand(a1));
2211 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2213 // (4) Is subject external? If yes, go to (7).
2214 __ bind(&check_underlying);
2215 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2216 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2217 STATIC_ASSERT(kSeqStringTag == 0);
2218 __ And(at, a0, Operand(kStringRepresentationMask));
2219 // The underlying external string is never a short external string.
2220 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2221 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2222 __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
2224 // (5) Sequential string. Load regexp code according to encoding.
2225 __ bind(&seq_string);
2226 // subject: sequential subject string (or look-alike, external string)
2227 // a3: original subject string
2228 // Load previous index and check range before a3 is overwritten. We have to
2229 // use a3 instead of subject here because subject might have been only made
2230 // to look like a sequential string when it actually is an external string.
2231 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
2232 __ JumpIfNotSmi(a1, &runtime);
2233 __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
2234 __ Branch(&runtime, ls, a3, Operand(a1));
2235 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
2237 STATIC_ASSERT(kStringEncodingMask == 4);
2238 STATIC_ASSERT(kOneByteStringTag == 4);
2239 STATIC_ASSERT(kTwoByteStringTag == 0);
2240 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one-byte.
2241 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
2242 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
2243 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2244 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
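// movz copies t1 into t9 only when a0 == 0, so the two-byte code object is
// selected without a branch.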
2246 // (E) Carry on. String handling is done.
2247 // t9: irregexp code
2248 // Check that the irregexp code has been generated for the actual string
2249 // encoding. If it has, the field contains a code object; otherwise it contains
2250 // a smi (code flushing support).
2251 __ JumpIfSmi(t9, &runtime);
2253 // a1: previous index
2254 // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
2256 // subject: Subject string
2257 // regexp_data: RegExp data (FixedArray)
2258 // All checks done. Now push arguments for native regexp code.
2259 __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
2262 // Isolates: note we add an additional parameter here (isolate pointer).
2263 const int kRegExpExecuteArguments = 9;
2264 const int kParameterRegisters = 4;
2265 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2267 // Stack pointer now points to cell where return address is to be written.
2268 // Arguments are before that on the stack or in registers, meaning we
2269 // treat the return address as argument 5. Thus every argument after that
2270 // needs to be shifted back by 1. Since DirectCEntryStub will handle
2271 // allocating space for the c argument slots, we don't need to calculate
2272 // that into the argument positions on the stack. This is how the stack will
2273 // look (sp meaning the value of sp at this moment):
2274 // [sp + 5] - Argument 9
2275 // [sp + 4] - Argument 8
2276 // [sp + 3] - Argument 7
2277 // [sp + 2] - Argument 6
2278 // [sp + 1] - Argument 5
2279 // [sp + 0] - saved ra
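// Arguments 1..4 are passed in registers a0..a3 per the MIPS O32 calling
// convention; only arguments 5..9 occupy the stack slots shown above.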
2281 // Argument 9: Pass current isolate address.
2282 // CFunctionArgumentOperand handles MIPS stack argument slots.
2283 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2284 __ sw(a0, MemOperand(sp, 5 * kPointerSize));
2286 // Argument 8: Indicate that this is a direct call from JavaScript.
2287 __ li(a0, Operand(1));
2288 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
2290 // Argument 7: Start (high end) of backtracking stack memory area.
2291 __ li(a0, Operand(address_of_regexp_stack_memory_address));
2292 __ lw(a0, MemOperand(a0, 0));
2293 __ li(a2, Operand(address_of_regexp_stack_memory_size));
2294 __ lw(a2, MemOperand(a2, 0));
2295 __ addu(a0, a0, a2);
2296 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
2298 // Argument 6: Set the number of capture registers to zero to force global
2299 // regexps to behave as non-global. This does not affect non-global regexps.
2300 __ mov(a0, zero_reg);
2301 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
2303 // Argument 5: static offsets vector buffer.
2305 ExternalReference::address_of_static_offsets_vector(isolate())));
2306 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
2308 // For arguments 4 and 3 get string length, calculate start of string data
2309 // calculate the shift of the index (0 for one-byte and 1 for two-byte).
2310 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2311 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
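// a3 now holds log2(bytes per character) and is used below with sllv to scale
// indices and lengths to byte offsets.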
2312 // Load the length from the original subject string from the previous stack
2313 // frame. Therefore we have to use fp, which points exactly to two pointer
2314 // sizes below the previous sp. (Because creating a new stack frame pushes
2315 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2316 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2317 // If slice offset is not 0, load the length from the original sliced string.
2318 // Argument 4, a3: End of string data
2319 // Argument 3, a2: Start of string data
2320 // Prepare start and end index of the input.
2321 __ sllv(t1, t0, a3);
2322 __ addu(t0, t2, t1);
2323 __ sllv(t1, a1, a3);
2324 __ addu(a2, t0, t1);
2326 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
2327 __ sra(t2, t2, kSmiTagSize);
2328 __ sllv(t1, t2, a3);
2329 __ addu(a3, t0, t1);
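// a2 now holds the address of the character at the previous index, where
// scanning starts (string data plus slice offset plus previous index, scaled
// by the character size), and a3 the address just past the last character.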
2330 // Argument 2 (a1): Previous index.
2333 // Argument 1 (a0): Subject string.
2334 __ mov(a0, subject);
2336 // Locate the code entry and call it.
2337 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
2338 DirectCEntryStub stub(isolate());
2339 stub.GenerateCall(masm, t9);
2341 __ LeaveExitFrame(false, no_reg, true);
2344 // subject: subject string (callee saved)
2345 // regexp_data: RegExp data (callee saved)
2346 // last_match_info_elements: Last match info elements (callee saved)
2347 // Check the result.
2349 __ Branch(&success, eq, v0, Operand(1));
2350 // We expect exactly one result since we force the called regexp to behave as non-global.
2353 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
2354 // If not exception it can only be retry. Handle that in the runtime system.
2355 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2356 // Result must now be exception. If there is no pending exception already, a
2357 // stack overflow (on the backtrack stack) was detected in RegExp code, but
2358 // the exception has not been created yet. Handle that in the runtime system.
2359 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2360 __ li(a1, Operand(isolate()->factory()->the_hole_value()));
2361 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2363 __ lw(v0, MemOperand(a2, 0));
2364 __ Branch(&runtime, eq, v0, Operand(a1));
2366 // For exception, throw the exception again.
2367 __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
2370 // For failure and exception return null.
2371 __ li(v0, Operand(isolate()->factory()->null_value()));
2374 // Process the result from the native regexp code.
2377 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2378 // Calculate number of capture registers (number_of_captures + 1) * 2.
2379 // Multiplying by 2 comes for free since a1 is smi-tagged.
2380 STATIC_ASSERT(kSmiTag == 0);
2381 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2382 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
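// a1 held the capture count as a smi, i.e. count * 2; adding raw 2 yields
// (number_of_captures + 1) * 2 as an untagged value, e.g. one capture gives
// four capture registers.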
2384 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
2385 __ JumpIfSmi(a0, &runtime);
2386 __ GetObjectType(a0, a2, a2);
2387 __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
2388 // Check that the JSArray is in fast case.
2389 __ lw(last_match_info_elements,
2390 FieldMemOperand(a0, JSArray::kElementsOffset));
2391 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2392 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2393 __ Branch(&runtime, ne, a0, Operand(at));
2394 // Check that the last match info has space for the capture registers and the
2395 // additional information.
2397 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2398 __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
2399 __ sra(at, a0, kSmiTagSize);
2400 __ Branch(&runtime, gt, a2, Operand(at));
2402 // a1: number of capture registers
2403 // subject: subject string
2404 // Store the capture count.
2405 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
2406 __ sw(a2, FieldMemOperand(last_match_info_elements,
2407 RegExpImpl::kLastCaptureCountOffset));
2408 // Store last subject and last input.
2410 FieldMemOperand(last_match_info_elements,
2411 RegExpImpl::kLastSubjectOffset));
2412 __ mov(a2, subject);
2413 __ RecordWriteField(last_match_info_elements,
2414 RegExpImpl::kLastSubjectOffset,
2419 __ mov(subject, a2);
2421 FieldMemOperand(last_match_info_elements,
2422 RegExpImpl::kLastInputOffset));
2423 __ RecordWriteField(last_match_info_elements,
2424 RegExpImpl::kLastInputOffset,
2430 // Get the static offsets vector filled by the native regexp code.
2431 ExternalReference address_of_static_offsets_vector =
2432 ExternalReference::address_of_static_offsets_vector(isolate());
2433 __ li(a2, Operand(address_of_static_offsets_vector));
2435 // a1: number of capture registers
2436 // a2: offsets vector
2437 Label next_capture, done;
2438 // Capture register counter starts from number of capture registers and
2439 // counts down until wrapping after zero.
2441 last_match_info_elements,
2442 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2443 __ bind(&next_capture);
2444 __ Subu(a1, a1, Operand(1));
2445 __ Branch(&done, lt, a1, Operand(zero_reg));
2446 // Read the value from the static offsets vector buffer.
2447 __ lw(a3, MemOperand(a2, 0));
2448 __ addiu(a2, a2, kPointerSize);
2449 // Store the smi value in the last match info.
2450 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
2451 __ sw(a3, MemOperand(a0, 0));
2452 __ Branch(&next_capture, USE_DELAY_SLOT);
2453 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
2457 // Return last match info.
2458 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
2461 // Do the runtime call to execute the regexp.
2463 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2465 // Deferred code for string handling.
2466 // (6) Not a long external string? If yes, go to (8).
2467 __ bind(&not_seq_nor_cons);
2469 __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
2471 // (7) External string. Make it, offset-wise, look like a sequential string.
2472 __ bind(&external_string);
2473 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2474 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2475 if (FLAG_debug_code) {
2476 // Assert that we do not have a cons or slice (indirect strings) here.
2477 // Sequential strings have already been ruled out.
2478 __ And(at, a0, Operand(kIsIndirectStringMask));
2480 kExternalStringExpectedButNotFound,
2485 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2486 // Move the pointer so that offset-wise, it looks like a sequential string.
2487 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2490 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
2491 __ jmp(&seq_string); // Go to (5).
2493 // (8) Short external string or not a string? If yes, bail out to runtime.
2494 __ bind(&not_long_external);
2495 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
2496 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
2497 __ Branch(&runtime, ne, at, Operand(zero_reg));
2499 // (9) Sliced string. Replace subject with parent. Go to (4).
2500 // Load offset into t0 and replace subject string with parent.
2501 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2502 __ sra(t0, t0, kSmiTagSize);
2503 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2504 __ jmp(&check_underlying); // Go to (4).
2505 #endif // V8_INTERPRETED_REGEXP
2509 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2510 // Cache the called function in a feedback vector slot. Cache states
2511 // are uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
2513 // a0 : number of arguments to the construct function
2514 // a1 : the function to call
2515 // a2 : Feedback vector
2516 // a3 : slot in feedback vector (Smi)
2517 Label initialize, done, miss, megamorphic, not_array_function;
2519 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2520 masm->isolate()->heap()->megamorphic_symbol());
2521 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2522 masm->isolate()->heap()->uninitialized_symbol());
2524 // Load the cache state into t0.
2525 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2526 __ Addu(t0, a2, Operand(t0));
2527 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
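// a3 is the slot index as a smi (index << 1); shifting it left by
// kPointerSizeLog2 - kSmiTagSize (= 1 here) turns it into the byte offset
// index * kPointerSize into the feedback vector.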
2529 // A monomorphic cache hit or an already megamorphic state: invoke the
2530 // function without changing the state.
2531 __ Branch(&done, eq, t0, Operand(a1));
2533 if (!FLAG_pretenuring_call_new) {
2534 // If we came here, we need to see if we are the array function.
2535 // If we didn't have a matching function, and we didn't find the megamorphic
2536 // sentinel, then we have in the slot either some other function or an
2537 // AllocationSite. Do a map check on the object in a3.
2538 __ lw(t1, FieldMemOperand(t0, 0));
2539 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2540 __ Branch(&miss, ne, t1, Operand(at));
2542 // Make sure the function is the Array() function
2543 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2544 __ Branch(&megamorphic, ne, a1, Operand(t0));
2550 // A monomorphic miss (i.e., here the cache is not uninitialized) goes megamorphic.
2552 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2553 __ Branch(&initialize, eq, t0, Operand(at));
2554 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2555 // write-barrier is needed.
2556 __ bind(&megamorphic);
2557 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2558 __ Addu(t0, a2, Operand(t0));
2559 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2560 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
2563 // An uninitialized cache is patched with the function.
2564 __ bind(&initialize);
2565 if (!FLAG_pretenuring_call_new) {
2566 // Make sure the function is the Array() function.
2567 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2568 __ Branch(&not_array_function, ne, a1, Operand(t0));
2570 // The target function is the Array constructor.
2571 // Create an AllocationSite if we don't already have it, store it in the slot.
2574 FrameScope scope(masm, StackFrame::INTERNAL);
2575 const RegList kSavedRegs =
2581 // Arguments register must be smi-tagged to call out.
2583 __ MultiPush(kSavedRegs);
2585 CreateAllocationSiteStub create_stub(masm->isolate());
2586 __ CallStub(&create_stub);
2588 __ MultiPop(kSavedRegs);
2593 __ bind(&not_array_function);
2596 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2597 __ Addu(t0, a2, Operand(t0));
2598 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2599 __ sw(a1, MemOperand(t0, 0));
2601 __ Push(t0, a2, a1);
2602 __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
2603 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
2610 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2611 __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2612 __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
2614 // Do not transform the receiver for strict mode functions.
2615 int32_t strict_mode_function_mask =
2616 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
2617 // Do not transform the receiver for native functions (compiler hints already in t0).
2618 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
2619 __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
2620 __ Branch(cont, ne, at, Operand(zero_reg));
2624 static void EmitSlowCase(MacroAssembler* masm,
2626 Label* non_function) {
2627 // Check for function proxy.
2628 __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
2629 __ push(a1); // put proxy as additional argument
2630 __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
2631 __ mov(a2, zero_reg);
2632 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
2634 Handle<Code> adaptor =
2635 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2636 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2639 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2640 // of the original receiver from the call site).
2641 __ bind(non_function);
2642 __ sw(a1, MemOperand(sp, argc * kPointerSize));
2643 __ li(a0, Operand(argc)); // Set up the number of arguments.
2644 __ mov(a2, zero_reg);
2645 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
2646 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2647 RelocInfo::CODE_TARGET);
2651 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2652 // Wrap the receiver and patch it back onto the stack.
2653 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
2655 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2658 __ Branch(USE_DELAY_SLOT, cont);
2659 __ sw(v0, MemOperand(sp, argc * kPointerSize));
2663 static void CallFunctionNoFeedback(MacroAssembler* masm,
2664 int argc, bool needs_checks,
2665 bool call_as_method) {
2666 // a1 : the function to call
2667 Label slow, non_function, wrap, cont;
2670 // Check that the function is really a JavaScript function.
2671 // a1: pushed function (to be verified)
2672 __ JumpIfSmi(a1, &non_function);
2674 // Goto slow case if we do not have a function.
2675 __ GetObjectType(a1, t0, t0);
2676 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2679 // Fast-case: Invoke the function now.
2680 // a1: pushed function
2681 ParameterCount actual(argc);
2683 if (call_as_method) {
2685 EmitContinueIfStrictOrNative(masm, &cont);
2688 // Compute the receiver in sloppy mode.
2689 __ lw(a3, MemOperand(sp, argc * kPointerSize));
2692 __ JumpIfSmi(a3, &wrap);
2693 __ GetObjectType(a3, t0, t0);
2694 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
2702 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2705 // Slow-case: Non-function called.
2707 EmitSlowCase(masm, argc, &non_function);
2710 if (call_as_method) {
2712 // Wrap the receiver and patch it back onto the stack.
2713 EmitWrapCase(masm, argc, &cont);
2718 void CallFunctionStub::Generate(MacroAssembler* masm) {
2719 CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2723 void CallConstructStub::Generate(MacroAssembler* masm) {
2724 // a0 : number of arguments
2725 // a1 : the function to call
2726 // a2 : feedback vector
2727 // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
2728 Label slow, non_function_call;
2730 // Check that the function is not a smi.
2731 __ JumpIfSmi(a1, &non_function_call);
2732 // Check that the function is a JSFunction.
2733 __ GetObjectType(a1, t0, t0);
2734 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2736 if (RecordCallTarget()) {
2737 GenerateRecordCallTarget(masm);
2739 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2740 __ Addu(t1, a2, at);
2741 if (FLAG_pretenuring_call_new) {
2742 // Put the AllocationSite from the feedback vector into a2.
2743 // By adding kPointerSize we encode that we know the AllocationSite
2744 // entry is at the feedback vector slot given by a3 + 1.
2745 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
2747 Label feedback_register_initialized;
2748 // Put the AllocationSite from the feedback vector into a2, or undefined.
2749 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
2750 __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
2751 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2752 __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
2753 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
2754 __ bind(&feedback_register_initialized);
2757 __ AssertUndefinedOrAllocationSite(a2, t1);
2760 // Pass function as original constructor.
2761 if (IsSuperConstructorCall()) {
2762 __ li(t0, Operand(1 * kPointerSize));
2763 __ sll(at, a0, kPointerSizeLog2);
2764 __ Addu(t0, t0, Operand(at));
2765 __ Addu(at, sp, Operand(t0));
2766 __ lw(a3, MemOperand(at, 0));
2771 // Jump to the function-specific construct stub.
2772 Register jmp_reg = t0;
2773 __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2774 __ lw(jmp_reg, FieldMemOperand(jmp_reg,
2775 SharedFunctionInfo::kConstructStubOffset));
2776 __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
2779 // a0: number of arguments
2780 // a1: called object
2784 __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
2785 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2788 __ bind(&non_function_call);
2789 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2791 // Set expected number of arguments to zero (not changing a0).
2792 __ li(a2, Operand(0, RelocInfo::NONE32));
2793 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2794 RelocInfo::CODE_TARGET);
2798 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2799 __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2800 __ lw(vector, FieldMemOperand(vector,
2801 JSFunction::kSharedFunctionInfoOffset));
2802 __ lw(vector, FieldMemOperand(vector,
2803 SharedFunctionInfo::kFeedbackVectorOffset));
2807 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2813 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
2814 __ Branch(&miss, ne, a1, Operand(at));
2816 __ li(a0, Operand(arg_count()));
2817 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2818 __ Addu(at, a2, Operand(at));
2819 __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize));
2821 // Verify that t0 contains an AllocationSite
2822 __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
2823 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2824 __ Branch(&miss, ne, t1, Operand(at));
2828 ArrayConstructorStub stub(masm->isolate(), arg_count());
2829 __ TailCallStub(&stub);
2834 // The slow case; we need this no matter what to complete a call after a miss.
2835 CallFunctionNoFeedback(masm,
2841 __ stop("Unexpected code address");
2845 void CallICStub::Generate(MacroAssembler* masm) {
2847 // a3 - slot id (Smi)
2849 const int with_types_offset =
2850 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
2851 const int generic_offset =
2852 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
2853 Label extra_checks_or_miss, slow_start;
2854 Label slow, non_function, wrap, cont;
2855 Label have_js_function;
2856 int argc = arg_count();
2857 ParameterCount actual(argc);
2859 // The checks. First, does a1 match the recorded monomorphic target?
2860 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2861 __ Addu(t0, a2, Operand(t0));
2862 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
2864 // We don't know that we have a weak cell. We might have a private symbol
2865 // or an AllocationSite, but the memory is safe to examine.
2866 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
2868 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
2869 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
2870 // computed, meaning that it can't appear to be a pointer. If the low bit is
2871 // 0, then hash is computed, but the 0 bit prevents the field from appearing to be a pointer.
2873 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
2874 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
2875 WeakCell::kValueOffset &&
2876 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
2878 __ lw(t1, FieldMemOperand(t0, WeakCell::kValueOffset));
2879 __ Branch(&extra_checks_or_miss, ne, a1, Operand(t1));
2881 // The compare above could have been a SMI/SMI comparison. Guard against this
2882 // convincing us that we have a monomorphic JSFunction.
2883 __ JumpIfSmi(a1, &extra_checks_or_miss);
2885 __ bind(&have_js_function);
2886 if (CallAsMethod()) {
2887 EmitContinueIfStrictOrNative(masm, &cont);
2888 // Compute the receiver in sloppy mode.
2889 __ lw(a3, MemOperand(sp, argc * kPointerSize));
2891 __ JumpIfSmi(a3, &wrap);
2892 __ GetObjectType(a3, t0, t0);
2893 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
2898 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2901 EmitSlowCase(masm, argc, &non_function);
2903 if (CallAsMethod()) {
2905 EmitWrapCase(masm, argc, &cont);
2908 __ bind(&extra_checks_or_miss);
2909 Label uninitialized, miss;
2911 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2912 __ Branch(&slow_start, eq, t0, Operand(at));
2914 // The following cases attempt to handle MISS cases without going to the runtime.
2916 if (FLAG_trace_ic) {
2920 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2921 __ Branch(&uninitialized, eq, t0, Operand(at));
2923 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2924 // to handle it here. More complex cases are dealt with in the runtime.
2925 __ AssertNotSmi(t0);
2926 __ GetObjectType(t0, t1, t1);
2927 __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
2928 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2929 __ Addu(t0, a2, Operand(t0));
2930 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2931 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
2932 // We have to update statistics for runtime profiling.
2933 __ lw(t0, FieldMemOperand(a2, with_types_offset));
2934 __ Subu(t0, t0, Operand(Smi::FromInt(1)));
2935 __ sw(t0, FieldMemOperand(a2, with_types_offset));
2936 __ lw(t0, FieldMemOperand(a2, generic_offset));
2937 __ Addu(t0, t0, Operand(Smi::FromInt(1)));
2938 __ Branch(USE_DELAY_SLOT, &slow_start);
2939 __ sw(t0, FieldMemOperand(a2, generic_offset)); // In delay slot.
2941 __ bind(&uninitialized);
2943 // We are going monomorphic, provided we actually have a JSFunction.
2944 __ JumpIfSmi(a1, &miss);
2946 // Goto miss case if we do not have a function.
2947 __ GetObjectType(a1, t0, t0);
2948 __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE));
2950 // Make sure the function is not the Array() function, which requires special
2951 // behavior on MISS.
2952 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2953 __ Branch(&miss, eq, a1, Operand(t0));
2956 __ lw(t0, FieldMemOperand(a2, with_types_offset));
2957 __ Addu(t0, t0, Operand(Smi::FromInt(1)));
2958 __ sw(t0, FieldMemOperand(a2, with_types_offset));
2960 // Store the function. Use a stub since we need a frame for allocation.
2965 FrameScope scope(masm, StackFrame::INTERNAL);
2966 CreateWeakCellStub create_stub(masm->isolate());
2968 __ CallStub(&create_stub);
2972 __ Branch(&have_js_function);
2974 // We are here because tracing is on or we encountered a MISS case we can't handle here.
2980 __ bind(&slow_start);
2981 // Check that the function is really a JavaScript function.
2982 // a1: pushed function (to be verified)
2983 __ JumpIfSmi(a1, &non_function);
2985 // Goto slow case if we do not have a function.
2986 __ GetObjectType(a1, t0, t0);
2987 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2988 __ Branch(&have_js_function);
2992 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2993 FrameScope scope(masm, StackFrame::INTERNAL);
2995 // Push the receiver and the function and feedback info.
2996 __ Push(a1, a2, a3);
2999 IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
3000 : IC::kCallIC_Customization_Miss;
3002 ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
3003 __ CallExternalReference(miss, 3);
3005 // Move result to a1 and exit the internal frame.
3010 // StringCharCodeAtGenerator.
3011 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3012 DCHECK(!t0.is(index_));
3013 DCHECK(!t0.is(result_));
3014 DCHECK(!t0.is(object_));
3015 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
3016 // If the receiver is a smi trigger the non-string case.
3017 __ JumpIfSmi(object_, receiver_not_string_);
3019 // Fetch the instance type of the receiver into result register.
3020 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3021 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3022 // If the receiver is not a string trigger the non-string case.
3023 __ And(t0, result_, Operand(kIsNotStringMask));
3024 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
3027 // If the index is non-smi trigger the non-smi case.
3028 __ JumpIfNotSmi(index_, &index_not_smi_);
3030 __ bind(&got_smi_index_);
3032 // Check for index out of range.
3033 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
3034 __ Branch(index_out_of_range_, ls, t0, Operand(index_));
3036 __ sra(index_, index_, kSmiTagSize);
3038 StringCharLoadGenerator::Generate(masm,
3044 __ sll(result_, result_, kSmiTagSize);
3049 void StringCharCodeAtGenerator::GenerateSlow(
3050 MacroAssembler* masm, EmbedMode embed_mode,
3051 const RuntimeCallHelper& call_helper) {
3052 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3054 // Index is not a smi.
3055 __ bind(&index_not_smi_);
3056 // If index is a heap number, try converting it to an integer.
3059 Heap::kHeapNumberMapRootIndex,
3062 call_helper.BeforeCall(masm);
3063 // Consumed by runtime conversion function:
3064 if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
3065 __ Push(VectorLoadICDescriptor::VectorRegister(),
3066 VectorLoadICDescriptor::SlotRegister(), object_, index_);
3068 __ Push(object_, index_);
3070 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3071 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3073 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3074 // NumberToSmi discards numbers that are not exact integers.
3075 __ CallRuntime(Runtime::kNumberToSmi, 1);
3078 // Save the conversion result before the pop instructions below
3079 // have a chance to overwrite it.
3080 __ Move(index_, v0);
3081 if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
3082 __ Pop(VectorLoadICDescriptor::SlotRegister(),
3083 VectorLoadICDescriptor::VectorRegister(), object_);
3087 // Reload the instance type.
3088 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3089 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3090 call_helper.AfterCall(masm);
3091 // If index is still not a smi, it must be out of range.
3092 __ JumpIfNotSmi(index_, index_out_of_range_);
3093 // Otherwise, return to the fast path.
3094 __ Branch(&got_smi_index_);
3096 // Call runtime. We get here when the receiver is a string and the
3097 // index is a number, but the code for getting the actual character
3098 // is too complex (e.g., when the string needs to be flattened).
3099 __ bind(&call_runtime_);
3100 call_helper.BeforeCall(masm);
3101 __ sll(index_, index_, kSmiTagSize);
3102 __ Push(object_, index_);
3103 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
3105 __ Move(result_, v0);
3107 call_helper.AfterCall(masm);
3110 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3114 // -------------------------------------------------------------------------
3115 // StringCharFromCodeGenerator
3117 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3118 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3120 DCHECK(!t0.is(result_));
3121 DCHECK(!t0.is(code_));
3123 STATIC_ASSERT(kSmiTag == 0);
3124 STATIC_ASSERT(kSmiShiftSize == 0);
3125 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
3128 Operand(kSmiTagMask |
3129 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3130 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
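// The mask rejects anything that is not a smi or whose untagged value exceeds
// String::kMaxOneByteCharCode (0xFF); such values go to the slow case.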
3132 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3133 // At this point code register contains smi tagged one-byte char code.
3134 STATIC_ASSERT(kSmiTag == 0);
3135 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
3136 __ Addu(result_, result_, t0);
3137 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3138 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3139 __ Branch(&slow_case_, eq, result_, Operand(t0));
3144 void StringCharFromCodeGenerator::GenerateSlow(
3145 MacroAssembler* masm,
3146 const RuntimeCallHelper& call_helper) {
3147 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3149 __ bind(&slow_case_);
3150 call_helper.BeforeCall(masm);
3152 __ CallRuntime(Runtime::kCharFromCode, 1);
3153 __ Move(result_, v0);
3155 call_helper.AfterCall(masm);
3158 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3162 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
3165 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3170 String::Encoding encoding) {
3171 if (FLAG_debug_code) {
3172 // Check that destination is word aligned.
3173 __ And(scratch, dest, Operand(kPointerAlignmentMask));
3175 kDestinationOfCopyNotAligned,
3180 // Assumes word reads and writes are little endian.
3181 // Nothing to do for zero characters.
3184 if (encoding == String::TWO_BYTE_ENCODING) {
3185 __ Addu(count, count, count);
3188 Register limit = count; // Read until dest equals this.
3189 __ Addu(limit, dest, Operand(count));
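// count is now a byte count (it was doubled above for two-byte strings), so
// the loop below copies one byte per iteration until dest reaches limit.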
3191 Label loop_entry, loop;
3192 // Copy bytes from src to dest until dest hits limit.
3193 __ Branch(&loop_entry);
3195 __ lbu(scratch, MemOperand(src));
3196 __ Addu(src, src, Operand(1));
3197 __ sb(scratch, MemOperand(dest));
3198 __ Addu(dest, dest, Operand(1));
3199 __ bind(&loop_entry);
3200 __ Branch(&loop, lt, dest, Operand(limit));
3206 void SubStringStub::Generate(MacroAssembler* masm) {
3208 // Stack frame on entry.
3209 // ra: return address
3214 // This stub is called from the native-call %_SubString(...), so
3215 // nothing can be assumed about the arguments. It is tested that:
3216 // "string" is a sequential string,
3217 // both "from" and "to" are smis, and
3218 // 0 <= from <= to <= string.length.
3219 // If any of these assumptions fail, we call the runtime system.
3221 const int kToOffset = 0 * kPointerSize;
3222 const int kFromOffset = 1 * kPointerSize;
3223 const int kStringOffset = 2 * kPointerSize;
3225 __ lw(a2, MemOperand(sp, kToOffset));
3226 __ lw(a3, MemOperand(sp, kFromOffset));
3227 STATIC_ASSERT(kFromOffset == kToOffset + 4);
3228 STATIC_ASSERT(kSmiTag == 0);
3229 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3231 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
3232 // safe in this case.
3233 __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
3234 __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
3235 // Both a2 and a3 are untagged integers.
3237 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
3239 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
3240 __ Subu(a2, a2, a3);
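// a2 = to - from, i.e. the length of the requested substring.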
3242 // Make sure first argument is a string.
3243 __ lw(v0, MemOperand(sp, kStringOffset));
3244 __ JumpIfSmi(v0, &runtime);
3245 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
3246 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3247 __ And(t0, a1, Operand(kIsNotStringMask));
3249 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3252 __ Branch(&single_char, eq, a2, Operand(1));
3254 // Short-cut for the case of trivial substring.
3256 // v0: original string
3257 // a2: result string length
3258 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
3260 // Return original string.
3261 __ Branch(&return_v0, eq, a2, Operand(t0));
3262 // Longer than original string's length or negative: unsafe arguments.
3263 __ Branch(&runtime, hi, a2, Operand(t0));
3264 // Shorter than original string's length: an actual substring.
3266 // Deal with different string types: update the index if necessary
3267 // and put the underlying string into t1.
3268 // v0: original string
3269 // a1: instance type
3271 // a3: from index (untagged)
3272 Label underlying_unpacked, sliced_string, seq_or_external_string;
3273 // If the string is not indirect, it can only be sequential or external.
3274 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3275 STATIC_ASSERT(kIsIndirectStringMask != 0);
3276 __ And(t0, a1, Operand(kIsIndirectStringMask));
3277 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
3278 // t0 is used as a scratch register and can be overwritten in either case.
3279 __ And(t0, a1, Operand(kSlicedNotConsMask));
3280 __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
3281 // Cons string. Check whether it is flat, then fetch first part.
3282 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
3283 __ LoadRoot(t0, Heap::kempty_stringRootIndex);
3284 __ Branch(&runtime, ne, t1, Operand(t0));
3285 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
3286 // Update instance type.
3287 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3288 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3289 __ jmp(&underlying_unpacked);
3291 __ bind(&sliced_string);
3292 // Sliced string. Fetch parent and correct start index by offset.
3293 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3294 __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3295 __ sra(t0, t0, 1); // Add offset to index.
3296 __ Addu(a3, a3, t0);
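// The slice offset was smi-tagged; the shift above untags it and the addition
// rebases the start index into the parent string.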
3297 // Update instance type.
3298 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3299 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3300 __ jmp(&underlying_unpacked);
3302 __ bind(&seq_or_external_string);
3303 // Sequential or external string. Just move string to the expected register.
3306 __ bind(&underlying_unpacked);
3308 if (FLAG_string_slices) {
3310 // t1: underlying subject string
3311 // a1: instance type of underlying subject string
3313 // a3: adjusted start index (untagged)
3314 // Short slice. Copy instead of slicing.
3315 __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
3316 // Allocate new sliced string. At this point we do not reload the instance
3317 // type including the string encoding because we simply rely on the info
3318 // provided by the original string. It does not matter if the original
3319 // string's encoding is wrong because we always have to recheck encoding of
3320 // the newly created string's parent anyway due to externalized strings.
3321 Label two_byte_slice, set_slice_header;
3322 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3323 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3324 __ And(t0, a1, Operand(kStringEncodingMask));
3325 __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
3326 __ AllocateOneByteSlicedString(v0, a2, t2, t3, &runtime);
3327 __ jmp(&set_slice_header);
3328 __ bind(&two_byte_slice);
3329 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
3330 __ bind(&set_slice_header);
3332 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3333 __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3336 __ bind(&copy_routine);
3339 // t1: underlying subject string
3340 // a1: instance type of underlying subject string
3342 // a3: adjusted start index (untagged)
3343 Label two_byte_sequential, sequential_string, allocate_result;
3344 STATIC_ASSERT(kExternalStringTag != 0);
3345 STATIC_ASSERT(kSeqStringTag == 0);
3346 __ And(t0, a1, Operand(kExternalStringTag));
3347 __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
3349 // Handle external string.
3350 // Rule out short external strings.
3351 STATIC_ASSERT(kShortExternalStringTag != 0);
3352 __ And(t0, a1, Operand(kShortExternalStringTag));
3353 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3354 __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
3355 // t1 already points to the first character of underlying string.
3356 __ jmp(&allocate_result);
3358 __ bind(&sequential_string);
3359 // Locate first character of underlying subject string.
3360 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3361 __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3363 __ bind(&allocate_result);
3364 // The underlying string data has been located. Allocate the result.
3365 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3366 __ And(t0, a1, Operand(kStringEncodingMask));
3367 __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
3369 // Allocate and copy the resulting one-byte string.
3370 __ AllocateOneByteString(v0, a2, t0, t2, t3, &runtime);
3372 // Locate first character of substring to copy.
3373 __ Addu(t1, t1, a3);
3375 // Locate first character of result.
3376 __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3378 // v0: result string
3379 // a1: first character of result string
3380 // a2: result string length
3381 // t1: first character of substring to copy
3382 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3383 StringHelper::GenerateCopyCharacters(
3384 masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
3387 // Allocate and copy the resulting two-byte string.
3388 __ bind(&two_byte_sequential);
3389 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
3391 // Locate first character of substring to copy.
3392 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3394 __ Addu(t1, t1, t0);
3395 // Locate first character of result.
3396 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3398 // v0: result string.
3399 // a1: first character of result.
3400 // a2: result length.
3401 // t1: first character of substring to copy.
3402 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3403 StringHelper::GenerateCopyCharacters(
3404 masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
3406 __ bind(&return_v0);
3407 Counters* counters = isolate()->counters();
3408 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
3411 // Just jump to runtime to create the substring.
3413 __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
3415 __ bind(&single_char);
3416 // v0: original string
3417 // a1: instance type
3419 // a3: from index (untagged)
3421 StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
3422 STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
3423 generator.GenerateFast(masm);
3425 generator.SkipSlow(masm, &runtime);
3429 void ToNumberStub::Generate(MacroAssembler* masm) {
3430 // The ToNumber stub takes one argument in a0.
3432 __ JumpIfNotSmi(a0, &not_smi);
3433 __ Ret(USE_DELAY_SLOT);
3437 Label not_heap_number;
3438 __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
3439 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3441 // a1: instance type.
3442 __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
3443 __ Ret(USE_DELAY_SLOT);
3445 __ bind(&not_heap_number);
3447 Label not_string, slow_string;
3448 __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
3449 // Check if string has a cached array index.
3450 __ lw(a2, FieldMemOperand(a0, String::kHashFieldOffset));
3451 __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
3452 __ Branch(&slow_string, ne, at, Operand(zero_reg));
3453 __ IndexFromHash(a2, a0);
3454 __ Ret(USE_DELAY_SLOT);
3456 __ bind(&slow_string);
3457 __ push(a0); // Push argument.
3458 __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
3459 __ bind(&not_string);
3462 __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
3463 __ Ret(USE_DELAY_SLOT);
3464 __ lw(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));
3465 __ bind(&not_oddball);
3467 __ push(a0); // Push argument.
3468 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
3472 void StringHelper::GenerateFlatOneByteStringEquals(
3473 MacroAssembler* masm, Register left, Register right, Register scratch1,
3474 Register scratch2, Register scratch3) {
3475 Register length = scratch1;
3478 Label strings_not_equal, check_zero_length;
3479 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
3480 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3481 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
3482 __ bind(&strings_not_equal);
3483 DCHECK(is_int16(NOT_EQUAL));
3484 __ Ret(USE_DELAY_SLOT);
3485 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
3487 // Check if the length is zero.
3488 Label compare_chars;
3489 __ bind(&check_zero_length);
3490 STATIC_ASSERT(kSmiTag == 0);
3491 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
3492 DCHECK(is_int16(EQUAL));
3493 __ Ret(USE_DELAY_SLOT);
3494 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3496 // Compare characters.
3497 __ bind(&compare_chars);
3499 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
3500 v0, &strings_not_equal);
3502 // Characters are equal.
3503 __ Ret(USE_DELAY_SLOT);
3504 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3508 void StringHelper::GenerateCompareFlatOneByteStrings(
3509 MacroAssembler* masm, Register left, Register right, Register scratch1,
3510 Register scratch2, Register scratch3, Register scratch4) {
3511 Label result_not_equal, compare_lengths;
3512 // Find minimum length and length difference.
3513 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
3514 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3515 __ Subu(scratch3, scratch1, Operand(scratch2));
3516 Register length_delta = scratch3;
3517 __ slt(scratch4, scratch2, scratch1);
3518 __ Movn(scratch1, scratch2, scratch4);
3519 Register min_length = scratch1;
3520 STATIC_ASSERT(kSmiTag == 0);
3521 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
3524 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3525 scratch4, v0, &result_not_equal);
3527 // Compare lengths - strings up to min-length are equal.
3528 __ bind(&compare_lengths);
3529 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3530 // Use length_delta as result if it's zero.
3531 __ mov(scratch2, length_delta);
3532 __ mov(scratch4, zero_reg);
3533 __ mov(v0, zero_reg);
3535 __ bind(&result_not_equal);
3536 // Conditionally update the result based either on length_delta or
3537 // the last comparison performed in the loop above.
3539 __ Branch(&ret, eq, scratch2, Operand(scratch4));
3540 __ li(v0, Operand(Smi::FromInt(GREATER)));
3541 __ Branch(&ret, gt, scratch2, Operand(scratch4));
3542 __ li(v0, Operand(Smi::FromInt(LESS)));
3548 void StringHelper::GenerateOneByteCharsCompareLoop(
3549 MacroAssembler* masm, Register left, Register right, Register length,
3550 Register scratch1, Register scratch2, Register scratch3,
3551 Label* chars_not_equal) {
3552 // Change index to run from -length to -1 by adding length to string
3553 // start. This means that loop ends when index reaches zero, which
3554 // doesn't need an additional compare.
3555 __ SmiUntag(length);
3556 __ Addu(scratch1, length,
3557 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3558 __ Addu(left, left, Operand(scratch1));
3559 __ Addu(right, right, Operand(scratch1));
3560 __ Subu(length, zero_reg, length);
3561 Register index = length; // index = -length;
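// Rough C-level sketch of the comparison loop below (illustrative only):
//   do {
//     if (left_end[index] != right_end[index]) goto chars_not_equal;
//   } while (++index != 0);
// where left_end/right_end point one past the last character of each string.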
3567 __ Addu(scratch3, left, index);
3568 __ lbu(scratch1, MemOperand(scratch3));
3569 __ Addu(scratch3, right, index);
3570 __ lbu(scratch2, MemOperand(scratch3));
3571 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
3572 __ Addu(index, index, 1);
3573 __ Branch(&loop, ne, index, Operand(zero_reg));
3577 void StringCompareStub::Generate(MacroAssembler* masm) {
3580 Counters* counters = isolate()->counters();
3582 // Stack frame on entry.
3583 // sp[0]: right string
3584 // sp[4]: left string
3585 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
3586 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
3589 __ Branch(&not_same, ne, a0, Operand(a1));
3590 STATIC_ASSERT(EQUAL == 0);
3591 STATIC_ASSERT(kSmiTag == 0);
3592 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3593 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
3598 // Check that both objects are sequential one-byte strings.
3599 __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
3601 // Compare flat one-byte strings natively. Remove arguments from stack first.
3602 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
3603 __ Addu(sp, sp, Operand(2 * kPointerSize));
3604 StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
3607 __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
3611 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3612 // ----------- S t a t e -------------
3615 // -- ra : return address
3616 // -----------------------------------
3618 // Load a2 with the allocation site. We stick an undefined dummy value here
3619 // and replace it with the real allocation site later when we instantiate this
3620 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3621 __ li(a2, handle(isolate()->heap()->undefined_value()));
3623 // Make sure that we actually patched the allocation site.
3624 if (FLAG_debug_code) {
3625 __ And(at, a2, Operand(kSmiTagMask));
3626 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
3627 __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
3628 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3629 __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
3632 // Tail call into the stub that handles binary operations with allocation sites.
3634 BinaryOpWithAllocationSiteStub stub(isolate(), state());
3635 __ TailCallStub(&stub);
3639 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3640 DCHECK(state() == CompareICState::SMI);
3643 __ JumpIfNotSmi(a2, &miss);
3645 if (GetCondition() == eq) {
3646 // For equality we do not care about the sign of the result.
3647 __ Ret(USE_DELAY_SLOT);
3648 __ Subu(v0, a0, a1);
3650 // Untag before subtracting to avoid handling overflow.
3653 __ Ret(USE_DELAY_SLOT);
3654 __ Subu(v0, a1, a0);
3662 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3663 DCHECK(state() == CompareICState::NUMBER);
3666 Label unordered, maybe_undefined1, maybe_undefined2;
3669 if (left() == CompareICState::SMI) {
3670 __ JumpIfNotSmi(a1, &miss);
3672 if (right() == CompareICState::SMI) {
3673 __ JumpIfNotSmi(a0, &miss);
3676 // Inlining the double comparison and falling back to the general compare
3677 // stub if NaN is involved.
3678 // Load left and right operand.
3679 Label done, left, left_smi, right_smi;
3680 __ JumpIfSmi(a0, &right_smi);
3681 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3683 __ Subu(a2, a0, Operand(kHeapObjectTag));
3684 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
3686 __ bind(&right_smi);
3687 __ SmiUntag(a2, a0); // Can't clobber a0 yet.
3688 FPURegister single_scratch = f6;
3689 __ mtc1(a2, single_scratch);
3690 __ cvt_d_w(f2, single_scratch);
3693 __ JumpIfSmi(a1, &left_smi);
3694 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3696 __ Subu(a2, a1, Operand(kHeapObjectTag));
3697 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
3700 __ SmiUntag(a2, a1); // Can't clobber a1 yet.
3701 single_scratch = f8;
3702 __ mtc1(a2, single_scratch);
3703 __ cvt_d_w(f0, single_scratch);
3707 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
3708 Label fpu_eq, fpu_lt;
3709 // Test if equal, and also handle the unordered/NaN case.
3710 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
3712 // Test if less (unordered case is already handled).
3713 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
3715 // Otherwise it's greater, so just fall through, and return.
3716 DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
3717 __ Ret(USE_DELAY_SLOT);
3718 __ li(v0, Operand(GREATER));
3721 __ Ret(USE_DELAY_SLOT);
3722 __ li(v0, Operand(EQUAL));
3725 __ Ret(USE_DELAY_SLOT);
3726 __ li(v0, Operand(LESS));
3728 __ bind(&unordered);
3729 __ bind(&generic_stub);
3730 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3731 CompareICState::GENERIC, CompareICState::GENERIC);
3732 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3734 __ bind(&maybe_undefined1);
3735 if (Token::IsOrderedRelationalCompareOp(op())) {
3736 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3737 __ Branch(&miss, ne, a0, Operand(at));
3738 __ JumpIfSmi(a1, &unordered);
3739 __ GetObjectType(a1, a2, a2);
3740 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
3744 __ bind(&maybe_undefined2);
3745 if (Token::IsOrderedRelationalCompareOp(op())) {
3746 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3747 __ Branch(&unordered, eq, a1, Operand(at));
3755 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3756 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3759 // Registers containing left and right operands respectively.
3761 Register right = a0;
3765 // Check that both operands are heap objects.
3766 __ JumpIfEitherSmi(left, right, &miss);
3768 // Check that both operands are internalized strings.
3769 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3770 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3771 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3772 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3773 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3774 __ Or(tmp1, tmp1, Operand(tmp2));
3775 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3776 __ Branch(&miss, ne, at, Operand(zero_reg));
3778 // Make sure a0 is non-zero. At this point input operands are
3779 // guaranteed to be non-zero.
3780 DCHECK(right.is(a0));
3781 STATIC_ASSERT(EQUAL == 0);
3782 STATIC_ASSERT(kSmiTag == 0);
3784 // Internalized strings are compared by identity.
3785 __ Ret(ne, left, Operand(right));
3786 DCHECK(is_int16(EQUAL));
3787 __ Ret(USE_DELAY_SLOT);
3788 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3795 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3796 DCHECK(state() == CompareICState::UNIQUE_NAME);
3797 DCHECK(GetCondition() == eq);
3800 // Registers containing left and right operands respectively.
3802 Register right = a0;
3806 // Check that both operands are heap objects.
3807 __ JumpIfEitherSmi(left, right, &miss);
3809 // Check that both operands are unique names. This leaves the instance
3810 // types loaded in tmp1 and tmp2.
3811 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3812 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3813 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3814 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3816 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
3817 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
3822 // Unique names are compared by identity.
3824 __ Branch(&done, ne, left, Operand(right));
3825 // Make sure a0 is non-zero. At this point input operands are
3826 // guaranteed to be non-zero.
3827 DCHECK(right.is(a0));
3828 STATIC_ASSERT(EQUAL == 0);
3829 STATIC_ASSERT(kSmiTag == 0);
3830 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3839 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3840 DCHECK(state() == CompareICState::STRING);
3843 bool equality = Token::IsEqualityOp(op());
3845 // Registers containing left and right operands respectively.
3847 Register right = a0;
3854 // Check that both operands are heap objects.
3855 __ JumpIfEitherSmi(left, right, &miss);
3857 // Check that both operands are strings. This leaves the instance
3858 // types loaded in tmp1 and tmp2.
3859 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3860 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3861 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3862 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3863 STATIC_ASSERT(kNotStringTag != 0);
3864 __ Or(tmp3, tmp1, tmp2);
3865 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
3866 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
3868 // Fast check for identical strings.
3869 Label left_ne_right;
3870 STATIC_ASSERT(EQUAL == 0);
3871 STATIC_ASSERT(kSmiTag == 0);
3872 __ Branch(&left_ne_right, ne, left, Operand(right));
3873 __ Ret(USE_DELAY_SLOT);
3874 __ mov(v0, zero_reg); // In the delay slot.
3875 __ bind(&left_ne_right);
3877 // Handle not identical strings.
3879 // Check that both strings are internalized strings. If they are, we're done
3880 // because we already know they are not identical. We know they are both strings.
3883 DCHECK(GetCondition() == eq);
3884 STATIC_ASSERT(kInternalizedTag == 0);
3885 __ Or(tmp3, tmp1, Operand(tmp2));
3886 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
3888 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
3889 // Make sure a0 is non-zero. At this point input operands are
3890 // guaranteed to be non-zero.
3891 DCHECK(right.is(a0));
3892 __ Ret(USE_DELAY_SLOT);
3893 __ mov(v0, a0); // In the delay slot.
3894 __ bind(&is_symbol);
3897 // Check that both strings are sequential one-byte.
3899 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
3902 // Compare flat one-byte strings. Returns when done.
3904 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
3907 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
3911 // Handle more complex cases in runtime.
3913 __ Push(left, right);
3915 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3917 __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
3925 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3926 DCHECK(state() == CompareICState::OBJECT);
3928 __ And(a2, a1, Operand(a0));
3929 __ JumpIfSmi(a2, &miss);
3931 __ GetObjectType(a0, a2, a2);
3932 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3933 __ GetObjectType(a1, a2, a2);
3934 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3936 DCHECK(GetCondition() == eq);
3937 __ Ret(USE_DELAY_SLOT);
3938 __ subu(v0, a0, a1);
3945 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3947 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
3949 __ JumpIfSmi(a2, &miss);
3950 __ GetWeakValue(t0, cell);
3951 __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
3952 __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
3953 __ Branch(&miss, ne, a2, Operand(t0));
3954 __ Branch(&miss, ne, a3, Operand(t0));
3956 __ Ret(USE_DELAY_SLOT);
3957 __ subu(v0, a0, a1);
3964 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3966 // Call the runtime system in a fresh internal frame.
3967 ExternalReference miss =
3968 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3969 FrameScope scope(masm, StackFrame::INTERNAL);
3971 __ Push(ra, a1, a0);
3972 __ li(t0, Operand(Smi::FromInt(op())));
3973 __ addiu(sp, sp, -kPointerSize);
3974 __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
3975 __ sw(t0, MemOperand(sp)); // In the delay slot.
3976 // Compute the entry point of the rewritten stub.
3977 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
3978 // Restore registers.
3985 void DirectCEntryStub::Generate(MacroAssembler* masm) {
3986 // Make room for arguments to fit the C calling convention. Most callers
3987 // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame,
3988 // so they handle restoring the stack and we don't have to do that here.
3989 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
3990 // kCArgsSlotsSize stack space after the call.
3991 __ Subu(sp, sp, Operand(kCArgsSlotsSize));
3992 // Place the return address on the stack, making the call
3993 // GC safe. The RegExp backend also relies on this.
3994 __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
3995 __ Call(t9); // Call the C++ function.
3996 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
3998 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3999 // In case of an error the return address may point to a memory area
4000 // filled with kZapValue by the GC.
4001 // Dereference the address and check for this.
4002 __ lw(t0, MemOperand(t9));
4003 __ Assert(ne, kReceivedInvalidReturnAddress, t0,
4004 Operand(reinterpret_cast<uint32_t>(kZapValue)));
4010 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4013 reinterpret_cast<intptr_t>(GetCode().location());
4014 __ Move(t9, target);
4015 __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
4020 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4024 Register properties,
4026 Register scratch0) {
4027 DCHECK(name->IsUniqueName());
4028 // If the names of the slots in the range from 1 to kProbes - 1 for the hash
4029 // value are not equal to the name, and the kProbes-th slot is not used (its
4030 // name is the undefined value), the hash table is guaranteed not to contain
4031 // the property. This holds even if some slots represent deleted properties
4032 // (their names are the hole value).
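// Illustrative sketch of the inlined probing below (not generated code):
//   for (i = 0; i < kInlinedProbes; i++) {
//     key = dict[(hash + i + i * i) & mask];
//     if (key == undefined) goto done;   // name cannot be in the table
//     if (key == name) goto miss;        // name is in the table
//     if (key != the_hole && !IsUniqueName(key)) goto miss;
//   }
//   // otherwise fall through to the full NameDictionaryLookupStub call below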
4033 for (int i = 0; i < kInlinedProbes; i++) {
4034 // scratch0 points to properties hash.
4035 // Compute the masked index: (hash + i + i * i) & mask.
4036 Register index = scratch0;
4037 // Capacity is smi 2^n.
4038 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
4039 __ Subu(index, index, Operand(1));
4040 __ And(index, index, Operand(
4041 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
4043 // Scale the index by multiplying by the entry size.
4044 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
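// index *= 3, computed below as index + (index << 1).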
4045 __ sll(at, index, 1);
4046 __ Addu(index, index, at);
4048 Register entity_name = scratch0;
4049 // Having undefined at this place means the name is not contained.
4050 DCHECK_EQ(kSmiTagSize, 1);
4051 Register tmp = properties;
4052 __ sll(scratch0, index, 1);
4053 __ Addu(tmp, properties, scratch0);
4054 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4056 DCHECK(!tmp.is(entity_name));
4057 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
4058 __ Branch(done, eq, entity_name, Operand(tmp));
4060 // Load the hole ready for use below:
4061 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
4063 // Stop if we find the property.
4064 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
4067 __ Branch(&good, eq, entity_name, Operand(tmp));
4069 // Check if the entry name is not a unique name.
4070 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4072 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4073 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
4076 // Restore the properties.
4078 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4081 const int spill_mask =
4082 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
4083 a2.bit() | a1.bit() | a0.bit() | v0.bit());
4085 __ MultiPush(spill_mask);
4086 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4087 __ li(a1, Operand(Handle<Name>(name)));
4088 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
4091 __ MultiPop(spill_mask);
4093 __ Branch(done, eq, at, Operand(zero_reg));
4094 __ Branch(miss, ne, at, Operand(zero_reg));
4098 // Probe the name dictionary in the |elements| register. Jump to the
4099 // |done| label if a property with the given name is found. Jump to
4100 // the |miss| label otherwise.
4101 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
4102 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4108 Register scratch2) {
4109 DCHECK(!elements.is(scratch1));
4110 DCHECK(!elements.is(scratch2));
4111 DCHECK(!name.is(scratch1));
4112 DCHECK(!name.is(scratch2));
4114 __ AssertName(name);
4116 // Compute the capacity mask.
4117 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
4118 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
4119 __ Subu(scratch1, scratch1, Operand(1));
4121 // Generate an unrolled loop that performs a few probes before
4122 // giving up. Measurements done on Gmail indicate that 2 probes
4123 // cover ~93% of loads from dictionaries.
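// Probe sequence sketch (illustrative): slot(i) = (hash + i + i * i) & mask,
// i.e. the probes examine offsets 0, 2, 6, ... from the initial bucket.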
4124 for (int i = 0; i < kInlinedProbes; i++) {
4125 // Compute the masked index: (hash + i + i * i) & mask.
4126 __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4128 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4129 // the hash in a separate instruction. The value hash + i + i * i is right
4130 // shifted in the srl below and then masked in the And instruction.
4131 DCHECK(NameDictionary::GetProbeOffset(i) <
4132 1 << (32 - Name::kHashFieldOffset));
4133 __ Addu(scratch2, scratch2, Operand(
4134 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4136 __ srl(scratch2, scratch2, Name::kHashShift);
4137 __ And(scratch2, scratch1, scratch2);
4139 // Scale the index by multiplying by the element size.
4140 DCHECK(NameDictionary::kEntrySize == 3);
4141 // scratch2 = scratch2 * 3.
4143 __ sll(at, scratch2, 1);
4144 __ Addu(scratch2, scratch2, at);
4146 // Check if the key is identical to the name.
4147 __ sll(at, scratch2, 2);
4148 __ Addu(scratch2, elements, at);
4149 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
4150 __ Branch(done, eq, name, Operand(at));
4153 const int spill_mask =
4154 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
4155 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
4156 ~(scratch1.bit() | scratch2.bit());
4158 __ MultiPush(spill_mask);
4160 DCHECK(!elements.is(a1));
4162 __ Move(a0, elements);
4164 __ Move(a0, elements);
4167 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4169 __ mov(scratch2, a2);
4171 __ MultiPop(spill_mask);
4173 __ Branch(done, ne, at, Operand(zero_reg));
4174 __ Branch(miss, eq, at, Operand(zero_reg));
4178 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4179 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4180 // we cannot call anything that could cause a GC from this stub.
4182 // result: NameDictionary to probe
4184 // dictionary: NameDictionary to probe.
4185 // index: will hold an index of entry if lookup is successful.
4186 // might alias with result_.
4188 // result_ is zero if lookup failed, non zero otherwise.
4190 Register result = v0;
4191 Register dictionary = a0;
4193 Register index = a2;
4196 Register undefined = t1;
4197 Register entry_key = t2;
4199 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4201 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
4202 __ sra(mask, mask, kSmiTagSize);
4203 __ Subu(mask, mask, Operand(1));
4205 __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4207 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
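// The loop below continues the probe sequence from kInlinedProbes up to
// kTotalProbes - 1, using the same (hash + i + i * i) & mask schedule as the
// inlined probes above.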
4209 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4210 // Compute the masked index: (hash + i + i * i) & mask.
4211 // Capacity is smi 2^n.
4213 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4214 // the hash in a separate instruction. The value hash + i + i * i is right
4215 // shifted in the srl below and then masked in the And instruction.
4216 DCHECK(NameDictionary::GetProbeOffset(i) <
4217 1 << (32 - Name::kHashFieldOffset));
4218 __ Addu(index, hash, Operand(
4219 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4221 __ mov(index, hash);
4223 __ srl(index, index, Name::kHashShift);
4224 __ And(index, mask, index);
4226 // Scale the index by multiplying by the entry size.
4227 DCHECK(NameDictionary::kEntrySize == 3);
4230 __ sll(index, index, 1);
4231 __ Addu(index, index, at);
4234 DCHECK_EQ(kSmiTagSize, 1);
4235 __ sll(index, index, 2);
4236 __ Addu(index, index, dictionary);
4237 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
4239 // Having undefined at this place means the name is not contained.
4240 __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
4242 // Stop if we find the property.
4243 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
4245 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
4246 // Check if the entry name is not a unique name.
4247 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4249 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4250 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
4254 __ bind(&maybe_in_dictionary);
4255 // If we are doing a negative lookup, then probing failure should be
4256 // treated as a lookup success. For a positive lookup, probing failure
4257 // should be treated as a lookup failure.
4258 if (mode() == POSITIVE_LOOKUP) {
4259 __ Ret(USE_DELAY_SLOT);
4260 __ mov(result, zero_reg);
4263 __ bind(&in_dictionary);
4264 __ Ret(USE_DELAY_SLOT);
4267 __ bind(&not_in_dictionary);
4268 __ Ret(USE_DELAY_SLOT);
4269 __ mov(result, zero_reg);
4273 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4275 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4277 // Hydrogen code stubs need stub2 at snapshot time.
4278 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4283 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
4284 // the value has just been written into the object; now this stub makes sure
4285 // we keep the GC informed. The word in the object where the value has been
4286 // written is in the address register.
4287 void RecordWriteStub::Generate(MacroAssembler* masm) {
4288 Label skip_to_incremental_noncompacting;
4289 Label skip_to_incremental_compacting;
4291 // The first two branch+nop instructions are generated with labels so as to
4292 // get the offset fixed up correctly by the bind(Label*) call. We patch it
4293 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
4294 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
4295 // incremental heap marking.
4296 // See RecordWriteStub::Patch for details.
4297 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
4299 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
4302 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4303 __ RememberedSetHelper(object(),
4306 save_fp_regs_mode(),
4307 MacroAssembler::kReturnAtEnd);
4311 __ bind(&skip_to_incremental_noncompacting);
4312 GenerateIncremental(masm, INCREMENTAL);
4314 __ bind(&skip_to_incremental_compacting);
4315 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4317 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4318 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4320 PatchBranchIntoNop(masm, 0);
4321 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
4325 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4328 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4329 Label dont_need_remembered_set;
4331 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4332 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4334 &dont_need_remembered_set);
4336 __ CheckPageFlag(regs_.object(),
4338 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4340 &dont_need_remembered_set);
4342 // First notify the incremental marker if necessary, then update the
4344 CheckNeedsToInformIncrementalMarker(
4345 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4346 InformIncrementalMarker(masm);
4347 regs_.Restore(masm);
4348 __ RememberedSetHelper(object(),
4351 save_fp_regs_mode(),
4352 MacroAssembler::kReturnAtEnd);
4354 __ bind(&dont_need_remembered_set);
4357 CheckNeedsToInformIncrementalMarker(
4358 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4359 InformIncrementalMarker(masm);
4360 regs_.Restore(masm);
4365 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4366 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
4367 int argument_count = 3;
4368 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4370 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4371 DCHECK(!address.is(regs_.object()));
4372 DCHECK(!address.is(a0));
4373 __ Move(address, regs_.address());
4374 __ Move(a0, regs_.object());
4375 __ Move(a1, address);
4376 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4378 AllowExternalCallThatCantCauseGC scope(masm);
4380 ExternalReference::incremental_marking_record_write_function(isolate()),
4382 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
4386 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4387 MacroAssembler* masm,
4388 OnNoNeedToInformIncrementalMarker on_no_need,
4391 Label need_incremental;
4392 Label need_incremental_pop_scratch;
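// Decrement the page's write barrier counter; once it drops below zero we
// must inform the incremental marker (see &need_incremental below).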
4394 __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4395 __ lw(regs_.scratch1(),
4396 MemOperand(regs_.scratch0(),
4397 MemoryChunk::kWriteBarrierCounterOffset));
4398 __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
4399 __ sw(regs_.scratch1(),
4400 MemOperand(regs_.scratch0(),
4401 MemoryChunk::kWriteBarrierCounterOffset));
4402 __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
4404 // Let's look at the color of the object: If it is not black we don't have
4405 // to inform the incremental marker.
4406 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4408 regs_.Restore(masm);
4409 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4410 __ RememberedSetHelper(object(),
4413 save_fp_regs_mode(),
4414 MacroAssembler::kReturnAtEnd);
4421 // Get the value from the slot.
4422 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4424 if (mode == INCREMENTAL_COMPACTION) {
4425 Label ensure_not_white;
4427 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4428 regs_.scratch1(), // Scratch.
4429 MemoryChunk::kEvacuationCandidateMask,
4433 __ CheckPageFlag(regs_.object(),
4434 regs_.scratch1(), // Scratch.
4435 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4439 __ bind(&ensure_not_white);
4442 // We need extra registers for this, so we push the object and the address
4443 // register temporarily.
4444 __ Push(regs_.object(), regs_.address());
4445 __ EnsureNotWhite(regs_.scratch0(), // The value.
4446 regs_.scratch1(), // Scratch.
4447 regs_.object(), // Scratch.
4448 regs_.address(), // Scratch.
4449 &need_incremental_pop_scratch);
4450 __ Pop(regs_.object(), regs_.address());
4452 regs_.Restore(masm);
4453 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4454 __ RememberedSetHelper(object(),
4457 save_fp_regs_mode(),
4458 MacroAssembler::kReturnAtEnd);
4463 __ bind(&need_incremental_pop_scratch);
4464 __ Pop(regs_.object(), regs_.address());
4466 __ bind(&need_incremental);
4468 // Fall through when we need to inform the incremental marker.
4472 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4473 // ----------- S t a t e -------------
4474 // -- a0 : element value to store
4475 // -- a3 : element index as smi
4476 // -- sp[0] : array literal index in function as smi
4477 // -- sp[4] : array literal
4478 // clobbers a1, a2, t0
4479 // -----------------------------------
4482 Label double_elements;
4484 Label slow_elements;
4485 Label fast_elements;
4487 // Get array literal index, array literal and its map.
4488 __ lw(t0, MemOperand(sp, 0 * kPointerSize));
4489 __ lw(a1, MemOperand(sp, 1 * kPointerSize));
4490 __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
4492 __ CheckFastElements(a2, t1, &double_elements);
4493 // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
4494 __ JumpIfSmi(a0, &smi_element);
4495 __ CheckFastSmiElements(a2, t1, &fast_elements);
4497 // Store into the array literal requires an elements transition. Call into the runtime.
4499 __ bind(&slow_elements);
4501 __ Push(a1, a3, a0);
4502 __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4503 __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
4505 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4507 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4508 __ bind(&fast_elements);
4509 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4510 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4511 __ Addu(t2, t1, t2);
4512 __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4513 __ sw(a0, MemOperand(t2, 0));
4514 // Update the write barrier for the array store.
4515 __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
4516 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4517 __ Ret(USE_DELAY_SLOT);
4520 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4521 // and value is Smi.
4522 __ bind(&smi_element);
4523 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4524 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4525 __ Addu(t2, t1, t2);
4526 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
4527 __ Ret(USE_DELAY_SLOT);
4530 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
4531 __ bind(&double_elements);
4532 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4533 __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
4534 __ Ret(USE_DELAY_SLOT);
4539 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4540 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4541 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4542 int parameter_count_offset =
4543 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4544 __ lw(a1, MemOperand(fp, parameter_count_offset));
4545 if (function_mode() == JS_FUNCTION_STUB_MODE) {
4546 __ Addu(a1, a1, Operand(1));
4548 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
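// Drop the stack parameters: a1 holds the (possibly incremented) parameter
// count; scale it to bytes and add it to sp in the Ret delay slot below.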
4549 __ sll(a1, a1, kPointerSizeLog2);
4550 __ Ret(USE_DELAY_SLOT);
4551 __ Addu(sp, sp, a1);
4555 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4556 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4557 VectorRawLoadStub stub(isolate(), state());
4558 stub.GenerateForTrampoline(masm);
4562 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4563 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4564 VectorRawKeyedLoadStub stub(isolate());
4565 stub.GenerateForTrampoline(masm);
4569 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
4570 EmitLoadTypeFeedbackVector(masm, a2);
4571 CallICStub stub(isolate(), state());
4572 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4576 void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
4577 EmitLoadTypeFeedbackVector(masm, a2);
4578 CallIC_ArrayStub stub(isolate(), state());
4579 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4583 void VectorRawLoadStub::Generate(MacroAssembler* masm) {
4584 GenerateImpl(masm, false);
4588 void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
4589 GenerateImpl(masm, true);
4593 static void HandleArrayCases(MacroAssembler* masm, Register receiver,
4594 Register key, Register vector, Register slot,
4595 Register feedback, Register scratch1,
4596 Register scratch2, Register scratch3,
4597 bool is_polymorphic, Label* miss) {
4598 // feedback initially contains the feedback array
4599 Label next_loop, prepare_next;
4600 Label load_smi_map, compare_map;
4601 Label start_polymorphic;
4603 Register receiver_map = scratch1;
4604 Register cached_map = scratch2;
4606 // Receiver might not be a heap object.
4607 __ JumpIfSmi(receiver, &load_smi_map);
4608 __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
4609 __ bind(&compare_map);
4611 FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
4612 __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
4613 __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
4614 // Found; now call the handler.
4615 Register handler = feedback;
4616 __ lw(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
4617 __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
4621 Register length = scratch3;
4622 __ bind(&start_polymorphic);
4623 __ lw(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
4624 if (!is_polymorphic) {
4625 // If the IC could be monomorphic we have to make sure we don't go past the
4626 // end of the feedback array.
4627 __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
4630 Register too_far = length;
4631 Register pointer_reg = feedback;
4633 // +-----+------+------+-----+-----+ ... ----+
4634 // | map | len | wm0 | h0 | wm1 | hN |
4635 // +-----+------+------+-----+-----+ ... ----+
4639 // pointer_reg too_far
4640 // aka feedback scratch3
4641 // also need receiver_map (aka scratch1)
4642 // use cached_map (scratch2) to look in the weak map values.
4643 __ sll(at, length, kPointerSizeLog2 - kSmiTagSize);
4644 __ Addu(too_far, feedback, Operand(at));
4645 __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4646 __ Addu(pointer_reg, feedback,
4647 Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
4649 __ bind(&next_loop);
4650 __ lw(cached_map, MemOperand(pointer_reg));
4651 __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
4652 __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
4653 __ lw(handler, MemOperand(pointer_reg, kPointerSize));
4654 __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
4657 __ bind(&prepare_next);
4658 __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
4659 __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
4661 // We exhausted our array of map handler pairs.
4664 __ bind(&load_smi_map);
4665 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
4666 __ jmp(&compare_map);
4670 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
4671 Register key, Register vector, Register slot,
4672 Register weak_cell, Register scratch,
4674 // feedback initially contains the feedback array
4675 Label compare_smi_map;
4676 Register receiver_map = scratch;
4677 Register cached_map = weak_cell;
4679 // Move the weak map into the weak_cell register.
4680 __ lw(cached_map, FieldMemOperand(weak_cell, WeakCell::kValueOffset));
4682 // Receiver might not be a heap object.
4683 __ JumpIfSmi(receiver, &compare_smi_map);
4684 __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
4685 __ Branch(miss, ne, cached_map, Operand(receiver_map));
4687 Register handler = weak_cell;
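// The handler code object is stored in the vector slot right after the one
// holding the weak cell, hence the extra kPointerSize in the load below.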
4688 __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
4689 __ Addu(handler, vector, Operand(at));
4691 FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
4692 __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
4695 // In microbenchmarks, it made sense to unroll this code so that the call to
4696 // the handler is duplicated for a HeapObject receiver and a Smi receiver.
4697 __ bind(&compare_smi_map);
4698 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4699 __ Branch(miss, ne, at, Operand(weak_cell));
4700 __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
4701 __ Addu(handler, vector, Operand(at));
4703 FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
4704 __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
4709 void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4710 Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // a1
4711 Register name = VectorLoadICDescriptor::NameRegister(); // a2
4712 Register vector = VectorLoadICDescriptor::VectorRegister(); // a3
4713 Register slot = VectorLoadICDescriptor::SlotRegister(); // a0
4714 Register feedback = t0;
4715 Register scratch1 = t1;
4717 __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
4718 __ Addu(feedback, vector, Operand(at));
4719 __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
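// The feedback slot now holds either a WeakCell (monomorphic case), a
// FixedArray (polymorphic case), or the megamorphic sentinel; the checks
// below handle these possibilities in that order.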
4721 // Is it a weak cell?
4723 Label not_array, smi_key, key_okay, miss;
4724 __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
4725 __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
4726 __ Branch(&try_array, ne, at, Operand(scratch1));
4727 HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback, scratch1,
4730 // Is it a fixed array?
4731 __ bind(&try_array);
4732 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4733 __ Branch(&not_array, ne, at, Operand(scratch1));
4734 HandleArrayCases(masm, receiver, name, vector, slot, feedback, scratch1, t4,
4737 __ bind(&not_array);
4738 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
4739 __ Branch(&miss, ne, at, Operand(feedback));
4740 Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
4741 Code::ComputeHandlerFlags(Code::LOAD_IC));
4742 masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
4743 false, receiver, name, feedback,
4747 LoadIC::GenerateMiss(masm);
4751 void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
4752 GenerateImpl(masm, false);
4756 void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
4757 GenerateImpl(masm, true);
4761 void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4762 Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // a1
4763 Register key = VectorLoadICDescriptor::NameRegister(); // a2
4764 Register vector = VectorLoadICDescriptor::VectorRegister(); // a3
4765 Register slot = VectorLoadICDescriptor::SlotRegister(); // a0
4766 Register feedback = t0;
4767 Register scratch1 = t1;
4769 __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
4770 __ Addu(feedback, vector, Operand(at));
4771 __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
4773 // Is it a weak cell?
4775 Label not_array, smi_key, key_okay, miss;
4776 __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
4777 __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
4778 __ Branch(&try_array, ne, at, Operand(scratch1));
4779 __ JumpIfNotSmi(key, &miss);
4780 HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, scratch1,
4783 __ bind(&try_array);
4784 // Is it a fixed array?
4785 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4786 __ Branch(&not_array, ne, at, Operand(scratch1));
4787 // We have a polymorphic element handler.
4788 __ JumpIfNotSmi(key, &miss);
4790 Label polymorphic, try_poly_name;
4791 __ bind(&polymorphic);
4792 HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, t4,
4795 __ bind(&not_array);
4797 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
4798 __ Branch(&try_poly_name, ne, at, Operand(feedback));
4799 Handle<Code> megamorphic_stub =
4800 KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
4801 __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
4803 __ bind(&try_poly_name);
4804 // We might have a name in feedback, and a fixed array in the next slot.
4805 __ Branch(&miss, ne, key, Operand(feedback));
4806 // If the name comparison succeeded, we know we have a fixed array with
4807 // at least one map/handler pair.
4808 __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
4809 __ Addu(feedback, vector, Operand(at));
4811 FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
4812 HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, t4,
4816 KeyedLoadIC::GenerateMiss(masm);
4820 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4821 if (masm->isolate()->function_entry_hook() != NULL) {
4822 ProfileEntryHookStub stub(masm->isolate());
4830 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4831 // The entry hook is a "push ra" instruction, followed by a call.
4832 // Note: on MIPS, "push" is two instructions.
4833 const int32_t kReturnAddressDistanceFromFunctionStart =
4834 Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
4836 // This should contain all kJSCallerSaved registers.
4837 const RegList kSavedRegs =
4838 kJSCallerSaved | // Caller saved registers.
4839 s5.bit(); // Saved stack pointer.
4841 // We also save ra, so the count here is one higher than the mask indicates.
4842 const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
4844 // Save all caller-save registers as this may be called from anywhere.
4845 __ MultiPush(kSavedRegs | ra.bit());
4847 // Compute the function's address for the first argument.
4848 __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
4850 // The caller's return address is above the saved temporaries.
4851 // Grab that for the second argument to the hook.
4852 __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
4854 // Align the stack if necessary.
4855 int frame_alignment = masm->ActivationFrameAlignment();
4856 if (frame_alignment > kPointerSize) {
4858 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4859 __ And(sp, sp, Operand(-frame_alignment));
4861 __ Subu(sp, sp, kCArgsSlotsSize);
4862 #if defined(V8_HOST_ARCH_MIPS)
4863 int32_t entry_hook =
4864 reinterpret_cast<int32_t>(isolate()->function_entry_hook());
4865 __ li(t9, Operand(entry_hook));
4867 // Under the simulator we need to indirect the entry hook through a
4868 // trampoline function at a known address.
4869 // It additionally takes an isolate as a third parameter.
4870 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4872 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4873 __ li(t9, Operand(ExternalReference(&dispatcher,
4874 ExternalReference::BUILTIN_CALL,
4877 // Call the C function through t9 to conform to the ABI for PIC.
4880 // Restore the stack pointer if needed.
4881 if (frame_alignment > kPointerSize) {
4884 __ Addu(sp, sp, kCArgsSlotsSize);
4887 // Also pop ra to get Ret(0).
4888 __ MultiPop(kSavedRegs | ra.bit());
4894 static void CreateArrayDispatch(MacroAssembler* masm,
4895 AllocationSiteOverrideMode mode) {
4896 if (mode == DISABLE_ALLOCATION_SITES) {
4897 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4898 __ TailCallStub(&stub);
4899 } else if (mode == DONT_OVERRIDE) {
4900 int last_index = GetSequenceIndexFromFastElementsKind(
4901 TERMINAL_FAST_ELEMENTS_KIND);
4902 for (int i = 0; i <= last_index; ++i) {
4903 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4904 T stub(masm->isolate(), kind);
4905 __ TailCallStub(&stub, eq, a3, Operand(kind));
4908 // If we reached this point there is a problem.
4909 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4916 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4917 AllocationSiteOverrideMode mode) {
4918 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4919 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4920 // a0 - number of arguments
4921 // a1 - constructor?
4922 // sp[0] - last argument
4923 Label normal_sequence;
4924 if (mode == DONT_OVERRIDE) {
4925 DCHECK(FAST_SMI_ELEMENTS == 0);
4926 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
4927 DCHECK(FAST_ELEMENTS == 2);
4928 DCHECK(FAST_HOLEY_ELEMENTS == 3);
4929 DCHECK(FAST_DOUBLE_ELEMENTS == 4);
4930 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4932 // Is the low bit set? If so, we are holey and that is good.
4933 __ And(at, a3, Operand(1));
4934 __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
4937 // look at the first argument
4938 __ lw(t1, MemOperand(sp, 0));
4939 __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
4941 if (mode == DISABLE_ALLOCATION_SITES) {
4942 ElementsKind initial = GetInitialFastElementsKind();
4943 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4945 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4947 DISABLE_ALLOCATION_SITES);
4948 __ TailCallStub(&stub_holey);
4950 __ bind(&normal_sequence);
4951 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4953 DISABLE_ALLOCATION_SITES);
4954 __ TailCallStub(&stub);
4955 } else if (mode == DONT_OVERRIDE) {
4956 // We are going to create a holey array, but our kind is non-holey.
4957 // Fix kind and retry (only if we have an allocation site in the slot).
4958 __ Addu(a3, a3, Operand(1));
4960 if (FLAG_debug_code) {
4961 __ lw(t1, FieldMemOperand(a2, 0));
4962 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
4963 __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
4966 // Save the resulting elements kind in type info. We can't just store a3
4967 // in the AllocationSite::transition_info field because elements kind is
4968 // restricted to a portion of the field...upper bits need to be left alone.
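// Illustrative note: the kind recorded here is packed (checked above), so
// adding the smi-tagged packed-to-holey delta only changes the
// ElementsKindBits and leaves the upper bits of transition_info untouched.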
4969 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4970 __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4971 __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
4972 __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4975 __ bind(&normal_sequence);
4976 int last_index = GetSequenceIndexFromFastElementsKind(
4977 TERMINAL_FAST_ELEMENTS_KIND);
4978 for (int i = 0; i <= last_index; ++i) {
4979 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4980 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4981 __ TailCallStub(&stub, eq, a3, Operand(kind));
4984 // If we reached this point there is a problem.
4985 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4993 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4994 int to_index = GetSequenceIndexFromFastElementsKind(
4995 TERMINAL_FAST_ELEMENTS_KIND);
4996 for (int i = 0; i <= to_index; ++i) {
4997 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4998 T stub(isolate, kind);
5000 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
5001 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
5008 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
5009 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5011 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5013 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5018 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
5020 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
5021 for (int i = 0; i < 2; i++) {
5022 // For internal arrays we only need a few things.
5023 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
5025 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
5027 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
5033 void ArrayConstructorStub::GenerateDispatchToArrayStub(
5034 MacroAssembler* masm,
5035 AllocationSiteOverrideMode mode) {
5036 if (argument_count() == ANY) {
5037 Label not_zero_case, not_one_case;
5039 __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
5040 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5042 __ bind(&not_zero_case);
5043 __ Branch(&not_one_case, gt, a0, Operand(1));
5044 CreateArrayDispatchOneArgument(masm, mode);
5046 __ bind(&not_one_case);
5047 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5048 } else if (argument_count() == NONE) {
5049 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5050 } else if (argument_count() == ONE) {
5051 CreateArrayDispatchOneArgument(masm, mode);
5052 } else if (argument_count() == MORE_THAN_ONE) {
5053 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5060 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5061 // ----------- S t a t e -------------
5062 // -- a0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
5063 // -- a1 : constructor
5064 // -- a2 : AllocationSite or undefined
5065 // -- a3 : Original constructor
5066 // -- sp[0] : last argument
5067 // -----------------------------------
5069 if (FLAG_debug_code) {
5070 // The array construct code is only set for the global and natives
5071 // builtin Array functions which always have maps.
5073 // Initial map for the builtin Array function should be a map.
5074 __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5075 // Will both indicate a NULL and a Smi.
5077 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
5078 at, Operand(zero_reg));
5079 __ GetObjectType(t0, t0, t1);
5080 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
5081 t1, Operand(MAP_TYPE));
5083 // We should either have undefined in a2 or a valid AllocationSite
5084 __ AssertUndefinedOrAllocationSite(a2, t0);
5088 __ Branch(&subclassing, ne, a1, Operand(a3));
5091 // Get the elements kind and case on that.
5092 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5093 __ Branch(&no_info, eq, a2, Operand(at));
5095 __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5097 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5098 __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
5099 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5102 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5105 __ bind(&subclassing);
5110 switch (argument_count()) {
5113 __ li(at, Operand(2));
5114 __ addu(a0, a0, at);
5117 __ li(a0, Operand(2));
5120 __ li(a0, Operand(3));
5124 __ JumpToExternalReference(
5125 ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {

  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0, lo, a0, Operand(1));

  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
  __ TailCallStub(&stubN, hi, a0, Operand(1));

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array;
    // look at the first argument.
    __ lw(at, MemOperand(sp, 0));

    InternalArraySingleArgumentConstructorStub
        stub1_holey(isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : argc
  //  -- a1 : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ SmiTst(a3, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
              at, Operand(zero_reg));
    __ GetObjectType(a3, a3, t0);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
              t0, Operand(MAP_TYPE));
  }

  // Figure out the right elements kind.
  __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));

  // Load the map's "bit field 2" into a3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(a3);
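
  // In debug builds, verify that the decoded elements kind is one of the two
  // kinds the internal array stubs are generated for.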
  if (FLAG_debug_code) {
    Label done;
    __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
    __ Assert(
        eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
        a3, Operand(FAST_HOLEY_ELEMENTS));
    __ bind(&done);
  }

  Label fast_elements_case;
  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}
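

// Byte distance between two external references; used below to address the
// handle scope limit/level fields relative to the next_address base register.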
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}


// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
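//
// In outline: the code below opens a new HandleScope, invokes the callback
// through the DirectCEntry stub (via a profiling thunk when the profiler is
// active), closes the scope again, leaves the exit frame and finally checks
// for a scheduled exception before returning.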
static void CallApiFunctionAndReturn(
    MacroAssembler* masm, Register function_address,
    ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
    MemOperand return_value_operand, MemOperand* context_restore_operand) {
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);

  DCHECK(function_address.is(a1) || function_address.is(a2));
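
  // If the profiler is active, route the call through the invocation thunk in
  // thunk_ref so the profiler sees the callback; otherwise call the callback
  // address directly.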
  Label profiler_disabled;
  Label end_profiler_check;
  __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
  __ lb(t9, MemOperand(t9, 0));
  __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));

  // Additional parameter is the address of the actual callback.
  __ li(t9, Operand(thunk_ref));
  __ jmp(&end_profiler_check);

  __ bind(&profiler_disabled);
  __ mov(t9, function_address);
  __ bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
  __ li(s3, Operand(next_address));
  __ lw(s0, MemOperand(s3, kNextOffset));
  __ lw(s1, MemOperand(s3, kLimitOffset));
  __ lw(s2, MemOperand(s3, kLevelOffset));
  __ Addu(s2, s2, Operand(1));
  __ sw(s2, MemOperand(s3, kLevelOffset));
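
  // When timer-event logging is enabled, wrap the callback invocation in
  // enter/leave events so the time spent in external code shows up in the log.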
  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, a0);
    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, t9);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, a0);
    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }
  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ lw(v0, return_value_operand);
  __ bind(&return_value_loaded);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ sw(s0, MemOperand(s3, kNextOffset));
  if (__ emit_debug_code()) {
    __ lw(a1, MemOperand(s3, kLevelOffset));
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
  }
  __ Subu(s2, s2, Operand(1));
  __ sw(s2, MemOperand(s3, kLevelOffset));
  __ lw(at, MemOperand(s3, kLimitOffset));
  __ Branch(&delete_allocated_handles, ne, s1, Operand(at));
  // Leave the API exit frame.
  __ bind(&leave_exit_frame);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ lw(cp, *context_restore_operand);
  }
  if (stack_space_offset != kInvalidStackOffset) {
    // ExitFrame contains four MIPS argument slots after the DirectCEntryStub
    // call, so this must be accounted for.
    __ lw(s0, MemOperand(sp, stack_space_offset + kCArgsSlotsSize));
  } else {
    __ li(s0, Operand(stack_space));
  }
  __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
                    stack_space_offset != kInvalidStackOffset);
  // Check if the function scheduled an exception.
  __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
  __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
  __ lw(t1, MemOperand(at));
  __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));

  __ Ret();

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ sw(s1, MemOperand(s3, kLimitOffset));
  __ mov(s0, v0);
  __ PrepareCallCFunction(1, s1);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ mov(v0, s0);
  __ jmp(&leave_exit_frame);
}
static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                      const ParameterCount& argc,
                                      bool return_first_arg,
                                      bool call_data_undefined) {
  // ----------- S t a t e -------------
  //  -- a0 : callee
  //  -- t0 : call_data
  //  -- a2 : holder
  //  -- a1 : api_function_address
  //  -- a3 : number of arguments if argc is a register
  //  -- cp : context
  //  -- sp[0] : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 4] : first argument
  //  -- sp[argc * 4] : receiver
  // -----------------------------------

  Register callee = a0;
  Register call_data = t0;
  Register holder = a2;
  Register api_function_address = a1;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kArgsLength == 7);

  DCHECK(argc.is_immediate() || a3.is(argc.reg()));
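
  // The pushes below build the implicit FunctionCallbackArguments array on the
  // stack, from kContextSaveIndex down to kHolderIndex, matching the indices
  // asserted above.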
  // Save context, callee and call data.
  __ Push(context, callee, call_data);
  // Load context from callee.
  __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));

  Register scratch = call_data;
  if (!call_data_undefined) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // Push return value and default return value.
  __ Push(scratch, scratch);
  __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
  // Push isolate and holder.
  __ Push(scratch, holder);
  // Prepare arguments.
  __ mov(scratch, sp);

  // Allocate the v8::Arguments structure in the arguments' space since
  // it's not controlled by GC.
  const int kApiStackSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
  // a0 = FunctionCallbackInfo&
  // Arguments is after the return address.
  __ Addu(a0, sp, Operand(1 * kPointerSize));
  // FunctionCallbackInfo::implicit_args_
  __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
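
  // Fill in FunctionCallbackInfo::values_, length_ and is_construct_call_.
  // The layout is computed differently depending on whether argc is a
  // compile-time immediate or is passed in a register.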
  if (argc.is_immediate()) {
    // FunctionCallbackInfo::values_
    __ Addu(at, scratch,
            Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
    __ sw(at, MemOperand(a0, 1 * kPointerSize));
    // FunctionCallbackInfo::length_ = argc
    __ li(at, Operand(argc.immediate()));
    __ sw(at, MemOperand(a0, 2 * kPointerSize));
    // FunctionCallbackInfo::is_construct_call_ = 0
    __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
  } else {
    // FunctionCallbackInfo::values_
    __ sll(at, argc.reg(), kPointerSizeLog2);
    __ Addu(at, at, scratch);
    __ Addu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
    __ sw(at, MemOperand(a0, 1 * kPointerSize));
    // FunctionCallbackInfo::length_ = argc
    __ sw(argc.reg(), MemOperand(a0, 2 * kPointerSize));
    // FunctionCallbackInfo::is_construct_call_
    __ Addu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
    __ sll(at, argc.reg(), kPointerSizeLog2);
    __ sw(at, MemOperand(a0, 3 * kPointerSize));
  }
  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first JS argument.
  int return_value_offset = 0;
  if (return_first_arg) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
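
  // Two unwind modes: when argc is an immediate, the number of stack slots to
  // drop is known statically (stack_space); otherwise it is read back from the
  // frame at stack_space_offset inside CallApiFunctionAndReturn.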
  int stack_space = 0;
  int32_t stack_space_offset = 4 * kPointerSize;
  if (argc.is_immediate()) {
    stack_space = argc.immediate() + FCA::kArgsLength + 1;
    stack_space_offset = kInvalidStackOffset;
  }
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_offset, return_value_operand,
                           &context_restore_operand);
}
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
  bool call_data_undefined = this->call_data_undefined();
  CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
                            call_data_undefined);
}
void CallApiAccessorStub::Generate(MacroAssembler* masm) {
  bool is_store = this->is_store();
  int argc = this->argc();
  bool call_data_undefined = this->call_data_undefined();
  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
                            call_data_undefined);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- sp[0] : name
  //  -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
  //  -- ...
  //  -- a2 : api_function_address
  // -----------------------------------

  Register api_function_address = ApiGetterDescriptor::function_address();
  DCHECK(api_function_address.is(a2));
  __ mov(a0, sp);  // a0 = Handle<Name>
  __ Addu(a1, a0, Operand(1 * kPointerSize));  // a1 = PCA

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create PropertyAccessorInfo instance on the stack above the exit frame
  // with a1 (internal::Object** args_) as the data.
  __ sw(a1, MemOperand(sp, 1 * kPointerSize));
  __ Addu(a1, sp, Operand(1 * kPointerSize));  // a1 = AccessorInfo&

  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, kInvalidStackOffset,
                           MemOperand(fp, 6 * kPointerSize), NULL);
}
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS