// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS

#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

static void InitializeArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
  }
}

static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kInternalArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
  }
}

void ArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}


void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}

#define __ ACCESS_MASM(masm)


static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);

void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetEnvironmentParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK(param_count == 0 ||
           a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
    // Push arguments, adjust sp.
    __ Subu(sp, sp, Operand(param_count * kPointerSize));
    for (int i = 0; i < param_count; ++i) {
      // Store argument to stack.
      __ sw(descriptor.GetEnvironmentParameterRegister(i),
            MemOperand(sp, (param_count - 1 - i) * kPointerSize));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}
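

// Illustrative note (not generated code): the loop above stores the
// descriptor registers so that environment parameter 0 ends up deepest on
// the stack. A host-side sketch of the offset computation, assuming
// kPointerSize == 4 as on 32-bit MIPS:
//
//   int OffsetOfParam(int i, int param_count) {
//     return (param_count - 1 - i) * 4;  // Parameter 0 gets the highest
//   }                                    // offset, i.e. is pushed deepest.
//
// For param_count == 3 the stores land at sp+8, sp+4 and sp+0 for
// parameters 0, 1 and 2 respectively.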


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;

  Register scratch =
      GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch2 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch3 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
  DoubleRegister double_scratch = kLithiumScratchDouble;

  __ Push(scratch, scratch2, scratch3);

  if (!skip_fastpath()) {
    // Load double input.
    __ ldc1(double_scratch, MemOperand(input_reg, double_offset));

    // Clear cumulative exception flags and save the FCSR.
    __ cfc1(scratch2, FCSR);
    __ ctc1(zero_reg, FCSR);

    // Try a conversion to a signed integer.
    __ Trunc_w_d(double_scratch, double_scratch);
    // Move the converted value into the result register.
    __ mfc1(scratch3, double_scratch);

    // Retrieve and restore the FCSR.
    __ cfc1(scratch, FCSR);
    __ ctc1(scratch2, FCSR);

    // Check for overflow and NaNs.
    __ And(scratch,
           scratch,
           kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
               | kFCSRInvalidOpFlagMask);
    // If we had no exceptions then set result_reg and we are done.
    Label error;
    __ Branch(&error, ne, scratch, Operand(zero_reg));
    __ Move(result_reg, scratch3);
    __ Branch(&done);
    __ bind(&error);
  }

  // Load the double value and perform a manual truncation.
  Register input_high = scratch2;
  Register input_low = scratch3;

  __ lw(input_low,
        MemOperand(input_reg, double_offset + Register::kMantissaOffset));
  __ lw(input_high,
        MemOperand(input_reg, double_offset + Register::kExponentOffset));

  Label normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  __ Ext(result_reg,
         input_high,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
  __ Movz(result_reg, zero_reg, scratch);
  __ Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  __ Subu(result_reg,
          result_reg,
          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);

  __ bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result_reg;
  result_reg = no_reg;
  __ And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
  __ mov(input_high, zero_reg);
  __ Branch(&high_shift_done);
  __ bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  __ Or(input_high,
        input_high,
        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32-bit range.
  __ sllv(input_high, input_high, scratch);

  __ bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  __ Subu(scratch, zero_reg, scratch);
  __ sllv(input_low, input_low, scratch);
  __ Branch(&shift_done);

  __ bind(&pos_shift);
  __ srlv(input_low, input_low, scratch);

  __ bind(&shift_done);
  __ Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  __ mov(scratch, sign);
  result_reg = sign;
  sign = no_reg;
  __ Subu(result_reg, zero_reg, input_high);
  __ Movz(result_reg, input_high, scratch);

  __ bind(&done);

  __ Pop(scratch, scratch2, scratch3);
  __ Ret();
}
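

// Illustrative reference (not generated code): a host-side model of the
// manual truncation path above, assuming IEEE-754 doubles and two's
// complement int32. Like the stub, it truncates toward zero and keeps the
// low 32 bits, which matches ECMA-262 ToInt32 for out-of-range inputs:
//
//   static int32_t TruncateDoubleToInt32Model(double x) {
//     if (!std::isfinite(x)) return 0;  // NaN and +/-Infinity map to 0.
//     double t = std::trunc(x);         // Truncate toward zero.
//     uint32_t u = static_cast<uint32_t>(
//         std::fmod(std::fabs(t), 4294967296.0));  // Low 32 bits of |t|.
//     if (t < 0) u = 0u - u;  // Modular negation, like the Subu above.
//     return static_cast<int32_t>(u);
//   }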


void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
  WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
  stub1.GetCode();
  stub2.GetCode();
}


// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  // Test sign, and save for later conditionals.
  __ And(sign(), the_int(), Operand(0x80000000u));
  __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));

  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ li(scratch(), Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ or_(scratch(), scratch(), sign());
  // Subtract from 0 if the value was negative.
  __ subu(at, zero_reg, the_int());
  __ Movn(the_int(), at, sign());
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ srl(at, the_int(), shift_distance);
  __ or_(scratch(), scratch(), at);
  __ sw(scratch(), FieldMemOperand(the_heap_number(),
                                   HeapNumber::kExponentOffset));
  __ sll(scratch(), the_int(), 32 - shift_distance);
  __ Ret(USE_DELAY_SLOT);
  __ sw(scratch(), FieldMemOperand(the_heap_number(),
                                   HeapNumber::kMantissaOffset));

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ sw(scratch(),
        FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
  __ mov(scratch(), zero_reg);
  __ Ret(USE_DELAY_SLOT);
  __ sw(scratch(),
        FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
}
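

// Illustrative reference (not generated code): a host-side model of the bit
// layout built above, assuming |v| is in [2^30, 2^31) so the biased exponent
// is always 1023 + 30 (the implicit mantissa 1 then harmlessly overlaps the
// exponent's set low bit, as the comment in Generate() explains):
//
//   static uint64_t Int32ToDoubleBitsModel(int32_t v) {
//     uint32_t sign = v < 0 ? 0x80000000u : 0u;
//     uint32_t mag  = v < 0 ? 0u - static_cast<uint32_t>(v)
//                           : static_cast<uint32_t>(v);
//     uint32_t hi = sign | ((1023u + 30u) << 20) | (mag >> 10);
//     uint32_t lo = mag << 22;
//     return (static_cast<uint64_t>(hi) << 32) | lo;
//   }
//
// kMinInt is the exception handled by max_negative_int: its magnitude is
// 2^31, so it is encoded with exponent 31 and an all-zero stored mantissa.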


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t5;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // They are both equal and they are not both Smis so both of them are not
  // Smis. If it's not a heap number, then return equal.
  if (cc == less || cc == greater) {
    __ GetObjectType(a0, t4, t4);
    __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
  } else {
    __ GetObjectType(a0, t4, t4);
    __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
    // Comparing JS objects with <=, >= is complicated.
    if (cc != eq) {
      __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false! See ECMAScript 11.8.5.
      if (cc == less_equal || cc == greater_equal) {
        __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
        __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
        __ Branch(&return_equal, ne, a0, Operand(t2));
        DCHECK(is_int16(GREATER) && is_int16(LESS));
        __ Ret(USE_DELAY_SLOT);
        if (cc == le) {
          // undefined <= undefined should fail.
          __ li(v0, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ li(v0, Operand(LESS));
        }
      }
    }
  }

  __ bind(&return_equal);
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
  }

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless. For the others here is some code to check
  // for NaN.
  if (cc != lt && cc != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ And(t3, t2, Operand(exp_mask_reg));
    // If all bits not set (ne cond), then not a NaN, objects are equal.
    __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
    // Or with all low-bits of mantissa.
    __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
    __ Or(v0, t3, Operand(t2));
    // For equal we already have the right value in v0: Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN). For <= and >= we need to load v0 with the failing
    // value if it's a NaN.
    if (cc != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq, v0, Operand(zero_reg));
      DCHECK(is_int16(GREATER) && is_int16(LESS));
      __ Ret(USE_DELAY_SLOT);
      if (cc == le) {
        __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
      }
    }
  }
  // No fall through here.

  __ bind(&not_identical);
}
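

// Illustrative reference (not generated code): the NaN test above, as a
// host-side predicate over the two raw words of a double. A double is NaN
// iff all eleven exponent bits are set and the mantissa is non-zero:
//
//   static bool IsNaNFromWordsModel(uint32_t hi, uint32_t lo) {
//     const uint32_t kExpMask = 0x7FF00000u;  // Exponent bits 52..62.
//     return (hi & kExpMask) == kExpMask &&   // All exponent bits set, and
//            (((hi << 12) | lo) != 0);        // mantissa not all zero.
//   }
//
// The sll by kNonMantissaBitsInTopWord in the stub performs the same
// "discard sign and exponent, keep mantissa" step as the hi << 12 here.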


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ mov(v0, lhs);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  __ sra(at, rhs, kSmiTagSize);
  __ mtc1(at, f14);
  __ cvt_d_w(f14, f14);
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi. Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ li(v0, Operand(1));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  __ sra(at, lhs, kSmiTagSize);
  __ mtc1(at, f12);
  __ cvt_d_w(f12, f12);
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  // Fall through to both_loaded_as_doubles.
}


static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
  Label first_non_object;
  // Get the type of the first operand into a2 and compare it with
  // FIRST_SPEC_OBJECT_TYPE.
  __ GetObjectType(lhs, a2, a2);
  __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Return non-zero.
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(1));

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

  __ GetObjectType(rhs, a3, a3);
  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

  // Now that we have the types we might as well check for
  // internalized-internalized.
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ Or(a2, a2, Operand(a3));
  __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}


static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that case.
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs,
                                                     Register rhs,
                                                     Label* possible_strings,
                                                     Label* not_both_strings) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  // a2 is object type of rhs.
  Label object_test;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));

  // Both are internalized strings. We already checked they weren't the same
  // pointer so they are not equal.
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(1));  // Non-zero indicates not equal.

  __ bind(&object_test);
  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ GetObjectType(rhs, a2, a3);
  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ and_(a0, a2, a3);
  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
  __ Ret(USE_DELAY_SLOT);
  __ xori(v0, a0, 1 << Map::kIsUndetectable);
}
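

// Illustrative note (not generated code): the final three instructions are a
// branch-free "both undetectable?" test. With b2 and b3 the two maps' bit
// fields, the returned value is zero (meaning equal) iff both have the
// undetectable bit set:
//
//   uint32_t both = (b2 & b3) & (1u << Map::kIsUndetectable);
//   uint32_t result = both ^ (1u << Map::kIsUndetectable);  // 0 iff both set.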


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/string here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry a1 and a0 are the values to be compared.
// On exit v0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = a1;
  Register rhs = a0;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;

  Label not_two_smis, smi_done;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &not_two_smis);
  __ sra(a1, a1, 1);
  __ sra(a0, a0, 1);
  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, a1, a0);
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(0, Smi::FromInt(0));
  __ And(t2, lhs, Operand(rhs));
  __ JumpIfNotSmi(t2, &not_smis, t0);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to rhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison and the numbers have been loaded into f12 and f14 as doubles,
  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
  EmitSmiNonsmiComparison(masm, lhs, rhs,
                          &both_loaded_as_doubles, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // f12, f14 are the double representations of the left hand side
  // and the right hand side if we have FPU. Otherwise a2, a3 represent
  // left hand side and a0, a1 represent right hand side.
  Label nan;
  __ li(t0, Operand(LESS));
  __ li(t1, Operand(GREATER));
  __ li(t2, Operand(EQUAL));

  // Check if either rhs or lhs is NaN.
  __ BranchF(NULL, &nan, eq, f12, f14);

  // Check if LESS condition is satisfied. If true, move conditionally
  // result to v0.
  if (!IsMipsArchVariant(kMips32r6)) {
    __ c(OLT, D, f12, f14);
    __ Movt(v0, t0);
    // Use previous check to store conditionally to v0 opposite condition
    // (GREATER). If rhs is equal to lhs, this will be corrected in next
    // check.
    __ Movf(v0, t1);
    // Check if EQUAL condition is satisfied. If true, move conditionally
    // result to v0.
    __ c(EQ, D, f12, f14);
    __ Movt(v0, t2);
  } else {
    Label skip;
    __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
    __ mov(v0, t0);  // Return LESS as result.

    __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
    __ mov(v0, t2);  // Return EQUAL as result.

    __ mov(v0, t1);  // Return GREATER as result.
    __ bind(&skip);
  }

  __ Ret();

  __ bind(&nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case a2 will contain the type of lhs_.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two
    // detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that a2 is the type of lhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
                      a3);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
                                                    t1);
  }
  // Never falls through to here.

  __ bind(&slow);
  // Prepare for call to builtin. Push object pointers, a1 (lhs) first,
  // a0 (rhs) second.
  __ Push(lhs, rhs);
  // Figure out which native to call and setup the arguments.
  Builtins::JavaScript native;
  if (cc == eq) {
    native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    native = Builtins::COMPARE;
    int ncr;  // NaN compare result.
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // Remaining cases.
      ncr = LESS;
    }
    __ li(a0, Operand(Smi::FromInt(ncr)));
    __ push(a0);
  }

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_FUNCTION);

  __ bind(&miss);
  GenerateMiss(masm);
}
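

// Illustrative note (not generated code): the "ncr" value pushed above is
// what Builtins::COMPARE returns when either operand is NaN, chosen so the
// final condition check fails, as every comparison involving NaN must:
//
//   int ncr = (cc == lt || cc == le) ? GREATER : LESS;  // NaN result.
//
// E.g. for (a < b) with a NaN the builtin returns GREATER, and the
// subsequent "result < 0" test is false, as required.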


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PushSafepointRegisters();
  __ Jump(t9);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PopSafepointRegisters();
  __ Jump(t9);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | ra.bit());
  if (save_doubles()) {
    __ MultiPushFPU(kCallerSavedFPU);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = a1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  if (save_doubles()) {
    __ MultiPopFPU(kCallerSavedFPU);
  }

  __ MultiPop(kJSCallerSaved | ra.bit());
  __ Ret();
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register base = a1;
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(a2));
  const Register heapnumbermap = t1;
  const Register heapnumber = v0;
  const DoubleRegister double_base = f2;
  const DoubleRegister double_exponent = f4;
  const DoubleRegister double_result = f0;
  const DoubleRegister double_scratch = f6;
  const FPURegister single_scratch = f8;
  const Register scratch = t5;
  const Register scratch2 = t3;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack to double registers.
    __ lw(base, MemOperand(sp, 1 * kPointerSize));
    __ lw(exponent, MemOperand(sp, 0 * kPointerSize));

    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);

    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));

    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent);

    __ bind(&base_is_smi);
    __ mtc1(scratch, single_scratch);
    __ cvt_d_w(double_base, single_scratch);
    __ bind(&unpack_exponent);

    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ EmitFPUTruncate(kRoundToMinusInf,
                       scratch,
                       double_exponent,
                       at,
                       double_scratch,
                       scratch2,
                       kCheckForInexactConversion);
    // scratch2 == 0 means there was no conversion error.
    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));

    if (exponent_type() == ON_STACK) {
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label not_plus_half;

      // Test for 0.5.
      __ Move(double_scratch, 0.5);
      __ BranchF(USE_DELAY_SLOT,
                 &not_plus_half,
                 NULL,
                 ne,
                 double_exponent,
                 double_scratch);
      // double_scratch can be overwritten in the delay slot.
      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      __ Move(double_scratch, -V8_INFINITY);
      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
      __ neg_d(double_result, double_scratch);

      // Add +0 to convert -0 to +0.
      __ add_d(double_scratch, double_base, kDoubleRegZero);
      __ sqrt_d(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&not_plus_half);
      __ Move(double_scratch, -0.5);
      __ BranchF(USE_DELAY_SLOT,
                 &call_runtime,
                 NULL,
                 ne,
                 double_exponent,
                 double_scratch);
      // double_scratch can be overwritten in the delay slot.
      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      __ Move(double_scratch, -V8_INFINITY);
      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
      __ Move(double_result, kDoubleRegZero);

      // Add +0 to convert -0 to +0.
      __ add_d(double_scratch, double_base, kDoubleRegZero);
      __ Move(double_result, 1);
      __ sqrt_d(double_scratch, double_scratch);
      __ div_d(double_result, double_result, double_scratch);
      __ jmp(&done);
    }

    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch2);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(ra);
    __ MovFromFloatResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }

  __ mov_d(double_scratch, double_base);  // Back up base.
  __ Move(double_result, 1.0);

  // Get absolute value of exponent.
  Label positive_exponent;
  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
  __ Subu(scratch, zero_reg, scratch);
  __ bind(&positive_exponent);

  Label while_true, no_carry, loop_end;
  __ bind(&while_true);

  __ And(scratch2, scratch, 1);

  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
  __ mul_d(double_result, double_result, double_scratch);
  __ bind(&no_carry);

  __ sra(scratch, scratch, 1);

  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
  __ mul_d(double_scratch, double_scratch, double_scratch);

  __ Branch(&while_true);

  __ bind(&loop_end);

  __ Branch(&done, ge, exponent, Operand(zero_reg));
  __ Move(double_scratch, 1.0);
  __ div_d(double_result, double_scratch, double_result);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);

  // double_exponent may not contain the exponent value if the input was a
  // smi. We set it with exponent value before bailing out.
  __ mtc1(exponent, single_scratch);
  __ cvt_d_w(double_exponent, single_scratch);

  // Returning or bailing out.
  Counters* counters = isolate()->counters();
  if (exponent_type() == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in exponent.
    __ bind(&done);
    __ AllocateHeapNumber(
        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
    __ sdc1(double_result,
            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    DCHECK(heapnumber.is(v0));
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ DropAndRet(2);
  } else {
    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(ra);
    __ MovFromFloatResult(double_result);

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ Ret();
  }
}
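

// Illustrative reference (not generated code): the integer-exponent loop
// above is classic exponentiation by squaring. A host-side model:
//
//   static double PowIntModel(double base, int exponent) {
//     int e = exponent < 0 ? -exponent : exponent;
//     double result = 1.0;
//     double b = base;
//     while (e != 0) {
//       if (e & 1) result *= b;  // Low bit set: multiply into the result.
//       e >>= 1;
//       if (e != 0) b *= b;      // Square the base for the next bit.
//     }
//     return exponent < 0 ? 1.0 / result : result;
//   }
//
// The stub additionally bails out to the C routine when 1.0 / result
// underflows to zero, because x^-y == (1/x)^y does not hold once subnormal
// results are involved.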


bool CEntryStub::NeedsImmovableCode() {
  return true;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}


void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Generate if not already in cache.
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
  isolate->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function
  // a0: number of arguments including receiver
  // a1: pointer to builtin function
  // fp: frame pointer    (restored after C call)
  // sp: stack pointer    (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Compute the argv pointer in a callee-saved register.
  __ sll(s1, a0, kPointerSizeLog2);
  __ Addu(s1, sp, s1);
  __ Subu(s1, s1, kPointerSize);

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles());

  // s0: number of arguments including receiver (C callee-saved)
  // s1: pointer to first argument (C callee-saved)
  // s2: pointer to builtin function (C callee-saved)

  // Prepare arguments for C routine.
  // a0 = argc
  __ mov(s0, a0);
  __ mov(s2, a1);
  // a1 = argv (set in the delay slot after find_ra below).

  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
  // also need to reserve the 4 argument slots on the stack.

  __ AssertStackIsAligned();

  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
    // This branch-and-link sequence is needed to find the current PC on mips,
    // saved to the ra register.
    // Use masm-> here instead of the double-underscore macro since extra
    // coverage code can interfere with the proper calculation of ra.
    Label find_ra;
    masm->bal(&find_ra);  // bal exposes branch delay slot.
    masm->mov(a1, s1);
    masm->bind(&find_ra);

    // Adjust the value in ra to point to the correct return location, 2nd
    // instruction past the real call into C code (the jalr(t9)), and push it.
    // This is the return address of the exit frame.
    const int kNumInstructionsToJump = 5;
    masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
    masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
    // Stack space reservation moved to the branch delay slot below.
    // Stack is still aligned.

    // Call the C routine.
    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
    masm->jalr(t9);
    // Set up sp in the delay slot.
    masm->addiu(sp, sp, -kCArgsSlotsSize);
    // Make sure the stored 'ra' points to this position.
    DCHECK_EQ(kNumInstructionsToJump,
              masm->InstructionsGeneratedSince(&find_ra));
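    // For reference (illustrative, not a sixth emitted instruction): the
    // five instructions counted from find_ra are
    //   Addu(ra, ...); sw(ra, ...); mov(t9, s2); jalr(t9); addiu(sp, ...);
    // so the stored ra points just past the jalr's delay slot, which is
    // exactly where the C routine will return to.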
  }

  // Runtime functions should not return 'the hole'. Allowing it to escape may
  // lead to crashes in the IC code later.
  if (FLAG_debug_code) {
    Label okay;
    __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
    __ Branch(&okay, ne, v0, Operand(t0));
    __ stop("The hole escaped");
    __ bind(&okay);
  }

  // Check result for exception sentinel.
  Label exception_returned;
  __ LoadRoot(t0, Heap::kExceptionRootIndex);
  __ Branch(&exception_returned, eq, t0, Operand(v0));

  ExternalReference pending_exception_address(
      Isolate::kPendingExceptionAddress, isolate());

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    __ li(a2, Operand(pending_exception_address));
    __ lw(a2, MemOperand(a2));
    __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
    // Cannot use check here as it attempts to generate call into runtime.
    __ Branch(&okay, eq, t0, Operand(a2));
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // v0:v1: result
  // sp: stack pointer
  // fp: frame pointer
  // s0: still holds argc (callee-saved).
  __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);

  // Handling of exception.
  __ bind(&exception_returned);

  // Retrieve the pending exception.
  __ li(a2, Operand(pending_exception_address));
  __ lw(v0, MemOperand(a2));

  // Clear the pending exception.
  __ li(a3, Operand(isolate()->factory()->the_hole_value()));
  __ sw(a3, MemOperand(a2));

  // Special handling of termination exceptions which are uncatchable
  // by JavaScript code.
  Label throw_termination_exception;
  __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
  __ Branch(&throw_termination_exception, eq, v0, Operand(t0));

  // Handle normal exception.
  __ Throw(v0);

  __ bind(&throw_termination_exception);
  __ ThrowUncatchable(v0);
}


void JSEntryStub::Generate(MacroAssembler* masm) {
  Label invoke, handler_entry, exit;
  Isolate* isolate = masm->isolate();

  // Registers:
  // a0: entry address
  // a1: function
  // a2: receiver
  // a3: argc
  //
  // Stack:
  // 4 args slots
  // args

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Save callee saved registers on the stack.
  __ MultiPush(kCalleeSaved | ra.bit());

  // Save callee-saved FPU registers.
  __ MultiPushFPU(kCalleeSavedFPU);
  // Set up the reserved register for 0.0.
  __ Move(kDoubleRegZero, 0.0);

  // Load argv in s0 register.
  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
  offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;

  __ InitializeRootRegister();
  __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));

  // We build an EntryFrame.
  __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  int marker = type();
  __ li(t2, Operand(Smi::FromInt(marker)));
  __ li(t1, Operand(Smi::FromInt(marker)));
  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                      isolate)));
  __ lw(t0, MemOperand(t0));
  __ Push(t3, t2, t1, t0);
  // Set up frame pointer for the frame to be pushed.
  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);

  // Registers:
  // a0: entry_address
  // a1: function
  // a2: receiver_pointer
  // a3: argc
  // s0: argv
  //
  // Stack:
  // caller fp          |
  // function slot      | entry frame
  // context slot       |
  // bad fp (0xff...f)  |
  // callee saved registers + ra
  // 4 args slots
  // args

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
  __ li(t1, Operand(ExternalReference(js_entry_sp)));
  __ lw(t2, MemOperand(t1));
  __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
  __ sw(fp, MemOperand(t1));
  __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  Label cont;
  __ b(&cont);
  __ nop();   // Branch delay slot nop.
  __ bind(&non_outermost_js);
  __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
  __ bind(&cont);
  __ push(t0);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);
  __ bind(&handler_entry);
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel. Coming in here the
  // fp will be invalid because the PushTryHandler below sets it to 0 to
  // signal the existence of the JSEntry frame.
  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                      isolate)));
  __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
  __ LoadRoot(v0, Heap::kExceptionRootIndex);
  __ b(&exit);  // b exposes branch delay slot.
  __ nop();   // Branch delay slot nop.

  // Invoke: Link this frame into the handler chain. There's only one
  // handler block in this code object, so its index is 0.
  __ bind(&invoke);
  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the bal(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Clear any pending exceptions.
  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                      isolate)));
  __ sw(t1, MemOperand(t0));

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Registers:
  // a0: entry_address
  // a1: function
  // a2: receiver_pointer
  // a3: argc
  // s0: argv
  //
  // Stack:
  // handler frame
  // entry frame
  // callee saved registers + ra
  // 4 args slots
  // args

  if (type() == StackFrame::ENTRY_CONSTRUCT) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate);
    __ li(t0, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
    __ li(t0, Operand(entry));
  }
  __ lw(t9, MemOperand(t0));  // Deref address.

  // Call JSEntryTrampoline.
  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
  __ Call(t9);

  // Unlink this frame from the handler chain.
  __ PopTryHandler();

  __ bind(&exit);  // v0 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(t1);
  __ Branch(&non_outermost_js_2,
            ne,
            t1,
            Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ li(t1, Operand(ExternalReference(js_entry_sp)));
  __ sw(zero_reg, MemOperand(t1));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(t1);
  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                      isolate)));
  __ sw(t1, MemOperand(t0));

  // Reset the stack to the callee saved registers.
  __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);

  // Restore callee-saved fpu registers.
  __ MultiPopFPU(kCalleeSavedFPU);

  // Restore callee saved registers from the stack.
  __ MultiPop(kCalleeSaved | ra.bit());
  // Return.
  __ Jump(ra);
}


void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
  // Return address is in ra.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register scratch = a3;
  Register result = v0;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX,
                                          RECEIVER_IS_STRING);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}


// Uses registers a0 to t0.
// Expected input (depending on whether args are in registers or on the stack):
// * object: a0 or at sp + 1 * kPointerSize.
// * function: a1 or at sp.
//
// An inlined call site may have been generated before calling this stub.
// In this case the offset to the inline site to patch is passed on the stack,
// in the safepoint slot for register t0.
void InstanceofStub::Generate(MacroAssembler* masm) {
  // Call site inlining and patching implies arguments in registers.
  DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());

  // Fixed register usage throughout the stub:
  const Register object = a0;  // Object (lhs).
  Register map = a3;  // Map of the object.
  const Register function = a1;  // Function (rhs).
  const Register prototype = t0;  // Prototype of the function.
  const Register inline_site = t5;
  const Register scratch = a2;

  const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;

  Label slow, loop, is_instance, is_not_instance, not_js_object;

  if (!HasArgsInRegisters()) {
    __ lw(object, MemOperand(sp, 1 * kPointerSize));
    __ lw(function, MemOperand(sp, 0));
  }

  // Check that the left hand is a JS object and load map.
  __ JumpIfSmi(object, &not_js_object);
  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);

  // If there is a call site cache don't look in the global cache, but do the
  // real lookup and update the call site cache.
  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
    Label miss;
    __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
    __ Branch(&miss, ne, function, Operand(at));
    __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
    __ Branch(&miss, ne, map, Operand(at));
    __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);

    __ bind(&miss);
  }

  // Get the prototype of the function.
  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);

  // Check that the function prototype is a JS object.
  __ JumpIfSmi(prototype, &slow);
  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);

  // Update the global instanceof or call site inlined cache with the current
  // map and function. The cached answer will be set when it is known below.
  if (!HasCallSiteInlineCheck()) {
    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
  } else {
    DCHECK(HasArgsInRegisters());
    // Patch the (relocated) inlined map check.

    // The offset was stored in t0 safepoint slot.
    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
    __ LoadFromSafepointRegisterSlot(scratch, t0);
    __ Subu(inline_site, ra, scratch);
    // Get the map location in scratch and patch it.
    __ GetRelocatedValue(inline_site, scratch, v1);  // v1 used as scratch.
    __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
  }

  // Register mapping: a3 is object map and t0 is function prototype.
  // Get prototype of object into a2.
  __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));

  // We don't need map any more. Use it as a scratch register.
  Register scratch2 = map;
  map = no_reg;

  // Loop through the prototype chain looking for the function prototype.
  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
  __ bind(&loop);
  __ Branch(&is_instance, eq, scratch, Operand(prototype));
  __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
  __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
  __ Branch(&loop);

  __ bind(&is_instance);
  DCHECK(Smi::FromInt(0) == 0);
  if (!HasCallSiteInlineCheck()) {
    __ mov(v0, zero_reg);
    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
    if (ReturnTrueFalseObject()) {
      __ LoadRoot(v0, Heap::kTrueValueRootIndex);
    }
  } else {
    // Patch the call site to return true.
    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
    // Get the boolean result location in scratch and patch it.
    __ PatchRelocatedValue(inline_site, scratch, v0);

    if (!ReturnTrueFalseObject()) {
      DCHECK_EQ(Smi::FromInt(0), 0);
      __ mov(v0, zero_reg);
    }
  }
  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);

  __ bind(&is_not_instance);
  if (!HasCallSiteInlineCheck()) {
    __ li(v0, Operand(Smi::FromInt(1)));
    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
    if (ReturnTrueFalseObject()) {
      __ LoadRoot(v0, Heap::kFalseValueRootIndex);
    }
  } else {
    // Patch the call site to return false.
    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
    // Get the boolean result location in scratch and patch it.
    __ PatchRelocatedValue(inline_site, scratch, v0);

    if (!ReturnTrueFalseObject()) {
      __ li(v0, Operand(Smi::FromInt(1)));
    }
  }

  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);

  Label object_not_null, object_not_null_or_smi;
  __ bind(&not_js_object);
  // Before null, smi and string value checks, check that the rhs is a function
  // as for a non-function rhs an exception needs to be thrown.
  __ JumpIfSmi(function, &slow);
  __ GetObjectType(function, scratch2, scratch);
  __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));

  // Null is not instance of anything.
  __ Branch(&object_not_null, ne, object,
            Operand(isolate()->factory()->null_value()));
  if (ReturnTrueFalseObject()) {
    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
  } else {
    __ li(v0, Operand(Smi::FromInt(1)));
  }
  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);

  __ bind(&object_not_null);
  // Smi values are not instances of anything.
  __ JumpIfNotSmi(object, &object_not_null_or_smi);
  if (ReturnTrueFalseObject()) {
    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
  } else {
    __ li(v0, Operand(Smi::FromInt(1)));
  }
  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);

  __ bind(&object_not_null_or_smi);
  // String values are not instances of anything.
  __ IsObjectJSStringType(object, scratch, &slow);
  if (ReturnTrueFalseObject()) {
    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
  } else {
    __ li(v0, Operand(Smi::FromInt(1)));
  }
  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);

  // Slow-case. Tail call builtin.
  __ bind(&slow);
  if (!ReturnTrueFalseObject()) {
    if (HasArgsInRegisters()) {
      __ Push(a0, a1);
    }
    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
  } else {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(a0, a1);
      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
    }
    __ mov(a0, v0);
    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
    __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
  }
}
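

// Illustrative reference (not generated code): the loop in the stub walks
// the prototype chain exactly like this host-side model, where
// GetPrototype() and null_value are stand-ins for the map->prototype load
// and the null root used above:
//
//   static bool IsInstanceModel(Object* object, Object* prototype) {
//     for (Object* p = GetPrototype(object); ; p = GetPrototype(p)) {
//       if (p == prototype) return true;    // Found it on the chain.
//       if (p == null_value) return false;  // Hit the end of the chain.
//     }
//   }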


void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver = LoadDescriptor::ReceiverRegister();
  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
                                                          t0, &miss);
  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}


void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The displacement is the offset of the last parameter (if any)
  // relative to the frame pointer.
  const int kDisplacement =
      StandardFrameConstants::kCallerSPOffset - kPointerSize;
  DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
  DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));

  // Check that the key is a smi.
  Label slow;
  __ JumpIfNotSmi(a1, &slow);

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor;
  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
  __ Branch(&adaptor,
            eq,
            a3,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Check index (a1) against formal parameters count limit passed in
  // through register a0. Use unsigned comparison to get negative
  // check for free.
  __ Branch(&slow, hs, a1, Operand(a0));

  // Read the argument from the stack and return it.
  __ subu(a3, a0, a1);
  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(a3, fp, Operand(t3));
  __ Ret(USE_DELAY_SLOT);
  __ lw(v0, MemOperand(a3, kDisplacement));

  // Arguments adaptor case: Check index (a1) against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));

  // Read the argument from the adaptor frame and return it.
  __ subu(a3, a0, a1);
  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(a3, a2, Operand(t3));
  __ Ret(USE_DELAY_SLOT);
  __ lw(v0, MemOperand(a3, kDisplacement));

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ push(a1);
  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
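

// Illustrative note (not generated code): for both frame flavours the
// address computed above is
//
//   address = frame_base + (count - key) * kPointerSize + kDisplacement
//
// with key and count still smi-tagged; the tag accounts for one of the two
// left shifts (kPointerSizeLog2 - kSmiTagSize == 1 on 32-bit MIPS), so the
// untagged index is never materialized.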


void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
  // sp[0] : number of parameters
  // sp[4] : receiver displacement
  // sp[8] : function
  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
  __ Branch(&runtime,
            ne,
            a2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Patch the arguments.length and the parameters pointer in the current frame.
  __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ sw(a2, MemOperand(sp, 0 * kPointerSize));
  __ sll(t3, a2, 1);
  __ Addu(a3, a3, Operand(t3));
  __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
  __ sw(a3, MemOperand(sp, 1 * kPointerSize));

  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
1718 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1720 // sp[0] : number of parameters (tagged)
1721 // sp[4] : address of receiver argument
1723 // Registers used over whole function:
1724 // t2 : allocated object (tagged)
1725 // t5 : mapped parameter count (tagged)
1727 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
1728 // a1 = parameter count (tagged)
1730 // Check if the calling frame is an arguments adaptor frame.
1732 Label adaptor_frame, try_allocate;
1733 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1734 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
1735 __ Branch(&adaptor_frame,
1738 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1740 // No adaptor, parameter count = argument count.
1742 __ b(&try_allocate);
1743 __ nop(); // Branch delay slot nop.
1745 // We have an adaptor frame. Patch the parameters pointer.
1746 __ bind(&adaptor_frame);
1747 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1749 __ Addu(a3, a3, Operand(t6));
1750 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1751 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1753 // a1 = parameter count (tagged)
1754 // a2 = argument count (tagged)
1755 // Compute the mapped parameter count = min(a1, a2) in a1.
1757 __ Branch(&skip_min, lt, a1, Operand(a2));
1761 __ bind(&try_allocate);
1763 // Compute the sizes of backing store, parameter map, and arguments object.
1764 // 1. Parameter map, has 2 extra words containing context and backing store.
1765 const int kParameterMapHeaderSize =
1766 FixedArray::kHeaderSize + 2 * kPointerSize;
1767 // If there are no mapped parameters, we do not need the parameter_map.
1768 Label param_map_size;
  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
  __ mov(t5, zero_reg);  // In delay slot: param map size = 0 when a1 == 0.
  __ sll(t5, a1, 1);
  __ addiu(t5, t5, kParameterMapHeaderSize);
  __ bind(&param_map_size);
1776 // 2. Backing store.
  __ sll(t6, a2, 1);  // Backing store needs one word per argument.
  __ Addu(t5, t5, Operand(t6));
1779 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
1781 // 3. Arguments object.
1782 __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
1784 // Do the allocation of all three objects in one go.
1785 __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
1787 // v0 = address of new object(s) (tagged)
1788 // a2 = argument count (smi-tagged)
1789 // Get the arguments boilerplate from the current native context into t0.
1790 const int kNormalOffset =
1791 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
1792 const int kAliasedOffset =
1793 Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
1795 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1796 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
1797 Label skip2_ne, skip2_eq;
1798 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
  __ lw(t0, MemOperand(t0, kNormalOffset));
  __ bind(&skip2_ne);

  __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
  __ lw(t0, MemOperand(t0, kAliasedOffset));
  __ bind(&skip2_eq);
1806 // v0 = address of new object (tagged)
1807 // a1 = mapped parameter count (tagged)
1808 // a2 = argument count (smi-tagged)
1809 // t0 = address of arguments map (tagged)
1810 __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
1811 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
1812 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
1813 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
1815 // Set up the callee in-object property.
1816 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1817 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
1818 __ AssertNotSmi(a3);
1819 const int kCalleeOffset = JSObject::kHeaderSize +
1820 Heap::kArgumentsCalleeIndex * kPointerSize;
1821 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
1823 // Use the length (smi tagged) and set that as an in-object property too.
1825 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1826 const int kLengthOffset = JSObject::kHeaderSize +
1827 Heap::kArgumentsLengthIndex * kPointerSize;
1828 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
1830 // Set up the elements pointer in the allocated arguments object.
1831 // If we allocated a parameter map, t0 will point there, otherwise
1832 // it will point to the backing store.
1833 __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
1834 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
1836 // v0 = address of new object (tagged)
1837 // a1 = mapped parameter count (tagged)
1838 // a2 = argument count (tagged)
1839 // t0 = address of parameter map or backing store (tagged)
1840 // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  Label skip3;
  __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
  // Move backing store address to a3, because it is
  // expected there when filling in the unmapped arguments.
  __ mov(a3, t0);
  __ bind(&skip3);

  __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
1851 __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
1852 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
1853 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
1854 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
1855 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
  __ sll(t6, a1, 1);
  __ Addu(t2, t0, Operand(t6));
1858 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
1859 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
  // The mapped parameters thus need to get indices
  //   MIN_CONTEXT_SLOTS + parameter_count - 1 ..
  //       MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
  // We loop from right to left.
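  // (Example: with parameter_count == 3 and mapped_parameter_count == 2, the
  // two mapped slots receive context indices MIN_CONTEXT_SLOTS + 2 and
  // MIN_CONTEXT_SLOTS + 1, in that order.)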
  Label parameters_loop, parameters_test;
  __ mov(t2, a1);
  __ lw(t5, MemOperand(sp, 0 * kPointerSize));
  __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
  __ Subu(t5, t5, Operand(a1));
  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
  __ sll(t6, a1, 1);
  __ Addu(a3, t0, Operand(t6));
  __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
1879 // t2 = loop variable (tagged)
1880 // a1 = mapping index (tagged)
1881 // a3 = address of backing store (tagged)
1882 // t0 = address of parameter map (tagged)
1883 // t1 = temporary scratch (a.o., for address calculation)
1884 // t3 = the hole value
  __ jmp(&parameters_test);

  __ bind(&parameters_loop);
  __ Subu(t2, t2, Operand(Smi::FromInt(1)));
  __ sll(t1, t2, 1);
  __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
1891 __ Addu(t6, t0, t1);
1892 __ sw(t5, MemOperand(t6));
1893 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
1894 __ Addu(t6, a3, t1);
1895 __ sw(t3, MemOperand(t6));
1896 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
  __ bind(&parameters_test);
  __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
1900 __ bind(&skip_parameter_map);
1901 // a2 = argument count (tagged)
1902 // a3 = address of backing store (tagged)
1904 // Copy arguments header and remaining slots (if there are any).
1905 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
1906 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
1907 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
1909 Label arguments_loop, arguments_test;
  __ mov(t5, a1);
  __ lw(t0, MemOperand(sp, 1 * kPointerSize));
  __ sll(t6, t5, 1);
  __ Subu(t0, t0, Operand(t6));
1914 __ jmp(&arguments_test);
1916 __ bind(&arguments_loop);
1917 __ Subu(t0, t0, Operand(kPointerSize));
1918 __ lw(t2, MemOperand(t0, 0));
  __ sll(t6, t5, 1);
  __ Addu(t1, a3, Operand(t6));
1921 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
1922 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
1924 __ bind(&arguments_test);
1925 __ Branch(&arguments_loop, lt, t5, Operand(a2));
  // Return and remove the on-stack parameters.
  __ DropAndRet(3);

  // Do the runtime call to allocate the arguments object.
  // a2 = argument count (tagged)
  __ bind(&runtime);
  __ sw(a2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
1938 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
  // Return address is in ra.
  Label slow;
1942 Register receiver = LoadDescriptor::ReceiverRegister();
1943 Register key = LoadDescriptor::NameRegister();
1945 // Check that the key is an array index, that is Uint32.
1946 __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
1947 __ Branch(&slow, ne, t0, Operand(zero_reg));
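  // (A valid array index is a non-negative smi, i.e. both the smi tag bit and
  // the sign bit are clear, which the single mask test above checks at once.)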
1949 // Everything is fine, call runtime.
1950 __ Push(receiver, key); // Receiver, key.
1952 // Perform tail call to the entry.
1953 __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
                        masm->isolate()),
      2, 1);

  __ bind(&slow);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
1964 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1965 // sp[0] : number of parameters
  // sp[4] : receiver displacement
  // sp[8] : function
1968 // Check if the calling frame is an arguments adaptor frame.
1969 Label adaptor_frame, try_allocate, runtime;
1970 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1971 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
  __ Branch(&adaptor_frame, ne, a3,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1977 // Get the length from the frame.
1978 __ lw(a1, MemOperand(sp, 0));
1979 __ Branch(&try_allocate);
1981 // Patch the arguments.length and the parameters pointer.
1982 __ bind(&adaptor_frame);
1983 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1984 __ sw(a1, MemOperand(sp, 0));
1985 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
1986 __ Addu(a3, a2, Operand(at));
1988 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1989 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
1991 // Try the new space allocation. Start out with computing the size
1992 // of the arguments object and the elements array in words.
1993 Label add_arguments_object;
1994 __ bind(&try_allocate);
1995 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
1996 __ srl(a1, a1, kSmiTagSize);
1998 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
1999 __ bind(&add_arguments_object);
2000 __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
2002 // Do the allocation of both objects in one go.
2003 __ Allocate(a1, v0, a2, a3, &runtime,
2004 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2006 // Get the arguments boilerplate from the current native context.
2007 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2008 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
2009 __ lw(t0, MemOperand(
2010 t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
2012 __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
2013 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
2014 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
2015 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
2017 // Get the length (smi tagged) and set that as an in-object property too.
2018 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2019 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2021 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
2022 Heap::kArgumentsLengthIndex * kPointerSize));
  Label done;
  __ Branch(&done, eq, a1, Operand(zero_reg));
2027 // Get the parameters pointer from the stack.
2028 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
2030 // Set up the elements pointer in the allocated arguments object and
2031 // initialize the header in the elements fixed array.
2032 __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
2033 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
2034 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
2035 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
2036 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
2037 // Untag the length for the loop.
2038 __ srl(a1, a1, kSmiTagSize);
2040 // Copy the fixed array slots.
  Label loop;
  // Set up t0 to point to the first array slot.
  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ bind(&loop);
2045 // Pre-decrement a2 with kPointerSize on each iteration.
2046 // Pre-decrement in order to skip receiver.
2047 __ Addu(a2, a2, Operand(-kPointerSize));
2048 __ lw(a3, MemOperand(a2));
2049 // Post-increment t0 with kPointerSize on each iteration.
2050 __ sw(a3, MemOperand(t0));
2051 __ Addu(t0, t0, Operand(kPointerSize));
2052 __ Subu(a1, a1, Operand(1));
2053 __ Branch(&loop, ne, a1, Operand(zero_reg));
  // Return and remove the on-stack parameters.
  __ bind(&done);
  __ DropAndRet(3);

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
2065 void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Just jump directly to runtime if native RegExp is not selected at compile
  // time, or if the regexp entry in generated code is turned off via runtime
  // switch or at compilation.
2069 #ifdef V8_INTERPRETED_REGEXP
2070 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2071 #else // V8_INTERPRETED_REGEXP
2073 // Stack frame on entry.
2074 // sp[0]: last_match_info (expected JSArray)
2075 // sp[4]: previous index
2076 // sp[8]: subject string
2077 // sp[12]: JSRegExp object
2079 const int kLastMatchInfoOffset = 0 * kPointerSize;
2080 const int kPreviousIndexOffset = 1 * kPointerSize;
2081 const int kSubjectOffset = 2 * kPointerSize;
2082 const int kJSRegExpOffset = 3 * kPointerSize;
2085 // Allocation of registers for this function. These are in callee save
2086 // registers and will be preserved by the call to the native RegExp code, as
2087 // this code is called using the normal C calling convention. When calling
2088 // directly from generated code the native RegExp code will not do a GC and
2089 // therefore the content of these registers are safe to use after the call.
2090 // MIPS - using s0..s2, since we are not using CEntry Stub.
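  // (s-registers are callee-saved under the MIPS O32 calling convention, so
  // the called native code must preserve them.)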
2091 Register subject = s0;
2092 Register regexp_data = s1;
2093 Register last_match_info_elements = s2;
  Label runtime;
  // Ensure that a RegExp stack is allocated.
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(
          isolate());
2099 ExternalReference address_of_regexp_stack_memory_size =
2100 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2101 __ li(a0, Operand(address_of_regexp_stack_memory_size));
2102 __ lw(a0, MemOperand(a0, 0));
2103 __ Branch(&runtime, eq, a0, Operand(zero_reg));
2105 // Check that the first argument is a JSRegExp object.
2106 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
2107 STATIC_ASSERT(kSmiTag == 0);
2108 __ JumpIfSmi(a0, &runtime);
2109 __ GetObjectType(a0, a1, a1);
2110 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2112 // Check that the RegExp has been compiled (data contains a fixed array).
2113 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2114 if (FLAG_debug_code) {
    __ SmiTst(regexp_data, t0);
    __ Check(nz,
             kUnexpectedTypeForRegExpDataFixedArrayExpected,
             t0,
             Operand(zero_reg));
    __ GetObjectType(regexp_data, a0, a0);
    __ Check(eq,
             kUnexpectedTypeForRegExpDataFixedArrayExpected,
             a0,
             Operand(FIXED_ARRAY_TYPE));
  }
2127 // regexp_data: RegExp data (FixedArray)
2128 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2129 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2130 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2132 // regexp_data: RegExp data (FixedArray)
2133 // Check that the number of captures fit in the static offsets vector buffer.
  __ lw(a2,
        FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2136 // Check (number_of_captures + 1) * 2 <= offsets vector size
2137 // Or number_of_captures * 2 <= offsets vector size - 2
2138 // Multiplying by 2 comes for free since a2 is smi-tagged.
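  // (Example: number_of_captures == 1 is the smi 1, whose raw value is 2;
  // requiring 2 <= vector size - 2 is exactly (1 + 1) * 2 <= vector size.)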
2139 STATIC_ASSERT(kSmiTag == 0);
2140 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2141 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
  __ Branch(
      &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2145 // Reset offset for possibly sliced string.
2146 __ mov(t0, zero_reg);
2147 __ lw(subject, MemOperand(sp, kSubjectOffset));
2148 __ JumpIfSmi(subject, &runtime);
2149 __ mov(a3, subject); // Make a copy of the original subject string.
2150 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2151 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2152 // subject: subject string
2153 // a3: subject string
2154 // a0: subject string instance type
2155 // regexp_data: RegExp data (FixedArray)
2156 // Handle subject string according to its encoding and representation:
2157 // (1) Sequential string? If yes, go to (5).
2158 // (2) Anything but sequential or cons? If yes, go to (6).
2159 // (3) Cons string. If the string is flat, replace subject with first string.
2160 // Otherwise bailout.
2161 // (4) Is subject external? If yes, go to (7).
  // (5) Sequential string.  Load regexp code according to encoding.
  // (E) Carry on.

  // Deferred code at the end of the stub:
  // (6) Not a long external string?  If yes, go to (8).
  // (7) External string.  Make it, offset-wise, look like a sequential string.
  //     Go to (5).
  // (8) Short external string or not a string?  If yes, bail out to runtime.
2171 // (9) Sliced string. Replace subject with parent. Go to (4).
2173 Label seq_string /* 5 */, external_string /* 7 */,
2174 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2175 not_long_external /* 8 */;
2177 // (1) Sequential string? If yes, go to (5).
  __ And(a1,
         a0,
         Operand(kIsNotStringMask |
                 kStringRepresentationMask |
                 kShortExternalStringMask));
2183 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2184 __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
2186 // (2) Anything but sequential or cons? If yes, go to (6).
2187 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2188 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2189 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2190 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
  __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2194 // (3) Cons string. Check that it's flat.
2195 // Replace subject with first string and reload instance type.
2196 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2197 __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2198 __ Branch(&runtime, ne, a0, Operand(a1));
2199 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2201 // (4) Is subject external? If yes, go to (7).
2202 __ bind(&check_underlying);
2203 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2204 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2205 STATIC_ASSERT(kSeqStringTag == 0);
2206 __ And(at, a0, Operand(kStringRepresentationMask));
2207 // The underlying external string is never a short external string.
2208 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2209 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2210 __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
2212 // (5) Sequential string. Load regexp code according to encoding.
2213 __ bind(&seq_string);
2214 // subject: sequential subject string (or look-alike, external string)
2215 // a3: original subject string
2216 // Load previous index and check range before a3 is overwritten. We have to
2217 // use a3 instead of subject here because subject might have been only made
2218 // to look like a sequential string when it actually is an external string.
2219 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
2220 __ JumpIfNotSmi(a1, &runtime);
2221 __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
2222 __ Branch(&runtime, ls, a3, Operand(a1));
2223 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
2225 STATIC_ASSERT(kStringEncodingMask == 4);
2226 STATIC_ASSERT(kOneByteStringTag == 4);
2227 STATIC_ASSERT(kTwoByteStringTag == 0);
2228 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one-byte.
2229 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
2230 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
2231 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2232 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
2234 // (E) Carry on. String handling is done.
2235 // t9: irregexp code
  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
2239 __ JumpIfSmi(t9, &runtime);
2241 // a1: previous index
2242 // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
2244 // subject: Subject string
2245 // regexp_data: RegExp data (FixedArray)
2246 // All checks done. Now push arguments for native regexp code.
  __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
                      1, a0, a2);
2250 // Isolates: note we add an additional parameter here (isolate pointer).
2251 const int kRegExpExecuteArguments = 9;
2252 const int kParameterRegisters = 4;
2253 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2255 // Stack pointer now points to cell where return address is to be written.
2256 // Arguments are before that on the stack or in registers, meaning we
2257 // treat the return address as argument 5. Thus every argument after that
2258 // needs to be shifted back by 1. Since DirectCEntryStub will handle
2259 // allocating space for the c argument slots, we don't need to calculate
2260 // that into the argument positions on the stack. This is how the stack will
2261 // look (sp meaning the value of sp at this moment):
2262 // [sp + 5] - Argument 9
2263 // [sp + 4] - Argument 8
2264 // [sp + 3] - Argument 7
2265 // [sp + 2] - Argument 6
2266 // [sp + 1] - Argument 5
2267 // [sp + 0] - saved ra
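  // (Slot offsets above are in words: [sp + n] denotes sp + n * kPointerSize,
  // matching the stores below.)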
2269 // Argument 9: Pass current isolate address.
2270 // CFunctionArgumentOperand handles MIPS stack argument slots.
2271 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2272 __ sw(a0, MemOperand(sp, 5 * kPointerSize));
2274 // Argument 8: Indicate that this is a direct call from JavaScript.
2275 __ li(a0, Operand(1));
2276 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
2278 // Argument 7: Start (high end) of backtracking stack memory area.
2279 __ li(a0, Operand(address_of_regexp_stack_memory_address));
2280 __ lw(a0, MemOperand(a0, 0));
2281 __ li(a2, Operand(address_of_regexp_stack_memory_size));
2282 __ lw(a2, MemOperand(a2, 0));
2283 __ addu(a0, a0, a2);
2284 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
2286 // Argument 6: Set the number of capture registers to zero to force global
2287 // regexps to behave as non-global. This does not affect non-global regexps.
2288 __ mov(a0, zero_reg);
2289 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
2291 // Argument 5: static offsets vector buffer.
  __ li(a0, Operand(
        ExternalReference::address_of_static_offsets_vector(isolate())));
2294 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
2296 // For arguments 4 and 3 get string length, calculate start of string data
2297 // calculate the shift of the index (0 for one-byte and 1 for two-byte).
2298 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2299 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
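  // (a3 was 1 for one-byte and 0 for two-byte; XOR with 1 turns it into the
  // shift needed to scale an index to bytes: 0 for one-byte, 1 for two-byte.)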
2300 // Load the length from the original subject string from the previous stack
2301 // frame. Therefore we have to use fp, which points exactly to two pointer
2302 // sizes below the previous sp. (Because creating a new stack frame pushes
2303 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2304 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
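  // (The subject was at sp + kSubjectOffset before the exit frame was set up;
  // entering the frame pushed two words, so it is now found at
  // fp + kSubjectOffset + 2 * kPointerSize.)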
2305 // If slice offset is not 0, load the length from the original sliced string.
2306 // Argument 4, a3: End of string data
2307 // Argument 3, a2: Start of string data
2308 // Prepare start and end index of the input.
2309 __ sllv(t1, t0, a3);
2310 __ addu(t0, t2, t1);
2311 __ sllv(t1, a1, a3);
2312 __ addu(a2, t0, t1);
2314 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
2315 __ sra(t2, t2, kSmiTagSize);
2316 __ sllv(t1, t2, a3);
2317 __ addu(a3, t0, t1);
2318 // Argument 2 (a1): Previous index.
2321 // Argument 1 (a0): Subject string.
2322 __ mov(a0, subject);
2324 // Locate the code entry and call it.
2325 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
2326 DirectCEntryStub stub(isolate());
2327 stub.GenerateCall(masm, t9);
2329 __ LeaveExitFrame(false, no_reg, true);
2332 // subject: subject string (callee saved)
2333 // regexp_data: RegExp data (callee saved)
2334 // last_match_info_elements: Last match info elements (callee saved)
  // Check the result.
  Label success;
  __ Branch(&success, eq, v0, Operand(1));
  // We expect exactly one result since we force the called regexp to behave
  // as non-global.
  Label failure;
  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
  // If not exception, it can only be retry. Handle that in the runtime system.
  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
  // Result must now be exception. If there is no pending exception already, a
  // stack overflow (on the backtrack stack) was detected in RegExp code but
  // the exception has not been created yet. Handle that in the runtime system.
2347 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2348 __ li(a1, Operand(isolate()->factory()->the_hole_value()));
  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                      isolate())));
  __ lw(v0, MemOperand(a2, 0));
2352 __ Branch(&runtime, eq, v0, Operand(a1));
2354 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
2356 // Check if the exception is a termination. If so, throw as uncatchable.
2357 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
2358 Label termination_exception;
2359 __ Branch(&termination_exception, eq, v0, Operand(a0));
  __ Throw(v0);

  __ bind(&termination_exception);
2364 __ ThrowUncatchable(v0);
  __ bind(&failure);
  // For failure and exception return null.
  __ li(v0, Operand(isolate()->factory()->null_value()));
  __ DropAndRet(4);

  // Process the result from the native regexp code.
  __ bind(&success);
  __ lw(a1,
        FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
  // Multiplying by 2 comes for free since a1 is smi-tagged.
2377 STATIC_ASSERT(kSmiTag == 0);
2378 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2379 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
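  // (Example: one capture is the smi 1 with raw value 2; adding raw 2 yields
  // 4 == (1 + 1) * 2 capture registers.)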
2381 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
2382 __ JumpIfSmi(a0, &runtime);
2383 __ GetObjectType(a0, a2, a2);
2384 __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
2385 // Check that the JSArray is in fast case.
2386 __ lw(last_match_info_elements,
2387 FieldMemOperand(a0, JSArray::kElementsOffset));
2388 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2389 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2390 __ Branch(&runtime, ne, a0, Operand(at));
2391 // Check that the last match info has space for the capture registers and the
2392 // additional information.
  __ lw(a0,
        FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2395 __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
2396 __ sra(at, a0, kSmiTagSize);
2397 __ Branch(&runtime, gt, a2, Operand(at));
2399 // a1: number of capture registers
2400 // subject: subject string
2401 // Store the capture count.
2402 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
2403 __ sw(a2, FieldMemOperand(last_match_info_elements,
2404 RegExpImpl::kLastCaptureCountOffset));
2405 // Store last subject and last input.
  __ sw(subject,
        FieldMemOperand(last_match_info_elements,
                        RegExpImpl::kLastSubjectOffset));
  __ mov(a2, subject);
  __ RecordWriteField(last_match_info_elements,
                      RegExpImpl::kLastSubjectOffset,
                      subject,
                      t3,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs);
  __ mov(subject, a2);
  __ sw(subject,
        FieldMemOperand(last_match_info_elements,
                        RegExpImpl::kLastInputOffset));
  __ RecordWriteField(last_match_info_elements,
                      RegExpImpl::kLastInputOffset,
                      subject,
                      t3,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs);
2428 ExternalReference address_of_static_offsets_vector =
2429 ExternalReference::address_of_static_offsets_vector(isolate());
2430 __ li(a2, Operand(address_of_static_offsets_vector));
2432 // a1: number of capture registers
2433 // a2: offsets vector
2434 Label next_capture, done;
2435 // Capture register counter starts from number of capture registers and
2436 // counts down until wrapping after zero.
  __ Addu(a0,
          last_match_info_elements,
          Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2440 __ bind(&next_capture);
2441 __ Subu(a1, a1, Operand(1));
2442 __ Branch(&done, lt, a1, Operand(zero_reg));
2443 // Read the value from the static offsets vector buffer.
2444 __ lw(a3, MemOperand(a2, 0));
2445 __ addiu(a2, a2, kPointerSize);
2446 // Store the smi value in the last match info.
2447 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
2448 __ sw(a3, MemOperand(a0, 0));
2449 __ Branch(&next_capture, USE_DELAY_SLOT);
2450 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
  __ bind(&done);

  // Return last match info.
  __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
  __ DropAndRet(4);
2458 // Do the runtime call to execute the regexp.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2462 // Deferred code for string handling.
2463 // (6) Not a long external string? If yes, go to (8).
  __ bind(&not_seq_nor_cons);
  __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
2468 // (7) External string. Make it, offset-wise, look like a sequential string.
2469 __ bind(&external_string);
2470 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2471 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2472 if (FLAG_debug_code) {
2473 // Assert that we do not have a cons or slice (indirect strings) here.
2474 // Sequential strings have already been ruled out.
2475 __ And(at, a0, Operand(kIsIndirectStringMask));
    __ Assert(eq,
              kExternalStringExpectedButNotFound,
              at,
              Operand(zero_reg));
  }
  __ lw(subject,
        FieldMemOperand(subject, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Subu(subject,
          subject,
          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
2488 __ jmp(&seq_string); // Go to (5).
2490 // (8) Short external string or not a string? If yes, bail out to runtime.
  __ bind(&not_long_external);
  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
2493 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
2494 __ Branch(&runtime, ne, at, Operand(zero_reg));
2496 // (9) Sliced string. Replace subject with parent. Go to (4).
2497 // Load offset into t0 and replace subject string with parent.
2498 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2499 __ sra(t0, t0, kSmiTagSize);
2500 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2501 __ jmp(&check_underlying); // Go to (4).
2502 #endif // V8_INTERPRETED_REGEXP
2506 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2507 // Cache the called function in a feedback vector slot. Cache states
  // are uninitialized, monomorphic (indicated by a JSFunction), and
  // megamorphic.
2510 // a0 : number of arguments to the construct function
2511 // a1 : the function to call
2512 // a2 : Feedback vector
2513 // a3 : slot in feedback vector (Smi)
2514 Label initialize, done, miss, megamorphic, not_array_function;
2516 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2517 masm->isolate()->heap()->megamorphic_symbol());
2518 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2519 masm->isolate()->heap()->uninitialized_symbol());
2521 // Load the cache state into t0.
2522 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2523 __ Addu(t0, a2, Operand(t0));
2524 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
2526 // A monomorphic cache hit or an already megamorphic state: invoke the
2527 // function without changing the state.
2528 __ Branch(&done, eq, t0, Operand(a1));
2530 if (!FLAG_pretenuring_call_new) {
2531 // If we came here, we need to see if we are the array function.
    // If we didn't have a matching function, and we didn't find the megamorphic
    // sentinel, then we have in the slot either some other function or an
2534 // AllocationSite. Do a map check on the object in a3.
2535 __ lw(t1, FieldMemOperand(t0, 0));
2536 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2537 __ Branch(&miss, ne, t1, Operand(at));
2539 // Make sure the function is the Array() function
2540 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
    __ Branch(&megamorphic, ne, a1, Operand(t0));
    __ jmp(&done);
  }

  __ bind(&miss);

  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
  // megamorphic.
  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2550 __ Branch(&initialize, eq, t0, Operand(at));
2551 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2552 // write-barrier is needed.
2553 __ bind(&megamorphic);
2554 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2555 __ Addu(t0, a2, Operand(t0));
2556 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2557 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
  __ jmp(&done);

  // An uninitialized cache is patched with the function.
2561 __ bind(&initialize);
2562 if (!FLAG_pretenuring_call_new) {
2563 // Make sure the function is the Array() function.
2564 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2565 __ Branch(¬_array_function, ne, a1, Operand(t0));
    // The target function is the Array constructor.
    // Create an AllocationSite if we don't already have it, store it in the
    // slot.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      const RegList kSavedRegs =
          1 << 4 |  // a0
          1 << 5 |  // a1
          1 << 6 |  // a2
          1 << 7;   // a3

      // Arguments register must be smi-tagged to call out.
      __ SmiTag(a0);
      __ MultiPush(kSavedRegs);

      CreateAllocationSiteStub create_stub(masm->isolate());
      __ CallStub(&create_stub);

      __ MultiPop(kSavedRegs);
      __ SmiUntag(a0);
    }
    __ Branch(&done);

    __ bind(&not_array_function);
  }
2593 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2594 __ Addu(t0, a2, Operand(t0));
2595 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2596 __ sw(a1, MemOperand(t0, 0));
2598 __ Push(t0, a2, a1);
2599 __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Pop(t0, a2, a1);

  __ bind(&done);
}
2607 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2608 __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2609 __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
2611 // Do not transform the receiver for strict mode functions.
2612 int32_t strict_mode_function_mask =
2613 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
2614 // Do not transform the receiver for native (Compilerhints already in a3).
2615 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
2616 __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
  __ Branch(cont, ne, at, Operand(zero_reg));
}
2621 static void EmitSlowCase(MacroAssembler* masm,
                         int argc,
                         Label* non_function) {
2624 // Check for function proxy.
2625 __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
2626 __ push(a1); // put proxy as additional argument
2627 __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
2628 __ mov(a2, zero_reg);
2629 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
  {
    Handle<Code> adaptor =
        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
    __ Jump(adaptor, RelocInfo::CODE_TARGET);
  }
2636 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2637 // of the original receiver from the call site).
2638 __ bind(non_function);
2639 __ sw(a1, MemOperand(sp, argc * kPointerSize));
2640 __ li(a0, Operand(argc)); // Set up the number of arguments.
2641 __ mov(a2, zero_reg);
2642 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET);
}
2648 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2649 // Wrap the receiver and patch it back onto the stack.
2650 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
    __ Push(a1, a3);
    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
    __ pop(a1);
  }
  __ Branch(USE_DELAY_SLOT, cont);
  __ sw(v0, MemOperand(sp, argc * kPointerSize));
}
2660 static void CallFunctionNoFeedback(MacroAssembler* masm,
2661 int argc, bool needs_checks,
2662 bool call_as_method) {
2663 // a1 : the function to call
2664 Label slow, non_function, wrap, cont;
  if (needs_checks) {
    // Check that the function is really a JavaScript function.
2668 // a1: pushed function (to be verified)
2669 __ JumpIfSmi(a1, &non_function);
2671 // Goto slow case if we do not have a function.
2672 __ GetObjectType(a1, t0, t0);
    __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
  }
2676 // Fast-case: Invoke the function now.
2677 // a1: pushed function
2678 ParameterCount actual(argc);
  if (call_as_method) {
    if (needs_checks) {
      EmitContinueIfStrictOrNative(masm, &cont);
    }

    // Compute the receiver in sloppy mode.
    __ lw(a3, MemOperand(sp, argc * kPointerSize));

    if (needs_checks) {
      __ JumpIfSmi(a3, &wrap);
      __ GetObjectType(a3, t0, t0);
      __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
    } else {
      __ jmp(&wrap);
    }

    __ bind(&cont);
  }

  __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
  if (needs_checks) {
    // Slow-case: Non-function called.
    __ bind(&slow);
    EmitSlowCase(masm, argc, &non_function);
  }

  if (call_as_method) {
    __ bind(&wrap);
    // Wrap the receiver and patch it back onto the stack.
    EmitWrapCase(masm, argc, &cont);
  }
}
2715 void CallFunctionStub::Generate(MacroAssembler* masm) {
  CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
}
2720 void CallConstructStub::Generate(MacroAssembler* masm) {
2721 // a0 : number of arguments
2722 // a1 : the function to call
2723 // a2 : feedback vector
2724 // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
2725 Label slow, non_function_call;
2727 // Check that the function is not a smi.
2728 __ JumpIfSmi(a1, &non_function_call);
2729 // Check that the function is a JSFunction.
2730 __ GetObjectType(a1, t0, t0);
2731 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2733 if (RecordCallTarget()) {
2734 GenerateRecordCallTarget(masm);
2736 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2737 __ Addu(t1, a2, at);
2738 if (FLAG_pretenuring_call_new) {
2739 // Put the AllocationSite from the feedback vector into a2.
2740 // By adding kPointerSize we encode that we know the AllocationSite
2741 // entry is at the feedback vector slot given by a3 + 1.
2742 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
    } else {
      Label feedback_register_initialized;
2745 // Put the AllocationSite from the feedback vector into a2, or undefined.
2746 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
2747 __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
2748 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2749 __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
2750 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
2751 __ bind(&feedback_register_initialized);
    }

    __ AssertUndefinedOrAllocationSite(a2, t1);
  }
2757 // Jump to the function-specific construct stub.
2758 Register jmp_reg = t0;
2759 __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2760 __ lw(jmp_reg, FieldMemOperand(jmp_reg,
2761 SharedFunctionInfo::kConstructStubOffset));
  __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(at);

  // a0: number of arguments
  // a1: called object
  // t0: object type
  Label do_call;
  __ bind(&slow);
  __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
  __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
  __ jmp(&do_call);

  __ bind(&non_function_call);
  __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
  __ bind(&do_call);
  // Set expected number of arguments to zero (not changing a0).
  __ li(a2, Operand(0, RelocInfo::NONE32));
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET);
}
2784 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2785 __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2786 __ lw(vector, FieldMemOperand(vector,
2787 JSFunction::kSharedFunctionInfoOffset));
2788 __ lw(vector, FieldMemOperand(vector,
                                SharedFunctionInfo::kFeedbackVectorOffset));
}
2793 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
  // a1 - function
  // a3 - slot id
  Label miss;

  EmitLoadTypeFeedbackVector(masm, a2);
2800 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
2801 __ Branch(&miss, ne, a1, Operand(at));
2803 __ li(a0, Operand(arg_count()));
2804 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
2805 __ Addu(at, a2, Operand(at));
2806 __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize));
2808 // Verify that t0 contains an AllocationSite
2809 __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
2810 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
  __ Branch(&miss, ne, t1, Operand(at));

  __ mov(a2, t0);
  ArrayConstructorStub stub(masm->isolate(), arg_count());
  __ TailCallStub(&stub);

  __ bind(&miss);
  GenerateMiss(masm);

  // The slow case; we need this no matter what to complete a call after a miss.
  CallFunctionNoFeedback(masm,
                         arg_count(),
                         true,
                         CallAsMethod());

  // Unreachable.
  __ stop("Unexpected code address");
}
2831 void CallICStub::Generate(MacroAssembler* masm) {
  // a1 - function
  // a3 - slot id (Smi)
2834 Label extra_checks_or_miss, slow_start;
2835 Label slow, non_function, wrap, cont;
2836 Label have_js_function;
2837 int argc = arg_count();
2838 ParameterCount actual(argc);
2840 EmitLoadTypeFeedbackVector(masm, a2);
  // The checks. First, does a1 match the recorded monomorphic target?
2843 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2844 __ Addu(t0, a2, Operand(t0));
2845 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
2846 __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0));
2848 __ bind(&have_js_function);
2849 if (CallAsMethod()) {
2850 EmitContinueIfStrictOrNative(masm, &cont);
2851 // Compute the receiver in sloppy mode.
2852 __ lw(a3, MemOperand(sp, argc * kPointerSize));
2854 __ JumpIfSmi(a3, &wrap);
2855 __ GetObjectType(a3, t0, t0);
    __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));

    __ bind(&cont);
  }

  __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());

  __ bind(&slow);
  EmitSlowCase(masm, argc, &non_function);

  if (CallAsMethod()) {
    __ bind(&wrap);
    EmitWrapCase(masm, argc, &cont);
  }

  __ bind(&extra_checks_or_miss);
  Label miss;
2874 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2875 __ Branch(&slow_start, eq, t0, Operand(at));
2876 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2877 __ Branch(&miss, eq, t0, Operand(at));
2879 if (!FLAG_trace_ic) {
2880 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2881 // to handle it here. More complex cases are dealt with in the runtime.
2882 __ AssertNotSmi(t0);
2883 __ GetObjectType(t0, t1, t1);
2884 __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
2885 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2886 __ Addu(t0, a2, Operand(t0));
2887 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2888 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
2889 // We have to update statistics for runtime profiling.
2890 const int with_types_offset =
2891 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
2892 __ lw(t0, FieldMemOperand(a2, with_types_offset));
2893 __ Subu(t0, t0, Operand(Smi::FromInt(1)));
2894 __ sw(t0, FieldMemOperand(a2, with_types_offset));
2895 const int generic_offset =
2896 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
2897 __ lw(t0, FieldMemOperand(a2, generic_offset));
2898 __ Addu(t0, t0, Operand(Smi::FromInt(1)));
2899 __ sw(t0, FieldMemOperand(a2, generic_offset));
2900 __ Branch(&slow_start);
  }

  // We are here because tracing is on or we are going monomorphic.
  __ bind(&miss);
  GenerateMiss(masm);

  // the slow case
  __ bind(&slow_start);
2909 // Check that the function is really a JavaScript function.
  // a1: pushed function (to be verified)
2911 __ JumpIfSmi(a1, &non_function);
2913 // Goto slow case if we do not have a function.
2914 __ GetObjectType(a1, t0, t0);
2915 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
  __ Branch(&have_js_function);
}
2920 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2921 // Get the receiver of the function from the stack; 1 ~ return address.
2922 __ lw(t0, MemOperand(sp, (arg_count() + 1) * kPointerSize));
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
2927 // Push the receiver and the function and feedback info.
2928 __ Push(t0, a1, a2, a3);
    // Call the entry.
    IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
                                               : IC::kCallIC_Customization_Miss;

    ExternalReference miss = ExternalReference(IC_Utility(id),
                                               masm->isolate());
    __ CallExternalReference(miss, 4);

    // Move result to a1 and exit the internal frame.
    __ mov(a1, v0);
  }
}
2944 // StringCharCodeAtGenerator.
2945 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2946 DCHECK(!t0.is(index_));
2947 DCHECK(!t0.is(result_));
2948 DCHECK(!t0.is(object_));
2949 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2950 // If the receiver is a smi trigger the non-string case.
2951 __ JumpIfSmi(object_, receiver_not_string_);
2953 // Fetch the instance type of the receiver into result register.
2954 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2955 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2956 // If the receiver is not a string trigger the non-string case.
2957 __ And(t0, result_, Operand(kIsNotStringMask));
    __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
  }
2961 // If the index is non-smi trigger the non-smi case.
2962 __ JumpIfNotSmi(index_, &index_not_smi_);
2964 __ bind(&got_smi_index_);
2966 // Check for index out of range.
2967 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
2968 __ Branch(index_out_of_range_, ls, t0, Operand(index_));
2970 __ sra(index_, index_, kSmiTagSize);
  StringCharLoadGenerator::Generate(masm,
                                    object_,
                                    index_,
                                    result_,
                                    &call_runtime_);

  __ sll(result_, result_, kSmiTagSize);
  __ bind(&exit_);
}
2983 void StringCharCodeAtGenerator::GenerateSlow(
2984 MacroAssembler* masm,
2985 const RuntimeCallHelper& call_helper) {
2986 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2988 // Index is not a smi.
2989 __ bind(&index_not_smi_);
2990 // If index is a heap number, try converting it to an integer.
  __ CheckMap(index_,
              result_,
              Heap::kHeapNumberMapRootIndex,
              index_not_number_,
              DONT_DO_SMI_CHECK);
2996 call_helper.BeforeCall(masm);
2997 // Consumed by runtime conversion function:
2998 __ Push(object_, index_);
2999 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3000 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
  } else {
    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
    // NumberToSmi discards numbers that are not exact integers.
    __ CallRuntime(Runtime::kNumberToSmi, 1);
  }
3007 // Save the conversion result before the pop instructions below
3008 // have a chance to overwrite it.
3010 __ Move(index_, v0);
3012 // Reload the instance type.
3013 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3014 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3015 call_helper.AfterCall(masm);
3016 // If index is still not a smi, it must be out of range.
3017 __ JumpIfNotSmi(index_, index_out_of_range_);
3018 // Otherwise, return to the fast path.
3019 __ Branch(&got_smi_index_);
3021 // Call runtime. We get here when the receiver is a string and the
3022 // index is a number, but the code of getting the actual character
3023 // is too complex (e.g., when the string needs to be flattened).
3024 __ bind(&call_runtime_);
3025 call_helper.BeforeCall(masm);
3026 __ sll(index_, index_, kSmiTagSize);
3027 __ Push(object_, index_);
3028 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
3030 __ Move(result_, v0);
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
3039 // -------------------------------------------------------------------------
3040 // StringCharFromCodeGenerator
3042 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3043 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3045 DCHECK(!t0.is(result_));
3046 DCHECK(!t0.is(code_));
3048 STATIC_ASSERT(kSmiTag == 0);
3049 STATIC_ASSERT(kSmiShiftSize == 0);
3050 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
  __ And(t0,
         code_,
         Operand(kSmiTagMask |
                 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
  __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
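  // (t0 is non-zero if code_ is not a smi or encodes a char code above
  // String::kMaxOneByteCharCode, so both checks happen in one test.)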
3057 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3058 // At this point code register contains smi tagged one-byte char code.
3059 STATIC_ASSERT(kSmiTag == 0);
3060 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
3061 __ Addu(result_, result_, t0);
3062 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3063 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case_, eq, result_, Operand(t0));
  __ bind(&exit_);
}
3069 void StringCharFromCodeGenerator::GenerateSlow(
3070 MacroAssembler* masm,
3071 const RuntimeCallHelper& call_helper) {
3072 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3074 __ bind(&slow_case_);
  call_helper.BeforeCall(masm);
  __ push(code_);
  __ CallRuntime(Runtime::kCharFromCode, 1);
  __ Move(result_, v0);

  call_helper.AfterCall(masm);
  __ Branch(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
3087 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
3090 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                          Register dest,
                                          Register src,
                                          Register count,
                                          Register scratch,
                                          String::Encoding encoding) {
3096 if (FLAG_debug_code) {
3097 // Check that destination is word aligned.
3098 __ And(scratch, dest, Operand(kPointerAlignmentMask));
    __ Check(eq,
             kDestinationOfCopyNotAligned,
             scratch,
             Operand(zero_reg));
  }

  // Assumes word reads and writes are little endian.
  // Nothing to do for zero characters.
  Label done;

  if (encoding == String::TWO_BYTE_ENCODING) {
    __ Addu(count, count, count);
  }
3113 Register limit = count; // Read until dest equals this.
3114 __ Addu(limit, dest, Operand(count));
3116 Label loop_entry, loop;
3117 // Copy bytes from src to dest until dest hits limit.
3118 __ Branch(&loop_entry);
  __ bind(&loop);
  __ lbu(scratch, MemOperand(src));
3121 __ Addu(src, src, Operand(1));
3122 __ sb(scratch, MemOperand(dest));
3123 __ Addu(dest, dest, Operand(1));
3124 __ bind(&loop_entry);
  __ Branch(&loop, lt, dest, Operand(limit));

  __ bind(&done);
}
3131 void SubStringStub::Generate(MacroAssembler* masm) {
  Label runtime;
  // Stack frame on entry.
  //  ra: return address
  //  sp[0]: to
  //  sp[4]: from
  //  sp[8]: string
3139 // This stub is called from the native-call %_SubString(...), so
3140 // nothing can be assumed about the arguments. It is tested that:
3141 // "string" is a sequential string,
3142 // both "from" and "to" are smis, and
3143 // 0 <= from <= to <= string.length.
3144 // If any of these assumptions fail, we call the runtime system.
3146 const int kToOffset = 0 * kPointerSize;
3147 const int kFromOffset = 1 * kPointerSize;
3148 const int kStringOffset = 2 * kPointerSize;
3150 __ lw(a2, MemOperand(sp, kToOffset));
3151 __ lw(a3, MemOperand(sp, kFromOffset));
3152 STATIC_ASSERT(kFromOffset == kToOffset + 4);
3153 STATIC_ASSERT(kSmiTag == 0);
3154 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3156 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
3157 // safe in this case.
3158 __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
3159 __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
3160 // Both a2 and a3 are untagged integers.
3162 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
3164 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
3165 __ Subu(a2, a2, a3);
3167 // Make sure first argument is a string.
3168 __ lw(v0, MemOperand(sp, kStringOffset));
3169 __ JumpIfSmi(v0, &runtime);
3170 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
3171 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3172 __ And(t0, a1, Operand(kIsNotStringMask));
3174 __ Branch(&runtime, ne, t0, Operand(zero_reg));
  Label single_char;
  __ Branch(&single_char, eq, a2, Operand(1));
  // Short-cut for the case of trivial substring.
  Label return_v0;
  // v0: original string
  // a2: result string length
  __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
  __ sra(t0, t0, 1);  // Untag the string length.
  // Return original string.
3186 __ Branch(&return_v0, eq, a2, Operand(t0));
3187 // Longer than original string's length or negative: unsafe arguments.
3188 __ Branch(&runtime, hi, a2, Operand(t0));
3189 // Shorter than original string's length: an actual substring.
3191 // Deal with different string types: update the index if necessary
3192 // and put the underlying string into t1.
3193 // v0: original string
3194 // a1: instance type
  // a2: length
  // a3: from index (untagged)
3197 Label underlying_unpacked, sliced_string, seq_or_external_string;
3198 // If the string is not indirect, it can only be sequential or external.
3199 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3200 STATIC_ASSERT(kIsIndirectStringMask != 0);
3201 __ And(t0, a1, Operand(kIsIndirectStringMask));
3202 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
3203 // t0 is used as a scratch register and can be overwritten in either case.
3204 __ And(t0, a1, Operand(kSlicedNotConsMask));
3205 __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
3206 // Cons string. Check whether it is flat, then fetch first part.
3207 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
3208 __ LoadRoot(t0, Heap::kempty_stringRootIndex);
3209 __ Branch(&runtime, ne, t1, Operand(t0));
3210 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
3211 // Update instance type.
3212 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3213 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3214 __ jmp(&underlying_unpacked);
3216 __ bind(&sliced_string);
3217 // Sliced string. Fetch parent and correct start index by offset.
3218 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3219 __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3220 __ sra(t0, t0, 1); // Add offset to index.
3221 __ Addu(a3, a3, t0);
3222 // Update instance type.
3223 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3224 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3225 __ jmp(&underlying_unpacked);
3227 __ bind(&seq_or_external_string);
3228 // Sequential or external string. Just move string to the expected register.
  __ mov(t1, v0);

  __ bind(&underlying_unpacked);
3233 if (FLAG_string_slices) {
    Label copy_routine;
    // t1: underlying subject string
    // a1: instance type of underlying subject string
    // a2: length
    // a3: adjusted start index (untagged)
3239 // Short slice. Copy instead of slicing.
    __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
3241 // Allocate new sliced string. At this point we do not reload the instance
3242 // type including the string encoding because we simply rely on the info
3243 // provided by the original string. It does not matter if the original
3244 // string's encoding is wrong because we always have to recheck encoding of
3245 // the newly created string's parent anyways due to externalized strings.
3246 Label two_byte_slice, set_slice_header;
3247 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3248 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3249 __ And(t0, a1, Operand(kStringEncodingMask));
3250 __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
3251 __ AllocateOneByteSlicedString(v0, a2, t2, t3, &runtime);
3252 __ jmp(&set_slice_header);
3253 __ bind(&two_byte_slice);
3254 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
    __ bind(&set_slice_header);
    __ sll(a3, a3, 1);  // Re-tag the start index as a smi.
    __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
    __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
    __ jmp(&return_v0);

    __ bind(&copy_routine);
  }
3264 // t1: underlying subject string
  // a1: instance type of underlying subject string
  // a2: length
  // a3: adjusted start index (untagged)
3268 Label two_byte_sequential, sequential_string, allocate_result;
3269 STATIC_ASSERT(kExternalStringTag != 0);
3270 STATIC_ASSERT(kSeqStringTag == 0);
3271 __ And(t0, a1, Operand(kExternalStringTag));
3272 __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
3274 // Handle external string.
3275 // Rule out short external strings.
3276 STATIC_ASSERT(kShortExternalStringTag != 0);
3277 __ And(t0, a1, Operand(kShortExternalStringTag));
3278 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3279 __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
3280 // t1 already points to the first character of underlying string.
3281 __ jmp(&allocate_result);
3283 __ bind(&sequential_string);
3284 // Locate first character of underlying subject string.
3285 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3286 __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3288 __ bind(&allocate_result);
  // Sequential one-byte string.  Allocate the result.
3290 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3291 __ And(t0, a1, Operand(kStringEncodingMask));
3292 __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
3294 // Allocate and copy the resulting ASCII string.
3295 __ AllocateOneByteString(v0, a2, t0, t2, t3, &runtime);
3297 // Locate first character of substring to copy.
3298 __ Addu(t1, t1, a3);
3300 // Locate first character of result.
3301 __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3303 // v0: result string
3304 // a1: first character of result string
3305 // a2: result string length
3306 // t1: first character of substring to copy
3307 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3308 StringHelper::GenerateCopyCharacters(
3309 masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
  __ jmp(&return_v0);

  // Allocate and copy the resulting two-byte string.
3313 __ bind(&two_byte_sequential);
3314 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
3316 // Locate first character of substring to copy.
3317 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
  __ sll(t0, a3, 1);
  __ Addu(t1, t1, t0);
3320 // Locate first character of result.
3321 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3323 // v0: result string.
3324 // a1: first character of result.
3325 // a2: result length.
3326 // t1: first character of substring to copy.
3327 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3328 StringHelper::GenerateCopyCharacters(
3329 masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
3331 __ bind(&return_v0);
3332 Counters* counters = isolate()->counters();
  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
  __ DropAndRet(3);

  // Just jump to runtime to create the sub string.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString, 3, 1);
3340 __ bind(&single_char);
3341 // v0: original string
3342 // a1: instance type
  // a2: length
  // a3: from index (untagged)
  __ SmiTag(a3, a3);
  StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
                                  STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
  generator.GenerateFast(masm);
  __ DropAndRet(3);
  generator.SkipSlow(masm, &runtime);
}


void StringHelper::GenerateFlatOneByteStringEquals(
    MacroAssembler* masm, Register left, Register right, Register scratch1,
    Register scratch2, Register scratch3) {
  Register length = scratch1;

  // Compare lengths.
  Label strings_not_equal, check_zero_length;
  __ lw(length, FieldMemOperand(left, String::kLengthOffset));
  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
  __ bind(&strings_not_equal);
  DCHECK(is_int16(NOT_EQUAL));
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));

  // Check if the length is zero.
  Label compare_chars;
  __ bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
  DCHECK(is_int16(EQUAL));
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));

  // Compare characters.
  __ bind(&compare_chars);
  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
                                  v0, &strings_not_equal);

  // Characters are equal.
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
}


void StringHelper::GenerateCompareFlatOneByteStrings(
    MacroAssembler* masm, Register left, Register right, Register scratch1,
    Register scratch2, Register scratch3, Register scratch4) {
  Label result_not_equal, compare_lengths;
  // Find minimum length and length difference.
  __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ Subu(scratch3, scratch1, Operand(scratch2));
  Register length_delta = scratch3;
  __ slt(scratch4, scratch2, scratch1);
  __ Movn(scratch1, scratch2, scratch4);
  Register min_length = scratch1;
  STATIC_ASSERT(kSmiTag == 0);
  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));

  // Compare loop.
  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
                                  scratch4, v0, &result_not_equal);

  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
  // Use length_delta as result if it's zero.
  __ mov(scratch2, length_delta);
  __ mov(scratch4, zero_reg);
  __ mov(v0, zero_reg);

  __ bind(&result_not_equal);
  // Conditionally update the result based either on length_delta or
  // the last comparison performed in the loop above.
  Label ret;
  __ Branch(&ret, eq, scratch2, Operand(scratch4));
  __ li(v0, Operand(Smi::FromInt(GREATER)));
  __ Branch(&ret, gt, scratch2, Operand(scratch4));
  __ li(v0, Operand(Smi::FromInt(LESS)));
  __ bind(&ret);
  __ Ret();
}


void StringHelper::GenerateOneByteCharsCompareLoop(
    MacroAssembler* masm, Register left, Register right, Register length,
    Register scratch1, Register scratch2, Register scratch3,
    Label* chars_not_equal) {
  // Change index to run from -length to -1 by adding length to string
  // start. This means that loop ends when index reaches zero, which
  // doesn't need an additional compare.
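  // For example, with length == 3 the adjusted bases below point one
  // character past each string's last byte, so the loop reads offsets -3,
  // -2 and -1; the increment that brings the index to zero doubles as the
  // loop's termination test.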
  __ SmiUntag(length);
  __ Addu(scratch1, length,
          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  __ Addu(left, left, Operand(scratch1));
  __ Addu(right, right, Operand(scratch1));
  __ Subu(length, zero_reg, length);
  Register index = length;  // index = -length;

  // Compare loop.
  Label loop;
  __ bind(&loop);
  __ Addu(scratch3, left, index);
  __ lbu(scratch1, MemOperand(scratch3));
  __ Addu(scratch3, right, index);
  __ lbu(scratch2, MemOperand(scratch3));
  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
  __ Addu(index, index, 1);
  __ Branch(&loop, ne, index, Operand(zero_reg));
}


void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  Counters* counters = isolate()->counters();

  // Stack frame on entry.
  //  sp[0]: right string
  //  sp[4]: left string
  __ lw(a1, MemOperand(sp, 1 * kPointerSize));  // Left.
  __ lw(a0, MemOperand(sp, 0 * kPointerSize));  // Right.

  Label not_same;
  __ Branch(&not_same, ne, a0, Operand(a1));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
  __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
  __ DropAndRet(2);

  __ bind(&not_same);

  // Check that both objects are sequential one-byte strings.
  __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);

  // Compare flat ASCII strings natively. Remove arguments from stack first.
  __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
  __ Addu(sp, sp, Operand(2 * kPointerSize));
  StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);

  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}


void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a1    : left
  //  -- a0    : right
  //  -- ra    : return address
  // -----------------------------------

  // Load a2 with the allocation site. We stick an undefined dummy value here
  // and replace it with the real allocation site later when we instantiate this
  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
  __ li(a2, handle(isolate()->heap()->undefined_value()));

  // Make sure that we actually patched the allocation site.
  if (FLAG_debug_code) {
    __ And(at, a2, Operand(kSmiTagMask));
    __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
    __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
    __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
  }

  // Tail call into the stub that handles binary operations with allocation
  // sites.
  BinaryOpWithAllocationSiteStub stub(isolate(), state());
  __ TailCallStub(&stub);
}


void CompareICStub::GenerateSmis(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::SMI);
  Label miss;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &miss);

  if (GetCondition() == eq) {
    // For equality we do not care about the sign of the result.
    __ Ret(USE_DELAY_SLOT);
    __ Subu(v0, a0, a1);
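    // Because EQUAL == 0, a zero smi difference is already the EQUAL
    // sentinel, and any non-zero difference correctly signals inequality,
    // so the raw subtraction is a valid result for the equality case.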
  } else {
    // Untag before subtracting to avoid handling overflow.
    __ SmiUntag(a1);
    __ SmiUntag(a0);
    __ Ret(USE_DELAY_SLOT);
    __ Subu(v0, a1, a0);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::NUMBER);

  Label generic_stub;
  Label unordered, maybe_undefined1, maybe_undefined2;
  Label miss;

  if (left() == CompareICState::SMI) {
    __ JumpIfNotSmi(a1, &miss);
  }
  if (right() == CompareICState::SMI) {
    __ JumpIfNotSmi(a0, &miss);
  }

  // Inlining the double comparison and falling back to the general compare
  // stub if NaN is involved.
  // Load left and right operand.
  Label done, left, left_smi, right_smi;
  __ JumpIfSmi(a0, &right_smi);
  __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
              DONT_DO_SMI_CHECK);
  __ Subu(a2, a0, Operand(kHeapObjectTag));
  __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
  __ Branch(&left);
  __ bind(&right_smi);
  __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
  FPURegister single_scratch = f6;
  __ mtc1(a2, single_scratch);
  __ cvt_d_w(f2, single_scratch);

  __ bind(&left);
  __ JumpIfSmi(a1, &left_smi);
  __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
              DONT_DO_SMI_CHECK);
  __ Subu(a2, a1, Operand(kHeapObjectTag));
  __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
  __ Branch(&done);
  __ bind(&left_smi);
  __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
  single_scratch = f8;
  __ mtc1(a2, single_scratch);
  __ cvt_d_w(f0, single_scratch);

  __ bind(&done);

  // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
  Label fpu_eq, fpu_lt;
  // Test if equal, and also handle the unordered/NaN case.
  __ BranchF(&fpu_eq, &unordered, eq, f0, f2);

  // Test if less (unordered case is already handled).
  __ BranchF(&fpu_lt, NULL, lt, f0, f2);

  // Otherwise it's greater, so just fall thru, and return.
  DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(GREATER));

  __ bind(&fpu_eq);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(EQUAL));

  __ bind(&fpu_lt);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(LESS));

  __ bind(&unordered);
  __ bind(&generic_stub);
  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
                     CompareICState::GENERIC, CompareICState::GENERIC);
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ bind(&maybe_undefined1);
  if (Token::IsOrderedRelationalCompareOp(op())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&miss, ne, a0, Operand(at));
    __ JumpIfSmi(a1, &unordered);
    __ GetObjectType(a1, a2, a2);
    __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
    __ jmp(&unordered);
  }

  __ bind(&maybe_undefined2);
  if (Token::IsOrderedRelationalCompareOp(op())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&unordered, eq, a1, Operand(at));
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;
  Register tmp1 = a2;
  Register tmp2 = a3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are internalized strings.
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ Or(tmp1, tmp1, Operand(tmp2));
  __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ Branch(&miss, ne, at, Operand(zero_reg));

  // Make sure a0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  DCHECK(right.is(a0));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(v0, right);
  // Internalized strings are compared by identity.
  __ Ret(ne, left, Operand(right));
  DCHECK(is_int16(EQUAL));
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::UNIQUE_NAME);
  DCHECK(GetCondition() == eq);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;
  Register tmp1 = a2;
  Register tmp2 = a3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are unique names. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));

  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);

  // Use a0 as result
  __ mov(v0, a0);

  // Unique names are compared by identity.
  Label done;
  __ Branch(&done, ne, left, Operand(right));
  // Make sure a0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  DCHECK(right.is(a0));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
  __ bind(&done);
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateStrings(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::STRING);
  Label miss;

  bool equality = Token::IsEqualityOp(op());

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;
  Register tmp1 = a2;
  Register tmp2 = a3;
  Register tmp3 = t0;
  Register tmp4 = t1;
  Register tmp5 = t2;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  __ Or(tmp3, tmp1, tmp2);
  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
  __ Branch(&miss, ne, tmp5, Operand(zero_reg));

  // Fast check for identical strings.
  Label left_ne_right;
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Branch(&left_ne_right, ne, left, Operand(right));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, zero_reg);  // In the delay slot.
  __ bind(&left_ne_right);

  // Handle not identical strings.

  // Check that both strings are internalized strings. If they are, we're done
  // because we already know they are not identical. We know they are both
  // strings.
  if (equality) {
    DCHECK(GetCondition() == eq);
    STATIC_ASSERT(kInternalizedTag == 0);
    __ Or(tmp3, tmp1, Operand(tmp2));
    __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
    Label is_symbol;
    __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
    // Make sure a0 is non-zero. At this point input operands are
    // guaranteed to be non-zero.
    DCHECK(right.is(a0));
    __ Ret(USE_DELAY_SLOT);
    __ mov(v0, a0);  // In the delay slot.
    __ bind(&is_symbol);
  }

  // Check that both strings are sequential one-byte.
  Label runtime;
  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
                                                    &runtime);

  // Compare flat one-byte strings. Returns when done.
  if (equality) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
                                                  tmp3);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
                                                    tmp2, tmp3, tmp4);
  }

  // Handle more complex cases in runtime.
  __ bind(&runtime);
  __ Push(left, right);
  if (equality) {
    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
  } else {
    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateObjects(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::OBJECT);
  Label miss;
  __ And(a2, a1, Operand(a0));
  __ JumpIfSmi(a2, &miss);

  __ GetObjectType(a0, a2, a2);
  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
  __ GetObjectType(a1, a2, a2);
  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));

  DCHECK(GetCondition() == eq);
  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, a0, a1);

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
  Label miss;
  __ And(a2, a1, Operand(a0));
  __ JumpIfSmi(a2, &miss);
  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
  __ Branch(&miss, ne, a2, Operand(known_map_));
  __ Branch(&miss, ne, a3, Operand(known_map_));

  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, a0, a1);

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateMiss(MacroAssembler* masm) {
  {
    // Call the runtime system in a fresh internal frame.
    ExternalReference miss =
        ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(a1, a0);
    __ Push(ra, a1, a0);
    __ li(t0, Operand(Smi::FromInt(op())));
    __ addiu(sp, sp, -kPointerSize);
    __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
    __ sw(t0, MemOperand(sp));  // In the delay slot.
    // Compute the entry point of the rewritten stub.
    __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
    // Restore registers.
    __ Pop(a1, a0, ra);
  }
  __ Jump(a2);
}


void DirectCEntryStub::Generate(MacroAssembler* masm) {
  // Make place for arguments to fit C calling convention. Most of the callers
  // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
  // so they handle stack restoring and we don't have to do that here.
  // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
  // kCArgsSlotsSize stack space after the call.
  __ Subu(sp, sp, Operand(kCArgsSlotsSize));
  // Place the return address on the stack, making the call
  // GC safe. The RegExp backend also relies on this.
  __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
  __ Call(t9);  // Call the C++ function.
  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));

  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    // In case of an error the return address may point to a memory area
    // filled with kZapValue by the GC.
    // Dereference the address and check for this.
    __ lw(t0, MemOperand(t9));
    __ Assert(ne, kReceivedInvalidReturnAddress, t0,
        Operand(reinterpret_cast<uint32_t>(kZapValue)));
  }
  __ Jump(t9);
}


void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    Register target) {
  intptr_t loc =
      reinterpret_cast<intptr_t>(GetCode().location());
  __ Move(t9, target);
  __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
  __ Call(ra);
}


void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                      Label* miss,
                                                      Label* done,
                                                      Register receiver,
                                                      Register properties,
                                                      Handle<Name> name,
                                                      Register scratch0) {
  DCHECK(name->IsUniqueName());
  // If names of slots in range from 1 to kProbes - 1 for the hash value are
  // not equal to the name and kProbes-th slot is not used (its name is the
  // undefined value), it guarantees the hash table doesn't contain the
  // property. It's true even if some slots represent deleted properties
  // (their names are the hole value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // scratch0 points to properties hash.
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = scratch0;
    // Capacity is smi 2^n.
    __ lw(index, FieldMemOperand(properties, kCapacityOffset));
    __ Subu(index, index, Operand(1));
    __ And(index, index, Operand(
        Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
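    // Both the decremented capacity and the Smi::FromInt() constant are
    // smis, so the And above produces the masked probe index still in smi
    // form; this works because the capacity is a power of two.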

    // Scale the index by multiplying by the entry size.
    DCHECK(NameDictionary::kEntrySize == 3);
    __ sll(at, index, 1);
    __ Addu(index, index, at);
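    // index * 3 is computed as index + (index << 1): e.g. an index of 5
    // gives at = 10 and index = 15, the entry's offset in table slots.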

    Register entity_name = scratch0;
    // Having undefined at this place means the name is not contained.
    DCHECK_EQ(kSmiTagSize, 1);
    Register tmp = properties;
    __ sll(scratch0, index, 1);
    __ Addu(tmp, properties, scratch0);
    __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));

    DCHECK(!tmp.is(entity_name));
    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
    __ Branch(done, eq, entity_name, Operand(tmp));

    // Load the hole ready for use below:
    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);

    // Stop if found the property.
    __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));

    Label good;
    __ Branch(&good, eq, entity_name, Operand(tmp));

    // Check if the entry name is not a unique name.
    __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
    __ lbu(entity_name,
           FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
    __ bind(&good);

    // Restore the properties.
    __ lw(properties,
          FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  }

  const int spill_mask =
      (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
       a2.bit() | a1.bit() | a0.bit() | v0.bit());

  __ MultiPush(spill_mask);
  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ li(a1, Operand(Handle<Name>(name)));
  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
  __ CallStub(&stub);
  __ mov(at, v0);
  __ MultiPop(spill_mask);

  __ Branch(done, eq, at, Operand(zero_reg));
  __ Branch(miss, ne, at, Operand(zero_reg));
}


// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
                                                      Label* miss,
                                                      Label* done,
                                                      Register elements,
                                                      Register name,
                                                      Register scratch1,
                                                      Register scratch2) {
  DCHECK(!elements.is(scratch1));
  DCHECK(!elements.is(scratch2));
  DCHECK(!name.is(scratch1));
  DCHECK(!name.is(scratch2));

  __ AssertName(name);

  // Compute the capacity mask.
  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
  __ sra(scratch1, scratch1, kSmiTagSize);  // convert smi to int
  __ Subu(scratch1, scratch1, Operand(1));

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is right
      // shifted in the following and instruction.
      DCHECK(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ Addu(scratch2, scratch2, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
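      // This relies on (hash + (offset << kHashShift)) >> kHashShift being
      // equal to (hash >> kHashShift) + offset whenever the addition does
      // not overflow 32 bits, which is what the DCHECK above ensures.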
    }
    __ srl(scratch2, scratch2, Name::kHashShift);
    __ And(scratch2, scratch1, scratch2);

    // Scale the index by multiplying by the element size.
    DCHECK(NameDictionary::kEntrySize == 3);
    // scratch2 = scratch2 * 3.
    __ sll(at, scratch2, 1);
    __ Addu(scratch2, scratch2, at);

    // Check if the key is identical to the name.
    __ sll(at, scratch2, 2);
    __ Addu(scratch2, elements, at);
    __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
    __ Branch(done, eq, name, Operand(at));
  }

  const int spill_mask =
      (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
       a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
      ~(scratch1.bit() | scratch2.bit());

  __ MultiPush(spill_mask);
  if (name.is(a0)) {
    DCHECK(!elements.is(a1));
    __ Move(a1, name);
    __ Move(a0, elements);
  } else {
    __ Move(a0, elements);
    __ Move(a1, name);
  }
  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
  __ CallStub(&stub);
  __ mov(scratch2, a2);
  __ mov(at, v0);
  __ MultiPop(spill_mask);

  __ Branch(done, ne, at, Operand(zero_reg));
  __ Branch(miss, eq, at, Operand(zero_reg));
}


void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false. That means
  // we cannot call anything that could cause a GC from this stub.
  // Registers:
  //  result: NameDictionary to probe
  //  a1: key
  //  dictionary: NameDictionary to probe.
  //  index: will hold an index of entry if lookup is successful.
  //         might alias with result_.
  // Returns:
  //  result_ is zero if lookup failed, non-zero otherwise.

  Register result = v0;
  Register dictionary = a0;
  Register key = a1;
  Register index = a2;
  Register mask = a3;
  Register hash = t0;
  Register undefined = t1;
  Register entry_key = t2;

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
  __ sra(mask, mask, kSmiTagSize);
  __ Subu(mask, mask, Operand(1));
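  // Since the capacity is a power of two, capacity - 1 is an all-ones
  // bit mask; e.g. a capacity of 64 yields the mask 63 (0b111111).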

  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));

  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    // Capacity is smi 2^n.
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is right
      // shifted in the following and instruction.
      DCHECK(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ Addu(index, hash, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    } else {
      __ mov(index, hash);
    }
    __ srl(index, index, Name::kHashShift);
    __ And(index, mask, index);

    // Scale the index by multiplying by the entry size.
    DCHECK(NameDictionary::kEntrySize == 3);
    // index *= 3.
    __ mov(at, index);
    __ sll(index, index, 1);
    __ Addu(index, index, at);

    DCHECK_EQ(kSmiTagSize, 1);
    __ sll(index, index, 2);
    __ Addu(index, index, dictionary);
    __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));

    // Having undefined at this place means the name is not contained.
    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));

    // Stop if found the property.
    __ Branch(&in_dictionary, eq, entry_key, Operand(key));

    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
      // Check if the entry name is not a unique name.
      __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ lbu(entry_key,
             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing negative lookup then probing failure should be
  // treated as a lookup success. For positive lookup probing failure
  // should be treated as lookup failure.
  if (mode() == POSITIVE_LOOKUP) {
    __ Ret(USE_DELAY_SLOT);
    __ mov(result, zero_reg);
  }

  __ bind(&in_dictionary);
  __ Ret(USE_DELAY_SLOT);
  __ li(result, 1);

  __ bind(&not_in_dictionary);
  __ Ret(USE_DELAY_SLOT);
  __ mov(result, zero_reg);
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
  stub1.GetCode();
  // Hydrogen code stubs need stub2 at snapshot time.
  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
  stub2.GetCode();
}


// Takes the input in 3 registers: address_, value_ and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two branch+nop instructions are generated with labels so as to
  // get the offset fixed up correctly by the bind(Label*) call. We patch it
  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
  // incremental heap marking.
  // See RecordWriteStub::Patch for details.
  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
  __ nop();
  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
  __ nop();
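  // While the stub is in STORE_BUFFER_ONLY mode, both branches above are
  // patched into their never-taken "bne zero_reg, zero_reg" form, so
  // execution falls through to the remembered-set code below; activating
  // incremental marking patches the relevant branch back into the
  // always-taken "beq zero_reg, zero_reg" form.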

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object(),
                           address(),
                           regs_.scratch0(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
  PatchBranchIntoNop(masm, 0);
  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
}


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch1(),
                           &dont_need_remembered_set);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),
                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
                     ne,
                     &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm);
    regs_.Restore(masm);
    __ RememberedSetHelper(object(),
                           address(),
                           regs_.scratch0(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm);
  regs_.Restore(masm);
  __ Ret();
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  Register address =
      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
  DCHECK(!address.is(regs_.object()));
  DCHECK(!address.is(a0));
  __ Move(address, regs_.address());
  __ Move(a0, regs_.object());
  __ Move(a1, address);
  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::incremental_marking_record_write_function(isolate()),
      argument_count);
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
  __ lw(regs_.scratch1(),
        MemOperand(regs_.scratch0(),
                   MemoryChunk::kWriteBarrierCounterOffset));
  __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
  __ sw(regs_.scratch1(),
        MemOperand(regs_.scratch0(),
                   MemoryChunk::kWriteBarrierCounterOffset));
  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
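  // The write barrier counter gives each page a budget of barrier hits:
  // only when the decremented value drops below zero does the branch above
  // escape to the need_incremental path.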

  // Let's look at the color of the object: If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(),
                           address(),
                           regs_.scratch0(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ EnsureNotWhite(regs_.scratch0(),  // The value.
                    regs_.scratch1(),  // Scratch.
                    regs_.object(),    // Scratch.
                    regs_.address(),   // Scratch.
                    &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(),
                           address(),
                           regs_.scratch0(),
                           save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : element value to store
  //  -- a3    : element index as smi
  //  -- sp[0] : array literal index in function as smi
  //  -- sp[4] : array literal
  // clobbers a1, a2, t0
  // -----------------------------------

  Label double_elements;
  Label smi_element;
  Label slow_elements;
  Label fast_elements;

  // Get array literal index, array literal and its map.
  __ lw(t0, MemOperand(sp, 0 * kPointerSize));
  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
  __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));

  __ CheckFastElements(a2, t1, &double_elements);
  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
  __ JumpIfSmi(a0, &smi_element);
  __ CheckFastSmiElements(a2, t1, &fast_elements);

  // Store into the array literal requires an elements transition. Call into
  // the runtime.
  __ bind(&slow_elements);
  __ Push(a1, a3, a0);
  __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
  __ Push(t1, t0);
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);

  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
  __ bind(&fast_elements);
  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t2, t1, t2);
  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sw(a0, MemOperand(t2, 0));
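  // The element address above was formed in two steps: a single shift by
  // (kPointerSizeLog2 - kSmiTagSize) turns the smi index directly into a
  // byte offset, and the header size (minus the heap-object tag) is then
  // folded into the final add.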
  // Update the write barrier for the array store.
  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
  // and value is Smi.
  __ bind(&smi_element);
  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t2, t1, t2);
  __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
  __ bind(&double_elements);
  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);
}


void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(isolate(), 1, kSaveFPRegs);
  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
  __ lw(a1, MemOperand(fp, parameter_count_offset));
  if (function_mode() == JS_FUNCTION_STUB_MODE) {
    __ Addu(a1, a1, Operand(1));
  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ sll(a1, a1, kPointerSizeLog2);
  __ Ret(USE_DELAY_SLOT);
  __ Addu(sp, sp, a1);
}


void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
  VectorLoadStub stub(isolate(), state());
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
  VectorKeyedLoadStub stub(isolate());
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    ProfileEntryHookStub stub(masm->isolate());
    __ push(ra);
    __ CallStub(&stub);
    __ pop(ra);
  }
}


void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  // The entry hook is a "push ra" instruction, followed by a call.
  // Note: on MIPS "push" is 2 instructions.
  const int32_t kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);

  // This should contain all kJSCallerSaved registers.
  const RegList kSavedRegs =
      kJSCallerSaved |  // Caller saved registers.
      s5.bit();         // Saved stack pointer.

  // We also save ra, so the count here is one higher than the mask indicates.
  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;

  // Save all caller-save registers as this may be called from anywhere.
  __ MultiPush(kSavedRegs | ra.bit());

  // Compute the function's address for the first argument.
  __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));

  // The caller's return address is above the saved temporaries.
  // Grab that for the second argument to the hook.
  __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));

  // Align the stack if necessary.
  int frame_alignment = masm->ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    __ mov(s5, sp);  // Save the original stack pointer in s5.
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    __ And(sp, sp, Operand(-frame_alignment));
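    // -frame_alignment of a power of two is an all-ones mask with the low
    // bits cleared, so for an 8-byte alignment this is sp &= ~7.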
  }
  __ Subu(sp, sp, kCArgsSlotsSize);
#if defined(V8_HOST_ARCH_MIPS)
  int32_t entry_hook =
      reinterpret_cast<int32_t>(isolate()->function_entry_hook());
  __ li(t9, Operand(entry_hook));
#else
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address.
  // It additionally takes an isolate as a third parameter.
  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));

  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  __ li(t9, Operand(ExternalReference(&dispatcher,
                                      ExternalReference::BUILTIN_CALL,
                                      isolate())));
#endif
  // Call C function through t9 to conform ABI for PIC.
  __ Call(t9);

  // Restore the stack pointer if needed.
  if (frame_alignment > kPointerSize) {
    __ mov(sp, s5);
  } else {
    __ Addu(sp, sp, kCArgsSlotsSize);
  }

  // Also pop ra to get Ret(0).
  __ MultiPop(kSavedRegs | ra.bit());
  __ Ret();
}


template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      T stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq, a3, Operand(kind));
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
  // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // a0 - number of arguments
  // a1 - constructor?
  // sp[0] - last argument
  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    DCHECK(FAST_SMI_ELEMENTS == 0);
    DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
    DCHECK(FAST_ELEMENTS == 2);
    DCHECK(FAST_HOLEY_ELEMENTS == 3);
    DCHECK(FAST_DOUBLE_ELEMENTS == 4);
    DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);

    // is the low bit set? If so, we are holey and that is good.
    __ And(at, a3, Operand(1));
    __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
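    // The DCHECKed numbering above makes packed kinds even (0, 2, 4) and
    // their holey counterparts odd (1, 3, 5), so testing bit 0 of the kind
    // is enough to detect an already-holey kind.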
  }

  // look at the first argument
  __ lw(t1, MemOperand(sp, 0));
  __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
                                                  holey_initial,
                                                  DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(masm->isolate(),
                                            initial,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the slot).
    __ Addu(a3, a3, Operand(1));

    if (FLAG_debug_code) {
      __ lw(t1, FieldMemOperand(a2, 0));
      __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
      __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
    }

    // Save the resulting elements kind in type info. We can't just store a3
    // in the AllocationSite::transition_info field because elements kind is
    // restricted to a portion of the field...upper bits need to be left alone.
    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
    __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
    __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
    __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
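    // kFastElementsKindPackedToHoley is added as a smi, so the smi-tagged
    // transition info is bumped from the packed kind to its holey variant
    // in place without any untagging or retagging.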

    __ bind(&normal_sequence);
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq, a3, Operand(kind));
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index = GetSequenceIndexFromFastElementsKind(
      TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(isolate, kind);
    stub.GetCode();
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
      stub1.GetCode();
    }
  }
}


void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
      isolate);
}


void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
    Isolate* isolate) {
  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things.
    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
    stubh1.GetCode();
    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
    stubh2.GetCode();
    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
    stubh3.GetCode();
  }
}


void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm,
    AllocationSiteOverrideMode mode) {
  if (argument_count() == ANY) {
    Label not_zero_case, not_one_case;
    __ And(at, a0, a0);
    __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

    __ bind(&not_zero_case);
    __ Branch(&not_one_case, gt, a0, Operand(1));
    CreateArrayDispatchOneArgument(masm, mode);

    __ bind(&not_one_case);
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else if (argument_count() == NONE) {
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
  } else if (argument_count() == ONE) {
    CreateArrayDispatchOneArgument(masm, mode);
  } else if (argument_count() == MORE_THAN_ONE) {
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else {
    UNREACHABLE();
  }
}


void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : argc (only if argument_count() == ANY)
  //  -- a1 : constructor
  //  -- a2 : AllocationSite or undefined
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ SmiTst(t0, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
        at, Operand(zero_reg));
    __ GetObjectType(t0, t0, t1);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
        t1, Operand(MAP_TYPE));

    // We should either have undefined in a2 or a valid AllocationSite
    __ AssertUndefinedOrAllocationSite(a2, t0);
  }

  Label no_info;
  // Get the elements kind and case on that.
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&no_info, eq, a2, Operand(at));

  __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(a3);
  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
  __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}


void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {

  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0, lo, a0, Operand(1));

  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
  __ TailCallStub(&stubN, hi, a0, Operand(1));

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array
    // look at the first argument.
    __ lw(at, MemOperand(sp, 0));

    InternalArraySingleArgumentConstructorStub
        stub1_holey(isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : argc
  //  -- a1 : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ SmiTst(a3, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
        at, Operand(zero_reg));
    __ GetObjectType(a3, a3, t0);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
        t0, Operand(MAP_TYPE));
  }

  // Figure out the right elements kind.
  __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));

  // Load the map's "bit field 2" into a3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(a3);

  if (FLAG_debug_code) {
    Label done;
    __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
    __ Assert(
        eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
        a3, Operand(FAST_HOLEY_ELEMENTS));
    __ bind(&done);
  }

  Label fast_elements_case;
  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


void CallApiFunctionStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0                  : callee
  //  -- t0                  : call_data
  //  -- a2                  : holder
  //  -- a1                  : api_function_address
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1)* 4]   : first argument
  //  -- sp[argc * 4]        : receiver
  // -----------------------------------

  Register callee = a0;
  Register call_data = t0;
  Register holder = a2;
  Register api_function_address = a1;
  Register context = cp;

  int argc = this->argc();
  bool is_store = this->is_store();
  bool call_data_undefined = this->call_data_undefined();

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kArgsLength == 7);
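  // Together these asserts pin down the layout of the implicit argument
  // block built below: reading upwards from the stack pointer it will hold
  // holder, isolate, return-value default, return value, call data, callee
  // and finally the saved context.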

  // Save context, callee and call data.
  __ Push(context, callee, call_data);
  // Load context from callee.
  __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));

  Register scratch = call_data;
  if (!call_data_undefined) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // Push return value and default return value.
  __ Push(scratch, scratch);
  __ li(scratch,
        Operand(ExternalReference::isolate_address(isolate())));
  // Push isolate and holder.
  __ Push(scratch, holder);

  // Prepare arguments.
  __ mov(scratch, sp);

  // Allocate the v8::Arguments structure in the arguments' space since
  // it's not controlled by GC.
  const int kApiStackSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
  // a0 = FunctionCallbackInfo&
  // Arguments is after the return address.
  __ Addu(a0, sp, Operand(1 * kPointerSize));
  // FunctionCallbackInfo::implicit_args_
  __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
  // FunctionCallbackInfo::values_
  __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
  __ sw(at, MemOperand(a0, 1 * kPointerSize));
  // FunctionCallbackInfo::length_ = argc
  __ li(at, Operand(argc));
  __ sw(at, MemOperand(a0, 2 * kPointerSize));
  // FunctionCallbackInfo::is_construct_call = 0
  __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));

  const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first js argument.
  int return_value_offset = 0;
  if (is_store) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);

  __ CallApiFunctionAndReturn(api_function_address,
                              thunk_ref,
                              kStackUnwindSpace,
                              return_value_operand,
                              &context_restore_operand);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- sp[0]                  : name
  //  -- sp[4 - kArgsLength*4]  : PropertyCallbackArguments object
  //  -- ...
  //  -- a2                     : api_function_address
  // -----------------------------------

  Register api_function_address = ApiGetterDescriptor::function_address();
  DCHECK(api_function_address.is(a2));

  __ mov(a0, sp);  // a0 = Handle<Name>
  __ Addu(a1, a0, Operand(1 * kPointerSize));  // a1 = PCA

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create PropertyAccessorInfo instance on the stack above the exit frame with
  // a1 (internal::Object** args_) as the data.
  __ sw(a1, MemOperand(sp, 1 * kPointerSize));
  __ Addu(a1, sp, Operand(1 * kPointerSize));  // a1 = AccessorInfo&

  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());
  __ CallApiFunctionAndReturn(api_function_address,
                              thunk_ref,
                              kStackUnwindSpace,
                              MemOperand(fp, 6 * kPointerSize),
                              NULL);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS