1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #if V8_TARGET_ARCH_MIPS64
9 #include "src/bootstrapper.h"
10 #include "src/code-stubs.h"
11 #include "src/codegen.h"
12 #include "src/ic/handler-compiler.h"
13 #include "src/ic/ic.h"
14 #include "src/isolate.h"
15 #include "src/jsregexp.h"
16 #include "src/regexp-macro-assembler.h"
17 #include "src/runtime/runtime.h"
23 static void InitializeArrayConstructorDescriptor(
24 Isolate* isolate, CodeStubDescriptor* descriptor,
25 int constant_stack_parameter_count) {
26 Address deopt_handler = Runtime::FunctionForId(
27 Runtime::kArrayConstructor)->entry;
29 if (constant_stack_parameter_count == 0) {
30 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
31 JS_FUNCTION_STUB_MODE);
33 descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
34 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
39 static void InitializeInternalArrayConstructorDescriptor(
40 Isolate* isolate, CodeStubDescriptor* descriptor,
41 int constant_stack_parameter_count) {
42 Address deopt_handler = Runtime::FunctionForId(
43 Runtime::kInternalArrayConstructor)->entry;
45 if (constant_stack_parameter_count == 0) {
46 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
47 JS_FUNCTION_STUB_MODE);
49 descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
50 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
55 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
56 CodeStubDescriptor* descriptor) {
57 InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
61 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
62 CodeStubDescriptor* descriptor) {
63 InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
67 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
68 CodeStubDescriptor* descriptor) {
69 InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
73 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
74 CodeStubDescriptor* descriptor) {
75 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
79 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
80 CodeStubDescriptor* descriptor) {
81 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
85 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
86 CodeStubDescriptor* descriptor) {
87 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
91 #define __ ACCESS_MASM(masm)
94 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
97 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
103 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
108 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
109 ExternalReference miss) {
110 // Update the static counter each time a new code stub is generated.
111 isolate()->counters()->code_stubs()->Increment();
113 CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
114 int param_count = descriptor.GetEnvironmentParameterCount();
116 // Call the runtime system in a fresh internal frame.
117 FrameScope scope(masm, StackFrame::INTERNAL);
118 DCHECK((param_count == 0) ||
119 a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
120 // Push arguments, adjust sp.
121 __ Dsubu(sp, sp, Operand(param_count * kPointerSize));
122 for (int i = 0; i < param_count; ++i) {
123 // Store argument to stack.
124 __ sd(descriptor.GetEnvironmentParameterRegister(i),
125 MemOperand(sp, (param_count - 1 - i) * kPointerSize));
127 __ CallExternalReference(miss, param_count);
134 void DoubleToIStub::Generate(MacroAssembler* masm) {
135 Label out_of_range, only_low, negate, done;
136 Register input_reg = source();
137 Register result_reg = destination();
139 int double_offset = offset();
140 // Account for saved regs if input is sp.
141 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
144 GetRegisterThatIsNotOneOf(input_reg, result_reg);
146 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
148 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
149 DoubleRegister double_scratch = kLithiumScratchDouble;
151 __ Push(scratch, scratch2, scratch3);
152 if (!skip_fastpath()) {
153 // Load double input.
154 __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
156 // Clear cumulative exception flags and save the FCSR.
157 __ cfc1(scratch2, FCSR);
158 __ ctc1(zero_reg, FCSR);
160 // Try a conversion to a signed integer.
161 __ Trunc_w_d(double_scratch, double_scratch);
162 // Move the converted value into the result register.
163 __ mfc1(scratch3, double_scratch);
165 // Retrieve and restore the FCSR.
166 __ cfc1(scratch, FCSR);
167 __ ctc1(scratch2, FCSR);
169 // Check for overflow and NaNs.
172 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
173 | kFCSRInvalidOpFlagMask);
174 // If we had no exceptions then set result_reg and we are done.
176 __ Branch(&error, ne, scratch, Operand(zero_reg));
177 __ Move(result_reg, scratch3);
182 // Load the double value and perform a manual truncation.
183 Register input_high = scratch2;
184 Register input_low = scratch3;
186 __ lw(input_low, MemOperand(input_reg, double_offset));
187 __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
189 Label normal_exponent, restore_sign;
190 // Extract the biased exponent in result.
193 HeapNumber::kExponentShift,
194 HeapNumber::kExponentBits);
196 // Check for Infinity and NaNs, which should return 0.
197 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
198 __ Movz(result_reg, zero_reg, scratch);
199 __ Branch(&done, eq, scratch, Operand(zero_reg));
201 // Express exponent as delta to (number of mantissa bits + 31).
204 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
206 // If the delta is strictly positive, all bits would be shifted away,
207 // which means that we can return 0.
208 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
209 __ mov(result_reg, zero_reg);
212 __ bind(&normal_exponent);
213 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
215 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
218 Register sign = result_reg;
220 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
222 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
223 // to check for this specific case.
224 Label high_shift_needed, high_shift_done;
225 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
226 __ mov(input_high, zero_reg);
227 __ Branch(&high_shift_done);
228 __ bind(&high_shift_needed);
230 // Set the implicit 1 before the mantissa part in input_high.
233 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
234 // Shift the mantissa bits to the correct position.
235 // We don't need to clear non-mantissa bits as they will be shifted away.
236 // If they weren't, it would mean that the answer is in the 32bit range.
237 __ sllv(input_high, input_high, scratch);
239 __ bind(&high_shift_done);
241 // Replace the shifted bits with bits from the lower mantissa word.
242 Label pos_shift, shift_done;
244 __ subu(scratch, at, scratch);
245 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
248 __ Subu(scratch, zero_reg, scratch);
249 __ sllv(input_low, input_low, scratch);
250 __ Branch(&shift_done);
253 __ srlv(input_low, input_low, scratch);
255 __ bind(&shift_done);
256 __ Or(input_high, input_high, Operand(input_low));
257 // Restore sign if necessary.
258 __ mov(scratch, sign);
261 __ Subu(result_reg, zero_reg, input_high);
262 __ Movz(result_reg, input_high, scratch);
266 __ Pop(scratch, scratch2, scratch3);
271 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
273 WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
274 WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
280 // See comment for class, this does NOT work for int32's that are in Smi range.
281 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
282 Label max_negative_int;
283 // the_int_ has the answer which is a signed int32 but not a Smi.
284 // We test for the special value that has a different exponent.
285 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
286 // Test sign, and save for later conditionals.
287 __ And(sign(), the_int(), Operand(0x80000000u));
288 __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));
290 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
291 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
292 uint32_t non_smi_exponent =
293 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
294 __ li(scratch(), Operand(non_smi_exponent));
295 // Set the sign bit in scratch_ if the value was negative.
296 __ or_(scratch(), scratch(), sign());
297 // Subtract from 0 if the value was negative.
298 __ subu(at, zero_reg, the_int());
299 __ Movn(the_int(), at, sign());
300 // We should be masking the implict first digit of the mantissa away here,
301 // but it just ends up combining harmlessly with the last digit of the
302 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
303 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
304 DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
305 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
306 __ srl(at, the_int(), shift_distance);
307 __ or_(scratch(), scratch(), at);
308 __ sw(scratch(), FieldMemOperand(the_heap_number(),
309 HeapNumber::kExponentOffset));
310 __ sll(scratch(), the_int(), 32 - shift_distance);
311 __ Ret(USE_DELAY_SLOT);
312 __ sw(scratch(), FieldMemOperand(the_heap_number(),
313 HeapNumber::kMantissaOffset));
315 __ bind(&max_negative_int);
316 // The max negative int32 is stored as a positive number in the mantissa of
317 // a double because it uses a sign bit instead of using two's complement.
318 // The actual mantissa bits stored are all 0 because the implicit most
319 // significant 1 bit is not stored.
320 non_smi_exponent += 1 << HeapNumber::kExponentShift;
321 __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
323 FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
324 __ mov(scratch(), zero_reg);
325 __ Ret(USE_DELAY_SLOT);
327 FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
331 // Handle the case where the lhs and rhs are the same object.
332 // Equality is almost reflexive (everything but NaN), so this is a test
333 // for "identity and not NaN".
334 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
338 Label heap_number, return_equal;
339 Register exp_mask_reg = t1;
341 __ Branch(¬_identical, ne, a0, Operand(a1));
343 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
345 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
346 // so we do the second best thing - test it ourselves.
347 // They are both equal and they are not both Smis so both of them are not
348 // Smis. If it's not a heap number, then return equal.
349 if (cc == less || cc == greater) {
350 __ GetObjectType(a0, t0, t0);
351 __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
353 __ GetObjectType(a0, t0, t0);
354 __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
355 // Comparing JS objects with <=, >= is complicated.
357 __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
358 // Normally here we fall through to return_equal, but undefined is
359 // special: (undefined == undefined) == true, but
360 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
361 if (cc == less_equal || cc == greater_equal) {
362 __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE));
363 __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
364 __ Branch(&return_equal, ne, a0, Operand(a6));
365 DCHECK(is_int16(GREATER) && is_int16(LESS));
366 __ Ret(USE_DELAY_SLOT);
368 // undefined <= undefined should fail.
369 __ li(v0, Operand(GREATER));
371 // undefined >= undefined should fail.
372 __ li(v0, Operand(LESS));
378 __ bind(&return_equal);
379 DCHECK(is_int16(GREATER) && is_int16(LESS));
380 __ Ret(USE_DELAY_SLOT);
382 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
383 } else if (cc == greater) {
384 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
386 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
388 // For less and greater we don't have to check for NaN since the result of
389 // x < x is false regardless. For the others here is some code to check
391 if (cc != lt && cc != gt) {
392 __ bind(&heap_number);
393 // It is a heap number, so return non-equal if it's NaN and equal if it's
396 // The representation of NaN values has all exponent bits (52..62) set,
397 // and not all mantissa bits (0..51) clear.
398 // Read top bits of double representation (second word of value).
399 __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
400 // Test that exponent bits are all set.
401 __ And(a7, a6, Operand(exp_mask_reg));
402 // If all bits not set (ne cond), then not a NaN, objects are equal.
403 __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg));
405 // Shift out flag and all exponent bits, retaining only mantissa.
406 __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord);
407 // Or with all low-bits of mantissa.
408 __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
409 __ Or(v0, a7, Operand(a6));
410 // For equal we already have the right value in v0: Return zero (equal)
411 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
412 // not (it's a NaN). For <= and >= we need to load v0 with the failing
413 // value if it's a NaN.
415 // All-zero means Infinity means equal.
416 __ Ret(eq, v0, Operand(zero_reg));
417 DCHECK(is_int16(GREATER) && is_int16(LESS));
418 __ Ret(USE_DELAY_SLOT);
420 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
422 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
426 // No fall through here.
428 __ bind(¬_identical);
432 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
435 Label* both_loaded_as_doubles,
438 DCHECK((lhs.is(a0) && rhs.is(a1)) ||
439 (lhs.is(a1) && rhs.is(a0)));
442 __ JumpIfSmi(lhs, &lhs_is_smi);
444 // Check whether the non-smi is a heap number.
445 __ GetObjectType(lhs, t0, t0);
447 // If lhs was not a number and rhs was a Smi then strict equality cannot
448 // succeed. Return non-equal (lhs is already not zero).
449 __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
452 // Smi compared non-strictly with a non-Smi non-heap-number. Call
454 __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
456 // Rhs is a smi, lhs is a number.
457 // Convert smi rhs to double.
458 __ SmiUntag(at, rhs);
460 __ cvt_d_w(f14, f14);
461 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
463 // We now have both loaded as doubles.
464 __ jmp(both_loaded_as_doubles);
466 __ bind(&lhs_is_smi);
467 // Lhs is a Smi. Check whether the non-smi is a heap number.
468 __ GetObjectType(rhs, t0, t0);
470 // If lhs was not a number and rhs was a Smi then strict equality cannot
471 // succeed. Return non-equal.
472 __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
473 __ li(v0, Operand(1));
475 // Smi compared non-strictly with a non-Smi non-heap-number. Call
477 __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
480 // Lhs is a smi, rhs is a number.
481 // Convert smi lhs to double.
482 __ SmiUntag(at, lhs);
484 __ cvt_d_w(f12, f12);
485 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
486 // Fall through to both_loaded_as_doubles.
490 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
493 // If either operand is a JS object or an oddball value, then they are
494 // not equal since their pointers are different.
495 // There is no test for undetectability in strict equality.
496 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
497 Label first_non_object;
498 // Get the type of the first operand into a2 and compare it with
499 // FIRST_SPEC_OBJECT_TYPE.
500 __ GetObjectType(lhs, a2, a2);
501 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
504 Label return_not_equal;
505 __ bind(&return_not_equal);
506 __ Ret(USE_DELAY_SLOT);
507 __ li(v0, Operand(1));
509 __ bind(&first_non_object);
510 // Check for oddballs: true, false, null, undefined.
511 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
513 __ GetObjectType(rhs, a3, a3);
514 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
516 // Check for oddballs: true, false, null, undefined.
517 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
519 // Now that we have the types we might as well check for
520 // internalized-internalized.
521 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
522 __ Or(a2, a2, Operand(a3));
523 __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
524 __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
528 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
531 Label* both_loaded_as_doubles,
532 Label* not_heap_numbers,
534 __ GetObjectType(lhs, a3, a2);
535 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
536 __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
537 // If first was a heap number & second wasn't, go to slow case.
538 __ Branch(slow, ne, a3, Operand(a2));
540 // Both are heap numbers. Load them up then jump to the code we have
542 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
543 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
545 __ jmp(both_loaded_as_doubles);
549 // Fast negative check for internalized-to-internalized equality.
550 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
553 Label* possible_strings,
554 Label* not_both_strings) {
555 DCHECK((lhs.is(a0) && rhs.is(a1)) ||
556 (lhs.is(a1) && rhs.is(a0)));
558 // a2 is object type of rhs.
560 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
561 __ And(at, a2, Operand(kIsNotStringMask));
562 __ Branch(&object_test, ne, at, Operand(zero_reg));
563 __ And(at, a2, Operand(kIsNotInternalizedMask));
564 __ Branch(possible_strings, ne, at, Operand(zero_reg));
565 __ GetObjectType(rhs, a3, a3);
566 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
567 __ And(at, a3, Operand(kIsNotInternalizedMask));
568 __ Branch(possible_strings, ne, at, Operand(zero_reg));
570 // Both are internalized strings. We already checked they weren't the same
571 // pointer so they are not equal.
572 __ Ret(USE_DELAY_SLOT);
573 __ li(v0, Operand(1)); // Non-zero indicates not equal.
575 __ bind(&object_test);
576 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
577 __ GetObjectType(rhs, a2, a3);
578 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
580 // If both objects are undetectable, they are equal. Otherwise, they
581 // are not equal, since they are different objects and an object is not
582 // equal to undefined.
583 __ ld(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
584 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
585 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
587 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
588 __ Ret(USE_DELAY_SLOT);
589 __ xori(v0, a0, 1 << Map::kIsUndetectable);
593 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
595 CompareICState::State expected,
598 if (expected == CompareICState::SMI) {
599 __ JumpIfNotSmi(input, fail);
600 } else if (expected == CompareICState::NUMBER) {
601 __ JumpIfSmi(input, &ok);
602 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
605 // We could be strict about internalized/string here, but as long as
606 // hydrogen doesn't care, the stub doesn't have to care either.
611 // On entry a1 and a2 are the values to be compared.
612 // On exit a0 is 0, positive or negative to indicate the result of
614 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
617 Condition cc = GetCondition();
620 CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
621 CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
623 Label slow; // Call builtin.
624 Label not_smis, both_loaded_as_doubles;
626 Label not_two_smis, smi_done;
628 __ JumpIfNotSmi(a2, ¬_two_smis);
632 __ Ret(USE_DELAY_SLOT);
633 __ dsubu(v0, a1, a0);
634 __ bind(¬_two_smis);
636 // NOTICE! This code is only reached after a smi-fast-case check, so
637 // it is certain that at least one operand isn't a smi.
639 // Handle the case where the objects are identical. Either returns the answer
640 // or goes to slow. Only falls through if the objects were not identical.
641 EmitIdenticalObjectComparison(masm, &slow, cc);
643 // If either is a Smi (we know that not both are), then they can only
644 // be strictly equal if the other is a HeapNumber.
645 STATIC_ASSERT(kSmiTag == 0);
646 DCHECK_EQ(0, Smi::FromInt(0));
647 __ And(a6, lhs, Operand(rhs));
648 __ JumpIfNotSmi(a6, ¬_smis, a4);
649 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
650 // 1) Return the answer.
652 // 3) Fall through to both_loaded_as_doubles.
653 // 4) Jump to rhs_not_nan.
654 // In cases 3 and 4 we have found out we were dealing with a number-number
655 // comparison and the numbers have been loaded into f12 and f14 as doubles,
656 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
657 EmitSmiNonsmiComparison(masm, lhs, rhs,
658 &both_loaded_as_doubles, &slow, strict());
660 __ bind(&both_loaded_as_doubles);
661 // f12, f14 are the double representations of the left hand side
662 // and the right hand side if we have FPU. Otherwise a2, a3 represent
663 // left hand side and a0, a1 represent right hand side.
666 __ li(a4, Operand(LESS));
667 __ li(a5, Operand(GREATER));
668 __ li(a6, Operand(EQUAL));
670 // Check if either rhs or lhs is NaN.
671 __ BranchF(NULL, &nan, eq, f12, f14);
673 // Check if LESS condition is satisfied. If true, move conditionally
675 if (kArchVariant != kMips64r6) {
676 __ c(OLT, D, f12, f14);
678 // Use previous check to store conditionally to v0 oposite condition
679 // (GREATER). If rhs is equal to lhs, this will be corrected in next
682 // Check if EQUAL condition is satisfied. If true, move conditionally
684 __ c(EQ, D, f12, f14);
688 __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
689 __ mov(v0, a4); // Return LESS as result.
691 __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
692 __ mov(v0, a6); // Return EQUAL as result.
694 __ mov(v0, a5); // Return GREATER as result.
700 // NaN comparisons always fail.
701 // Load whatever we need in v0 to make the comparison fail.
702 DCHECK(is_int16(GREATER) && is_int16(LESS));
703 __ Ret(USE_DELAY_SLOT);
704 if (cc == lt || cc == le) {
705 __ li(v0, Operand(GREATER));
707 __ li(v0, Operand(LESS));
712 // At this point we know we are dealing with two different objects,
713 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
715 // This returns non-equal for some object types, or falls through if it
717 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
720 Label check_for_internalized_strings;
721 Label flat_string_check;
722 // Check for heap-number-heap-number comparison. Can jump to slow case,
723 // or load both doubles and jump to the code that handles
724 // that case. If the inputs are not doubles then jumps to
725 // check_for_internalized_strings.
726 // In this case a2 will contain the type of lhs_.
727 EmitCheckForTwoHeapNumbers(masm,
730 &both_loaded_as_doubles,
731 &check_for_internalized_strings,
734 __ bind(&check_for_internalized_strings);
735 if (cc == eq && !strict()) {
736 // Returns an answer for two internalized strings or two
737 // detectable objects.
738 // Otherwise jumps to string case or not both strings case.
739 // Assumes that a2 is the type of lhs_ on entry.
740 EmitCheckForInternalizedStringsOrObjects(
741 masm, lhs, rhs, &flat_string_check, &slow);
744 // Check for both being sequential one-byte strings,
745 // and inline if that is the case.
746 __ bind(&flat_string_check);
748 __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
750 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
753 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, a4);
755 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, a4,
758 // Never falls through to here.
761 // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
764 // Figure out which native to call and setup the arguments.
765 Builtins::JavaScript native;
767 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
769 native = Builtins::COMPARE;
770 int ncr; // NaN compare result.
771 if (cc == lt || cc == le) {
774 DCHECK(cc == gt || cc == ge); // Remaining cases.
777 __ li(a0, Operand(Smi::FromInt(ncr)));
781 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
782 // tagged as a small integer.
783 __ InvokeBuiltin(native, JUMP_FUNCTION);
790 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
793 __ PushSafepointRegisters();
798 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
801 __ PopSafepointRegisters();
806 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
807 // We don't allow a GC during a store buffer overflow so there is no need to
808 // store the registers in any particular way, but we do have to store and
810 __ MultiPush(kJSCallerSaved | ra.bit());
811 if (save_doubles()) {
812 __ MultiPushFPU(kCallerSavedFPU);
814 const int argument_count = 1;
815 const int fp_argument_count = 0;
816 const Register scratch = a1;
818 AllowExternalCallThatCantCauseGC scope(masm);
819 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
820 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
822 ExternalReference::store_buffer_overflow_function(isolate()),
824 if (save_doubles()) {
825 __ MultiPopFPU(kCallerSavedFPU);
828 __ MultiPop(kJSCallerSaved | ra.bit());
833 void MathPowStub::Generate(MacroAssembler* masm) {
834 const Register base = a1;
835 const Register exponent = MathPowTaggedDescriptor::exponent();
836 DCHECK(exponent.is(a2));
837 const Register heapnumbermap = a5;
838 const Register heapnumber = v0;
839 const DoubleRegister double_base = f2;
840 const DoubleRegister double_exponent = f4;
841 const DoubleRegister double_result = f0;
842 const DoubleRegister double_scratch = f6;
843 const FPURegister single_scratch = f8;
844 const Register scratch = t1;
845 const Register scratch2 = a7;
847 Label call_runtime, done, int_exponent;
848 if (exponent_type() == ON_STACK) {
849 Label base_is_smi, unpack_exponent;
850 // The exponent and base are supplied as arguments on the stack.
851 // This can only happen if the stub is called from non-optimized code.
852 // Load input parameters from stack to double registers.
853 __ ld(base, MemOperand(sp, 1 * kPointerSize));
854 __ ld(exponent, MemOperand(sp, 0 * kPointerSize));
856 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
858 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
859 __ ld(scratch, FieldMemOperand(base, JSObject::kMapOffset));
860 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
862 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
863 __ jmp(&unpack_exponent);
865 __ bind(&base_is_smi);
866 __ mtc1(scratch, single_scratch);
867 __ cvt_d_w(double_base, single_scratch);
868 __ bind(&unpack_exponent);
870 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
872 __ ld(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
873 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
874 __ ldc1(double_exponent,
875 FieldMemOperand(exponent, HeapNumber::kValueOffset));
876 } else if (exponent_type() == TAGGED) {
877 // Base is already in double_base.
878 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
880 __ ldc1(double_exponent,
881 FieldMemOperand(exponent, HeapNumber::kValueOffset));
884 if (exponent_type() != INTEGER) {
885 Label int_exponent_convert;
886 // Detect integer exponents stored as double.
887 __ EmitFPUTruncate(kRoundToMinusInf,
893 kCheckForInexactConversion);
894 // scratch2 == 0 means there was no conversion error.
895 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
897 if (exponent_type() == ON_STACK) {
898 // Detect square root case. Crankshaft detects constant +/-0.5 at
899 // compile time and uses DoMathPowHalf instead. We then skip this check
900 // for non-constant cases of +/-0.5 as these hardly occur.
904 __ Move(double_scratch, 0.5);
905 __ BranchF(USE_DELAY_SLOT,
911 // double_scratch can be overwritten in the delay slot.
912 // Calculates square root of base. Check for the special case of
913 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
914 __ Move(double_scratch, -V8_INFINITY);
915 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
916 __ neg_d(double_result, double_scratch);
918 // Add +0 to convert -0 to +0.
919 __ add_d(double_scratch, double_base, kDoubleRegZero);
920 __ sqrt_d(double_result, double_scratch);
923 __ bind(¬_plus_half);
924 __ Move(double_scratch, -0.5);
925 __ BranchF(USE_DELAY_SLOT,
931 // double_scratch can be overwritten in the delay slot.
932 // Calculates square root of base. Check for the special case of
933 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
934 __ Move(double_scratch, -V8_INFINITY);
935 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
936 __ Move(double_result, kDoubleRegZero);
938 // Add +0 to convert -0 to +0.
939 __ add_d(double_scratch, double_base, kDoubleRegZero);
940 __ Move(double_result, 1);
941 __ sqrt_d(double_scratch, double_scratch);
942 __ div_d(double_result, double_result, double_scratch);
948 AllowExternalCallThatCantCauseGC scope(masm);
949 __ PrepareCallCFunction(0, 2, scratch2);
950 __ MovToFloatParameters(double_base, double_exponent);
952 ExternalReference::power_double_double_function(isolate()),
956 __ MovFromFloatResult(double_result);
959 __ bind(&int_exponent_convert);
962 // Calculate power with integer exponent.
963 __ bind(&int_exponent);
965 // Get two copies of exponent in the registers scratch and exponent.
966 if (exponent_type() == INTEGER) {
967 __ mov(scratch, exponent);
969 // Exponent has previously been stored into scratch as untagged integer.
970 __ mov(exponent, scratch);
973 __ mov_d(double_scratch, double_base); // Back up base.
974 __ Move(double_result, 1.0);
976 // Get absolute value of exponent.
977 Label positive_exponent;
978 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
979 __ Dsubu(scratch, zero_reg, scratch);
980 __ bind(&positive_exponent);
982 Label while_true, no_carry, loop_end;
983 __ bind(&while_true);
985 __ And(scratch2, scratch, 1);
987 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
988 __ mul_d(double_result, double_result, double_scratch);
991 __ dsra(scratch, scratch, 1);
993 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
994 __ mul_d(double_scratch, double_scratch, double_scratch);
996 __ Branch(&while_true);
1000 __ Branch(&done, ge, exponent, Operand(zero_reg));
1001 __ Move(double_scratch, 1.0);
1002 __ div_d(double_result, double_scratch, double_result);
1003 // Test whether result is zero. Bail out to check for subnormal result.
1004 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1005 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
1007 // double_exponent may not contain the exponent value if the input was a
1008 // smi. We set it with exponent value before bailing out.
1009 __ mtc1(exponent, single_scratch);
1010 __ cvt_d_w(double_exponent, single_scratch);
1012 // Returning or bailing out.
1013 Counters* counters = isolate()->counters();
1014 if (exponent_type() == ON_STACK) {
1015 // The arguments are still on the stack.
1016 __ bind(&call_runtime);
1017 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
1019 // The stub is called from non-optimized code, which expects the result
1020 // as heap number in exponent.
1022 __ AllocateHeapNumber(
1023 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1024 __ sdc1(double_result,
1025 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1026 DCHECK(heapnumber.is(v0));
1027 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1032 AllowExternalCallThatCantCauseGC scope(masm);
1033 __ PrepareCallCFunction(0, 2, scratch);
1034 __ MovToFloatParameters(double_base, double_exponent);
1036 ExternalReference::power_double_double_function(isolate()),
1040 __ MovFromFloatResult(double_result);
1043 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1049 bool CEntryStub::NeedsImmovableCode() {
1054 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1055 CEntryStub::GenerateAheadOfTime(isolate);
1056 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
1057 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1058 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1059 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1060 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1061 BinaryOpICStub::GenerateAheadOfTime(isolate);
1062 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
1063 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
1064 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1068 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1069 StoreRegistersStateStub stub(isolate);
1074 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1075 RestoreRegistersStateStub stub(isolate);
1080 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1081 // Generate if not already in cache.
1082 SaveFPRegsMode mode = kSaveFPRegs;
1083 CEntryStub(isolate, 1, mode).GetCode();
1084 StoreBufferOverflowStub(isolate, mode).GetCode();
1085 isolate->set_fp_stubs_generated(true);
1089 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1090 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1095 void CEntryStub::Generate(MacroAssembler* masm) {
1096 // Called from JavaScript; parameters are on stack as if calling JS function
1097 // s0: number of arguments including receiver
1098 // s1: size of arguments excluding receiver
1099 // s2: pointer to builtin function
1100 // fp: frame pointer (restored after C call)
1101 // sp: stack pointer (restored as callee's sp after C call)
1102 // cp: current context (C callee-saved)
1104 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1106 // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
1107 // The reason for this is that these arguments would need to be saved anyway
1108 // so it's faster to set them up directly.
1109 // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
1111 // Compute the argv pointer in a callee-saved register.
1112 __ Daddu(s1, sp, s1);
1114 // Enter the exit frame that transitions from JavaScript to C++.
1115 FrameScope scope(masm, StackFrame::MANUAL);
1116 __ EnterExitFrame(save_doubles());
1118 // s0: number of arguments including receiver (C callee-saved)
1119 // s1: pointer to first argument (C callee-saved)
1120 // s2: pointer to builtin function (C callee-saved)
1122 // Prepare arguments for C routine.
1125 // a1 = argv (set in the delay slot after find_ra below).
1127 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1128 // also need to reserve the 4 argument slots on the stack.
1130 __ AssertStackIsAligned();
1132 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1134 // To let the GC traverse the return address of the exit frames, we need to
1135 // know where the return address is. The CEntryStub is unmovable, so
1136 // we can store the address on the stack to be able to find it again and
1137 // we never have to restore it, because it will not change.
1138 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1139 // This branch-and-link sequence is needed to find the current PC on mips,
1140 // saved to the ra register.
1141 // Use masm-> here instead of the double-underscore macro since extra
1142 // coverage code can interfere with the proper calculation of ra.
1144 masm->bal(&find_ra); // bal exposes branch delay slot.
1146 masm->bind(&find_ra);
1148 // Adjust the value in ra to point to the correct return location, 2nd
1149 // instruction past the real call into C code (the jalr(t9)), and push it.
1150 // This is the return address of the exit frame.
1151 const int kNumInstructionsToJump = 5;
1152 masm->Daddu(ra, ra, kNumInstructionsToJump * kInt32Size);
1153 masm->sd(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
1154 // Stack space reservation moved to the branch delay slot below.
1155 // Stack is still aligned.
1157 // Call the C routine.
1158 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
1160 // Set up sp in the delay slot.
1161 masm->daddiu(sp, sp, -kCArgsSlotsSize);
1162 // Make sure the stored 'ra' points to this position.
1163 DCHECK_EQ(kNumInstructionsToJump,
1164 masm->InstructionsGeneratedSince(&find_ra));
1167 // Runtime functions should not return 'the hole'. Allowing it to escape may
1168 // lead to crashes in the IC code later.
1169 if (FLAG_debug_code) {
1171 __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
1172 __ Branch(&okay, ne, v0, Operand(a4));
1173 __ stop("The hole escaped");
1177 // Check result for exception sentinel.
1178 Label exception_returned;
1179 __ LoadRoot(a4, Heap::kExceptionRootIndex);
1180 __ Branch(&exception_returned, eq, a4, Operand(v0));
1182 ExternalReference pending_exception_address(
1183 Isolate::kPendingExceptionAddress, isolate());
1185 // Check that there is no pending exception, otherwise we
1186 // should have returned the exception sentinel.
1187 if (FLAG_debug_code) {
1189 __ li(a2, Operand(pending_exception_address));
1190 __ ld(a2, MemOperand(a2));
1191 __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
1192 // Cannot use check here as it attempts to generate call into runtime.
1193 __ Branch(&okay, eq, a4, Operand(a2));
1194 __ stop("Unexpected pending exception");
1198 // Exit C frame and return.
1200 // sp: stack pointer
1201 // fp: frame pointer
1202 // s0: still holds argc (callee-saved).
1203 __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
1205 // Handling of exception.
1206 __ bind(&exception_returned);
1208 // Retrieve the pending exception.
1209 __ li(a2, Operand(pending_exception_address));
1210 __ ld(v0, MemOperand(a2));
1212 // Clear the pending exception.
1213 __ li(a3, Operand(isolate()->factory()->the_hole_value()));
1214 __ sd(a3, MemOperand(a2));
1216 // Special handling of termination exceptions which are uncatchable
1217 // by javascript code.
1218 Label throw_termination_exception;
1219 __ LoadRoot(a4, Heap::kTerminationExceptionRootIndex);
1220 __ Branch(&throw_termination_exception, eq, v0, Operand(a4));
1222 // Handle normal exception.
1225 __ bind(&throw_termination_exception);
1226 __ ThrowUncatchable(v0);
1230 void JSEntryStub::Generate(MacroAssembler* masm) {
1231 Label invoke, handler_entry, exit;
1232 Isolate* isolate = masm->isolate();
1234 // TODO(plind): unify the ABI description here.
1236 // a0: entry address
1240 // a4 (a4): on mips64
1243 // 0 arg slots on mips64 (4 args slots on mips)
1244 // args -- in a4/a4 on mips64, on stack on mips
1246 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1248 // Save callee saved registers on the stack.
1249 __ MultiPush(kCalleeSaved | ra.bit());
1251 // Save callee-saved FPU registers.
1252 __ MultiPushFPU(kCalleeSavedFPU);
1253 // Set up the reserved register for 0.0.
1254 __ Move(kDoubleRegZero, 0.0);
1256 // Load argv in s0 register.
1257 if (kMipsAbi == kN64) {
1258 __ mov(s0, a4); // 5th parameter in mips64 a4 (a4) register.
1259 } else { // Abi O32.
1260 // 5th parameter on stack for O32 abi.
1261 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1262 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
1263 __ ld(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
1266 __ InitializeRootRegister();
1268 // We build an EntryFrame.
1269 __ li(a7, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1270 int marker = type();
1271 __ li(a6, Operand(Smi::FromInt(marker)));
1272 __ li(a5, Operand(Smi::FromInt(marker)));
1273 ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
1274 __ li(a4, Operand(c_entry_fp));
1275 __ ld(a4, MemOperand(a4));
1276 __ Push(a7, a6, a5, a4);
1277 // Set up frame pointer for the frame to be pushed.
1278 __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
1281 // a0: entry_address
1283 // a2: receiver_pointer
1289 // function slot | entry frame
1291 // bad fp (0xff...f) |
1292 // callee saved registers + ra
1293 // [ O32: 4 args slots]
1296 // If this is the outermost JS call, set js_entry_sp value.
1297 Label non_outermost_js;
1298 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1299 __ li(a5, Operand(ExternalReference(js_entry_sp)));
1300 __ ld(a6, MemOperand(a5));
1301 __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
1302 __ sd(fp, MemOperand(a5));
1303 __ li(a4, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1306 __ nop(); // Branch delay slot nop.
1307 __ bind(&non_outermost_js);
1308 __ li(a4, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1312 // Jump to a faked try block that does the invoke, with a faked catch
1313 // block that sets the pending exception.
1315 __ bind(&handler_entry);
1316 handler_offset_ = handler_entry.pos();
1317 // Caught exception: Store result (exception) in the pending exception
1318 // field in the JSEnv and return a failure sentinel. Coming in here the
1319 // fp will be invalid because the PushTryHandler below sets it to 0 to
1320 // signal the existence of the JSEntry frame.
1321 __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1323 __ sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0.
1324 __ LoadRoot(v0, Heap::kExceptionRootIndex);
1325 __ b(&exit); // b exposes branch delay slot.
1326 __ nop(); // Branch delay slot nop.
1328 // Invoke: Link this frame into the handler chain. There's only one
1329 // handler block in this code object, so its index is 0.
1331 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1332 // If an exception not caught by another handler occurs, this handler
1333 // returns control to the code after the bal(&invoke) above, which
1334 // restores all kCalleeSaved registers (including cp and fp) to their
1335 // saved values before returning a failure to C.
1337 // Clear any pending exceptions.
1338 __ LoadRoot(a5, Heap::kTheHoleValueRootIndex);
1339 __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1341 __ sd(a5, MemOperand(a4));
1343 // Invoke the function by calling through JS entry trampoline builtin.
1344 // Notice that we cannot store a reference to the trampoline code directly in
1345 // this stub, because runtime stubs are not traversed when doing GC.
1348 // a0: entry_address
1350 // a2: receiver_pointer
1357 // callee saved registers + ra
1358 // [ O32: 4 args slots]
1361 if (type() == StackFrame::ENTRY_CONSTRUCT) {
1362 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1364 __ li(a4, Operand(construct_entry));
1366 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1367 __ li(a4, Operand(entry));
1369 __ ld(t9, MemOperand(a4)); // Deref address.
1370 // Call JSEntryTrampoline.
1371 __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1374 // Unlink this frame from the handler chain.
1377 __ bind(&exit); // v0 holds result
1378 // Check if the current stack frame is marked as the outermost JS frame.
1379 Label non_outermost_js_2;
1381 __ Branch(&non_outermost_js_2,
1384 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1385 __ li(a5, Operand(ExternalReference(js_entry_sp)));
1386 __ sd(zero_reg, MemOperand(a5));
1387 __ bind(&non_outermost_js_2);
1389 // Restore the top frame descriptors from the stack.
1391 __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1393 __ sd(a5, MemOperand(a4));
1395 // Reset the stack to the callee saved registers.
1396 __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
1398 // Restore callee-saved fpu registers.
1399 __ MultiPopFPU(kCalleeSavedFPU);
1401 // Restore callee saved registers from the stack.
1402 __ MultiPop(kCalleeSaved | ra.bit());
1408 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
1409 // Return address is in ra.
1412 Register receiver = LoadDescriptor::ReceiverRegister();
1413 Register index = LoadDescriptor::NameRegister();
1414 Register scratch = a3;
1415 Register result = v0;
1416 DCHECK(!scratch.is(receiver) && !scratch.is(index));
1418 StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
1419 &miss, // When not a string.
1420 &miss, // When not a number.
1421 &miss, // When index out of range.
1422 STRING_INDEX_IS_ARRAY_INDEX,
1423 RECEIVER_IS_STRING);
1424 char_at_generator.GenerateFast(masm);
1427 StubRuntimeCallHelper call_helper;
1428 char_at_generator.GenerateSlow(masm, call_helper);
1431 PropertyAccessCompiler::TailCallBuiltin(
1432 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1436 // Uses registers a0 to a4.
1437 // Expected input (depending on whether args are in registers or on the stack):
1438 // * object: a0 or at sp + 1 * kPointerSize.
1439 // * function: a1 or at sp.
1441 // An inlined call site may have been generated before calling this stub.
1442 // In this case the offset to the inline site to patch is passed on the stack,
1443 // in the safepoint slot for register a4.
1444 void InstanceofStub::Generate(MacroAssembler* masm) {
1445 // Call site inlining and patching implies arguments in registers.
1446 DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1447 // ReturnTrueFalse is only implemented for inlined call sites.
1448 DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
1450 // Fixed register usage throughout the stub:
1451 const Register object = a0; // Object (lhs).
1452 Register map = a3; // Map of the object.
1453 const Register function = a1; // Function (rhs).
1454 const Register prototype = a4; // Prototype of the function.
1455 const Register inline_site = t1;
1456 const Register scratch = a2;
1458 const int32_t kDeltaToLoadBoolResult = 7 * Assembler::kInstrSize;
1460 Label slow, loop, is_instance, is_not_instance, not_js_object;
1462 if (!HasArgsInRegisters()) {
1463 __ ld(object, MemOperand(sp, 1 * kPointerSize));
1464 __ ld(function, MemOperand(sp, 0));
1467 // Check that the left hand is a JS object and load map.
1468 __ JumpIfSmi(object, ¬_js_object);
1469 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
1471 // If there is a call site cache don't look in the global cache, but do the
1472 // real lookup and update the call site cache.
1473 if (!HasCallSiteInlineCheck()) {
1475 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
1476 __ Branch(&miss, ne, function, Operand(at));
1477 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
1478 __ Branch(&miss, ne, map, Operand(at));
1479 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1480 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1485 // Get the prototype of the function.
1486 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1488 // Check that the function prototype is a JS object.
1489 __ JumpIfSmi(prototype, &slow);
1490 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1492 // Update the global instanceof or call site inlined cache with the current
1493 // map and function. The cached answer will be set when it is known below.
1494 if (!HasCallSiteInlineCheck()) {
1495 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1496 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1498 DCHECK(HasArgsInRegisters());
1499 // Patch the (relocated) inlined map check.
1501 // The offset was stored in a4 safepoint slot.
1502 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1503 __ LoadFromSafepointRegisterSlot(scratch, a4);
1504 __ Dsubu(inline_site, ra, scratch);
1505 // Get the map location in scratch and patch it.
1506 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
1507 __ sd(map, FieldMemOperand(scratch, Cell::kValueOffset));
1510 // Register mapping: a3 is object map and a4 is function prototype.
1511 // Get prototype of object into a2.
1512 __ ld(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1514 // We don't need map any more. Use it as a scratch register.
1515 Register scratch2 = map;
1518 // Loop through the prototype chain looking for the function prototype.
1519 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1521 __ Branch(&is_instance, eq, scratch, Operand(prototype));
1522 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
1523 __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1524 __ ld(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1527 __ bind(&is_instance);
1528 DCHECK(Smi::FromInt(0) == 0);
1529 if (!HasCallSiteInlineCheck()) {
1530 __ mov(v0, zero_reg);
1531 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1533 // Patch the call site to return true.
1534 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1535 __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1536 // Get the boolean result location in scratch and patch it.
1537 __ PatchRelocatedValue(inline_site, scratch, v0);
1539 if (!ReturnTrueFalseObject()) {
1540 DCHECK_EQ(Smi::FromInt(0), 0);
1541 __ mov(v0, zero_reg);
1544 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1546 __ bind(&is_not_instance);
1547 if (!HasCallSiteInlineCheck()) {
1548 __ li(v0, Operand(Smi::FromInt(1)));
1549 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1551 // Patch the call site to return false.
1552 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1553 __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1554 // Get the boolean result location in scratch and patch it.
1555 __ PatchRelocatedValue(inline_site, scratch, v0);
1557 if (!ReturnTrueFalseObject()) {
1558 __ li(v0, Operand(Smi::FromInt(1)));
1562 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1564 Label object_not_null, object_not_null_or_smi;
1565 __ bind(¬_js_object);
1566 // Before null, smi and string value checks, check that the rhs is a function
1567 // as for a non-function rhs an exception needs to be thrown.
1568 __ JumpIfSmi(function, &slow);
1569 __ GetObjectType(function, scratch2, scratch);
1570 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
1572 // Null is not instance of anything.
1573 __ Branch(&object_not_null, ne, object,
1574 Operand(isolate()->factory()->null_value()));
1575 __ li(v0, Operand(Smi::FromInt(1)));
1576 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1578 __ bind(&object_not_null);
1579 // Smi values are not instances of anything.
1580 __ JumpIfNotSmi(object, &object_not_null_or_smi);
1581 __ li(v0, Operand(Smi::FromInt(1)));
1582 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1584 __ bind(&object_not_null_or_smi);
1585 // String values are not instances of anything.
1586 __ IsObjectJSStringType(object, scratch, &slow);
1587 __ li(v0, Operand(Smi::FromInt(1)));
1588 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1590 // Slow-case. Tail call builtin.
1592 if (!ReturnTrueFalseObject()) {
1593 if (HasArgsInRegisters()) {
1596 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
1599 FrameScope scope(masm, StackFrame::INTERNAL);
1601 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1604 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1605 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
1606 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1607 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1612 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1614 Register receiver = LoadDescriptor::ReceiverRegister();
1615 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
1618 PropertyAccessCompiler::TailCallBuiltin(
1619 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1623 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1624 // The displacement is the offset of the last parameter (if any)
1625 // relative to the frame pointer.
1626 const int kDisplacement =
1627 StandardFrameConstants::kCallerSPOffset - kPointerSize;
1628 DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
1629 DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
1631 // Check that the key is a smiGenerateReadElement.
1633 __ JumpIfNotSmi(a1, &slow);
1635 // Check if the calling frame is an arguments adaptor frame.
1637 __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1638 __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1642 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1644 // Check index (a1) against formal parameters count limit passed in
1645 // through register a0. Use unsigned comparison to get negative
1647 __ Branch(&slow, hs, a1, Operand(a0));
1649 // Read the argument from the stack and return it.
1650 __ dsubu(a3, a0, a1);
1651 __ SmiScale(a7, a3, kPointerSizeLog2);
1652 __ Daddu(a3, fp, Operand(a7));
1653 __ Ret(USE_DELAY_SLOT);
1654 __ ld(v0, MemOperand(a3, kDisplacement));
1656 // Arguments adaptor case: Check index (a1) against actual arguments
1657 // limit found in the arguments adaptor frame. Use unsigned
1658 // comparison to get negative check for free.
1660 __ ld(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1661 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
1663 // Read the argument from the adaptor frame and return it.
1664 __ dsubu(a3, a0, a1);
1665 __ SmiScale(a7, a3, kPointerSizeLog2);
1666 __ Daddu(a3, a2, Operand(a7));
1667 __ Ret(USE_DELAY_SLOT);
1668 __ ld(v0, MemOperand(a3, kDisplacement));
1670 // Slow-case: Handle non-smi or out-of-bounds access to arguments
1671 // by calling the runtime system.
1674 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1678 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1679 // sp[0] : number of parameters
1680 // sp[4] : receiver displacement
1682 // Check if the calling frame is an arguments adaptor frame.
1684 __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1685 __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
1689 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1691 // Patch the arguments.length and the parameters pointer in the current frame.
1692 __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1693 __ sd(a2, MemOperand(sp, 0 * kPointerSize));
1694 __ SmiScale(a7, a2, kPointerSizeLog2);
1695 __ Daddu(a3, a3, Operand(a7));
1696 __ daddiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
1697 __ sd(a3, MemOperand(sp, 1 * kPointerSize));
1700 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1704 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1706 // sp[0] : number of parameters (tagged)
1707 // sp[4] : address of receiver argument
1709 // Registers used over whole function:
1710 // a6 : allocated object (tagged)
1711 // t1 : mapped parameter count (tagged)
1713 __ ld(a1, MemOperand(sp, 0 * kPointerSize));
1714 // a1 = parameter count (tagged)
1716 // Check if the calling frame is an arguments adaptor frame.
1718 Label adaptor_frame, try_allocate;
1719 __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1720 __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
1721 __ Branch(&adaptor_frame,
1724 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1726 // No adaptor, parameter count = argument count.
1728 __ Branch(&try_allocate);
1730 // We have an adaptor frame. Patch the parameters pointer.
1731 __ bind(&adaptor_frame);
1732 __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1733 __ SmiScale(t2, a2, kPointerSizeLog2);
1734 __ Daddu(a3, a3, Operand(t2));
1735 __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1736 __ sd(a3, MemOperand(sp, 1 * kPointerSize));
1738 // a1 = parameter count (tagged)
1739 // a2 = argument count (tagged)
1740 // Compute the mapped parameter count = min(a1, a2) in a1.
1742 __ Branch(&skip_min, lt, a1, Operand(a2));
1746 __ bind(&try_allocate);
1748 // Compute the sizes of backing store, parameter map, and arguments object.
1749 // 1. Parameter map, has 2 extra words containing context and backing store.
1750 const int kParameterMapHeaderSize =
1751 FixedArray::kHeaderSize + 2 * kPointerSize;
1752 // If there are no mapped parameters, we do not need the parameter_map.
1753 Label param_map_size;
1754 DCHECK_EQ(0, Smi::FromInt(0));
1755 __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg));
1756 __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
1757 __ SmiScale(t1, a1, kPointerSizeLog2);
1758 __ daddiu(t1, t1, kParameterMapHeaderSize);
1759 __ bind(¶m_map_size);
1761 // 2. Backing store.
1762 __ SmiScale(t2, a2, kPointerSizeLog2);
1763 __ Daddu(t1, t1, Operand(t2));
1764 __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
1766 // 3. Arguments object.
1767 __ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
1769 // Do the allocation of all three objects in one go.
1770 __ Allocate(t1, v0, a3, a4, &runtime, TAG_OBJECT);
1772 // v0 = address of new object(s) (tagged)
1773 // a2 = argument count (smi-tagged)
1774 // Get the arguments boilerplate from the current native context into a4.
1775 const int kNormalOffset =
1776 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
1777 const int kAliasedOffset =
1778 Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
1780 __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1781 __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
1782 Label skip2_ne, skip2_eq;
1783 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
1784 __ ld(a4, MemOperand(a4, kNormalOffset));
1787 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
1788 __ ld(a4, MemOperand(a4, kAliasedOffset));
1791 // v0 = address of new object (tagged)
1792 // a1 = mapped parameter count (tagged)
1793 // a2 = argument count (smi-tagged)
1794 // a4 = address of arguments map (tagged)
1795 __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
1796 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
1797 __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
1798 __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
1800 // Set up the callee in-object property.
1801 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1802 __ ld(a3, MemOperand(sp, 2 * kPointerSize));
1803 __ AssertNotSmi(a3);
1804 const int kCalleeOffset = JSObject::kHeaderSize +
1805 Heap::kArgumentsCalleeIndex * kPointerSize;
1806 __ sd(a3, FieldMemOperand(v0, kCalleeOffset));
1808 // Use the length (smi tagged) and set that as an in-object property too.
1809 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1810 const int kLengthOffset = JSObject::kHeaderSize +
1811 Heap::kArgumentsLengthIndex * kPointerSize;
1812 __ sd(a2, FieldMemOperand(v0, kLengthOffset));
1814 // Set up the elements pointer in the allocated arguments object.
1815 // If we allocated a parameter map, a4 will point there, otherwise
1816 // it will point to the backing store.
1817 __ Daddu(a4, v0, Operand(Heap::kSloppyArgumentsObjectSize));
1818 __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
1820 // v0 = address of new object (tagged)
1821 // a1 = mapped parameter count (tagged)
1822 // a2 = argument count (tagged)
1823 // a4 = address of parameter map or backing store (tagged)
1824 // Initialize parameter map. If there are no mapped arguments, we're done.
1825 Label skip_parameter_map;
1827 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
1828 // Move backing store address to a3, because it is
1829 // expected there when filling in the unmapped arguments.
1833 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
1835 __ LoadRoot(a6, Heap::kSloppyArgumentsElementsMapRootIndex);
1836 __ sd(a6, FieldMemOperand(a4, FixedArray::kMapOffset));
1837 __ Daddu(a6, a1, Operand(Smi::FromInt(2)));
1838 __ sd(a6, FieldMemOperand(a4, FixedArray::kLengthOffset));
1839 __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
1840 __ SmiScale(t2, a1, kPointerSizeLog2);
1841 __ Daddu(a6, a4, Operand(t2));
1842 __ Daddu(a6, a6, Operand(kParameterMapHeaderSize));
1843 __ sd(a6, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
1845 // Copy the parameter slots and the holes in the arguments.
1846 // We need to fill in mapped_parameter_count slots. They index the context,
1847 // where parameters are stored in reverse order, at
1848 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
1849 // The mapped parameter thus need to get indices
1850 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
1851 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
1852 // We loop from right to left.
1853 Label parameters_loop, parameters_test;
1855 __ ld(t1, MemOperand(sp, 0 * kPointerSize));
1856 __ Daddu(t1, t1, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
1857 __ Dsubu(t1, t1, Operand(a1));
1858 __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
1859 __ SmiScale(t2, a6, kPointerSizeLog2);
1860 __ Daddu(a3, a4, Operand(t2));
1861 __ Daddu(a3, a3, Operand(kParameterMapHeaderSize));
1863 // a6 = loop variable (tagged)
1864 // a1 = mapping index (tagged)
1865 // a3 = address of backing store (tagged)
1866 // a4 = address of parameter map (tagged)
1867 // a5 = temporary scratch (a.o., for address calculation)
1868 // a7 = the hole value
1869 __ jmp(¶meters_test);
1871 __ bind(¶meters_loop);
1873 __ Dsubu(a6, a6, Operand(Smi::FromInt(1)));
1874 __ SmiScale(a5, a6, kPointerSizeLog2);
1875 __ Daddu(a5, a5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
1876 __ Daddu(t2, a4, a5);
1877 __ sd(t1, MemOperand(t2));
1878 __ Dsubu(a5, a5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
1879 __ Daddu(t2, a3, a5);
1880 __ sd(a7, MemOperand(t2));
1881 __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
1882 __ bind(¶meters_test);
1883 __ Branch(¶meters_loop, ne, a6, Operand(Smi::FromInt(0)));
1885 __ bind(&skip_parameter_map);
1886 // a2 = argument count (tagged)
1887 // a3 = address of backing store (tagged)
1889 // Copy arguments header and remaining slots (if there are any).
1890 __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
1891 __ sd(a5, FieldMemOperand(a3, FixedArray::kMapOffset));
1892 __ sd(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
1894 Label arguments_loop, arguments_test;
1896 __ ld(a4, MemOperand(sp, 1 * kPointerSize));
1897 __ SmiScale(t2, t1, kPointerSizeLog2);
1898 __ Dsubu(a4, a4, Operand(t2));
1899 __ jmp(&arguments_test);
1901 __ bind(&arguments_loop);
1902 __ Dsubu(a4, a4, Operand(kPointerSize));
1903 __ ld(a6, MemOperand(a4, 0));
1904 __ SmiScale(t2, t1, kPointerSizeLog2);
1905 __ Daddu(a5, a3, Operand(t2));
1906 __ sd(a6, FieldMemOperand(a5, FixedArray::kHeaderSize));
1907 __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
1909 __ bind(&arguments_test);
1910 __ Branch(&arguments_loop, lt, t1, Operand(a2));
1912 // Return and remove the on-stack parameters.
1915 // Do the runtime call to allocate the arguments object.
1916 // a2 = argument count (tagged)
1918 __ sd(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
1919 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1923 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1924 // Return address is in ra.
1927 Register receiver = LoadDescriptor::ReceiverRegister();
1928 Register key = LoadDescriptor::NameRegister();
1930 // Check that the key is an array index, that is Uint32.
1931 __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
1932 __ Branch(&slow, ne, t0, Operand(zero_reg));
1934 // Everything is fine, call runtime.
1935 __ Push(receiver, key); // Receiver, key.
1937 // Perform tail call to the entry.
1938 __ TailCallExternalReference(
1939 ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
1944 PropertyAccessCompiler::TailCallBuiltin(
1945 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1949 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1950 // sp[0] : number of parameters
1951 // sp[4] : receiver displacement
1953 // Check if the calling frame is an arguments adaptor frame.
1954 Label adaptor_frame, try_allocate, runtime;
1955 __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1956 __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1957 __ Branch(&adaptor_frame,
1960 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1962 // Get the length from the frame.
1963 __ ld(a1, MemOperand(sp, 0));
1964 __ Branch(&try_allocate);
1966 // Patch the arguments.length and the parameters pointer.
1967 __ bind(&adaptor_frame);
1968 __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1969 __ sd(a1, MemOperand(sp, 0));
1970 __ SmiScale(at, a1, kPointerSizeLog2);
1972 __ Daddu(a3, a2, Operand(at));
1974 __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1975 __ sd(a3, MemOperand(sp, 1 * kPointerSize));
1977 // Try the new space allocation. Start out with computing the size
1978 // of the arguments object and the elements array in words.
1979 Label add_arguments_object;
1980 __ bind(&try_allocate);
1981 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
1984 __ Daddu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
1985 __ bind(&add_arguments_object);
1986 __ Daddu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
1988 // Do the allocation of both objects in one go.
1989 __ Allocate(a1, v0, a2, a3, &runtime,
1990 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
1992 // Get the arguments boilerplate from the current native context.
1993 __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1994 __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
1995 __ ld(a4, MemOperand(a4, Context::SlotOffset(
1996 Context::STRICT_ARGUMENTS_MAP_INDEX)));
1998 __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
1999 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
2000 __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
2001 __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
2003 // Get the length (smi tagged) and set that as an in-object property too.
2004 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2005 __ ld(a1, MemOperand(sp, 0 * kPointerSize));
2007 __ sd(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
2008 Heap::kArgumentsLengthIndex * kPointerSize));
2011 __ Branch(&done, eq, a1, Operand(zero_reg));
2013 // Get the parameters pointer from the stack.
2014 __ ld(a2, MemOperand(sp, 1 * kPointerSize));
2016 // Set up the elements pointer in the allocated arguments object and
2017 // initialize the header in the elements fixed array.
2018 __ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize));
2019 __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
2020 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
2021 __ sd(a3, FieldMemOperand(a4, FixedArray::kMapOffset));
2022 __ sd(a1, FieldMemOperand(a4, FixedArray::kLengthOffset));
2023 // Untag the length for the loop.
2027 // Copy the fixed array slots.
2029 // Set up a4 to point to the first array slot.
2030 __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2032 // Pre-decrement a2 with kPointerSize on each iteration.
2033 // Pre-decrement in order to skip receiver.
2034 __ Daddu(a2, a2, Operand(-kPointerSize));
2035 __ ld(a3, MemOperand(a2));
2036 // Post-increment a4 with kPointerSize on each iteration.
2037 __ sd(a3, MemOperand(a4));
2038 __ Daddu(a4, a4, Operand(kPointerSize));
2039 __ Dsubu(a1, a1, Operand(1));
2040 __ Branch(&loop, ne, a1, Operand(zero_reg));
2042 // Return and remove the on-stack parameters.
2046 // Do the runtime call to allocate the arguments object.
2048 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
2052 void RegExpExecStub::Generate(MacroAssembler* masm) {
2053 // Just jump directly to runtime if native RegExp is not selected at compile
2054 // time or if regexp entry in generated code is turned off runtime switch or
2056 #ifdef V8_INTERPRETED_REGEXP
2057 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2058 #else // V8_INTERPRETED_REGEXP
2060 // Stack frame on entry.
2061 // sp[0]: last_match_info (expected JSArray)
2062 // sp[4]: previous index
2063 // sp[8]: subject string
2064 // sp[12]: JSRegExp object
2066 const int kLastMatchInfoOffset = 0 * kPointerSize;
2067 const int kPreviousIndexOffset = 1 * kPointerSize;
2068 const int kSubjectOffset = 2 * kPointerSize;
2069 const int kJSRegExpOffset = 3 * kPointerSize;
2072 // Allocation of registers for this function. These are in callee save
2073 // registers and will be preserved by the call to the native RegExp code, as
2074 // this code is called using the normal C calling convention. When calling
2075 // directly from generated code the native RegExp code will not do a GC and
2076 // therefore the content of these registers are safe to use after the call.
2077 // MIPS - using s0..s2, since we are not using CEntry Stub.
2078 Register subject = s0;
2079 Register regexp_data = s1;
2080 Register last_match_info_elements = s2;
2082 // Ensure that a RegExp stack is allocated.
2083 ExternalReference address_of_regexp_stack_memory_address =
2084 ExternalReference::address_of_regexp_stack_memory_address(
2086 ExternalReference address_of_regexp_stack_memory_size =
2087 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2088 __ li(a0, Operand(address_of_regexp_stack_memory_size));
2089 __ ld(a0, MemOperand(a0, 0));
2090 __ Branch(&runtime, eq, a0, Operand(zero_reg));
2092 // Check that the first argument is a JSRegExp object.
2093 __ ld(a0, MemOperand(sp, kJSRegExpOffset));
2094 STATIC_ASSERT(kSmiTag == 0);
2095 __ JumpIfSmi(a0, &runtime);
2096 __ GetObjectType(a0, a1, a1);
2097 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2099 // Check that the RegExp has been compiled (data contains a fixed array).
2100 __ ld(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2101 if (FLAG_debug_code) {
2102 __ SmiTst(regexp_data, a4);
2104 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2107 __ GetObjectType(regexp_data, a0, a0);
2109 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2111 Operand(FIXED_ARRAY_TYPE));
2114 // regexp_data: RegExp data (FixedArray)
2115 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2116 __ ld(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2117 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2119 // regexp_data: RegExp data (FixedArray)
2120 // Check that the number of captures fit in the static offsets vector buffer.
2122 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2123 // Check (number_of_captures + 1) * 2 <= offsets vector size
2124 // Or number_of_captures * 2 <= offsets vector size - 2
2125 // Or number_of_captures <= offsets vector size / 2 - 1
2126 // Multiplying by 2 comes for free since a2 is smi-tagged.
2127 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2128 int temp = Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1;
2129 __ Branch(&runtime, hi, a2, Operand(Smi::FromInt(temp)));
2131 // Reset offset for possibly sliced string.
2132 __ mov(t0, zero_reg);
2133 __ ld(subject, MemOperand(sp, kSubjectOffset));
2134 __ JumpIfSmi(subject, &runtime);
2135 __ mov(a3, subject); // Make a copy of the original subject string.
2136 __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2137 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2138 // subject: subject string
2139 // a3: subject string
2140 // a0: subject string instance type
2141 // regexp_data: RegExp data (FixedArray)
2142 // Handle subject string according to its encoding and representation:
2143 // (1) Sequential string? If yes, go to (5).
2144 // (2) Anything but sequential or cons? If yes, go to (6).
2145 // (3) Cons string. If the string is flat, replace subject with first string.
2146 // Otherwise bailout.
2147 // (4) Is subject external? If yes, go to (7).
2148 // (5) Sequential string. Load regexp code according to encoding.
2152 // Deferred code at the end of the stub:
2153 // (6) Not a long external string? If yes, go to (8).
2154 // (7) External string. Make it, offset-wise, look like a sequential string.
2156 // (8) Short external string or not a string? If yes, bail out to runtime.
2157 // (9) Sliced string. Replace subject with parent. Go to (4).
2159 Label check_underlying; // (4)
2160 Label seq_string; // (5)
2161 Label not_seq_nor_cons; // (6)
2162 Label external_string; // (7)
2163 Label not_long_external; // (8)
2165 // (1) Sequential string? If yes, go to (5).
2168 Operand(kIsNotStringMask |
2169 kStringRepresentationMask |
2170 kShortExternalStringMask));
2171 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2172 __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
2174 // (2) Anything but sequential or cons? If yes, go to (6).
2175 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2176 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2177 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2178 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2180 __ Branch(¬_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2182 // (3) Cons string. Check that it's flat.
2183 // Replace subject with first string and reload instance type.
2184 __ ld(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2185 __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2186 __ Branch(&runtime, ne, a0, Operand(a1));
2187 __ ld(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2189 // (4) Is subject external? If yes, go to (7).
2190 __ bind(&check_underlying);
2191 __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2192 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2193 STATIC_ASSERT(kSeqStringTag == 0);
2194 __ And(at, a0, Operand(kStringRepresentationMask));
2195 // The underlying external string is never a short external string.
2196 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2197 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2198 __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
2200 // (5) Sequential string. Load regexp code according to encoding.
2201 __ bind(&seq_string);
2202 // subject: sequential subject string (or look-alike, external string)
2203 // a3: original subject string
2204 // Load previous index and check range before a3 is overwritten. We have to
2205 // use a3 instead of subject here because subject might have been only made
2206 // to look like a sequential string when it actually is an external string.
2207 __ ld(a1, MemOperand(sp, kPreviousIndexOffset));
2208 __ JumpIfNotSmi(a1, &runtime);
2209 __ ld(a3, FieldMemOperand(a3, String::kLengthOffset));
2210 __ Branch(&runtime, ls, a3, Operand(a1));
2213 STATIC_ASSERT(kStringEncodingMask == 4);
2214 STATIC_ASSERT(kOneByteStringTag == 4);
2215 STATIC_ASSERT(kTwoByteStringTag == 0);
2216 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one_byte.
2217 __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
2218 __ dsra(a3, a0, 2); // a3 is 1 for one_byte, 0 for UC16 (used below).
2219 __ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2220 __ Movz(t9, a5, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
2222 // (E) Carry on. String handling is done.
2223 // t9: irregexp code
2224 // Check that the irregexp code has been generated for the actual string
2225 // encoding. If it has, the field contains a code object otherwise it contains
2226 // a smi (code flushing support).
2227 __ JumpIfSmi(t9, &runtime);
2229 // a1: previous index
2230 // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
2232 // subject: Subject string
2233 // regexp_data: RegExp data (FixedArray)
2234 // All checks done. Now push arguments for native regexp code.
2235 __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
2238 // Isolates: note we add an additional parameter here (isolate pointer).
2239 const int kRegExpExecuteArguments = 9;
2240 const int kParameterRegisters = (kMipsAbi == kN64) ? 8 : 4;
2241 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2243 // Stack pointer now points to cell where return address is to be written.
2244 // Arguments are before that on the stack or in registers, meaning we
2245 // treat the return address as argument 5. Thus every argument after that
2246 // needs to be shifted back by 1. Since DirectCEntryStub will handle
2247 // allocating space for the c argument slots, we don't need to calculate
2248 // that into the argument positions on the stack. This is how the stack will
2249 // look (sp meaning the value of sp at this moment):
2251 // [sp + 1] - Argument 9
2252 // [sp + 0] - saved ra
2254 // [sp + 5] - Argument 9
2255 // [sp + 4] - Argument 8
2256 // [sp + 3] - Argument 7
2257 // [sp + 2] - Argument 6
2258 // [sp + 1] - Argument 5
2259 // [sp + 0] - saved ra
2261 if (kMipsAbi == kN64) {
2262 // Argument 9: Pass current isolate address.
2263 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2264 __ sd(a0, MemOperand(sp, 1 * kPointerSize));
2266 // Argument 8: Indicate that this is a direct call from JavaScript.
2267 __ li(a7, Operand(1));
2269 // Argument 7: Start (high end) of backtracking stack memory area.
2270 __ li(a0, Operand(address_of_regexp_stack_memory_address));
2271 __ ld(a0, MemOperand(a0, 0));
2272 __ li(a2, Operand(address_of_regexp_stack_memory_size));
2273 __ ld(a2, MemOperand(a2, 0));
2274 __ daddu(a6, a0, a2);
2276 // Argument 6: Set the number of capture registers to zero to force global
2277 // regexps to behave as non-global. This does not affect non-global regexps.
2278 __ mov(a5, zero_reg);
2280 // Argument 5: static offsets vector buffer.
2282 ExternalReference::address_of_static_offsets_vector(isolate())));
2284 DCHECK(kMipsAbi == kO32);
2286 // Argument 9: Pass current isolate address.
2287 // CFunctionArgumentOperand handles MIPS stack argument slots.
2288 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2289 __ sd(a0, MemOperand(sp, 5 * kPointerSize));
2291 // Argument 8: Indicate that this is a direct call from JavaScript.
2292 __ li(a0, Operand(1));
2293 __ sd(a0, MemOperand(sp, 4 * kPointerSize));
2295 // Argument 7: Start (high end) of backtracking stack memory area.
2296 __ li(a0, Operand(address_of_regexp_stack_memory_address));
2297 __ ld(a0, MemOperand(a0, 0));
2298 __ li(a2, Operand(address_of_regexp_stack_memory_size));
2299 __ ld(a2, MemOperand(a2, 0));
2300 __ daddu(a0, a0, a2);
2301 __ sd(a0, MemOperand(sp, 3 * kPointerSize));
2303 // Argument 6: Set the number of capture registers to zero to force global
2304 // regexps to behave as non-global. This does not affect non-global regexps.
2305 __ mov(a0, zero_reg);
2306 __ sd(a0, MemOperand(sp, 2 * kPointerSize));
2308 // Argument 5: static offsets vector buffer.
2310 ExternalReference::address_of_static_offsets_vector(isolate())));
2311 __ sd(a0, MemOperand(sp, 1 * kPointerSize));
2314 // For arguments 4 and 3 get string length, calculate start of string data
2315 // and calculate the shift of the index (0 for one_byte and 1 for two byte).
2316 __ Daddu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2317 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
2318 // Load the length from the original subject string from the previous stack
2319 // frame. Therefore we have to use fp, which points exactly to two pointer
2320 // sizes below the previous sp. (Because creating a new stack frame pushes
2321 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2322 __ ld(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2323 // If slice offset is not 0, load the length from the original sliced string.
2324 // Argument 4, a3: End of string data
2325 // Argument 3, a2: Start of string data
2326 // Prepare start and end index of the input.
2327 __ dsllv(t1, t0, a3);
2328 __ daddu(t0, t2, t1);
2329 __ dsllv(t1, a1, a3);
2330 __ daddu(a2, t0, t1);
2332 __ ld(t2, FieldMemOperand(subject, String::kLengthOffset));
2335 __ dsllv(t1, t2, a3);
2336 __ daddu(a3, t0, t1);
2337 // Argument 2 (a1): Previous index.
2340 // Argument 1 (a0): Subject string.
2341 __ mov(a0, subject);
2343 // Locate the code entry and call it.
2344 __ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
2345 DirectCEntryStub stub(isolate());
2346 stub.GenerateCall(masm, t9);
2348 __ LeaveExitFrame(false, no_reg, true);
2351 // subject: subject string (callee saved)
2352 // regexp_data: RegExp data (callee saved)
2353 // last_match_info_elements: Last match info elements (callee saved)
2354 // Check the result.
2356 __ Branch(&success, eq, v0, Operand(1));
2357 // We expect exactly one result since we force the called regexp to behave
2360 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
2361 // If not exception it can only be retry. Handle that in the runtime system.
2362 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2363 // Result must now be exception. If there is no pending exception already a
2364 // stack overflow (on the backtrack stack) was detected in RegExp code but
2365 // haven't created the exception yet. Handle that in the runtime system.
2366 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2367 __ li(a1, Operand(isolate()->factory()->the_hole_value()));
2368 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2370 __ ld(v0, MemOperand(a2, 0));
2371 __ Branch(&runtime, eq, v0, Operand(a1));
2373 __ sd(a1, MemOperand(a2, 0)); // Clear pending exception.
2375 // Check if the exception is a termination. If so, throw as uncatchable.
2376 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
2377 Label termination_exception;
2378 __ Branch(&termination_exception, eq, v0, Operand(a0));
2382 __ bind(&termination_exception);
2383 __ ThrowUncatchable(v0);
2386 // For failure and exception return null.
2387 __ li(v0, Operand(isolate()->factory()->null_value()));
2390 // Process the result from the native regexp code.
2393 __ lw(a1, UntagSmiFieldMemOperand(
2394 regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2395 // Calculate number of capture registers (number_of_captures + 1) * 2.
2396 __ Daddu(a1, a1, Operand(1));
2397 __ dsll(a1, a1, 1); // Multiply by 2.
2399 __ ld(a0, MemOperand(sp, kLastMatchInfoOffset));
2400 __ JumpIfSmi(a0, &runtime);
2401 __ GetObjectType(a0, a2, a2);
2402 __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
2403 // Check that the JSArray is in fast case.
2404 __ ld(last_match_info_elements,
2405 FieldMemOperand(a0, JSArray::kElementsOffset));
2406 __ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2407 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2408 __ Branch(&runtime, ne, a0, Operand(at));
2409 // Check that the last match info has space for the capture registers and the
2410 // additional information.
2412 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2413 __ Daddu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
2415 __ SmiUntag(at, a0);
2416 __ Branch(&runtime, gt, a2, Operand(at));
2418 // a1: number of capture registers
2419 // subject: subject string
2420 // Store the capture count.
2421 __ SmiTag(a2, a1); // To smi.
2422 __ sd(a2, FieldMemOperand(last_match_info_elements,
2423 RegExpImpl::kLastCaptureCountOffset));
2424 // Store last subject and last input.
2426 FieldMemOperand(last_match_info_elements,
2427 RegExpImpl::kLastSubjectOffset));
2428 __ mov(a2, subject);
2429 __ RecordWriteField(last_match_info_elements,
2430 RegExpImpl::kLastSubjectOffset,
2435 __ mov(subject, a2);
2437 FieldMemOperand(last_match_info_elements,
2438 RegExpImpl::kLastInputOffset));
2439 __ RecordWriteField(last_match_info_elements,
2440 RegExpImpl::kLastInputOffset,
2446 // Get the static offsets vector filled by the native regexp code.
2447 ExternalReference address_of_static_offsets_vector =
2448 ExternalReference::address_of_static_offsets_vector(isolate());
2449 __ li(a2, Operand(address_of_static_offsets_vector));
2451 // a1: number of capture registers
2452 // a2: offsets vector
2453 Label next_capture, done;
2454 // Capture register counter starts from number of capture registers and
2455 // counts down until wrapping after zero.
2457 last_match_info_elements,
2458 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2459 __ bind(&next_capture);
2460 __ Dsubu(a1, a1, Operand(1));
2461 __ Branch(&done, lt, a1, Operand(zero_reg));
2462 // Read the value from the static offsets vector buffer.
2463 __ lw(a3, MemOperand(a2, 0));
2464 __ daddiu(a2, a2, kIntSize);
2465 // Store the smi value in the last match info.
2467 __ sd(a3, MemOperand(a0, 0));
2468 __ Branch(&next_capture, USE_DELAY_SLOT);
2469 __ daddiu(a0, a0, kPointerSize); // In branch delay slot.
2473 // Return last match info.
2474 __ ld(v0, MemOperand(sp, kLastMatchInfoOffset));
2477 // Do the runtime call to execute the regexp.
2479 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2481 // Deferred code for string handling.
2482 // (6) Not a long external string? If yes, go to (8).
2483 __ bind(¬_seq_nor_cons);
2485 __ Branch(¬_long_external, gt, a1, Operand(kExternalStringTag));
2487 // (7) External string. Make it, offset-wise, look like a sequential string.
2488 __ bind(&external_string);
2489 __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2490 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2491 if (FLAG_debug_code) {
2492 // Assert that we do not have a cons or slice (indirect strings) here.
2493 // Sequential strings have already been ruled out.
2494 __ And(at, a0, Operand(kIsIndirectStringMask));
2496 kExternalStringExpectedButNotFound,
2501 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2502 // Move the pointer so that offset-wise, it looks like a sequential string.
2503 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2506 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
2507 __ jmp(&seq_string); // Go to (5).
2509 // (8) Short external string or not a string? If yes, bail out to runtime.
2510 __ bind(¬_long_external);
2511 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
2512 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
2513 __ Branch(&runtime, ne, at, Operand(zero_reg));
2515 // (9) Sliced string. Replace subject with parent. Go to (4).
2516 // Load offset into t0 and replace subject string with parent.
2517 __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2519 __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2520 __ jmp(&check_underlying); // Go to (4).
2521 #endif // V8_INTERPRETED_REGEXP
2525 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2526 // Cache the called function in a feedback vector slot. Cache states
2527 // are uninitialized, monomorphic (indicated by a JSFunction), and
2529 // a0 : number of arguments to the construct function
2530 // a1 : the function to call
2531 // a2 : Feedback vector
2532 // a3 : slot in feedback vector (Smi)
2533 Label initialize, done, miss, megamorphic, not_array_function;
2535 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2536 masm->isolate()->heap()->megamorphic_symbol());
2537 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2538 masm->isolate()->heap()->uninitialized_symbol());
2540 // Load the cache state into a4.
2541 __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2542 __ Daddu(a4, a2, Operand(a4));
2543 __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
2545 // A monomorphic cache hit or an already megamorphic state: invoke the
2546 // function without changing the state.
2547 __ Branch(&done, eq, a4, Operand(a1));
2549 if (!FLAG_pretenuring_call_new) {
2550 // If we came here, we need to see if we are the array function.
2551 // If we didn't have a matching function, and we didn't find the megamorph
2552 // sentinel, then we have in the slot either some other function or an
2553 // AllocationSite. Do a map check on the object in a3.
2554 __ ld(a5, FieldMemOperand(a4, 0));
2555 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2556 __ Branch(&miss, ne, a5, Operand(at));
2558 // Make sure the function is the Array() function
2559 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
2560 __ Branch(&megamorphic, ne, a1, Operand(a4));
2566 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
2568 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2569 __ Branch(&initialize, eq, a4, Operand(at));
2570 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2571 // write-barrier is needed.
2572 __ bind(&megamorphic);
2573 __ dsrl(a4, a3, 32- kPointerSizeLog2);
2574 __ Daddu(a4, a2, Operand(a4));
2575 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2576 __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
2579 // An uninitialized cache is patched with the function.
2580 __ bind(&initialize);
2581 if (!FLAG_pretenuring_call_new) {
2582 // Make sure the function is the Array() function.
2583 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
2584 __ Branch(¬_array_function, ne, a1, Operand(a4));
2586 // The target function is the Array constructor,
2587 // Create an AllocationSite if we don't already have it, store it in the
2590 FrameScope scope(masm, StackFrame::INTERNAL);
2591 const RegList kSavedRegs =
2597 // Arguments register must be smi-tagged to call out.
2599 __ MultiPush(kSavedRegs);
2601 CreateAllocationSiteStub create_stub(masm->isolate());
2602 __ CallStub(&create_stub);
2604 __ MultiPop(kSavedRegs);
2609 __ bind(¬_array_function);
2612 __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2613 __ Daddu(a4, a2, Operand(a4));
2614 __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2615 __ sd(a1, MemOperand(a4, 0));
2617 __ Push(a4, a2, a1);
2618 __ RecordWrite(a2, a4, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
2619 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
2626 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2627 __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2629 // Do not transform the receiver for strict mode functions.
2630 int32_t strict_mode_function_mask =
2631 1 << SharedFunctionInfo::kStrictModeBitWithinByte ;
2632 // Do not transform the receiver for native (Compilerhints already in a3).
2633 int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
2635 __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kStrictModeByteOffset));
2636 __ And(at, a4, Operand(strict_mode_function_mask));
2637 __ Branch(cont, ne, at, Operand(zero_reg));
2638 __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kNativeByteOffset));
2639 __ And(at, a4, Operand(native_mask));
2640 __ Branch(cont, ne, at, Operand(zero_reg));
2644 static void EmitSlowCase(MacroAssembler* masm,
2646 Label* non_function) {
2647 // Check for function proxy.
2648 __ Branch(non_function, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
2649 __ push(a1); // put proxy as additional argument
2650 __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
2651 __ mov(a2, zero_reg);
2652 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
2654 Handle<Code> adaptor =
2655 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2656 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2659 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2660 // of the original receiver from the call site).
2661 __ bind(non_function);
2662 __ sd(a1, MemOperand(sp, argc * kPointerSize));
2663 __ li(a0, Operand(argc)); // Set up the number of arguments.
2664 __ mov(a2, zero_reg);
2665 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
2666 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2667 RelocInfo::CODE_TARGET);
2671 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2672 // Wrap the receiver and patch it back onto the stack.
2673 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
2675 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2678 __ Branch(USE_DELAY_SLOT, cont);
2679 __ sd(v0, MemOperand(sp, argc * kPointerSize));
2683 static void CallFunctionNoFeedback(MacroAssembler* masm,
2684 int argc, bool needs_checks,
2685 bool call_as_method) {
2686 // a1 : the function to call
2687 Label slow, non_function, wrap, cont;
2690 // Check that the function is really a JavaScript function.
2691 // a1: pushed function (to be verified)
2692 __ JumpIfSmi(a1, &non_function);
2694 // Goto slow case if we do not have a function.
2695 __ GetObjectType(a1, a4, a4);
2696 __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
2699 // Fast-case: Invoke the function now.
2700 // a1: pushed function
2701 ParameterCount actual(argc);
2703 if (call_as_method) {
2705 EmitContinueIfStrictOrNative(masm, &cont);
2708 // Compute the receiver in sloppy mode.
2709 __ ld(a3, MemOperand(sp, argc * kPointerSize));
2712 __ JumpIfSmi(a3, &wrap);
2713 __ GetObjectType(a3, a4, a4);
2714 __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
2721 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2724 // Slow-case: Non-function called.
2726 EmitSlowCase(masm, argc, &non_function);
2729 if (call_as_method) {
2731 // Wrap the receiver and patch it back onto the stack.
2732 EmitWrapCase(masm, argc, &cont);
2737 void CallFunctionStub::Generate(MacroAssembler* masm) {
2738 CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2742 void CallConstructStub::Generate(MacroAssembler* masm) {
2743 // a0 : number of arguments
2744 // a1 : the function to call
2745 // a2 : feedback vector
2746 // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
2747 Label slow, non_function_call;
2748 // Check that the function is not a smi.
2749 __ JumpIfSmi(a1, &non_function_call);
2750 // Check that the function is a JSFunction.
2751 __ GetObjectType(a1, a4, a4);
2752 __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
2754 if (RecordCallTarget()) {
2755 GenerateRecordCallTarget(masm);
2757 __ dsrl(at, a3, 32 - kPointerSizeLog2);
2758 __ Daddu(a5, a2, at);
2759 if (FLAG_pretenuring_call_new) {
2760 // Put the AllocationSite from the feedback vector into a2.
2761 // By adding kPointerSize we encode that we know the AllocationSite
2762 // entry is at the feedback vector slot given by a3 + 1.
2763 __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
2765 Label feedback_register_initialized;
2766 // Put the AllocationSite from the feedback vector into a2, or undefined.
2767 __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
2768 __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
2769 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2770 __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
2771 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
2772 __ bind(&feedback_register_initialized);
2775 __ AssertUndefinedOrAllocationSite(a2, a5);
2778 // Jump to the function-specific construct stub.
2779 Register jmp_reg = a4;
2780 __ ld(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2781 __ ld(jmp_reg, FieldMemOperand(jmp_reg,
2782 SharedFunctionInfo::kConstructStubOffset));
2783 __ Daddu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
2786 // a0: number of arguments
2787 // a1: called object
2791 __ Branch(&non_function_call, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
2792 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2795 __ bind(&non_function_call);
2796 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2798 // Set expected number of arguments to zero (not changing r0).
2799 __ li(a2, Operand(0, RelocInfo::NONE32));
2800 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2801 RelocInfo::CODE_TARGET);
2805 // StringCharCodeAtGenerator.
2806 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2807 DCHECK(!a4.is(index_));
2808 DCHECK(!a4.is(result_));
2809 DCHECK(!a4.is(object_));
2811 // If the receiver is a smi trigger the non-string case.
2812 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2813 __ JumpIfSmi(object_, receiver_not_string_);
2815 // Fetch the instance type of the receiver into result register.
2816 __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2817 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2818 // If the receiver is not a string trigger the non-string case.
2819 __ And(a4, result_, Operand(kIsNotStringMask));
2820 __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg));
2823 // If the index is non-smi trigger the non-smi case.
2824 __ JumpIfNotSmi(index_, &index_not_smi_);
2826 __ bind(&got_smi_index_);
2828 // Check for index out of range.
2829 __ ld(a4, FieldMemOperand(object_, String::kLengthOffset));
2830 __ Branch(index_out_of_range_, ls, a4, Operand(index_));
2832 __ SmiUntag(index_);
2834 StringCharLoadGenerator::Generate(masm,
2845 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2846 __ ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2847 __ ld(vector, FieldMemOperand(vector,
2848 JSFunction::kSharedFunctionInfoOffset));
2849 __ ld(vector, FieldMemOperand(vector,
2850 SharedFunctionInfo::kFeedbackVectorOffset));
2854 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2859 EmitLoadTypeFeedbackVector(masm, a2);
2861 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
2862 __ Branch(&miss, ne, a1, Operand(at));
2864 __ li(a0, Operand(arg_count()));
2865 __ dsrl(at, a3, 32 - kPointerSizeLog2);
2866 __ Daddu(at, a2, Operand(at));
2867 __ ld(a4, FieldMemOperand(at, FixedArray::kHeaderSize));
2869 // Verify that a4 contains an AllocationSite
2870 __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
2871 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2872 __ Branch(&miss, ne, a5, Operand(at));
2875 ArrayConstructorStub stub(masm->isolate(), arg_count());
2876 __ TailCallStub(&stub);
2881 // The slow case, we need this no matter what to complete a call after a miss.
2882 CallFunctionNoFeedback(masm,
2888 __ stop("Unexpected code address");
2892 void CallICStub::Generate(MacroAssembler* masm) {
2894 // a3 - slot id (Smi)
2895 Label extra_checks_or_miss, slow_start;
2896 Label slow, non_function, wrap, cont;
2897 Label have_js_function;
2898 int argc = arg_count();
2899 ParameterCount actual(argc);
2901 EmitLoadTypeFeedbackVector(masm, a2);
2903 // The checks. First, does r1 match the recorded monomorphic target?
2904 __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2905 __ Daddu(a4, a2, Operand(a4));
2906 __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
2907 __ Branch(&extra_checks_or_miss, ne, a1, Operand(a4));
2909 __ bind(&have_js_function);
2910 if (CallAsMethod()) {
2911 EmitContinueIfStrictOrNative(masm, &cont);
2912 // Compute the receiver in sloppy mode.
2913 __ ld(a3, MemOperand(sp, argc * kPointerSize));
2915 __ JumpIfSmi(a3, &wrap);
2916 __ GetObjectType(a3, a4, a4);
2917 __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
2922 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2925 EmitSlowCase(masm, argc, &non_function);
2927 if (CallAsMethod()) {
2929 EmitWrapCase(masm, argc, &cont);
2932 __ bind(&extra_checks_or_miss);
2935 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2936 __ Branch(&slow_start, eq, a4, Operand(at));
2937 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2938 __ Branch(&miss, eq, a4, Operand(at));
2940 if (!FLAG_trace_ic) {
2941 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2942 // to handle it here. More complex cases are dealt with in the runtime.
2943 __ AssertNotSmi(a4);
2944 __ GetObjectType(a4, a5, a5);
2945 __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE));
2946 __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2947 __ Daddu(a4, a2, Operand(a4));
2948 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2949 __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
2950 // We have to update statistics for runtime profiling.
2951 const int with_types_offset =
2952 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
2953 __ ld(a4, FieldMemOperand(a2, with_types_offset));
2954 __ Dsubu(a4, a4, Operand(Smi::FromInt(1)));
2955 __ sd(a4, FieldMemOperand(a2, with_types_offset));
2956 const int generic_offset =
2957 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
2958 __ ld(a4, FieldMemOperand(a2, generic_offset));
2959 __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
2960 __ Branch(USE_DELAY_SLOT, &slow_start);
2961 __ sd(a4, FieldMemOperand(a2, generic_offset)); // In delay slot.
2964 // We are here because tracing is on or we are going monomorphic.
2969 __ bind(&slow_start);
2970 // Check that the function is really a JavaScript function.
2971 // r1: pushed function (to be verified)
2972 __ JumpIfSmi(a1, &non_function);
2974 // Goto slow case if we do not have a function.
2975 __ GetObjectType(a1, a4, a4);
2976 __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
2977 __ Branch(&have_js_function);
2981 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2982 // Get the receiver of the function from the stack; 1 ~ return address.
2983 __ ld(a4, MemOperand(sp, (arg_count() + 1) * kPointerSize));
2986 FrameScope scope(masm, StackFrame::INTERNAL);
2988 // Push the receiver and the function and feedback info.
2989 __ Push(a4, a1, a2, a3);
2992 IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
2993 : IC::kCallIC_Customization_Miss;
2995 ExternalReference miss = ExternalReference(IC_Utility(id),
2997 __ CallExternalReference(miss, 4);
2999 // Move result to a1 and exit the internal frame.
3005 void StringCharCodeAtGenerator::GenerateSlow(
3006 MacroAssembler* masm,
3007 const RuntimeCallHelper& call_helper) {
3008 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3010 // Index is not a smi.
3011 __ bind(&index_not_smi_);
3012 // If index is a heap number, try converting it to an integer.
3015 Heap::kHeapNumberMapRootIndex,
3018 call_helper.BeforeCall(masm);
3019 // Consumed by runtime conversion function:
3020 __ Push(object_, index_);
3021 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3022 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3024 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3025 // NumberToSmi discards numbers that are not exact integers.
3026 __ CallRuntime(Runtime::kNumberToSmi, 1);
3029 // Save the conversion result before the pop instructions below
3030 // have a chance to overwrite it.
3032 __ Move(index_, v0);
3034 // Reload the instance type.
3035 __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3036 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3037 call_helper.AfterCall(masm);
3038 // If index is still not a smi, it must be out of range.
3039 __ JumpIfNotSmi(index_, index_out_of_range_);
3040 // Otherwise, return to the fast path.
3041 __ Branch(&got_smi_index_);
3043 // Call runtime. We get here when the receiver is a string and the
3044 // index is a number, but the code of getting the actual character
3045 // is too complex (e.g., when the string needs to be flattened).
3046 __ bind(&call_runtime_);
3047 call_helper.BeforeCall(masm);
3049 __ Push(object_, index_);
3050 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
3052 __ Move(result_, v0);
3054 call_helper.AfterCall(masm);
3057 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3061 // -------------------------------------------------------------------------
3062 // StringCharFromCodeGenerator
3064 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3065 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3067 DCHECK(!a4.is(result_));
3068 DCHECK(!a4.is(code_));
3070 STATIC_ASSERT(kSmiTag == 0);
3071 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
3074 Operand(kSmiTagMask |
3075 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3076 __ Branch(&slow_case_, ne, a4, Operand(zero_reg));
3079 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3080 // At this point code register contains smi tagged one_byte char code.
3081 STATIC_ASSERT(kSmiTag == 0);
3082 __ SmiScale(a4, code_, kPointerSizeLog2);
3083 __ Daddu(result_, result_, a4);
3084 __ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3085 __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
3086 __ Branch(&slow_case_, eq, result_, Operand(a4));
3091 void StringCharFromCodeGenerator::GenerateSlow(
3092 MacroAssembler* masm,
3093 const RuntimeCallHelper& call_helper) {
3094 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3096 __ bind(&slow_case_);
3097 call_helper.BeforeCall(masm);
3099 __ CallRuntime(Runtime::kCharFromCode, 1);
3100 __ Move(result_, v0);
3102 call_helper.AfterCall(masm);
3105 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3109 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
3112 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3117 String::Encoding encoding) {
3118 if (FLAG_debug_code) {
3119 // Check that destination is word aligned.
3120 __ And(scratch, dest, Operand(kPointerAlignmentMask));
3122 kDestinationOfCopyNotAligned,
3127 // Assumes word reads and writes are little endian.
3128 // Nothing to do for zero characters.
3131 if (encoding == String::TWO_BYTE_ENCODING) {
3132 __ Daddu(count, count, count);
3135 Register limit = count; // Read until dest equals this.
3136 __ Daddu(limit, dest, Operand(count));
3138 Label loop_entry, loop;
3139 // Copy bytes from src to dest until dest hits limit.
3140 __ Branch(&loop_entry);
3142 __ lbu(scratch, MemOperand(src));
3143 __ daddiu(src, src, 1);
3144 __ sb(scratch, MemOperand(dest));
3145 __ daddiu(dest, dest, 1);
3146 __ bind(&loop_entry);
3147 __ Branch(&loop, lt, dest, Operand(limit));
3153 void SubStringStub::Generate(MacroAssembler* masm) {
3155 // Stack frame on entry.
3156 // ra: return address
3161 // This stub is called from the native-call %_SubString(...), so
3162 // nothing can be assumed about the arguments. It is tested that:
3163 // "string" is a sequential string,
3164 // both "from" and "to" are smis, and
3165 // 0 <= from <= to <= string.length.
3166 // If any of these assumptions fail, we call the runtime system.
3168 const int kToOffset = 0 * kPointerSize;
3169 const int kFromOffset = 1 * kPointerSize;
3170 const int kStringOffset = 2 * kPointerSize;
3172 __ ld(a2, MemOperand(sp, kToOffset));
3173 __ ld(a3, MemOperand(sp, kFromOffset));
3175 // STATIC_ASSERT(kFromOffset == kToOffset + 4);
3176 STATIC_ASSERT(kSmiTag == 0);
3178 // STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3180 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
3181 // safe in this case.
3182 __ JumpIfNotSmi(a2, &runtime);
3183 __ JumpIfNotSmi(a3, &runtime);
3184 // Both a2 and a3 are untagged integers.
3186 __ SmiUntag(a2, a2);
3187 __ SmiUntag(a3, a3);
3188 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
3190 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
3191 __ Dsubu(a2, a2, a3);
3193 // Make sure first argument is a string.
3194 __ ld(v0, MemOperand(sp, kStringOffset));
3195 __ JumpIfSmi(v0, &runtime);
3196 __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
3197 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3198 __ And(a4, a1, Operand(kIsNotStringMask));
3200 __ Branch(&runtime, ne, a4, Operand(zero_reg));
3203 __ Branch(&single_char, eq, a2, Operand(1));
3205 // Short-cut for the case of trivial substring.
3207 // v0: original string
3208 // a2: result string length
3209 __ ld(a4, FieldMemOperand(v0, String::kLengthOffset));
3211 // Return original string.
3212 __ Branch(&return_v0, eq, a2, Operand(a4));
3213 // Longer than original string's length or negative: unsafe arguments.
3214 __ Branch(&runtime, hi, a2, Operand(a4));
3215 // Shorter than original string's length: an actual substring.
3217 // Deal with different string types: update the index if necessary
3218 // and put the underlying string into a5.
3219 // v0: original string
3220 // a1: instance type
3222 // a3: from index (untagged)
3223 Label underlying_unpacked, sliced_string, seq_or_external_string;
3224 // If the string is not indirect, it can only be sequential or external.
3225 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3226 STATIC_ASSERT(kIsIndirectStringMask != 0);
3227 __ And(a4, a1, Operand(kIsIndirectStringMask));
3228 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, a4, Operand(zero_reg));
3229 // a4 is used as a scratch register and can be overwritten in either case.
3230 __ And(a4, a1, Operand(kSlicedNotConsMask));
3231 __ Branch(&sliced_string, ne, a4, Operand(zero_reg));
3232 // Cons string. Check whether it is flat, then fetch first part.
3233 __ ld(a5, FieldMemOperand(v0, ConsString::kSecondOffset));
3234 __ LoadRoot(a4, Heap::kempty_stringRootIndex);
3235 __ Branch(&runtime, ne, a5, Operand(a4));
3236 __ ld(a5, FieldMemOperand(v0, ConsString::kFirstOffset));
3237 // Update instance type.
3238 __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
3239 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3240 __ jmp(&underlying_unpacked);
3242 __ bind(&sliced_string);
3243 // Sliced string. Fetch parent and correct start index by offset.
3244 __ ld(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
3245 __ ld(a4, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3246 __ SmiUntag(a4); // Add offset to index.
3247 __ Daddu(a3, a3, a4);
3248 // Update instance type.
3249 __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
3250 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3251 __ jmp(&underlying_unpacked);
3253 __ bind(&seq_or_external_string);
3254 // Sequential or external string. Just move string to the expected register.
3257 __ bind(&underlying_unpacked);
3259 if (FLAG_string_slices) {
3261 // a5: underlying subject string
3262 // a1: instance type of underlying subject string
3264 // a3: adjusted start index (untagged)
3265 // Short slice. Copy instead of slicing.
3266 __ Branch(©_routine, lt, a2, Operand(SlicedString::kMinLength));
3267 // Allocate new sliced string. At this point we do not reload the instance
3268 // type including the string encoding because we simply rely on the info
3269 // provided by the original string. It does not matter if the original
3270 // string's encoding is wrong because we always have to recheck encoding of
3271 // the newly created string's parent anyways due to externalized strings.
3272 Label two_byte_slice, set_slice_header;
3273 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3274 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3275 __ And(a4, a1, Operand(kStringEncodingMask));
3276 __ Branch(&two_byte_slice, eq, a4, Operand(zero_reg));
3277 __ AllocateOneByteSlicedString(v0, a2, a6, a7, &runtime);
3278 __ jmp(&set_slice_header);
3279 __ bind(&two_byte_slice);
3280 __ AllocateTwoByteSlicedString(v0, a2, a6, a7, &runtime);
3281 __ bind(&set_slice_header);
3283 __ sd(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
3284 __ sd(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3287 __ bind(©_routine);
3290 // a5: underlying subject string
3291 // a1: instance type of underlying subject string
3293 // a3: adjusted start index (untagged)
3294 Label two_byte_sequential, sequential_string, allocate_result;
3295 STATIC_ASSERT(kExternalStringTag != 0);
3296 STATIC_ASSERT(kSeqStringTag == 0);
3297 __ And(a4, a1, Operand(kExternalStringTag));
3298 __ Branch(&sequential_string, eq, a4, Operand(zero_reg));
3300 // Handle external string.
3301 // Rule out short external strings.
3302 STATIC_ASSERT(kShortExternalStringTag != 0);
3303 __ And(a4, a1, Operand(kShortExternalStringTag));
3304 __ Branch(&runtime, ne, a4, Operand(zero_reg));
3305 __ ld(a5, FieldMemOperand(a5, ExternalString::kResourceDataOffset));
3306 // a5 already points to the first character of underlying string.
3307 __ jmp(&allocate_result);
3309 __ bind(&sequential_string);
3310 // Locate first character of underlying subject string.
3311 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3312 __ Daddu(a5, a5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3314 __ bind(&allocate_result);
3315 // Sequential acii string. Allocate the result.
3316 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3317 __ And(a4, a1, Operand(kStringEncodingMask));
3318 __ Branch(&two_byte_sequential, eq, a4, Operand(zero_reg));
3320 // Allocate and copy the resulting one_byte string.
3321 __ AllocateOneByteString(v0, a2, a4, a6, a7, &runtime);
3323 // Locate first character of substring to copy.
3324 __ Daddu(a5, a5, a3);
3326 // Locate first character of result.
3327 __ Daddu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3329 // v0: result string
3330 // a1: first character of result string
3331 // a2: result string length
3332 // a5: first character of substring to copy
3333 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3334 StringHelper::GenerateCopyCharacters(
3335 masm, a1, a5, a2, a3, String::ONE_BYTE_ENCODING);
3338 // Allocate and copy the resulting two-byte string.
3339 __ bind(&two_byte_sequential);
3340 __ AllocateTwoByteString(v0, a2, a4, a6, a7, &runtime);
3342 // Locate first character of substring to copy.
3343 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3345 __ Daddu(a5, a5, a4);
3346 // Locate first character of result.
3347 __ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3349 // v0: result string.
3350 // a1: first character of result.
3351 // a2: result length.
3352 // a5: first character of substring to copy.
3353 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3354 StringHelper::GenerateCopyCharacters(
3355 masm, a1, a5, a2, a3, String::TWO_BYTE_ENCODING);
3357 __ bind(&return_v0);
3358 Counters* counters = isolate()->counters();
3359 __ IncrementCounter(counters->sub_string_native(), 1, a3, a4);
3362 // Just jump to runtime to create the sub string.
3364 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3366 __ bind(&single_char);
3367 // v0: original string
3368 // a1: instance type
3370 // a3: from index (untagged)
3371 StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
3372 STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
3373 generator.GenerateFast(masm);
3375 generator.SkipSlow(masm, &runtime);
3379 void StringHelper::GenerateFlatOneByteStringEquals(
3380 MacroAssembler* masm, Register left, Register right, Register scratch1,
3381 Register scratch2, Register scratch3) {
3382 Register length = scratch1;
3385 Label strings_not_equal, check_zero_length;
3386 __ ld(length, FieldMemOperand(left, String::kLengthOffset));
3387 __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
3388 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
3389 __ bind(&strings_not_equal);
3390 // Can not put li in delayslot, it has multi instructions.
3391 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
3394 // Check if the length is zero.
3395 Label compare_chars;
3396 __ bind(&check_zero_length);
3397 STATIC_ASSERT(kSmiTag == 0);
3398 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
3399 DCHECK(is_int16((intptr_t)Smi::FromInt(EQUAL)));
3400 __ Ret(USE_DELAY_SLOT);
3401 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3403 // Compare characters.
3404 __ bind(&compare_chars);
3406 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
3407 v0, &strings_not_equal);
3409 // Characters are equal.
3410 __ Ret(USE_DELAY_SLOT);
3411 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3415 void StringHelper::GenerateCompareFlatOneByteStrings(
3416 MacroAssembler* masm, Register left, Register right, Register scratch1,
3417 Register scratch2, Register scratch3, Register scratch4) {
3418 Label result_not_equal, compare_lengths;
3419 // Find minimum length and length difference.
3420 __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
3421 __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
3422 __ Dsubu(scratch3, scratch1, Operand(scratch2));
3423 Register length_delta = scratch3;
3424 __ slt(scratch4, scratch2, scratch1);
3425 __ Movn(scratch1, scratch2, scratch4);
3426 Register min_length = scratch1;
3427 STATIC_ASSERT(kSmiTag == 0);
3428 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
3431 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3432 scratch4, v0, &result_not_equal);
3434 // Compare lengths - strings up to min-length are equal.
3435 __ bind(&compare_lengths);
3436 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3437 // Use length_delta as result if it's zero.
3438 __ mov(scratch2, length_delta);
3439 __ mov(scratch4, zero_reg);
3440 __ mov(v0, zero_reg);
3442 __ bind(&result_not_equal);
3443 // Conditionally update the result based either on length_delta or
3444 // the last comparion performed in the loop above.
3446 __ Branch(&ret, eq, scratch2, Operand(scratch4));
3447 __ li(v0, Operand(Smi::FromInt(GREATER)));
3448 __ Branch(&ret, gt, scratch2, Operand(scratch4));
3449 __ li(v0, Operand(Smi::FromInt(LESS)));
3455 void StringHelper::GenerateOneByteCharsCompareLoop(
3456 MacroAssembler* masm, Register left, Register right, Register length,
3457 Register scratch1, Register scratch2, Register scratch3,
3458 Label* chars_not_equal) {
3459 // Change index to run from -length to -1 by adding length to string
3460 // start. This means that loop ends when index reaches zero, which
3461 // doesn't need an additional compare.
3462 __ SmiUntag(length);
3463 __ Daddu(scratch1, length,
3464 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3465 __ Daddu(left, left, Operand(scratch1));
3466 __ Daddu(right, right, Operand(scratch1));
3467 __ Dsubu(length, zero_reg, length);
3468 Register index = length; // index = -length;
3474 __ Daddu(scratch3, left, index);
3475 __ lbu(scratch1, MemOperand(scratch3));
3476 __ Daddu(scratch3, right, index);
3477 __ lbu(scratch2, MemOperand(scratch3));
3478 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
3479 __ Daddu(index, index, 1);
3480 __ Branch(&loop, ne, index, Operand(zero_reg));
3484 void StringCompareStub::Generate(MacroAssembler* masm) {
3487 Counters* counters = isolate()->counters();
3489 // Stack frame on entry.
3490 // sp[0]: right string
3491 // sp[4]: left string
3492 __ ld(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
3493 __ ld(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
3496 __ Branch(¬_same, ne, a0, Operand(a1));
3497 STATIC_ASSERT(EQUAL == 0);
3498 STATIC_ASSERT(kSmiTag == 0);
3499 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3500 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
3505 // Check that both objects are sequential one_byte strings.
3506 __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
3508 // Compare flat one_byte strings natively. Remove arguments from stack first.
3509 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
3510 __ Daddu(sp, sp, Operand(2 * kPointerSize));
3511 StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, a4, a5);
3514 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3518 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3519 // ----------- S t a t e -------------
3522 // -- ra : return address
3523 // -----------------------------------
3525 // Load a2 with the allocation site. We stick an undefined dummy value here
3526 // and replace it with the real allocation site later when we instantiate this
3527 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3528 __ li(a2, handle(isolate()->heap()->undefined_value()));
3530 // Make sure that we actually patched the allocation site.
3531 if (FLAG_debug_code) {
3532 __ And(at, a2, Operand(kSmiTagMask));
3533 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
3534 __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
3535 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3536 __ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
3539 // Tail call into the stub that handles binary operations with allocation
3541 BinaryOpWithAllocationSiteStub stub(isolate(), state());
3542 __ TailCallStub(&stub);
3546 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3547 DCHECK(state() == CompareICState::SMI);
3550 __ JumpIfNotSmi(a2, &miss);
3552 if (GetCondition() == eq) {
3553 // For equality we do not care about the sign of the result.
3554 __ Ret(USE_DELAY_SLOT);
3555 __ Dsubu(v0, a0, a1);
3557 // Untag before subtracting to avoid handling overflow.
3560 __ Ret(USE_DELAY_SLOT);
3561 __ Dsubu(v0, a1, a0);
3569 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3570 DCHECK(state() == CompareICState::NUMBER);
3573 Label unordered, maybe_undefined1, maybe_undefined2;
3576 if (left() == CompareICState::SMI) {
3577 __ JumpIfNotSmi(a1, &miss);
3579 if (right() == CompareICState::SMI) {
3580 __ JumpIfNotSmi(a0, &miss);
3583 // Inlining the double comparison and falling back to the general compare
3584 // stub if NaN is involved.
3585 // Load left and right operand.
3586 Label done, left, left_smi, right_smi;
3587 __ JumpIfSmi(a0, &right_smi);
3588 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3590 __ Dsubu(a2, a0, Operand(kHeapObjectTag));
3591 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
3593 __ bind(&right_smi);
3594 __ SmiUntag(a2, a0); // Can't clobber a0 yet.
3595 FPURegister single_scratch = f6;
3596 __ mtc1(a2, single_scratch);
3597 __ cvt_d_w(f2, single_scratch);
3600 __ JumpIfSmi(a1, &left_smi);
3601 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3603 __ Dsubu(a2, a1, Operand(kHeapObjectTag));
3604 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
3607 __ SmiUntag(a2, a1); // Can't clobber a1 yet.
3608 single_scratch = f8;
3609 __ mtc1(a2, single_scratch);
3610 __ cvt_d_w(f0, single_scratch);
3614 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
3615 Label fpu_eq, fpu_lt;
3616 // Test if equal, and also handle the unordered/NaN case.
3617 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
3619 // Test if less (unordered case is already handled).
3620 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
3622 // Otherwise it's greater, so just fall thru, and return.
3623 DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
3624 __ Ret(USE_DELAY_SLOT);
3625 __ li(v0, Operand(GREATER));
3628 __ Ret(USE_DELAY_SLOT);
3629 __ li(v0, Operand(EQUAL));
3632 __ Ret(USE_DELAY_SLOT);
3633 __ li(v0, Operand(LESS));
3635 __ bind(&unordered);
3636 __ bind(&generic_stub);
3637 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3638 CompareICState::GENERIC, CompareICState::GENERIC);
3639 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3641 __ bind(&maybe_undefined1);
3642 if (Token::IsOrderedRelationalCompareOp(op())) {
3643 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3644 __ Branch(&miss, ne, a0, Operand(at));
3645 __ JumpIfSmi(a1, &unordered);
3646 __ GetObjectType(a1, a2, a2);
3647 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
3651 __ bind(&maybe_undefined2);
3652 if (Token::IsOrderedRelationalCompareOp(op())) {
3653 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3654 __ Branch(&unordered, eq, a1, Operand(at));
3662 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3663 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3666 // Registers containing left and right operands respectively.
3668 Register right = a0;
3672 // Check that both operands are heap objects.
3673 __ JumpIfEitherSmi(left, right, &miss);
3675 // Check that both operands are internalized strings.
3676 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3677 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3678 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3679 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3680 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3681 __ Or(tmp1, tmp1, Operand(tmp2));
3682 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3683 __ Branch(&miss, ne, at, Operand(zero_reg));
3685 // Make sure a0 is non-zero. At this point input operands are
3686 // guaranteed to be non-zero.
3687 DCHECK(right.is(a0));
3688 STATIC_ASSERT(EQUAL == 0);
3689 STATIC_ASSERT(kSmiTag == 0);
3691 // Internalized strings are compared by identity.
3692 __ Ret(ne, left, Operand(right));
3693 DCHECK(is_int16(EQUAL));
3694 __ Ret(USE_DELAY_SLOT);
3695 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3702 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3703 DCHECK(state() == CompareICState::UNIQUE_NAME);
3704 DCHECK(GetCondition() == eq);
3707 // Registers containing left and right operands respectively.
3709 Register right = a0;
3713 // Check that both operands are heap objects.
3714 __ JumpIfEitherSmi(left, right, &miss);
3716 // Check that both operands are unique names. This leaves the instance
3717 // types loaded in tmp1 and tmp2.
3718 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3719 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3720 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3721 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3723 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
3724 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
3729 // Unique names are compared by identity.
3731 __ Branch(&done, ne, left, Operand(right));
3732 // Make sure a0 is non-zero. At this point input operands are
3733 // guaranteed to be non-zero.
3734 DCHECK(right.is(a0));
3735 STATIC_ASSERT(EQUAL == 0);
3736 STATIC_ASSERT(kSmiTag == 0);
3737 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3746 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3747 DCHECK(state() == CompareICState::STRING);
3750 bool equality = Token::IsEqualityOp(op());
3752 // Registers containing left and right operands respectively.
3754 Register right = a0;
3761 // Check that both operands are heap objects.
3762 __ JumpIfEitherSmi(left, right, &miss);
3764 // Check that both operands are strings. This leaves the instance
3765 // types loaded in tmp1 and tmp2.
3766 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3767 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3768 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3769 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3770 STATIC_ASSERT(kNotStringTag != 0);
3771 __ Or(tmp3, tmp1, tmp2);
3772 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
3773 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
3775 // Fast check for identical strings.
3776 Label left_ne_right;
3777 STATIC_ASSERT(EQUAL == 0);
3778 STATIC_ASSERT(kSmiTag == 0);
3779 __ Branch(&left_ne_right, ne, left, Operand(right));
3780 __ Ret(USE_DELAY_SLOT);
3781 __ mov(v0, zero_reg); // In the delay slot.
3782 __ bind(&left_ne_right);
3784 // Handle not identical strings.
3786 // Check that both strings are internalized strings. If they are, we're done
3787 // because we already know they are not identical. We know they are both
3790 DCHECK(GetCondition() == eq);
3791 STATIC_ASSERT(kInternalizedTag == 0);
3792 __ Or(tmp3, tmp1, Operand(tmp2));
3793 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
3795 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
3796 // Make sure a0 is non-zero. At this point input operands are
3797 // guaranteed to be non-zero.
3798 DCHECK(right.is(a0));
3799 __ Ret(USE_DELAY_SLOT);
3800 __ mov(v0, a0); // In the delay slot.
3801 __ bind(&is_symbol);
3804 // Check that both strings are sequential one_byte.
3806 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
3809 // Compare flat one_byte strings. Returns when done.
3811 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
3814 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
3818 // Handle more complex cases in runtime.
3820 __ Push(left, right);
3822 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3824 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3832 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3833 DCHECK(state() == CompareICState::OBJECT);
3835 __ And(a2, a1, Operand(a0));
3836 __ JumpIfSmi(a2, &miss);
3838 __ GetObjectType(a0, a2, a2);
3839 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3840 __ GetObjectType(a1, a2, a2);
3841 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3843 DCHECK(GetCondition() == eq);
3844 __ Ret(USE_DELAY_SLOT);
3845 __ dsubu(v0, a0, a1);
3852 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3855 __ JumpIfSmi(a2, &miss);
3856 __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
3857 __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
3858 __ Branch(&miss, ne, a2, Operand(known_map_));
3859 __ Branch(&miss, ne, a3, Operand(known_map_));
3861 __ Ret(USE_DELAY_SLOT);
3862 __ dsubu(v0, a0, a1);
3869 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3871 // Call the runtime system in a fresh internal frame.
3872 ExternalReference miss =
3873 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3874 FrameScope scope(masm, StackFrame::INTERNAL);
3876 __ Push(ra, a1, a0);
3877 __ li(a4, Operand(Smi::FromInt(op())));
3878 __ daddiu(sp, sp, -kPointerSize);
3879 __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
3880 __ sd(a4, MemOperand(sp)); // In the delay slot.
3881 // Compute the entry point of the rewritten stub.
3882 __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
3883 // Restore registers.
3890 void DirectCEntryStub::Generate(MacroAssembler* masm) {
3891 // Make place for arguments to fit C calling convention. Most of the callers
3892 // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
3893 // so they handle stack restoring and we don't have to do that here.
3894 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
3895 // kCArgsSlotsSize stack space after the call.
3896 __ daddiu(sp, sp, -kCArgsSlotsSize);
3897 // Place the return address on the stack, making the call
3898 // GC safe. The RegExp backend also relies on this.
3899 __ sd(ra, MemOperand(sp, kCArgsSlotsSize));
3900 __ Call(t9); // Call the C++ function.
3901 __ ld(t9, MemOperand(sp, kCArgsSlotsSize));
3903 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3904 // In case of an error the return address may point to a memory area
3905 // filled with kZapValue by the GC.
3906 // Dereference the address and check for this.
3907 __ Uld(a4, MemOperand(t9));
3908 __ Assert(ne, kReceivedInvalidReturnAddress, a4,
3909 Operand(reinterpret_cast<uint64_t>(kZapValue)));
3915 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
3918 reinterpret_cast<intptr_t>(GetCode().location());
3919 __ Move(t9, target);
3920 __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
3925 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3929 Register properties,
3931 Register scratch0) {
3932 DCHECK(name->IsUniqueName());
3933 // If names of slots in range from 1 to kProbes - 1 for the hash value are
3934 // not equal to the name and kProbes-th slot is not used (its name is the
3935 // undefined value), it guarantees the hash table doesn't contain the
3936 // property. It's true even if some slots represent deleted properties
3937 // (their names are the hole value).
3938 for (int i = 0; i < kInlinedProbes; i++) {
3939 // scratch0 points to properties hash.
3940 // Compute the masked index: (hash + i + i * i) & mask.
3941 Register index = scratch0;
3942 // Capacity is smi 2^n.
3943 __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset));
3944 __ Dsubu(index, index, Operand(1));
3945 __ And(index, index,
3946 Operand(name->Hash() + NameDictionary::GetProbeOffset(i)));
3948 // Scale the index by multiplying by the entry size.
3949 DCHECK(NameDictionary::kEntrySize == 3);
3950 __ dsll(at, index, 1);
3951 __ Daddu(index, index, at); // index *= 3.
3953 Register entity_name = scratch0;
3954 // Having undefined at this place means the name is not contained.
3955 DCHECK_EQ(kSmiTagSize, 1);
3956 Register tmp = properties;
3958 __ dsll(scratch0, index, kPointerSizeLog2);
3959 __ Daddu(tmp, properties, scratch0);
3960 __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
3962 DCHECK(!tmp.is(entity_name));
3963 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
3964 __ Branch(done, eq, entity_name, Operand(tmp));
3966 // Load the hole ready for use below:
3967 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
3969 // Stop if found the property.
3970 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
3973 __ Branch(&good, eq, entity_name, Operand(tmp));
3975 // Check if the entry name is not a unique name.
3976 __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
3978 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
3979 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
3982 // Restore the properties.
3984 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3987 const int spill_mask =
3988 (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() |
3989 a2.bit() | a1.bit() | a0.bit() | v0.bit());
3991 __ MultiPush(spill_mask);
3992 __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3993 __ li(a1, Operand(Handle<Name>(name)));
3994 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
3997 __ MultiPop(spill_mask);
3999 __ Branch(done, eq, at, Operand(zero_reg));
4000 __ Branch(miss, ne, at, Operand(zero_reg));
4004 // Probe the name dictionary in the |elements| register. Jump to the
4005 // |done| label if a property with the given name is found. Jump to
4006 // the |miss| label otherwise.
4007 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
4008 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4014 Register scratch2) {
4015 DCHECK(!elements.is(scratch1));
4016 DCHECK(!elements.is(scratch2));
4017 DCHECK(!name.is(scratch1));
4018 DCHECK(!name.is(scratch2));
4020 __ AssertName(name);
4022 // Compute the capacity mask.
4023 __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset));
4024 __ SmiUntag(scratch1);
4025 __ Dsubu(scratch1, scratch1, Operand(1));
4027 // Generate an unrolled loop that performs a few probes before
4028 // giving up. Measurements done on Gmail indicate that 2 probes
4029 // cover ~93% of loads from dictionaries.
4030 for (int i = 0; i < kInlinedProbes; i++) {
4031 // Compute the masked index: (hash + i + i * i) & mask.
4032 __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4034 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4035 // the hash in a separate instruction. The value hash + i + i * i is right
4036 // shifted in the following and instruction.
4037 DCHECK(NameDictionary::GetProbeOffset(i) <
4038 1 << (32 - Name::kHashFieldOffset));
4039 __ Daddu(scratch2, scratch2, Operand(
4040 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4042 __ dsrl(scratch2, scratch2, Name::kHashShift);
4043 __ And(scratch2, scratch1, scratch2);
4045 // Scale the index by multiplying by the element size.
4046 DCHECK(NameDictionary::kEntrySize == 3);
4047 // scratch2 = scratch2 * 3.
4049 __ dsll(at, scratch2, 1);
4050 __ Daddu(scratch2, scratch2, at);
4052 // Check if the key is identical to the name.
4053 __ dsll(at, scratch2, kPointerSizeLog2);
4054 __ Daddu(scratch2, elements, at);
4055 __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
4056 __ Branch(done, eq, name, Operand(at));
4059 const int spill_mask =
4060 (ra.bit() | a6.bit() | a5.bit() | a4.bit() |
4061 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
4062 ~(scratch1.bit() | scratch2.bit());
4064 __ MultiPush(spill_mask);
4066 DCHECK(!elements.is(a1));
4068 __ Move(a0, elements);
4070 __ Move(a0, elements);
4073 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4075 __ mov(scratch2, a2);
4077 __ MultiPop(spill_mask);
4079 __ Branch(done, ne, at, Operand(zero_reg));
4080 __ Branch(miss, eq, at, Operand(zero_reg));
4084 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4085 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4086 // we cannot call anything that could cause a GC from this stub.
4088 // result: NameDictionary to probe
4090 // dictionary: NameDictionary to probe.
4091 // index: will hold an index of entry if lookup is successful.
4092 // might alias with result_.
4094 // result_ is zero if lookup failed, non zero otherwise.
4096 Register result = v0;
4097 Register dictionary = a0;
4099 Register index = a2;
4102 Register undefined = a5;
4103 Register entry_key = a6;
4105 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4107 __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
4109 __ Dsubu(mask, mask, Operand(1));
4111 __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4113 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4115 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4116 // Compute the masked index: (hash + i + i * i) & mask.
4117 // Capacity is smi 2^n.
4119 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4120 // the hash in a separate instruction. The value hash + i + i * i is right
4121 // shifted in the following and instruction.
4122 DCHECK(NameDictionary::GetProbeOffset(i) <
4123 1 << (32 - Name::kHashFieldOffset));
4124 __ Daddu(index, hash, Operand(
4125 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4127 __ mov(index, hash);
4129 __ dsrl(index, index, Name::kHashShift);
4130 __ And(index, mask, index);
4132 // Scale the index by multiplying by the entry size.
4133 DCHECK(NameDictionary::kEntrySize == 3);
4136 __ dsll(index, index, 1);
4137 __ Daddu(index, index, at);
4140 DCHECK_EQ(kSmiTagSize, 1);
4141 __ dsll(index, index, kPointerSizeLog2);
4142 __ Daddu(index, index, dictionary);
4143 __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
4145 // Having undefined at this place means the name is not contained.
4146 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined));
4148 // Stop if found the property.
4149 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
4151 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
4152 // Check if the entry name is not a unique name.
4153 __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4155 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4156 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
4160 __ bind(&maybe_in_dictionary);
4161 // If we are doing negative lookup then probing failure should be
4162 // treated as a lookup success. For positive lookup probing failure
4163 // should be treated as lookup failure.
4164 if (mode() == POSITIVE_LOOKUP) {
4165 __ Ret(USE_DELAY_SLOT);
4166 __ mov(result, zero_reg);
4169 __ bind(&in_dictionary);
4170 __ Ret(USE_DELAY_SLOT);
4173 __ bind(¬_in_dictionary);
4174 __ Ret(USE_DELAY_SLOT);
4175 __ mov(result, zero_reg);
4179 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4181 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4183 // Hydrogen code stubs need stub2 at snapshot time.
4184 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4189 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
4190 // the value has just been written into the object, now this stub makes sure
4191 // we keep the GC informed. The word in the object where the value has been
4192 // written is in the address register.
4193 void RecordWriteStub::Generate(MacroAssembler* masm) {
4194 Label skip_to_incremental_noncompacting;
4195 Label skip_to_incremental_compacting;
4197 // The first two branch+nop instructions are generated with labels so as to
4198 // get the offset fixed up correctly by the bind(Label*) call. We patch it
4199 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
4200 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
4201 // incremental heap marking.
4202 // See RecordWriteStub::Patch for details.
4203 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
4205 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
4208 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4209 __ RememberedSetHelper(object(),
4212 save_fp_regs_mode(),
4213 MacroAssembler::kReturnAtEnd);
4217 __ bind(&skip_to_incremental_noncompacting);
4218 GenerateIncremental(masm, INCREMENTAL);
4220 __ bind(&skip_to_incremental_compacting);
4221 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4223 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4224 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4226 PatchBranchIntoNop(masm, 0);
4227 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
4231 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4234 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4235 Label dont_need_remembered_set;
4237 __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
4238 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4240 &dont_need_remembered_set);
4242 __ CheckPageFlag(regs_.object(),
4244 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4246 &dont_need_remembered_set);
4248 // First notify the incremental marker if necessary, then update the
4250 CheckNeedsToInformIncrementalMarker(
4251 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4252 InformIncrementalMarker(masm);
4253 regs_.Restore(masm);
4254 __ RememberedSetHelper(object(),
4257 save_fp_regs_mode(),
4258 MacroAssembler::kReturnAtEnd);
4260 __ bind(&dont_need_remembered_set);
4263 CheckNeedsToInformIncrementalMarker(
4264 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4265 InformIncrementalMarker(masm);
4266 regs_.Restore(masm);
4271 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4272 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
4273 int argument_count = 3;
4274 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4276 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4277 DCHECK(!address.is(regs_.object()));
4278 DCHECK(!address.is(a0));
4279 __ Move(address, regs_.address());
4280 __ Move(a0, regs_.object());
4281 __ Move(a1, address);
4282 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4284 AllowExternalCallThatCantCauseGC scope(masm);
4286 ExternalReference::incremental_marking_record_write_function(isolate()),
4288 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
4292 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4293 MacroAssembler* masm,
4294 OnNoNeedToInformIncrementalMarker on_no_need,
4297 Label need_incremental;
4298 Label need_incremental_pop_scratch;
4300 __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4301 __ ld(regs_.scratch1(),
4302 MemOperand(regs_.scratch0(),
4303 MemoryChunk::kWriteBarrierCounterOffset));
4304 __ Dsubu(regs_.scratch1(), regs_.scratch1(), Operand(1));
4305 __ sd(regs_.scratch1(),
4306 MemOperand(regs_.scratch0(),
4307 MemoryChunk::kWriteBarrierCounterOffset));
4308 __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
4310 // Let's look at the color of the object: If it is not black we don't have
4311 // to inform the incremental marker.
4312 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4314 regs_.Restore(masm);
4315 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4316 __ RememberedSetHelper(object(),
4319 save_fp_regs_mode(),
4320 MacroAssembler::kReturnAtEnd);
4327 // Get the value from the slot.
4328 __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
4330 if (mode == INCREMENTAL_COMPACTION) {
4331 Label ensure_not_white;
4333 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4334 regs_.scratch1(), // Scratch.
4335 MemoryChunk::kEvacuationCandidateMask,
4339 __ CheckPageFlag(regs_.object(),
4340 regs_.scratch1(), // Scratch.
4341 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4345 __ bind(&ensure_not_white);
4348 // We need extra registers for this, so we push the object and the address
4349 // register temporarily.
4350 __ Push(regs_.object(), regs_.address());
4351 __ EnsureNotWhite(regs_.scratch0(), // The value.
4352 regs_.scratch1(), // Scratch.
4353 regs_.object(), // Scratch.
4354 regs_.address(), // Scratch.
4355 &need_incremental_pop_scratch);
4356 __ Pop(regs_.object(), regs_.address());
4358 regs_.Restore(masm);
4359 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4360 __ RememberedSetHelper(object(),
4363 save_fp_regs_mode(),
4364 MacroAssembler::kReturnAtEnd);
4369 __ bind(&need_incremental_pop_scratch);
4370 __ Pop(regs_.object(), regs_.address());
4372 __ bind(&need_incremental);
4374 // Fall through when we need to inform the incremental marker.
4378 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4379 // ----------- S t a t e -------------
4380 // -- a0 : element value to store
4381 // -- a3 : element index as smi
4382 // -- sp[0] : array literal index in function as smi
4383 // -- sp[4] : array literal
4384 // clobbers a1, a2, a4
4385 // -----------------------------------
4388 Label double_elements;
4390 Label slow_elements;
4391 Label fast_elements;
4393 // Get array literal index, array literal and its map.
4394 __ ld(a4, MemOperand(sp, 0 * kPointerSize));
4395 __ ld(a1, MemOperand(sp, 1 * kPointerSize));
4396 __ ld(a2, FieldMemOperand(a1, JSObject::kMapOffset));
4398 __ CheckFastElements(a2, a5, &double_elements);
4399 // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
4400 __ JumpIfSmi(a0, &smi_element);
4401 __ CheckFastSmiElements(a2, a5, &fast_elements);
4403 // Store into the array literal requires a elements transition. Call into
4405 __ bind(&slow_elements);
4407 __ Push(a1, a3, a0);
4408 __ ld(a5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4409 __ ld(a5, FieldMemOperand(a5, JSFunction::kLiteralsOffset));
4411 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4413 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4414 __ bind(&fast_elements);
4415 __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
4416 __ SmiScale(a6, a3, kPointerSizeLog2);
4417 __ Daddu(a6, a5, a6);
4418 __ Daddu(a6, a6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4419 __ sd(a0, MemOperand(a6, 0));
4420 // Update the write barrier for the array store.
4421 __ RecordWrite(a5, a6, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
4422 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4423 __ Ret(USE_DELAY_SLOT);
4426 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4427 // and value is Smi.
4428 __ bind(&smi_element);
4429 __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
4430 __ SmiScale(a6, a3, kPointerSizeLog2);
4431 __ Daddu(a6, a5, a6);
4432 __ sd(a0, FieldMemOperand(a6, FixedArray::kHeaderSize));
4433 __ Ret(USE_DELAY_SLOT);
4436 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
4437 __ bind(&double_elements);
4438 __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
4439 __ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, a2, &slow_elements);
4440 __ Ret(USE_DELAY_SLOT);
4445 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4446 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4447 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4448 int parameter_count_offset =
4449 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4450 __ ld(a1, MemOperand(fp, parameter_count_offset));
4451 if (function_mode() == JS_FUNCTION_STUB_MODE) {
4452 __ Daddu(a1, a1, Operand(1));
4454 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4455 __ dsll(a1, a1, kPointerSizeLog2);
4456 __ Ret(USE_DELAY_SLOT);
4457 __ Daddu(sp, sp, a1);
4461 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4462 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4463 VectorLoadStub stub(isolate(), state());
4464 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4468 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4469 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4470 VectorKeyedLoadStub stub(isolate());
4471 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4475 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4476 if (masm->isolate()->function_entry_hook() != NULL) {
4477 ProfileEntryHookStub stub(masm->isolate());
4485 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4486 // The entry hook is a "push ra" instruction, followed by a call.
4487 // Note: on MIPS "push" is 2 instruction
4488 const int32_t kReturnAddressDistanceFromFunctionStart =
4489 Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
4491 // This should contain all kJSCallerSaved registers.
4492 const RegList kSavedRegs =
4493 kJSCallerSaved | // Caller saved registers.
4494 s5.bit(); // Saved stack pointer.
4496 // We also save ra, so the count here is one higher than the mask indicates.
4497 const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
4499 // Save all caller-save registers as this may be called from anywhere.
4500 __ MultiPush(kSavedRegs | ra.bit());
4502 // Compute the function's address for the first argument.
4503 __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
4505 // The caller's return address is above the saved temporaries.
4506 // Grab that for the second argument to the hook.
4507 __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
4509 // Align the stack if necessary.
4510 int frame_alignment = masm->ActivationFrameAlignment();
4511 if (frame_alignment > kPointerSize) {
4513 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4514 __ And(sp, sp, Operand(-frame_alignment));
4517 __ Dsubu(sp, sp, kCArgsSlotsSize);
4518 #if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
4519 int64_t entry_hook =
4520 reinterpret_cast<int64_t>(isolate()->function_entry_hook());
4521 __ li(t9, Operand(entry_hook));
4523 // Under the simulator we need to indirect the entry hook through a
4524 // trampoline function at a known address.
4525 // It additionally takes an isolate as a third parameter.
4526 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4528 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4529 __ li(t9, Operand(ExternalReference(&dispatcher,
4530 ExternalReference::BUILTIN_CALL,
4533 // Call C function through t9 to conform ABI for PIC.
4536 // Restore the stack pointer if needed.
4537 if (frame_alignment > kPointerSize) {
4540 __ Daddu(sp, sp, kCArgsSlotsSize);
4543 // Also pop ra to get Ret(0).
4544 __ MultiPop(kSavedRegs | ra.bit());
4550 static void CreateArrayDispatch(MacroAssembler* masm,
4551 AllocationSiteOverrideMode mode) {
4552 if (mode == DISABLE_ALLOCATION_SITES) {
4553 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4554 __ TailCallStub(&stub);
4555 } else if (mode == DONT_OVERRIDE) {
4556 int last_index = GetSequenceIndexFromFastElementsKind(
4557 TERMINAL_FAST_ELEMENTS_KIND);
4558 for (int i = 0; i <= last_index; ++i) {
4559 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4560 T stub(masm->isolate(), kind);
4561 __ TailCallStub(&stub, eq, a3, Operand(kind));
4564 // If we reached this point there is a problem.
4565 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4572 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4573 AllocationSiteOverrideMode mode) {
4574 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4575 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4576 // a0 - number of arguments
4577 // a1 - constructor?
4578 // sp[0] - last argument
4579 Label normal_sequence;
4580 if (mode == DONT_OVERRIDE) {
4581 DCHECK(FAST_SMI_ELEMENTS == 0);
4582 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
4583 DCHECK(FAST_ELEMENTS == 2);
4584 DCHECK(FAST_HOLEY_ELEMENTS == 3);
4585 DCHECK(FAST_DOUBLE_ELEMENTS == 4);
4586 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4588 // is the low bit set? If so, we are holey and that is good.
4589 __ And(at, a3, Operand(1));
4590 __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
4592 // look at the first argument
4593 __ ld(a5, MemOperand(sp, 0));
4594 __ Branch(&normal_sequence, eq, a5, Operand(zero_reg));
4596 if (mode == DISABLE_ALLOCATION_SITES) {
4597 ElementsKind initial = GetInitialFastElementsKind();
4598 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4600 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4602 DISABLE_ALLOCATION_SITES);
4603 __ TailCallStub(&stub_holey);
4605 __ bind(&normal_sequence);
4606 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4608 DISABLE_ALLOCATION_SITES);
4609 __ TailCallStub(&stub);
4610 } else if (mode == DONT_OVERRIDE) {
4611 // We are going to create a holey array, but our kind is non-holey.
4612 // Fix kind and retry (only if we have an allocation site in the slot).
4613 __ Daddu(a3, a3, Operand(1));
4615 if (FLAG_debug_code) {
4616 __ ld(a5, FieldMemOperand(a2, 0));
4617 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
4618 __ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
4621 // Save the resulting elements kind in type info. We can't just store a3
4622 // in the AllocationSite::transition_info field because elements kind is
4623 // restricted to a portion of the field...upper bits need to be left alone.
4624 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4625 __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4626 __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
4627 __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4630 __ bind(&normal_sequence);
4631 int last_index = GetSequenceIndexFromFastElementsKind(
4632 TERMINAL_FAST_ELEMENTS_KIND);
4633 for (int i = 0; i <= last_index; ++i) {
4634 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4635 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4636 __ TailCallStub(&stub, eq, a3, Operand(kind));
4639 // If we reached this point there is a problem.
4640 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4648 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4649 int to_index = GetSequenceIndexFromFastElementsKind(
4650 TERMINAL_FAST_ELEMENTS_KIND);
4651 for (int i = 0; i <= to_index; ++i) {
4652 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4653 T stub(isolate, kind);
4655 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4656 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4663 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4664 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4666 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4668 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4673 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4675 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4676 for (int i = 0; i < 2; i++) {
4677 // For internal arrays we only need a few things.
4678 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4680 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4682 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4688 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4689 MacroAssembler* masm,
4690 AllocationSiteOverrideMode mode) {
4691 if (argument_count() == ANY) {
4692 Label not_zero_case, not_one_case;
4694 __ Branch(¬_zero_case, ne, at, Operand(zero_reg));
4695 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4697 __ bind(¬_zero_case);
4698 __ Branch(¬_one_case, gt, a0, Operand(1));
4699 CreateArrayDispatchOneArgument(masm, mode);
4701 __ bind(¬_one_case);
4702 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4703 } else if (argument_count() == NONE) {
4704 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4705 } else if (argument_count() == ONE) {
4706 CreateArrayDispatchOneArgument(masm, mode);
4707 } else if (argument_count() == MORE_THAN_ONE) {
4708 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4715 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4716 // ----------- S t a t e -------------
4717 // -- a0 : argc (only if argument_count() == ANY)
4718 // -- a1 : constructor
4719 // -- a2 : AllocationSite or undefined
4720 // -- sp[0] : return address
4721 // -- sp[4] : last argument
4722 // -----------------------------------
4724 if (FLAG_debug_code) {
4725 // The array construct code is only set for the global and natives
4726 // builtin Array functions which always have maps.
4728 // Initial map for the builtin Array function should be a map.
4729 __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
4730 // Will both indicate a NULL and a Smi.
4732 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
4733 at, Operand(zero_reg));
4734 __ GetObjectType(a4, a4, a5);
4735 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
4736 a5, Operand(MAP_TYPE));
4738 // We should either have undefined in a2 or a valid AllocationSite
4739 __ AssertUndefinedOrAllocationSite(a2, a4);
4743 // Get the elements kind and case on that.
4744 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4745 __ Branch(&no_info, eq, a2, Operand(at));
4747 __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4749 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4750 __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
4751 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4754 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4758 void InternalArrayConstructorStub::GenerateCase(
4759 MacroAssembler* masm, ElementsKind kind) {
4761 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4762 __ TailCallStub(&stub0, lo, a0, Operand(1));
4764 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4765 __ TailCallStub(&stubN, hi, a0, Operand(1));
4767 if (IsFastPackedElementsKind(kind)) {
4768 // We might need to create a holey array
4769 // look at the first argument.
4770 __ ld(at, MemOperand(sp, 0));
4772 InternalArraySingleArgumentConstructorStub
4773 stub1_holey(isolate(), GetHoleyElementsKind(kind));
4774 __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
4777 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4778 __ TailCallStub(&stub1);
4782 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4783 // ----------- S t a t e -------------
4785 // -- a1 : constructor
4786 // -- sp[0] : return address
4787 // -- sp[4] : last argument
4788 // -----------------------------------
4790 if (FLAG_debug_code) {
4791 // The array construct code is only set for the global and natives
4792 // builtin Array functions which always have maps.
4794 // Initial map for the builtin Array function should be a map.
4795 __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
4796 // Will both indicate a NULL and a Smi.
4798 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
4799 at, Operand(zero_reg));
4800 __ GetObjectType(a3, a3, a4);
4801 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
4802 a4, Operand(MAP_TYPE));
4805 // Figure out the right elements kind.
4806 __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
4808 // Load the map's "bit field 2" into a3. We only need the first byte,
4809 // but the following bit field extraction takes care of that anyway.
4810 __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
4811 // Retrieve elements_kind from bit field 2.
4812 __ DecodeField<Map::ElementsKindBits>(a3);
4814 if (FLAG_debug_code) {
4816 __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
4818 eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
4819 a3, Operand(FAST_HOLEY_ELEMENTS));
4823 Label fast_elements_case;
4824 __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
4825 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4827 __ bind(&fast_elements_case);
4828 GenerateCase(masm, FAST_ELEMENTS);
4832 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
4833 // ----------- S t a t e -------------
4835 // -- a4 : call_data
4837 // -- a1 : api_function_address
4840 // -- sp[0] : last argument
4842 // -- sp[(argc - 1)* 4] : first argument
4843 // -- sp[argc * 4] : receiver
4844 // -----------------------------------
4846 Register callee = a0;
4847 Register call_data = a4;
4848 Register holder = a2;
4849 Register api_function_address = a1;
4850 Register context = cp;
4852 int argc = this->argc();
4853 bool is_store = this->is_store();
4854 bool call_data_undefined = this->call_data_undefined();
4856 typedef FunctionCallbackArguments FCA;
4858 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
4859 STATIC_ASSERT(FCA::kCalleeIndex == 5);
4860 STATIC_ASSERT(FCA::kDataIndex == 4);
4861 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
4862 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
4863 STATIC_ASSERT(FCA::kIsolateIndex == 1);
4864 STATIC_ASSERT(FCA::kHolderIndex == 0);
4865 STATIC_ASSERT(FCA::kArgsLength == 7);
4867 // Save context, callee and call data.
4868 __ Push(context, callee, call_data);
4869 // Load context from callee.
4870 __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
4872 Register scratch = call_data;
4873 if (!call_data_undefined) {
4874 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4876 // Push return value and default return value.
4877 __ Push(scratch, scratch);
4879 Operand(ExternalReference::isolate_address(isolate())));
4880 // Push isolate and holder.
4881 __ Push(scratch, holder);
4883 // Prepare arguments.
4884 __ mov(scratch, sp);
4886 // Allocate the v8::Arguments structure in the arguments' space since
4887 // it's not controlled by GC.
4888 const int kApiStackSpace = 4;
4890 FrameScope frame_scope(masm, StackFrame::MANUAL);
4891 __ EnterExitFrame(false, kApiStackSpace);
4893 DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
4894 // a0 = FunctionCallbackInfo&
4895 // Arguments is after the return address.
4896 __ Daddu(a0, sp, Operand(1 * kPointerSize));
4897 // FunctionCallbackInfo::implicit_args_
4898 __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
4899 // FunctionCallbackInfo::values_
4900 __ Daddu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
4901 __ sd(at, MemOperand(a0, 1 * kPointerSize));
4902 // FunctionCallbackInfo::length_ = argc
4903 __ li(at, Operand(argc));
4904 __ sd(at, MemOperand(a0, 2 * kPointerSize));
4905 // FunctionCallbackInfo::is_construct_call = 0
4906 __ sd(zero_reg, MemOperand(a0, 3 * kPointerSize));
4908 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
4909 ExternalReference thunk_ref =
4910 ExternalReference::invoke_function_callback(isolate());
4912 AllowExternalCallThatCantCauseGC scope(masm);
4913 MemOperand context_restore_operand(
4914 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
4915 // Stores return the first js argument.
4916 int return_value_offset = 0;
4918 return_value_offset = 2 + FCA::kArgsLength;
4920 return_value_offset = 2 + FCA::kReturnValueOffset;
4922 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
4924 __ CallApiFunctionAndReturn(api_function_address,
4927 return_value_operand,
4928 &context_restore_operand);
4932 void CallApiGetterStub::Generate(MacroAssembler* masm) {
4933 // ----------- S t a t e -------------
4935 // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
4937 // -- a2 : api_function_address
4938 // -----------------------------------
4940 Register api_function_address = ApiGetterDescriptor::function_address();
4941 DCHECK(api_function_address.is(a2));
4943 __ mov(a0, sp); // a0 = Handle<Name>
4944 __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
4946 const int kApiStackSpace = 1;
4947 FrameScope frame_scope(masm, StackFrame::MANUAL);
4948 __ EnterExitFrame(false, kApiStackSpace);
4950 // Create PropertyAccessorInfo instance on the stack above the exit frame with
4951 // a1 (internal::Object** args_) as the data.
4952 __ sd(a1, MemOperand(sp, 1 * kPointerSize));
4953 __ Daddu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
4955 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
4957 ExternalReference thunk_ref =
4958 ExternalReference::invoke_accessor_getter_callback(isolate());
4959 __ CallApiFunctionAndReturn(api_function_address,
4962 MemOperand(fp, 6 * kPointerSize),
4969 } } // namespace v8::internal
4971 #endif // V8_TARGET_ARCH_MIPS64