// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_MIPS64

#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {
static void InitializeArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
  }
}
static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kInternalArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
  }
}
void ArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}


void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}
#define __ ACCESS_MASM(masm)
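// Note: with ACCESS_MASM in place, the "__ foo(...)" statements below expand
// to "masm->foo(...)"; this is the usual V8 shorthand for emitting code
// through the MacroAssembler.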
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs, Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow, bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs, Register rhs);
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetEnvironmentParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK((param_count == 0) ||
           a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
    // Push arguments, adjust sp.
    __ Dsubu(sp, sp, Operand(param_count * kPointerSize));
    for (int i = 0; i < param_count; ++i) {
      // Store argument to stack.
      __ sd(descriptor.GetEnvironmentParameterRegister(i),
            MemOperand(sp, (param_count - 1 - i) * kPointerSize));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}
135 void DoubleToIStub::Generate(MacroAssembler* masm) {
136 Label out_of_range, only_low, negate, done;
137 Register input_reg = source();
138 Register result_reg = destination();
140 int double_offset = offset();
141 // Account for saved regs if input is sp.
142 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
  Register scratch =
      GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch2 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch3 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
  DoubleRegister double_scratch = kLithiumScratchDouble;
152 __ Push(scratch, scratch2, scratch3);
153 if (!skip_fastpath()) {
154 // Load double input.
155 __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
157 // Clear cumulative exception flags and save the FCSR.
158 __ cfc1(scratch2, FCSR);
159 __ ctc1(zero_reg, FCSR);
161 // Try a conversion to a signed integer.
162 __ Trunc_w_d(double_scratch, double_scratch);
163 // Move the converted value into the result register.
164 __ mfc1(scratch3, double_scratch);
166 // Retrieve and restore the FCSR.
167 __ cfc1(scratch, FCSR);
168 __ ctc1(scratch2, FCSR);
    // Check for overflow and NaNs.
    __ And(scratch,
           scratch,
           kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
               | kFCSRInvalidOpFlagMask);
    // If we had no exceptions then set result_reg and we are done.
    Label error;
    __ Branch(&error, ne, scratch, Operand(zero_reg));
    __ Move(result_reg, scratch3);
    __ Branch(&done);
    __ bind(&error);
  }
183 // Load the double value and perform a manual truncation.
184 Register input_high = scratch2;
185 Register input_low = scratch3;
187 __ lw(input_low, MemOperand(input_reg, double_offset));
188 __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
  Label normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  __ Ext(result_reg,
         input_high,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);
197 // Check for Infinity and NaNs, which should return 0.
198 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
199 __ Movz(result_reg, zero_reg, scratch);
200 __ Branch(&done, eq, scratch, Operand(zero_reg));
  // Express exponent as delta to (number of mantissa bits + 31).
  __ Subu(result_reg,
          result_reg,
          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
207 // If the delta is strictly positive, all bits would be shifted away,
208 // which means that we can return 0.
209 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);
213 __ bind(&normal_exponent);
214 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
216 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
219 Register sign = result_reg;
221 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
223 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
224 // to check for this specific case.
225 Label high_shift_needed, high_shift_done;
226 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
227 __ mov(input_high, zero_reg);
228 __ Branch(&high_shift_done);
229 __ bind(&high_shift_needed);
  // Set the implicit 1 before the mantissa part in input_high.
  __ Or(input_high,
        input_high,
        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
235 // Shift the mantissa bits to the correct position.
236 // We don't need to clear non-mantissa bits as they will be shifted away.
237 // If they weren't, it would mean that the answer is in the 32bit range.
238 __ sllv(input_high, input_high, scratch);
240 __ bind(&high_shift_done);
242 // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  __ li(at, 32);
  __ subu(scratch, at, scratch);
246 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
249 __ Subu(scratch, zero_reg, scratch);
250 __ sllv(input_low, input_low, scratch);
  __ Branch(&shift_done);

  __ bind(&pos_shift);
  __ srlv(input_low, input_low, scratch);
256 __ bind(&shift_done);
257 __ Or(input_high, input_high, Operand(input_low));
258 // Restore sign if necessary.
259 __ mov(scratch, sign);
262 __ Subu(result_reg, zero_reg, input_high);
  __ Movz(result_reg, input_high, scratch);

  __ bind(&done);

  __ Pop(scratch, scratch2, scratch3);
  __ Ret();
}
272 // Handle the case where the lhs and rhs are the same object.
273 // Equality is almost reflexive (everything but NaN), so this is a test
274 // for "identity and not NaN".
275 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
279 Label heap_number, return_equal;
280 Register exp_mask_reg = t1;
282 __ Branch(¬_identical, ne, a0, Operand(a1));
284 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
286 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
287 // so we do the second best thing - test it ourselves.
288 // They are both equal and they are not both Smis so both of them are not
289 // Smis. If it's not a heap number, then return equal.
290 if (cc == less || cc == greater) {
291 __ GetObjectType(a0, t0, t0);
292 __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
294 __ GetObjectType(a0, t0, t0);
295 __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
296 // Comparing JS objects with <=, >= is complicated.
298 __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
299 // Normally here we fall through to return_equal, but undefined is
300 // special: (undefined == undefined) == true, but
301 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
302 if (cc == less_equal || cc == greater_equal) {
303 __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE));
304 __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
305 __ Branch(&return_equal, ne, a0, Operand(a6));
306 DCHECK(is_int16(GREATER) && is_int16(LESS));
307 __ Ret(USE_DELAY_SLOT);
309 // undefined <= undefined should fail.
310 __ li(v0, Operand(GREATER));
312 // undefined >= undefined should fail.
313 __ li(v0, Operand(LESS));
319 __ bind(&return_equal);
320 DCHECK(is_int16(GREATER) && is_int16(LESS));
321 __ Ret(USE_DELAY_SLOT);
323 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
324 } else if (cc == greater) {
325 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
327 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
329 // For less and greater we don't have to check for NaN since the result of
330 // x < x is false regardless. For the others here is some code to check
332 if (cc != lt && cc != gt) {
333 __ bind(&heap_number);
334 // It is a heap number, so return non-equal if it's NaN and equal if it's
337 // The representation of NaN values has all exponent bits (52..62) set,
338 // and not all mantissa bits (0..51) clear.
339 // Read top bits of double representation (second word of value).
340 __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
341 // Test that exponent bits are all set.
342 __ And(a7, a6, Operand(exp_mask_reg));
343 // If all bits not set (ne cond), then not a NaN, objects are equal.
344 __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg));
346 // Shift out flag and all exponent bits, retaining only mantissa.
347 __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord);
348 // Or with all low-bits of mantissa.
349 __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
350 __ Or(v0, a7, Operand(a6));
351 // For equal we already have the right value in v0: Return zero (equal)
352 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
353 // not (it's a NaN). For <= and >= we need to load v0 with the failing
354 // value if it's a NaN.
356 // All-zero means Infinity means equal.
357 __ Ret(eq, v0, Operand(zero_reg));
358 DCHECK(is_int16(GREATER) && is_int16(LESS));
359 __ Ret(USE_DELAY_SLOT);
361 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
363 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
367 // No fall through here.
369 __ bind(¬_identical);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t0, t0);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
    __ mov(v0, lhs);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
  }
  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  __ SmiUntag(at, rhs);
  __ mtc1(at, f14);
  __ cvt_d_w(f14, f14);
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi. Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t0, t0);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
    __ li(v0, Operand(1));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  __ SmiUntag(at, lhs);
  __ mtc1(at, f12);
  __ cvt_d_w(f12, f12);
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  // Fall through to both_loaded_as_doubles.
}
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
  Label first_non_object;
  // Get the type of the first operand into a2 and compare it with
  // FIRST_SPEC_OBJECT_TYPE.
  __ GetObjectType(lhs, a2, a2);
  __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Return non-zero.
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(1));

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

  __ GetObjectType(rhs, a3, a3);
  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

  // Now that we have the types we might as well check for
  // internalized-internalized.
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ Or(a2, a2, Operand(a3));
  __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that case.
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  __ jmp(both_loaded_as_doubles);
}
// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs,
                                                     Register rhs,
                                                     Label* possible_strings,
                                                     Label* not_both_strings) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  // a2 is object type of rhs.
  Label object_test;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));

  // Both are internalized strings. We already checked they weren't the same
  // pointer so they are not equal.
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(1));   // Non-zero indicates not equal.

  __ bind(&object_test);
  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ GetObjectType(rhs, a2, a3);
  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ ld(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ And(a0, a2, Operand(a3));
  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
  __ Ret(USE_DELAY_SLOT);
  __ xori(v0, a0, 1 << Map::kIsUndetectable);
}
static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/string here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}
// On entry a1 and a0 are the values to be compared.
// On exit v0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = a1;
  Register rhs = a0;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
564 Label slow; // Call builtin.
565 Label not_smis, both_loaded_as_doubles;
567 Label not_two_smis, smi_done;
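  // Fast path: if both operands are smis, compare them directly without
  // going through the generic machinery below.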
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &not_two_smis);
  __ SmiUntag(a1);
  __ SmiUntag(a0);

  __ Ret(USE_DELAY_SLOT);
  __ dsubu(v0, a1, a0);
  __ bind(&not_two_smis);
577 // NOTICE! This code is only reached after a smi-fast-case check, so
578 // it is certain that at least one operand isn't a smi.
580 // Handle the case where the objects are identical. Either returns the answer
581 // or goes to slow. Only falls through if the objects were not identical.
582 EmitIdenticalObjectComparison(masm, &slow, cc);
584 // If either is a Smi (we know that not both are), then they can only
585 // be strictly equal if the other is a HeapNumber.
586 STATIC_ASSERT(kSmiTag == 0);
587 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
588 __ And(a6, lhs, Operand(rhs));
589 __ JumpIfNotSmi(a6, ¬_smis, a4);
590 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
591 // 1) Return the answer.
593 // 3) Fall through to both_loaded_as_doubles.
594 // 4) Jump to rhs_not_nan.
595 // In cases 3 and 4 we have found out we were dealing with a number-number
596 // comparison and the numbers have been loaded into f12 and f14 as doubles,
597 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
598 EmitSmiNonsmiComparison(masm, lhs, rhs,
599 &both_loaded_as_doubles, &slow, strict());
601 __ bind(&both_loaded_as_doubles);
602 // f12, f14 are the double representations of the left hand side
603 // and the right hand side if we have FPU. Otherwise a2, a3 represent
604 // left hand side and a0, a1 represent right hand side.
607 __ li(a4, Operand(LESS));
608 __ li(a5, Operand(GREATER));
609 __ li(a6, Operand(EQUAL));
611 // Check if either rhs or lhs is NaN.
612 __ BranchF(NULL, &nan, eq, f12, f14);
614 // Check if LESS condition is satisfied. If true, move conditionally
616 if (kArchVariant != kMips64r6) {
617 __ c(OLT, D, f12, f14);
619 // Use previous check to store conditionally to v0 oposite condition
620 // (GREATER). If rhs is equal to lhs, this will be corrected in next
623 // Check if EQUAL condition is satisfied. If true, move conditionally
625 __ c(EQ, D, f12, f14);
629 __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
630 __ mov(v0, a4); // Return LESS as result.
632 __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
633 __ mov(v0, a6); // Return EQUAL as result.
635 __ mov(v0, a5); // Return GREATER as result.
641 // NaN comparisons always fail.
642 // Load whatever we need in v0 to make the comparison fail.
643 DCHECK(is_int16(GREATER) && is_int16(LESS));
644 __ Ret(USE_DELAY_SLOT);
645 if (cc == lt || cc == le) {
646 __ li(v0, Operand(GREATER));
648 __ li(v0, Operand(LESS));
653 // At this point we know we are dealing with two different objects,
654 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
656 // This returns non-equal for some object types, or falls through if it
658 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
661 Label check_for_internalized_strings;
662 Label flat_string_check;
663 // Check for heap-number-heap-number comparison. Can jump to slow case,
664 // or load both doubles and jump to the code that handles
665 // that case. If the inputs are not doubles then jumps to
666 // check_for_internalized_strings.
667 // In this case a2 will contain the type of lhs_.
668 EmitCheckForTwoHeapNumbers(masm,
671 &both_loaded_as_doubles,
672 &check_for_internalized_strings,
675 __ bind(&check_for_internalized_strings);
676 if (cc == eq && !strict()) {
677 // Returns an answer for two internalized strings or two
678 // detectable objects.
679 // Otherwise jumps to string case or not both strings case.
680 // Assumes that a2 is the type of lhs_ on entry.
681 EmitCheckForInternalizedStringsOrObjects(
682 masm, lhs, rhs, &flat_string_check, &slow);
685 // Check for both being sequential one-byte strings,
686 // and inline if that is the case.
687 __ bind(&flat_string_check);
689 __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
691 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
694 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, a4);
696 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, a4,
699 // Never falls through to here.
702 // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
705 // Figure out which native to call and setup the arguments.
706 Builtins::JavaScript native;
708 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
710 native = Builtins::COMPARE;
711 int ncr; // NaN compare result.
712 if (cc == lt || cc == le) {
715 DCHECK(cc == gt || cc == ge); // Remaining cases.
718 __ li(a0, Operand(Smi::FromInt(ncr)));
722 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
723 // tagged as a small integer.
724 __ InvokeBuiltin(native, JUMP_FUNCTION);
void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PushSafepointRegisters();
  __ Jump(t9);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PopSafepointRegisters();
  __ Jump(t9);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | ra.bit());
  if (save_doubles()) {
    __ MultiPushFPU(kCallerSavedFPU);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = a1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  if (save_doubles()) {
    __ MultiPopFPU(kCallerSavedFPU);
  }

  __ MultiPop(kJSCallerSaved | ra.bit());
  __ Ret();
}
774 void MathPowStub::Generate(MacroAssembler* masm) {
775 const Register base = a1;
776 const Register exponent = MathPowTaggedDescriptor::exponent();
777 DCHECK(exponent.is(a2));
778 const Register heapnumbermap = a5;
779 const Register heapnumber = v0;
780 const DoubleRegister double_base = f2;
781 const DoubleRegister double_exponent = f4;
782 const DoubleRegister double_result = f0;
783 const DoubleRegister double_scratch = f6;
784 const FPURegister single_scratch = f8;
785 const Register scratch = t1;
786 const Register scratch2 = a7;
788 Label call_runtime, done, int_exponent;
789 if (exponent_type() == ON_STACK) {
790 Label base_is_smi, unpack_exponent;
791 // The exponent and base are supplied as arguments on the stack.
792 // This can only happen if the stub is called from non-optimized code.
793 // Load input parameters from stack to double registers.
794 __ ld(base, MemOperand(sp, 1 * kPointerSize));
795 __ ld(exponent, MemOperand(sp, 0 * kPointerSize));
797 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
799 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
800 __ ld(scratch, FieldMemOperand(base, JSObject::kMapOffset));
801 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
803 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
804 __ jmp(&unpack_exponent);
806 __ bind(&base_is_smi);
807 __ mtc1(scratch, single_scratch);
808 __ cvt_d_w(double_base, single_scratch);
809 __ bind(&unpack_exponent);
811 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
813 __ ld(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
814 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
815 __ ldc1(double_exponent,
816 FieldMemOperand(exponent, HeapNumber::kValueOffset));
817 } else if (exponent_type() == TAGGED) {
818 // Base is already in double_base.
819 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
821 __ ldc1(double_exponent,
822 FieldMemOperand(exponent, HeapNumber::kValueOffset));
825 if (exponent_type() != INTEGER) {
826 Label int_exponent_convert;
827 // Detect integer exponents stored as double.
828 __ EmitFPUTruncate(kRoundToMinusInf,
834 kCheckForInexactConversion);
835 // scratch2 == 0 means there was no conversion error.
836 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
838 if (exponent_type() == ON_STACK) {
839 // Detect square root case. Crankshaft detects constant +/-0.5 at
840 // compile time and uses DoMathPowHalf instead. We then skip this check
841 // for non-constant cases of +/-0.5 as these hardly occur.
845 __ Move(double_scratch, 0.5);
846 __ BranchF(USE_DELAY_SLOT,
852 // double_scratch can be overwritten in the delay slot.
853 // Calculates square root of base. Check for the special case of
854 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
855 __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
856 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
857 __ neg_d(double_result, double_scratch);
859 // Add +0 to convert -0 to +0.
860 __ add_d(double_scratch, double_base, kDoubleRegZero);
861 __ sqrt_d(double_result, double_scratch);
864 __ bind(¬_plus_half);
865 __ Move(double_scratch, -0.5);
866 __ BranchF(USE_DELAY_SLOT,
872 // double_scratch can be overwritten in the delay slot.
873 // Calculates square root of base. Check for the special case of
874 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
875 __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
876 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
877 __ Move(double_result, kDoubleRegZero);
879 // Add +0 to convert -0 to +0.
880 __ add_d(double_scratch, double_base, kDoubleRegZero);
881 __ Move(double_result, 1.);
882 __ sqrt_d(double_scratch, double_scratch);
883 __ div_d(double_result, double_result, double_scratch);
889 AllowExternalCallThatCantCauseGC scope(masm);
890 __ PrepareCallCFunction(0, 2, scratch2);
891 __ MovToFloatParameters(double_base, double_exponent);
893 ExternalReference::power_double_double_function(isolate()),
897 __ MovFromFloatResult(double_result);
900 __ bind(&int_exponent_convert);
903 // Calculate power with integer exponent.
904 __ bind(&int_exponent);
906 // Get two copies of exponent in the registers scratch and exponent.
907 if (exponent_type() == INTEGER) {
908 __ mov(scratch, exponent);
910 // Exponent has previously been stored into scratch as untagged integer.
911 __ mov(exponent, scratch);
914 __ mov_d(double_scratch, double_base); // Back up base.
915 __ Move(double_result, 1.0);
917 // Get absolute value of exponent.
918 Label positive_exponent;
919 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
920 __ Dsubu(scratch, zero_reg, scratch);
921 __ bind(&positive_exponent);
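  // The loop below computes base^|exponent| by binary exponentiation
  // (square-and-multiply): whenever the low bit of the exponent is set the
  // result is multiplied by the current base, then the base is squared and
  // the exponent shifted right until it reaches zero.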
923 Label while_true, no_carry, loop_end;
924 __ bind(&while_true);
926 __ And(scratch2, scratch, 1);
928 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
929 __ mul_d(double_result, double_result, double_scratch);
932 __ dsra(scratch, scratch, 1);
934 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
935 __ mul_d(double_scratch, double_scratch, double_scratch);
937 __ Branch(&while_true);
941 __ Branch(&done, ge, exponent, Operand(zero_reg));
942 __ Move(double_scratch, 1.0);
943 __ div_d(double_result, double_scratch, double_result);
944 // Test whether result is zero. Bail out to check for subnormal result.
945 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
946 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
948 // double_exponent may not contain the exponent value if the input was a
949 // smi. We set it with exponent value before bailing out.
950 __ mtc1(exponent, single_scratch);
951 __ cvt_d_w(double_exponent, single_scratch);
953 // Returning or bailing out.
954 Counters* counters = isolate()->counters();
955 if (exponent_type() == ON_STACK) {
956 // The arguments are still on the stack.
957 __ bind(&call_runtime);
958 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
960 // The stub is called from non-optimized code, which expects the result
961 // as heap number in exponent.
963 __ AllocateHeapNumber(
964 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
965 __ sdc1(double_result,
966 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
967 DCHECK(heapnumber.is(v0));
968 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
973 AllowExternalCallThatCantCauseGC scope(masm);
974 __ PrepareCallCFunction(0, 2, scratch);
975 __ MovToFloatParameters(double_base, double_exponent);
977 ExternalReference::power_double_double_function(isolate()),
981 __ MovFromFloatResult(double_result);
984 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
bool CEntryStub::NeedsImmovableCode() {
  return true;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}


void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Generate if not already in cache.
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
  isolate->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}
1036 void CEntryStub::Generate(MacroAssembler* masm) {
1037 // Called from JavaScript; parameters are on stack as if calling JS function
1038 // a0: number of arguments including receiver
1039 // a1: pointer to builtin function
1040 // fp: frame pointer (restored after C call)
1041 // sp: stack pointer (restored as callee's sp after C call)
1042 // cp: current context (C callee-saved)
1044 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1046 // Compute the argv pointer in a callee-saved register.
1047 __ dsll(s1, a0, kPointerSizeLog2);
1048 __ Daddu(s1, sp, s1);
1049 __ Dsubu(s1, s1, kPointerSize);
1051 // Enter the exit frame that transitions from JavaScript to C++.
1052 FrameScope scope(masm, StackFrame::MANUAL);
1053 __ EnterExitFrame(save_doubles());
1055 // s0: number of arguments including receiver (C callee-saved)
1056 // s1: pointer to first argument (C callee-saved)
1057 // s2: pointer to builtin function (C callee-saved)
1059 // Prepare arguments for C routine.
1063 // a1 = argv (set in the delay slot after find_ra below).
1065 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1066 // also need to reserve the 4 argument slots on the stack.
1068 __ AssertStackIsAligned();
1070 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1072 // To let the GC traverse the return address of the exit frames, we need to
1073 // know where the return address is. The CEntryStub is unmovable, so
1074 // we can store the address on the stack to be able to find it again and
1075 // we never have to restore it, because it will not change.
1076 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1077 // This branch-and-link sequence is needed to find the current PC on mips,
1078 // saved to the ra register.
1079 // Use masm-> here instead of the double-underscore macro since extra
1080 // coverage code can interfere with the proper calculation of ra.
1082 masm->bal(&find_ra); // bal exposes branch delay slot.
1084 masm->bind(&find_ra);
1086 // Adjust the value in ra to point to the correct return location, 2nd
1087 // instruction past the real call into C code (the jalr(t9)), and push it.
1088 // This is the return address of the exit frame.
1089 const int kNumInstructionsToJump = 5;
1090 masm->Daddu(ra, ra, kNumInstructionsToJump * kInt32Size);
1091 masm->sd(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
1092 // Stack space reservation moved to the branch delay slot below.
1093 // Stack is still aligned.
1095 // Call the C routine.
1096 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
1098 // Set up sp in the delay slot.
1099 masm->daddiu(sp, sp, -kCArgsSlotsSize);
1100 // Make sure the stored 'ra' points to this position.
1101 DCHECK_EQ(kNumInstructionsToJump,
1102 masm->InstructionsGeneratedSince(&find_ra));
1105 // Runtime functions should not return 'the hole'. Allowing it to escape may
1106 // lead to crashes in the IC code later.
1107 if (FLAG_debug_code) {
1109 __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
1110 __ Branch(&okay, ne, v0, Operand(a4));
1111 __ stop("The hole escaped");
1115 // Check result for exception sentinel.
1116 Label exception_returned;
1117 __ LoadRoot(a4, Heap::kExceptionRootIndex);
1118 __ Branch(&exception_returned, eq, a4, Operand(v0));
1120 // Check that there is no pending exception, otherwise we
1121 // should have returned the exception sentinel.
1122 if (FLAG_debug_code) {
1124 ExternalReference pending_exception_address(
1125 Isolate::kPendingExceptionAddress, isolate());
1126 __ li(a2, Operand(pending_exception_address));
1127 __ ld(a2, MemOperand(a2));
1128 __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
1129 // Cannot use check here as it attempts to generate call into runtime.
1130 __ Branch(&okay, eq, a4, Operand(a2));
1131 __ stop("Unexpected pending exception");
1135 // Exit C frame and return.
1137 // sp: stack pointer
1138 // fp: frame pointer
1139 // s0: still holds argc (callee-saved).
1140 __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
1142 // Handling of exception.
1143 __ bind(&exception_returned);
1145 ExternalReference pending_handler_context_address(
1146 Isolate::kPendingHandlerContextAddress, isolate());
1147 ExternalReference pending_handler_code_address(
1148 Isolate::kPendingHandlerCodeAddress, isolate());
1149 ExternalReference pending_handler_offset_address(
1150 Isolate::kPendingHandlerOffsetAddress, isolate());
1151 ExternalReference pending_handler_fp_address(
1152 Isolate::kPendingHandlerFPAddress, isolate());
1153 ExternalReference pending_handler_sp_address(
1154 Isolate::kPendingHandlerSPAddress, isolate());
1156 // Ask the runtime for help to determine the handler. This will set v0 to
1157 // contain the current pending exception, don't clobber it.
1158 ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
1160 FrameScope scope(masm, StackFrame::MANUAL);
1161 __ PrepareCallCFunction(3, 0, a0);
1162 __ mov(a0, zero_reg);
1163 __ mov(a1, zero_reg);
1164 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1165 __ CallCFunction(find_handler, 3);
1168 // Retrieve the handler context, SP and FP.
1169 __ li(cp, Operand(pending_handler_context_address));
1170 __ ld(cp, MemOperand(cp));
1171 __ li(sp, Operand(pending_handler_sp_address));
1172 __ ld(sp, MemOperand(sp));
1173 __ li(fp, Operand(pending_handler_fp_address));
1174 __ ld(fp, MemOperand(fp));
1176 // If the handler is a JS frame, restore the context to the frame. Note that
1177 // the context will be set to (cp == 0) for non-JS frames.
1179 __ Branch(&zero, eq, cp, Operand(zero_reg));
1180 __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1183 // Compute the handler entry address and jump to it.
1184 __ li(a1, Operand(pending_handler_code_address));
1185 __ ld(a1, MemOperand(a1));
1186 __ li(a2, Operand(pending_handler_offset_address));
1187 __ ld(a2, MemOperand(a2));
1188 __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
1189 __ Daddu(t9, a1, a2);
1194 void JSEntryStub::Generate(MacroAssembler* masm) {
1195 Label invoke, handler_entry, exit;
1196 Isolate* isolate = masm->isolate();
1198 // TODO(plind): unify the ABI description here.
1200 // a0: entry address
1204 // a4 (a4): on mips64
1207 // 0 arg slots on mips64 (4 args slots on mips)
1208 // args -- in a4/a4 on mips64, on stack on mips
1210 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1212 // Save callee saved registers on the stack.
1213 __ MultiPush(kCalleeSaved | ra.bit());
1215 // Save callee-saved FPU registers.
1216 __ MultiPushFPU(kCalleeSavedFPU);
1217 // Set up the reserved register for 0.0.
1218 __ Move(kDoubleRegZero, 0.0);
1220 // Load argv in s0 register.
1221 if (kMipsAbi == kN64) {
1222 __ mov(s0, a4); // 5th parameter in mips64 a4 (a4) register.
1223 } else { // Abi O32.
1224 // 5th parameter on stack for O32 abi.
1225 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1226 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
1227 __ ld(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
1230 __ InitializeRootRegister();
1232 // We build an EntryFrame.
1233 __ li(a7, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1234 int marker = type();
1235 __ li(a6, Operand(Smi::FromInt(marker)));
1236 __ li(a5, Operand(Smi::FromInt(marker)));
1237 ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
1238 __ li(a4, Operand(c_entry_fp));
1239 __ ld(a4, MemOperand(a4));
1240 __ Push(a7, a6, a5, a4);
1241 // Set up frame pointer for the frame to be pushed.
1242 __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
1245 // a0: entry_address
1247 // a2: receiver_pointer
1253 // function slot | entry frame
1255 // bad fp (0xff...f) |
1256 // callee saved registers + ra
1257 // [ O32: 4 args slots]
1260 // If this is the outermost JS call, set js_entry_sp value.
1261 Label non_outermost_js;
1262 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1263 __ li(a5, Operand(ExternalReference(js_entry_sp)));
1264 __ ld(a6, MemOperand(a5));
1265 __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
1266 __ sd(fp, MemOperand(a5));
1267 __ li(a4, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1270 __ nop(); // Branch delay slot nop.
1271 __ bind(&non_outermost_js);
1272 __ li(a4, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1276 // Jump to a faked try block that does the invoke, with a faked catch
1277 // block that sets the pending exception.
1279 __ bind(&handler_entry);
1280 handler_offset_ = handler_entry.pos();
1281 // Caught exception: Store result (exception) in the pending exception
1282 // field in the JSEnv and return a failure sentinel. Coming in here the
1283 // fp will be invalid because the PushStackHandler below sets it to 0 to
1284 // signal the existence of the JSEntry frame.
1285 __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1287 __ sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0.
1288 __ LoadRoot(v0, Heap::kExceptionRootIndex);
1289 __ b(&exit); // b exposes branch delay slot.
1290 __ nop(); // Branch delay slot nop.
1292 // Invoke: Link this frame into the handler chain.
1294 __ PushStackHandler();
1295 // If an exception not caught by another handler occurs, this handler
1296 // returns control to the code after the bal(&invoke) above, which
1297 // restores all kCalleeSaved registers (including cp and fp) to their
1298 // saved values before returning a failure to C.
1300 // Clear any pending exceptions.
1301 __ LoadRoot(a5, Heap::kTheHoleValueRootIndex);
1302 __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1304 __ sd(a5, MemOperand(a4));
1306 // Invoke the function by calling through JS entry trampoline builtin.
1307 // Notice that we cannot store a reference to the trampoline code directly in
1308 // this stub, because runtime stubs are not traversed when doing GC.
1311 // a0: entry_address
1313 // a2: receiver_pointer
1320 // callee saved registers + ra
1321 // [ O32: 4 args slots]
1324 if (type() == StackFrame::ENTRY_CONSTRUCT) {
1325 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1327 __ li(a4, Operand(construct_entry));
1329 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1330 __ li(a4, Operand(entry));
1332 __ ld(t9, MemOperand(a4)); // Deref address.
1333 // Call JSEntryTrampoline.
1334 __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1337 // Unlink this frame from the handler chain.
1338 __ PopStackHandler();
1340 __ bind(&exit); // v0 holds result
1341 // Check if the current stack frame is marked as the outermost JS frame.
1342 Label non_outermost_js_2;
1344 __ Branch(&non_outermost_js_2,
1347 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1348 __ li(a5, Operand(ExternalReference(js_entry_sp)));
1349 __ sd(zero_reg, MemOperand(a5));
1350 __ bind(&non_outermost_js_2);
1352 // Restore the top frame descriptors from the stack.
1354 __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1356 __ sd(a5, MemOperand(a4));
1358 // Reset the stack to the callee saved registers.
1359 __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
1361 // Restore callee-saved fpu registers.
1362 __ MultiPopFPU(kCalleeSavedFPU);
1364 // Restore callee saved registers from the stack.
1365 __ MultiPop(kCalleeSaved | ra.bit());
void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
  // Return address is in ra.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register scratch = a5;
  Register result = v0;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));
  DCHECK(!FLAG_vector_ics ||
         !scratch.is(VectorLoadICDescriptor::VectorRegister()));

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX,
                                          RECEIVER_IS_STRING);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);

  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
1401 // Uses registers a0 to a4.
1402 // Expected input (depending on whether args are in registers or on the stack):
1403 // * object: a0 or at sp + 1 * kPointerSize.
1404 // * function: a1 or at sp.
1406 // An inlined call site may have been generated before calling this stub.
1407 // In this case the offset to the inline site to patch is passed on the stack,
1408 // in the safepoint slot for register a4.
1409 void InstanceofStub::Generate(MacroAssembler* masm) {
1410 // Call site inlining and patching implies arguments in registers.
1411 DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1413 // Fixed register usage throughout the stub:
1414 const Register object = a0; // Object (lhs).
1415 Register map = a3; // Map of the object.
1416 const Register function = a1; // Function (rhs).
1417 const Register prototype = a4; // Prototype of the function.
1418 const Register inline_site = t1;
1419 const Register scratch = a2;
1421 const int32_t kDeltaToLoadBoolResult = 7 * Assembler::kInstrSize;
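  // kDeltaToLoadBoolResult is the distance, in instructions, from the inlined
  // map-check site to the instruction that materializes the boolean result;
  // it is used below when patching an inlined call site.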
1423 Label slow, loop, is_instance, is_not_instance, not_js_object;
1425 if (!HasArgsInRegisters()) {
1426 __ ld(object, MemOperand(sp, 1 * kPointerSize));
1427 __ ld(function, MemOperand(sp, 0));
1430 // Check that the left hand is a JS object and load map.
1431 __ JumpIfSmi(object, ¬_js_object);
1432 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
1434 // If there is a call site cache don't look in the global cache, but do the
1435 // real lookup and update the call site cache.
1436 if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
1438 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
1439 __ Branch(&miss, ne, function, Operand(at));
1440 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
1441 __ Branch(&miss, ne, map, Operand(at));
1442 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1443 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1448 // Get the prototype of the function.
1449 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1451 // Check that the function prototype is a JS object.
1452 __ JumpIfSmi(prototype, &slow);
1453 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1455 // Update the global instanceof or call site inlined cache with the current
1456 // map and function. The cached answer will be set when it is known below.
1457 if (!HasCallSiteInlineCheck()) {
1458 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1459 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1461 DCHECK(HasArgsInRegisters());
1462 // Patch the (relocated) inlined map check.
1464 // The offset was stored in a4 safepoint slot.
1465 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1466 __ LoadFromSafepointRegisterSlot(scratch, a4);
1467 __ Dsubu(inline_site, ra, scratch);
1468 // Get the map location in scratch and patch it.
1469 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
1470 __ sd(map, FieldMemOperand(scratch, Cell::kValueOffset));
1473 // Register mapping: a3 is object map and a4 is function prototype.
1474 // Get prototype of object into a2.
1475 __ ld(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1477 // We don't need map any more. Use it as a scratch register.
1478 Register scratch2 = map;
1481 // Loop through the prototype chain looking for the function prototype.
1482 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1484 __ Branch(&is_instance, eq, scratch, Operand(prototype));
1485 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
1486 __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1487 __ ld(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1490 __ bind(&is_instance);
1491 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
1492 if (!HasCallSiteInlineCheck()) {
1493 __ mov(v0, zero_reg);
1494 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1495 if (ReturnTrueFalseObject()) {
1496 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1499 // Patch the call site to return true.
1500 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1501 __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1502 // Get the boolean result location in scratch and patch it.
1503 __ PatchRelocatedValue(inline_site, scratch, v0);
1505 if (!ReturnTrueFalseObject()) {
1506 __ mov(v0, zero_reg);
1509 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1511 __ bind(&is_not_instance);
1512 if (!HasCallSiteInlineCheck()) {
1513 __ li(v0, Operand(Smi::FromInt(1)));
1514 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1515 if (ReturnTrueFalseObject()) {
1516 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1519 // Patch the call site to return false.
1520 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1521 __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1522 // Get the boolean result location in scratch and patch it.
1523 __ PatchRelocatedValue(inline_site, scratch, v0);
1525 if (!ReturnTrueFalseObject()) {
1526 __ li(v0, Operand(Smi::FromInt(1)));
1530 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1532 Label object_not_null, object_not_null_or_smi;
1533 __ bind(¬_js_object);
1534 // Before null, smi and string value checks, check that the rhs is a function
1535 // as for a non-function rhs an exception needs to be thrown.
1536 __ JumpIfSmi(function, &slow);
1537 __ GetObjectType(function, scratch2, scratch);
1538 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
1540 // Null is not instance of anything.
1541 __ Branch(&object_not_null, ne, object,
1542 Operand(isolate()->factory()->null_value()));
1543 if (ReturnTrueFalseObject()) {
1544 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1546 __ li(v0, Operand(Smi::FromInt(1)));
1548 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1550 __ bind(&object_not_null);
1551 // Smi values are not instances of anything.
1552 __ JumpIfNotSmi(object, &object_not_null_or_smi);
1553 if (ReturnTrueFalseObject()) {
1554 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1556 __ li(v0, Operand(Smi::FromInt(1)));
1558 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1560 __ bind(&object_not_null_or_smi);
1561 // String values are not instances of anything.
1562 __ IsObjectJSStringType(object, scratch, &slow);
1563 if (ReturnTrueFalseObject()) {
1564 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1566 __ li(v0, Operand(Smi::FromInt(1)));
1568 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1570 // Slow-case. Tail call builtin.
1572 if (!ReturnTrueFalseObject()) {
1573 if (HasArgsInRegisters()) {
1576 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
1579 FrameScope scope(masm, StackFrame::INTERNAL);
1581 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1584 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1585 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
1586 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1587 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver = LoadDescriptor::ReceiverRegister();
  // Ensure that the vector and slot registers won't be clobbered before
  // calling the miss handler.
  DCHECK(!FLAG_vector_ics ||
         !AreAliased(a4, a5, VectorLoadICDescriptor::VectorRegister(),
                     VectorLoadICDescriptor::SlotRegister()));

  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a4,
                                                          a5, &miss);
  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}
1609 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1610 CHECK(!has_new_target());
1611 // The displacement is the offset of the last parameter (if any)
1612 // relative to the frame pointer.
1613 const int kDisplacement =
1614 StandardFrameConstants::kCallerSPOffset - kPointerSize;
1615 DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
1616 DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
1618 // Check that the key is a smiGenerateReadElement.
1620 __ JumpIfNotSmi(a1, &slow);
1622 // Check if the calling frame is an arguments adaptor frame.
1624 __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1625 __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1629 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1631 // Check index (a1) against formal parameters count limit passed in
1632 // through register a0. Use unsigned comparison to get negative
1634 __ Branch(&slow, hs, a1, Operand(a0));
1636 // Read the argument from the stack and return it.
1637 __ dsubu(a3, a0, a1);
1638 __ SmiScale(a7, a3, kPointerSizeLog2);
1639 __ Daddu(a3, fp, Operand(a7));
1640 __ Ret(USE_DELAY_SLOT);
1641 __ ld(v0, MemOperand(a3, kDisplacement));
1643 // Arguments adaptor case: Check index (a1) against actual arguments
1644 // limit found in the arguments adaptor frame. Use unsigned
1645 // comparison to get negative check for free.
1647 __ ld(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1648 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
1650 // Read the argument from the adaptor frame and return it.
1651 __ dsubu(a3, a0, a1);
1652 __ SmiScale(a7, a3, kPointerSizeLog2);
1653 __ Daddu(a3, a2, Operand(a7));
1654 __ Ret(USE_DELAY_SLOT);
1655 __ ld(v0, MemOperand(a3, kDisplacement));
1657 // Slow-case: Handle non-smi or out-of-bounds access to arguments
1658 // by calling the runtime system.
1661 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
  // sp[0] : number of parameters
  // sp[4] : receiver displacement

  CHECK(!has_new_target());

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
  __ Branch(&runtime, ne, a2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Patch the arguments.length and the parameters pointer in the current frame.
  __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ sd(a2, MemOperand(sp, 0 * kPointerSize));
  __ SmiScale(a7, a2, kPointerSizeLog2);
  __ Daddu(a3, a3, Operand(a7));
  __ daddiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
  __ sd(a3, MemOperand(sp, 1 * kPointerSize));

  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
1694 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1696 // sp[0] : number of parameters (tagged)
1697 // sp[4] : address of receiver argument
1699 // Registers used over whole function:
1700 // a6 : allocated object (tagged)
1701 // t1 : mapped parameter count (tagged)
1703 CHECK(!has_new_target());
1705 __ ld(a1, MemOperand(sp, 0 * kPointerSize));
1706 // a1 = parameter count (tagged)
1708 // Check if the calling frame is an arguments adaptor frame.
1710 Label adaptor_frame, try_allocate;
1711 __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1712 __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
1713 __ Branch(&adaptor_frame,
1716 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1718 // No adaptor, parameter count = argument count.
1720 __ Branch(&try_allocate);
1722 // We have an adaptor frame. Patch the parameters pointer.
1723 __ bind(&adaptor_frame);
1724 __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1725 __ SmiScale(t2, a2, kPointerSizeLog2);
1726 __ Daddu(a3, a3, Operand(t2));
1727 __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1728 __ sd(a3, MemOperand(sp, 1 * kPointerSize));
1730 // a1 = parameter count (tagged)
1731 // a2 = argument count (tagged)
1732 // Compute the mapped parameter count = min(a1, a2) in a1.
1734 __ Branch(&skip_min, lt, a1, Operand(a2));
1738 __ bind(&try_allocate);
1740 // Compute the sizes of backing store, parameter map, and arguments object.
1741 // 1. Parameter map, has 2 extra words containing context and backing store.
1742 const int kParameterMapHeaderSize =
1743 FixedArray::kHeaderSize + 2 * kPointerSize;
1744 // If there are no mapped parameters, we do not need the parameter_map.
1745 Label param_map_size;
1746 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
1747 __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg));
1748 __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
1749 __ SmiScale(t1, a1, kPointerSizeLog2);
1750 __ daddiu(t1, t1, kParameterMapHeaderSize);
1751 __ bind(¶m_map_size);
1753 // 2. Backing store.
1754 __ SmiScale(t2, a2, kPointerSizeLog2);
1755 __ Daddu(t1, t1, Operand(t2));
1756 __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
1758 // 3. Arguments object.
1759 __ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
1761 // Do the allocation of all three objects in one go.
1762 __ Allocate(t1, v0, a3, a4, &runtime, TAG_OBJECT);
1764 // v0 = address of new object(s) (tagged)
1765 // a2 = argument count (smi-tagged)
1766 // Get the arguments boilerplate from the current native context into a4.
1767 const int kNormalOffset =
1768 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
1769 const int kAliasedOffset =
1770 Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
1772 __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1773 __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
1774 Label skip2_ne, skip2_eq;
1775 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
1776 __ ld(a4, MemOperand(a4, kNormalOffset));
__ bind(&skip2_ne);
1779 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
1780 __ ld(a4, MemOperand(a4, kAliasedOffset));
__ bind(&skip2_eq);
1783 // v0 = address of new object (tagged)
1784 // a1 = mapped parameter count (tagged)
1785 // a2 = argument count (smi-tagged)
1786 // a4 = address of arguments map (tagged)
1787 __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
1788 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
1789 __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
1790 __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
1792 // Set up the callee in-object property.
1793 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1794 __ ld(a3, MemOperand(sp, 2 * kPointerSize));
1795 __ AssertNotSmi(a3);
1796 const int kCalleeOffset = JSObject::kHeaderSize +
1797 Heap::kArgumentsCalleeIndex * kPointerSize;
1798 __ sd(a3, FieldMemOperand(v0, kCalleeOffset));
1800 // Use the length (smi tagged) and set that as an in-object property too.
1801 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1802 const int kLengthOffset = JSObject::kHeaderSize +
1803 Heap::kArgumentsLengthIndex * kPointerSize;
1804 __ sd(a2, FieldMemOperand(v0, kLengthOffset));
1806 // Set up the elements pointer in the allocated arguments object.
1807 // If we allocated a parameter map, a4 will point there, otherwise
1808 // it will point to the backing store.
1809 __ Daddu(a4, v0, Operand(Heap::kSloppyArgumentsObjectSize));
1810 __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
1812 // v0 = address of new object (tagged)
1813 // a1 = mapped parameter count (tagged)
1814 // a2 = argument count (tagged)
1815 // a4 = address of parameter map or backing store (tagged)
1816 // Initialize parameter map. If there are no mapped arguments, we're done.
1817 Label skip_parameter_map;
Label skip3;
1819 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
1820 // Move backing store address to a3, because it is
1821 // expected there when filling in the unmapped arguments.
__ mov(a3, a4);
__ bind(&skip3);
1825 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
1827 __ LoadRoot(a6, Heap::kSloppyArgumentsElementsMapRootIndex);
1828 __ sd(a6, FieldMemOperand(a4, FixedArray::kMapOffset));
1829 __ Daddu(a6, a1, Operand(Smi::FromInt(2)));
1830 __ sd(a6, FieldMemOperand(a4, FixedArray::kLengthOffset));
1831 __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
1832 __ SmiScale(t2, a1, kPointerSizeLog2);
1833 __ Daddu(a6, a4, Operand(t2));
1834 __ Daddu(a6, a6, Operand(kParameterMapHeaderSize));
1835 __ sd(a6, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
1837 // Copy the parameter slots and the holes in the arguments.
1838 // We need to fill in mapped_parameter_count slots. They index the context,
1839 // where parameters are stored in reverse order, at
1840 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
1841 // The mapped parameter thus need to get indices
1842 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
1843 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
1844 // We loop from right to left.
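// Worked example of the mapping described above: with parameter_count = 3,
// mapped_parameter_count = 2 and (assumed for illustration)
// Context::MIN_CONTEXT_SLOTS = 4, the two mapped parameters get context
// indices 4 + 3 - 1 = 6 and 4 + 3 - 2 = 5, written right to left, while the
// matching backing-store slots are filled with the-hole so that loads fall
// through to the context.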
1845 Label parameters_loop, parameters_test;
__ mov(a6, a1);
1847 __ ld(t1, MemOperand(sp, 0 * kPointerSize));
1848 __ Daddu(t1, t1, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
1849 __ Dsubu(t1, t1, Operand(a1));
1850 __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
1851 __ SmiScale(t2, a6, kPointerSizeLog2);
1852 __ Daddu(a3, a4, Operand(t2));
1853 __ Daddu(a3, a3, Operand(kParameterMapHeaderSize));
1855 // a6 = loop variable (tagged)
1856 // a1 = mapping index (tagged)
1857 // a3 = address of backing store (tagged)
1858 // a4 = address of parameter map (tagged)
1859 // a5 = temporary scratch (a.o., for address calculation)
1860 // a7 = the hole value
1861 __ jmp(&parameters_test);
1863 __ bind(&parameters_loop);
1865 __ Dsubu(a6, a6, Operand(Smi::FromInt(1)));
1866 __ SmiScale(a5, a6, kPointerSizeLog2);
1867 __ Daddu(a5, a5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
1868 __ Daddu(t2, a4, a5);
1869 __ sd(t1, MemOperand(t2));
1870 __ Dsubu(a5, a5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
1871 __ Daddu(t2, a3, a5);
1872 __ sd(a7, MemOperand(t2));
1873 __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
1874 __ bind(&parameters_test);
1875 __ Branch(&parameters_loop, ne, a6, Operand(Smi::FromInt(0)));
1877 __ bind(&skip_parameter_map);
1878 // a2 = argument count (tagged)
1879 // a3 = address of backing store (tagged)
1881 // Copy arguments header and remaining slots (if there are any).
1882 __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
1883 __ sd(a5, FieldMemOperand(a3, FixedArray::kMapOffset));
1884 __ sd(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
1886 Label arguments_loop, arguments_test;
__ mov(t1, a1);
1888 __ ld(a4, MemOperand(sp, 1 * kPointerSize));
1889 __ SmiScale(t2, t1, kPointerSizeLog2);
1890 __ Dsubu(a4, a4, Operand(t2));
1891 __ jmp(&arguments_test);
1893 __ bind(&arguments_loop);
1894 __ Dsubu(a4, a4, Operand(kPointerSize));
1895 __ ld(a6, MemOperand(a4, 0));
1896 __ SmiScale(t2, t1, kPointerSizeLog2);
1897 __ Daddu(a5, a3, Operand(t2));
1898 __ sd(a6, FieldMemOperand(a5, FixedArray::kHeaderSize));
1899 __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
1901 __ bind(&arguments_test);
1902 __ Branch(&arguments_loop, lt, t1, Operand(a2));
1904 // Return and remove the on-stack parameters.
__ DropAndRet(3);

1907 // Do the runtime call to allocate the arguments object.
1908 // a2 = argument count (tagged)
__ bind(&runtime);
1910 __ sd(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
1911 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
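// JS-level behaviour implemented by the parameter map built above (the aliasing
// only exists for sloppy-mode functions):
//
//   function f(x) { arguments[0] = 7; return x; }                // sloppy: returns 7
//   function g(x) { 'use strict'; arguments[0] = 7; return x; }  // strict: returns the caller's x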
1915 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1916 // Return address is in ra.
1919 Register receiver = LoadDescriptor::ReceiverRegister();
1920 Register key = LoadDescriptor::NameRegister();
1922 // Check that the key is an array index, that is Uint32.
Label slow;
1923 __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
1924 __ Branch(&slow, ne, t0, Operand(zero_reg));
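// The single And above folds two tests into one: the key must be a smi
// (kSmiTagMask bit clear) and non-negative (kSmiSignMask bit clear), which is
// exactly "key is a valid array index". Sketch:
//   is_array_index = (key & (kSmiTagMask | kSmiSignMask)) == 0;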
1926 // Everything is fine, call runtime.
1927 __ Push(receiver, key); // Receiver, key.
1929 // Perform tail call to the entry.
1930 __ TailCallExternalReference(
1931     ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
                           masm->isolate()),
         2, 1);
__ bind(&slow);
1936 PropertyAccessCompiler::TailCallBuiltin(
1937 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1941 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1942 // sp[0] : number of parameters
1943 // sp[4] : receiver displacement
1945 // Check if the calling frame is an arguments adaptor frame.
1946 Label adaptor_frame, try_allocate, runtime;
1947 __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1948 __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1949 __ Branch(&adaptor_frame, eq, a3,
1952           Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1954 // Get the length from the frame.
1955 __ ld(a1, MemOperand(sp, 0));
1956 __ Branch(&try_allocate);
1958 // Patch the arguments.length and the parameters pointer.
1959 __ bind(&adaptor_frame);
1960 __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1961 if (has_new_target()) {
1962 Label skip_decrement;
1963 __ Branch(&skip_decrement, eq, a1, Operand(Smi::FromInt(0)));
1964 // Subtract 1 from smi-tagged arguments count.
__ SmiUntag(a1);
1966 __ Daddu(a1, a1, Operand(-1));
__ SmiTag(a1);
1968 __ bind(&skip_decrement);
1970 __ sd(a1, MemOperand(sp, 0));
1971 __ SmiScale(at, a1, kPointerSizeLog2);
1973 __ Daddu(a3, a2, Operand(at));
1975 __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
1976 __ sd(a3, MemOperand(sp, 1 * kPointerSize));
1978 // Try the new space allocation. Start out with computing the size
1979 // of the arguments object and the elements array in words.
1980 Label add_arguments_object;
1981 __ bind(&try_allocate);
1982 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
__ SmiUntag(a1);
1985 __ Daddu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
1986 __ bind(&add_arguments_object);
1987 __ Daddu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
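// Sketch of the size accumulated in a1 (in words, matching SIZE_IN_WORDS below):
//   size = (argc == 0 ? 0 : argc + FixedArray::kHeaderSize / kPointerSize)
//          + Heap::kStrictArgumentsObjectSize / kPointerSize;
// i.e. an elements FixedArray is only allocated when there is at least one argument.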
1989 // Do the allocation of both objects in one go.
1990 __ Allocate(a1, v0, a2, a3, &runtime,
1991 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
1993 // Get the arguments boilerplate from the current native context.
1994 __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1995 __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
1996 __ ld(a4, MemOperand(a4, Context::SlotOffset(
1997 Context::STRICT_ARGUMENTS_MAP_INDEX)));
1999 __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
2000 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
2001 __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
2002 __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
2004 // Get the length (smi tagged) and set that as an in-object property too.
2005 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2006 __ ld(a1, MemOperand(sp, 0 * kPointerSize));
2008 __ sd(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
2009 Heap::kArgumentsLengthIndex * kPointerSize));
Label done;
2012 __ Branch(&done, eq, a1, Operand(zero_reg));
2014 // Get the parameters pointer from the stack.
2015 __ ld(a2, MemOperand(sp, 1 * kPointerSize));
2017 // Set up the elements pointer in the allocated arguments object and
2018 // initialize the header in the elements fixed array.
2019 __ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize));
2020 __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
2021 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
2022 __ sd(a3, FieldMemOperand(a4, FixedArray::kMapOffset));
2023 __ sd(a1, FieldMemOperand(a4, FixedArray::kLengthOffset));
2024 // Untag the length for the loop.
__ SmiUntag(a1);

2028 // Copy the fixed array slots.
Label loop;
2030 // Set up a4 to point to the first array slot.
2031 __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
2033 // Pre-decrement a2 with kPointerSize on each iteration.
2034 // Pre-decrement in order to skip receiver.
2035 __ Daddu(a2, a2, Operand(-kPointerSize));
2036 __ ld(a3, MemOperand(a2));
2037 // Post-increment a4 with kPointerSize on each iteration.
2038 __ sd(a3, MemOperand(a4));
2039 __ Daddu(a4, a4, Operand(kPointerSize));
2040 __ Dsubu(a1, a1, Operand(1));
2041 __ Branch(&loop, ne, a1, Operand(zero_reg));
2043 // Return and remove the on-stack parameters.
__ bind(&done);
__ DropAndRet(3);

2047 // Do the runtime call to allocate the arguments object.
__ bind(&runtime);
2049 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
2053 void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
2054 // sp[0] : index of rest parameter
2055 // sp[4] : number of parameters
2056 // sp[8] : receiver displacement
2057 // Check if the calling frame is an arguments adaptor frame.
Label runtime;
2060 __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2061 __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
2062 __ Branch(&runtime, ne, a3,
2063 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2065 // Patch the arguments.length and the parameters pointer.
2066 __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2067 __ sd(a1, MemOperand(sp, 1 * kPointerSize));
2068 __ SmiScale(at, a1, kPointerSizeLog2);
2070 __ Daddu(a3, a2, Operand(at));
2072 __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2073 __ sd(a3, MemOperand(sp, 2 * kPointerSize));
2075 // Do the runtime call to allocate the arguments object.
__ bind(&runtime);
2077 __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
2081 void RegExpExecStub::Generate(MacroAssembler* masm) {
2082 // Just jump directly to runtime if native RegExp is not selected at compile
2083 // time or if regexp entry in generated code is turned off runtime switch or
2085 #ifdef V8_INTERPRETED_REGEXP
2086 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2087 #else // V8_INTERPRETED_REGEXP
2089 // Stack frame on entry.
2090 // sp[0]: last_match_info (expected JSArray)
2091 // sp[4]: previous index
2092 // sp[8]: subject string
2093 // sp[12]: JSRegExp object
2095 const int kLastMatchInfoOffset = 0 * kPointerSize;
2096 const int kPreviousIndexOffset = 1 * kPointerSize;
2097 const int kSubjectOffset = 2 * kPointerSize;
2098 const int kJSRegExpOffset = 3 * kPointerSize;
Label runtime;
2101 // Allocation of registers for this function. These are in callee save
2102 // registers and will be preserved by the call to the native RegExp code, as
2103 // this code is called using the normal C calling convention. When calling
2104 // directly from generated code the native RegExp code will not do a GC and
2105 // therefore the content of these registers are safe to use after the call.
2106 // MIPS - using s0..s2, since we are not using CEntry Stub.
2107 Register subject = s0;
2108 Register regexp_data = s1;
2109 Register last_match_info_elements = s2;
2111 // Ensure that a RegExp stack is allocated.
2112 ExternalReference address_of_regexp_stack_memory_address =
2113     ExternalReference::address_of_regexp_stack_memory_address(isolate());
2115 ExternalReference address_of_regexp_stack_memory_size =
2116 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2117 __ li(a0, Operand(address_of_regexp_stack_memory_size));
2118 __ ld(a0, MemOperand(a0, 0));
2119 __ Branch(&runtime, eq, a0, Operand(zero_reg));
2121 // Check that the first argument is a JSRegExp object.
2122 __ ld(a0, MemOperand(sp, kJSRegExpOffset));
2123 STATIC_ASSERT(kSmiTag == 0);
2124 __ JumpIfSmi(a0, &runtime);
2125 __ GetObjectType(a0, a1, a1);
2126 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2128 // Check that the RegExp has been compiled (data contains a fixed array).
2129 __ ld(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2130 if (FLAG_debug_code) {
2131 __ SmiTst(regexp_data, a4);
__ Check(ne,
2133          kUnexpectedTypeForRegExpDataFixedArrayExpected,
         a4, Operand(zero_reg));
2136 __ GetObjectType(regexp_data, a0, a0);
__ Check(eq,
2138          kUnexpectedTypeForRegExpDataFixedArrayExpected,
         a0,
2140          Operand(FIXED_ARRAY_TYPE));
2143 // regexp_data: RegExp data (FixedArray)
2144 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2145 __ ld(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2146 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2148 // regexp_data: RegExp data (FixedArray)
2149 // Check that the number of captures fit in the static offsets vector buffer.
__ ld(a2,
2151       FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2152 // Check (number_of_captures + 1) * 2 <= offsets vector size
2153 // Or number_of_captures * 2 <= offsets vector size - 2
2154 // Or number_of_captures <= offsets vector size / 2 - 1
2155 // Multiplying by 2 comes for free since a2 is smi-tagged.
2156 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2157 int temp = Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1;
2158 __ Branch(&runtime, hi, a2, Operand(Smi::FromInt(temp)));
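// The bound above is the capture check rewritten to avoid extra arithmetic on
// the smi-tagged count: (captures + 1) * 2 <= kJSRegexpStaticOffsetsVectorSize
// is equivalent to captures <= kJSRegexpStaticOffsetsVectorSize / 2 - 1.
// Example (illustrative vector size of 50): up to (50 / 2 - 1) = 24 capture
// groups can use the static offsets vector; anything larger goes to the runtime.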
2160 // Reset offset for possibly sliced string.
2161 __ mov(t0, zero_reg);
2162 __ ld(subject, MemOperand(sp, kSubjectOffset));
2163 __ JumpIfSmi(subject, &runtime);
2164 __ mov(a3, subject); // Make a copy of the original subject string.
2165 __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2166 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2167 // subject: subject string
2168 // a3: subject string
2169 // a0: subject string instance type
2170 // regexp_data: RegExp data (FixedArray)
2171 // Handle subject string according to its encoding and representation:
2172 // (1) Sequential string? If yes, go to (5).
2173 // (2) Anything but sequential or cons? If yes, go to (6).
2174 // (3) Cons string. If the string is flat, replace subject with first string.
2175 // Otherwise bailout.
2176 // (4) Is subject external? If yes, go to (7).
2177 // (5) Sequential string. Load regexp code according to encoding.
2181 // Deferred code at the end of the stub:
2182 // (6) Not a long external string? If yes, go to (8).
2183 // (7) External string. Make it, offset-wise, look like a sequential string.
2185 // (8) Short external string or not a string? If yes, bail out to runtime.
2186 // (9) Sliced string. Replace subject with parent. Go to (4).
2188 Label check_underlying; // (4)
2189 Label seq_string; // (5)
2190 Label not_seq_nor_cons; // (6)
2191 Label external_string; // (7)
2192 Label not_long_external; // (8)
2194 // (1) Sequential string? If yes, go to (5).
__ And(a1, a0,
2197        Operand(kIsNotStringMask |
2198                kStringRepresentationMask |
2199                kShortExternalStringMask));
2200 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2201 __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
2203 // (2) Anything but sequential or cons? If yes, go to (6).
2204 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2205 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2206 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2207 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2209 __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2211 // (3) Cons string. Check that it's flat.
2212 // Replace subject with first string and reload instance type.
2213 __ ld(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2214 __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2215 __ Branch(&runtime, ne, a0, Operand(a1));
2216 __ ld(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2218 // (4) Is subject external? If yes, go to (7).
2219 __ bind(&check_underlying);
2220 __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2221 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2222 STATIC_ASSERT(kSeqStringTag == 0);
2223 __ And(at, a0, Operand(kStringRepresentationMask));
2224 // The underlying external string is never a short external string.
2225 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2226 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2227 __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
2229 // (5) Sequential string. Load regexp code according to encoding.
2230 __ bind(&seq_string);
2231 // subject: sequential subject string (or look-alike, external string)
2232 // a3: original subject string
2233 // Load previous index and check range before a3 is overwritten. We have to
2234 // use a3 instead of subject here because subject might have been only made
2235 // to look like a sequential string when it actually is an external string.
2236 __ ld(a1, MemOperand(sp, kPreviousIndexOffset));
2237 __ JumpIfNotSmi(a1, &runtime);
2238 __ ld(a3, FieldMemOperand(a3, String::kLengthOffset));
2239 __ Branch(&runtime, ls, a3, Operand(a1));
2242 STATIC_ASSERT(kStringEncodingMask == 4);
2243 STATIC_ASSERT(kOneByteStringTag == 4);
2244 STATIC_ASSERT(kTwoByteStringTag == 0);
2245 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one_byte.
2246 __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
2247 __ dsra(a3, a0, 2); // a3 is 1 for one_byte, 0 for UC16 (used below).
2248 __ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2249 __ Movz(t9, a5, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
2251 // (E) Carry on. String handling is done.
2252 // t9: irregexp code
2253 // Check that the irregexp code has been generated for the actual string
2254 // encoding. If it has, the field contains a code object otherwise it contains
2255 // a smi (code flushing support).
2256 __ JumpIfSmi(t9, &runtime);
2258 // a1: previous index
2259 // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
2261 // subject: Subject string
2262 // regexp_data: RegExp data (FixedArray)
2263 // All checks done. Now push arguments for native regexp code.
2264 __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
                         1, a0, a2);
2267 // Isolates: note we add an additional parameter here (isolate pointer).
2268 const int kRegExpExecuteArguments = 9;
2269 const int kParameterRegisters = (kMipsAbi == kN64) ? 8 : 4;
2270 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2272 // Stack pointer now points to cell where return address is to be written.
2273 // Arguments are before that on the stack or in registers, meaning we
2274 // treat the return address as argument 5. Thus every argument after that
2275 // needs to be shifted back by 1. Since DirectCEntryStub will handle
2276 // allocating space for the c argument slots, we don't need to calculate
2277 // that into the argument positions on the stack. This is how the stack will
2278 // look (sp meaning the value of sp at this moment):
// kN64 ABI (arguments 1..8 are passed in registers):
2280 //   [sp + 1] - Argument 9
2281 //   [sp + 0] - saved ra
// kO32 ABI (arguments 5..9 are passed on the stack):
2283 //   [sp + 5] - Argument 9
2284 //   [sp + 4] - Argument 8
2285 //   [sp + 3] - Argument 7
2286 //   [sp + 2] - Argument 6
2287 //   [sp + 1] - Argument 5
2288 //   [sp + 0] - saved ra
2290 if (kMipsAbi == kN64) {
2291 // Argument 9: Pass current isolate address.
2292 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2293 __ sd(a0, MemOperand(sp, 1 * kPointerSize));
2295 // Argument 8: Indicate that this is a direct call from JavaScript.
2296 __ li(a7, Operand(1));
2298 // Argument 7: Start (high end) of backtracking stack memory area.
2299 __ li(a0, Operand(address_of_regexp_stack_memory_address));
2300 __ ld(a0, MemOperand(a0, 0));
2301 __ li(a2, Operand(address_of_regexp_stack_memory_size));
2302 __ ld(a2, MemOperand(a2, 0));
2303 __ daddu(a6, a0, a2);
2305 // Argument 6: Set the number of capture registers to zero to force global
2306 // regexps to behave as non-global. This does not affect non-global regexps.
2307 __ mov(a5, zero_reg);
2309 // Argument 5: static offsets vector buffer.
__ li(a4, Operand(
2311       ExternalReference::address_of_static_offsets_vector(isolate())));
2313 DCHECK(kMipsAbi == kO32);
2315 // Argument 9: Pass current isolate address.
2316 // CFunctionArgumentOperand handles MIPS stack argument slots.
2317 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2318 __ sd(a0, MemOperand(sp, 5 * kPointerSize));
2320 // Argument 8: Indicate that this is a direct call from JavaScript.
2321 __ li(a0, Operand(1));
2322 __ sd(a0, MemOperand(sp, 4 * kPointerSize));
2324 // Argument 7: Start (high end) of backtracking stack memory area.
2325 __ li(a0, Operand(address_of_regexp_stack_memory_address));
2326 __ ld(a0, MemOperand(a0, 0));
2327 __ li(a2, Operand(address_of_regexp_stack_memory_size));
2328 __ ld(a2, MemOperand(a2, 0));
2329 __ daddu(a0, a0, a2);
2330 __ sd(a0, MemOperand(sp, 3 * kPointerSize));
2332 // Argument 6: Set the number of capture registers to zero to force global
2333 // regexps to behave as non-global. This does not affect non-global regexps.
2334 __ mov(a0, zero_reg);
2335 __ sd(a0, MemOperand(sp, 2 * kPointerSize));
2337 // Argument 5: static offsets vector buffer.
__ li(a0, Operand(
2339       ExternalReference::address_of_static_offsets_vector(isolate())));
2340 __ sd(a0, MemOperand(sp, 1 * kPointerSize));
2343 // For arguments 4 and 3 get string length, calculate start of string data
2344 // and calculate the shift of the index (0 for one_byte and 1 for two byte).
2345 __ Daddu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2346 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
2347 // Load the length from the original subject string from the previous stack
2348 // frame. Therefore we have to use fp, which points exactly to two pointer
2349 // sizes below the previous sp. (Because creating a new stack frame pushes
2350 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2351 __ ld(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2352 // If slice offset is not 0, load the length from the original sliced string.
2353 // Argument 4, a3: End of string data
2354 // Argument 3, a2: Start of string data
2355 // Prepare start and end index of the input.
2356 __ dsllv(t1, t0, a3);
2357 __ daddu(t0, t2, t1);
2358 __ dsllv(t1, a1, a3);
2359 __ daddu(a2, t0, t1);
2361 __ ld(t2, FieldMemOperand(subject, String::kLengthOffset));
2364 __ dsllv(t1, t2, a3);
2365 __ daddu(a3, t0, t1);
2366 // Argument 2 (a1): Previous index.
2369 // Argument 1 (a0): Subject string.
2370 __ mov(a0, subject);
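// Editor's sketch of the native entry point being invoked, reconstructed from
// the argument comments above (assumed shape, not the literal V8 declaration):
//   int entry(String* subject,            // a0, argument 1
//             int previous_index,         // a1, argument 2
//             const byte* input_start,    // a2, argument 3
//             const byte* input_end,      // a3, argument 4
//             int* static_offsets_vector, // argument 5
//             int num_capture_registers,  // argument 6 (0 here: forces non-global behaviour)
//             Address stack_base,         // argument 7
//             int direct_call,            // argument 8 (1 = called from generated code)
//             Isolate* isolate);          // argument 9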
2372 // Locate the code entry and call it.
2373 __ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
2374 DirectCEntryStub stub(isolate());
2375 stub.GenerateCall(masm, t9);
2377 __ LeaveExitFrame(false, no_reg, true);
2380 // subject: subject string (callee saved)
2381 // regexp_data: RegExp data (callee saved)
2382 // last_match_info_elements: Last match info elements (callee saved)
2383 // Check the result.
Label success;
2385 __ Branch(&success, eq, v0, Operand(1));
2386 // We expect exactly one result since we force the called regexp to behave
// as non-global.
Label failure;
2389 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
2390 // If not exception it can only be retry. Handle that in the runtime system.
2391 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2392 // Result must now be exception. If there is no pending exception already a
2393 // stack overflow (on the backtrack stack) was detected in RegExp code but
2394 // haven't created the exception yet. Handle that in the runtime system.
2395 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2396 __ li(a1, Operand(isolate()->factory()->the_hole_value()));
2397 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2399 __ ld(v0, MemOperand(a2, 0));
2400 __ Branch(&runtime, eq, v0, Operand(a1));
2402 // For exception, throw the exception again.
2403 __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
__ bind(&failure);
2406 // For failure and exception return null.
2407 __ li(v0, Operand(isolate()->factory()->null_value()));
__ DropAndRet(4);
2410 // Process the result from the native regexp code.
__ bind(&success);
2413 __ lw(a1, UntagSmiFieldMemOperand(
2414 regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2415 // Calculate number of capture registers (number_of_captures + 1) * 2.
2416 __ Daddu(a1, a1, Operand(1));
2417 __ dsll(a1, a1, 1); // Multiply by 2.
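// Example: a regexp with 2 capture groups has kIrregexpCaptureCount == 2, so
// a1 becomes (2 + 1) * 2 == 6 here: one (start, end) offset pair for the whole
// match plus one pair per capture group.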
2419 __ ld(a0, MemOperand(sp, kLastMatchInfoOffset));
2420 __ JumpIfSmi(a0, &runtime);
2421 __ GetObjectType(a0, a2, a2);
2422 __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
2423 // Check that the JSArray is in fast case.
2424 __ ld(last_match_info_elements,
2425 FieldMemOperand(a0, JSArray::kElementsOffset));
2426 __ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2427 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2428 __ Branch(&runtime, ne, a0, Operand(at));
2429 // Check that the last match info has space for the capture registers and the
2430 // additional information.
__ ld(a0,
2432       FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2433 __ Daddu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
2435 __ SmiUntag(at, a0);
2436 __ Branch(&runtime, gt, a2, Operand(at));
2438 // a1: number of capture registers
2439 // subject: subject string
2440 // Store the capture count.
2441 __ SmiTag(a2, a1); // To smi.
2442 __ sd(a2, FieldMemOperand(last_match_info_elements,
2443 RegExpImpl::kLastCaptureCountOffset));
2444 // Store last subject and last input.
__ sd(subject,
2446       FieldMemOperand(last_match_info_elements,
2447                       RegExpImpl::kLastSubjectOffset));
2448 __ mov(a2, subject);
2449 __ RecordWriteField(last_match_info_elements,
2450 RegExpImpl::kLastSubjectOffset,
2455 __ mov(subject, a2);
__ sd(subject,
2457       FieldMemOperand(last_match_info_elements,
2458                       RegExpImpl::kLastInputOffset));
2459 __ RecordWriteField(last_match_info_elements,
2460 RegExpImpl::kLastInputOffset,
2466 // Get the static offsets vector filled by the native regexp code.
2467 ExternalReference address_of_static_offsets_vector =
2468 ExternalReference::address_of_static_offsets_vector(isolate());
2469 __ li(a2, Operand(address_of_static_offsets_vector));
2471 // a1: number of capture registers
2472 // a2: offsets vector
2473 Label next_capture, done;
2474 // Capture register counter starts from number of capture registers and
2475 // counts down until wrapping after zero.
__ Daddu(a0,
2477          last_match_info_elements,
2478          Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2479 __ bind(&next_capture);
2480 __ Dsubu(a1, a1, Operand(1));
2481 __ Branch(&done, lt, a1, Operand(zero_reg));
2482 // Read the value from the static offsets vector buffer.
2483 __ lw(a3, MemOperand(a2, 0));
2484 __ daddiu(a2, a2, kIntSize);
2485 // Store the smi value in the last match info.
2487 __ sd(a3, MemOperand(a0, 0));
2488 __ Branch(&next_capture, USE_DELAY_SLOT);
2489 __ daddiu(a0, a0, kPointerSize); // In branch delay slot.
__ bind(&done);
2493 // Return last match info.
2494 __ ld(v0, MemOperand(sp, kLastMatchInfoOffset));
__ DropAndRet(4);
2497 // Do the runtime call to execute the regexp.
__ bind(&runtime);
2499 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2501 // Deferred code for string handling.
2502 // (6) Not a long external string? If yes, go to (8).
2503 __ bind(&not_seq_nor_cons);
2505 __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
2507 // (7) External string. Make it, offset-wise, look like a sequential string.
2508 __ bind(&external_string);
2509 __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2510 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2511 if (FLAG_debug_code) {
2512 // Assert that we do not have a cons or slice (indirect strings) here.
2513 // Sequential strings have already been ruled out.
2514 __ And(at, a0, Operand(kIsIndirectStringMask));
__ Assert(eq,
2516           kExternalStringExpectedButNotFound,
          at, Operand(zero_reg));
__ ld(subject,
2521       FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2522 // Move the pointer so that offset-wise, it looks like a sequential string.
2523 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Dsubu(subject, subject,
2526          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
2527 __ jmp(&seq_string); // Go to (5).
2529 // (8) Short external string or not a string? If yes, bail out to runtime.
2530 __ bind(&not_long_external);
2531 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
2532 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
2533 __ Branch(&runtime, ne, at, Operand(zero_reg));
2535 // (9) Sliced string. Replace subject with parent. Go to (4).
2536 // Load offset into t0 and replace subject string with parent.
2537 __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
__ SmiUntag(t0);
2539 __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2540 __ jmp(&check_underlying); // Go to (4).
2541 #endif // V8_INTERPRETED_REGEXP
2545 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2546 // Cache the called function in a feedback vector slot. Cache states
2547 // are uninitialized, monomorphic (indicated by a JSFunction), and
2549 // a0 : number of arguments to the construct function
2550 // a1 : the function to call
2551 // a2 : Feedback vector
2552 // a3 : slot in feedback vector (Smi)
2553 Label initialize, done, miss, megamorphic, not_array_function;
2555 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2556 masm->isolate()->heap()->megamorphic_symbol());
2557 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2558 masm->isolate()->heap()->uninitialized_symbol());
2560 // Load the cache state into a4.
2561 __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2562 __ Daddu(a4, a2, Operand(a4));
2563 __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
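// Why the shift above produces a byte offset (sketch): on MIPS64 a smi keeps
// its 32-bit payload in the upper word, so a slot index i is tagged as i << 32.
// Logically shifting right by (32 - kPointerSizeLog2) gives
//   (i << 32) >> (32 - 3) == i << 3 == i * kPointerSize,
// i.e. the feedback-vector slot's byte offset in a single instruction.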
2565 // A monomorphic cache hit or an already megamorphic state: invoke the
2566 // function without changing the state.
2567 __ Branch(&done, eq, a4, Operand(a1));
2569 if (!FLAG_pretenuring_call_new) {
2570 // If we came here, we need to see if we are the array function.
2571 // If we didn't have a matching function, and we didn't find the megamorph
2572 // sentinel, then we have in the slot either some other function or an
2573 // AllocationSite. Do a map check on the object in a3.
2574 __ ld(a5, FieldMemOperand(a4, 0));
2575 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2576 __ Branch(&miss, ne, a5, Operand(at));
2578 // Make sure the function is the Array() function
2579 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
2580 __ Branch(&megamorphic, ne, a1, Operand(a4));
__ jmp(&done);
}

__ bind(&miss);

2586 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
2588 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2589 __ Branch(&initialize, eq, a4, Operand(at));
2590 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2591 // write-barrier is needed.
2592 __ bind(&megamorphic);
2593 __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2594 __ Daddu(a4, a2, Operand(a4));
2595 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2596 __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
__ jmp(&done);
2599 // An uninitialized cache is patched with the function.
2600 __ bind(&initialize);
2601 if (!FLAG_pretenuring_call_new) {
2602 // Make sure the function is the Array() function.
2603 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
2604 __ Branch(¬_array_function, ne, a1, Operand(a4));
2606 // The target function is the Array constructor,
2607 // Create an AllocationSite if we don't already have it, store it in the
2610 FrameScope scope(masm, StackFrame::INTERNAL);
2611 const RegList kSavedRegs =
         1 << 4 |  // a0
         1 << 5 |  // a1
         1 << 6 |  // a2
         1 << 7;   // a3
2617 // Arguments register must be smi-tagged to call out.
__ SmiTag(a0);
2619 __ MultiPush(kSavedRegs);
2621 CreateAllocationSiteStub create_stub(masm->isolate());
2622 __ CallStub(&create_stub);
2624 __ MultiPop(kSavedRegs);
__ SmiUntag(a0);
__ Branch(&done);

2629 __ bind(&not_array_function);
2632 __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2633 __ Daddu(a4, a2, Operand(a4));
2634 __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2635 __ sd(a1, MemOperand(a4, 0));
2637 __ Push(a4, a2, a1);
2638 __ RecordWrite(a2, a4, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
2639                EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Pop(a4, a2, a1);

__ bind(&done);
2646 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2647 __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2649 // Do not transform the receiver for strict mode functions.
2650 int32_t strict_mode_function_mask =
2651 1 << SharedFunctionInfo::kStrictModeBitWithinByte ;
2652 // Do not transform the receiver for native (Compilerhints already in a3).
2653 int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
2655 __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kStrictModeByteOffset));
2656 __ And(at, a4, Operand(strict_mode_function_mask));
2657 __ Branch(cont, ne, at, Operand(zero_reg));
2658 __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kNativeByteOffset));
2659 __ And(at, a4, Operand(native_mask));
2660 __ Branch(cont, ne, at, Operand(zero_reg));
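// JS-level rule enforced here: only sloppy, non-native callees get a boxed
// receiver. Illustrative example:
//   function f() { return typeof this; }
//   f.call(5);   // sloppy  -> "object" (5 is wrapped)
//   // with 'use strict' inside f, the same call yields "number"
// Strict-mode and native functions therefore branch straight to 'cont'.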
2664 static void EmitSlowCase(MacroAssembler* masm,
2666 Label* non_function) {
2667 // Check for function proxy.
2668 __ Branch(non_function, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
2669 __ push(a1); // put proxy as additional argument
2670 __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
2671 __ mov(a2, zero_reg);
2672 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
2674 Handle<Code> adaptor =
2675 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2676 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2679 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2680 // of the original receiver from the call site).
2681 __ bind(non_function);
2682 __ sd(a1, MemOperand(sp, argc * kPointerSize));
2683 __ li(a0, Operand(argc)); // Set up the number of arguments.
2684 __ mov(a2, zero_reg);
2685 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
2686 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2687 RelocInfo::CODE_TARGET);
2691 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2692 // Wrap the receiver and patch it back onto the stack.
2693 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
2695 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2698 __ Branch(USE_DELAY_SLOT, cont);
2699 __ sd(v0, MemOperand(sp, argc * kPointerSize));
2703 static void CallFunctionNoFeedback(MacroAssembler* masm,
2704 int argc, bool needs_checks,
2705 bool call_as_method) {
2706 // a1 : the function to call
2707 Label slow, non_function, wrap, cont;
2710 // Check that the function is really a JavaScript function.
2711 // a1: pushed function (to be verified)
2712 __ JumpIfSmi(a1, &non_function);
2714 // Goto slow case if we do not have a function.
2715 __ GetObjectType(a1, a4, a4);
2716 __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
2719 // Fast-case: Invoke the function now.
2720 // a1: pushed function
2721 ParameterCount actual(argc);
2723 if (call_as_method) {
2725 EmitContinueIfStrictOrNative(masm, &cont);
2728 // Compute the receiver in sloppy mode.
2729 __ ld(a3, MemOperand(sp, argc * kPointerSize));
2732 __ JumpIfSmi(a3, &wrap);
2733 __ GetObjectType(a3, a4, a4);
2734 __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
2741 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2744 // Slow-case: Non-function called.
__ bind(&slow);
2746 EmitSlowCase(masm, argc, &non_function);
2749 if (call_as_method) {
__ bind(&wrap);
2751 // Wrap the receiver and patch it back onto the stack.
2752 EmitWrapCase(masm, argc, &cont);
2757 void CallFunctionStub::Generate(MacroAssembler* masm) {
2758 CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2762 void CallConstructStub::Generate(MacroAssembler* masm) {
2763 // a0 : number of arguments
2764 // a1 : the function to call
2765 // a2 : feedback vector
2766 // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
2767 Label slow, non_function_call;
2768 // Check that the function is not a smi.
2769 __ JumpIfSmi(a1, &non_function_call);
2770 // Check that the function is a JSFunction.
2771 __ GetObjectType(a1, a4, a4);
2772 __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
2774 if (RecordCallTarget()) {
2775 GenerateRecordCallTarget(masm);
2777 __ dsrl(at, a3, 32 - kPointerSizeLog2);
2778 __ Daddu(a5, a2, at);
2779 if (FLAG_pretenuring_call_new) {
2780 // Put the AllocationSite from the feedback vector into a2.
2781 // By adding kPointerSize we encode that we know the AllocationSite
2782 // entry is at the feedback vector slot given by a3 + 1.
2783 __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
2785 Label feedback_register_initialized;
2786 // Put the AllocationSite from the feedback vector into a2, or undefined.
2787 __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
2788 __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
2789 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2790 __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
2791 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
2792 __ bind(&feedback_register_initialized);
2795 __ AssertUndefinedOrAllocationSite(a2, a5);
2798 // Pass function as original constructor.
2799 if (IsSuperConstructorCall()) {
2800 __ li(a4, Operand(1 * kPointerSize));
2801 __ dsll(at, a0, kPointerSizeLog2);
2802 __ daddu(a4, a4, at);
2803 __ daddu(at, sp, a4);
2804 __ ld(a3, MemOperand(at, 0));
2809 // Jump to the function-specific construct stub.
2810 Register jmp_reg = a4;
2811 __ ld(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2812 __ ld(jmp_reg, FieldMemOperand(jmp_reg,
2813 SharedFunctionInfo::kConstructStubOffset));
2814 __ Daddu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
2817 // a0: number of arguments
2818 // a1: called object
2822 __ Branch(&non_function_call, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
2823 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2826 __ bind(&non_function_call);
2827 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2829 // Set expected number of arguments to zero (not changing r0).
2830 __ li(a2, Operand(0, RelocInfo::NONE32));
2831 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2832 RelocInfo::CODE_TARGET);
2836 // StringCharCodeAtGenerator.
2837 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2838 DCHECK(!a4.is(index_));
2839 DCHECK(!a4.is(result_));
2840 DCHECK(!a4.is(object_));
2842 // If the receiver is a smi trigger the non-string case.
2843 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2844 __ JumpIfSmi(object_, receiver_not_string_);
2846 // Fetch the instance type of the receiver into result register.
2847 __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2848 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2849 // If the receiver is not a string trigger the non-string case.
2850 __ And(a4, result_, Operand(kIsNotStringMask));
2851 __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg));
2854 // If the index is non-smi trigger the non-smi case.
2855 __ JumpIfNotSmi(index_, &index_not_smi_);
2857 __ bind(&got_smi_index_);
2859 // Check for index out of range.
2860 __ ld(a4, FieldMemOperand(object_, String::kLengthOffset));
2861 __ Branch(index_out_of_range_, ls, a4, Operand(index_));
2863 __ SmiUntag(index_);
2865 StringCharLoadGenerator::Generate(masm,
                                       object_,
                                       index_,
                                       result_,
                                       &call_runtime_);
__ SmiTag(result_);
__ bind(&exit_);
2876 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2877 __ ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2878 __ ld(vector, FieldMemOperand(vector,
2879 JSFunction::kSharedFunctionInfoOffset));
2880 __ ld(vector, FieldMemOperand(vector,
2881 SharedFunctionInfo::kFeedbackVectorOffset));
2885 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
Label miss;
2891 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
2892 __ Branch(&miss, ne, a1, Operand(at));
2894 __ li(a0, Operand(arg_count()));
2895 __ dsrl(at, a3, 32 - kPointerSizeLog2);
2896 __ Daddu(at, a2, Operand(at));
2897 __ ld(a4, FieldMemOperand(at, FixedArray::kHeaderSize));
2899 // Verify that a4 contains an AllocationSite
2900 __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
2901 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2902 __ Branch(&miss, ne, a5, Operand(at));
2906 ArrayConstructorStub stub(masm->isolate(), arg_count());
2907 __ TailCallStub(&stub);
__ bind(&miss);
GenerateMiss(masm);

2912 // The slow case, we need this no matter what to complete a call after a miss.
2913 CallFunctionNoFeedback(masm,
                            arg_count(),
                            true,
                            CallAsMethod());

// Unreachable.
2919 __ stop("Unexpected code address");
2923 void CallICStub::Generate(MacroAssembler* masm) {
2925 // a3 - slot id (Smi)
2927 const int with_types_offset =
2928 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
2929 const int generic_offset =
2930 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
2931 Label extra_checks_or_miss, slow_start;
2932 Label slow, non_function, wrap, cont;
2933 Label have_js_function;
2934 int argc = arg_count();
2935 ParameterCount actual(argc);
2937 // The checks. First, does a1 match the recorded monomorphic target?
2938 __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2939 __ Daddu(a4, a2, Operand(a4));
2940 __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
2942 // We don't know that we have a weak cell. We might have a private symbol
2943 // or an AllocationSite, but the memory is safe to examine.
2944 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
2946 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
2947 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
2948 // computed, meaning that it can't appear to be a pointer. If the low bit is
2949 // 0, then hash is computed, but the 0 bit prevents the field from appearing
2951 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
2952 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
2953 WeakCell::kValueOffset &&
2954 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
2956 __ ld(a5, FieldMemOperand(a4, WeakCell::kValueOffset));
2957 __ Branch(&extra_checks_or_miss, ne, a1, Operand(a5));
2959 // The compare above could have been a SMI/SMI comparison. Guard against this
2960 // convincing us that we have a monomorphic JSFunction.
2961 __ JumpIfSmi(a1, &extra_checks_or_miss);
2963 __ bind(&have_js_function);
2964 if (CallAsMethod()) {
2965 EmitContinueIfStrictOrNative(masm, &cont);
2966 // Compute the receiver in sloppy mode.
2967 __ ld(a3, MemOperand(sp, argc * kPointerSize));
2969 __ JumpIfSmi(a3, &wrap);
2970 __ GetObjectType(a3, a4, a4);
2971 __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
2976 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
__ bind(&slow);
2979 EmitSlowCase(masm, argc, &non_function);
2981 if (CallAsMethod()) {
__ bind(&wrap);
2983 EmitWrapCase(masm, argc, &cont);
2986 __ bind(&extra_checks_or_miss);
2987 Label uninitialized, miss;
2989 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2990 __ Branch(&slow_start, eq, a4, Operand(at));
2992 // The following cases attempt to handle MISS cases without going to the
// runtime.
2994 if (FLAG_trace_ic) {
  __ Branch(&miss);
}
2998 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2999 __ Branch(&uninitialized, eq, a4, Operand(at));
3001 // We are going megamorphic. If the feedback is a JSFunction, it is fine
3002 // to handle it here. More complex cases are dealt with in the runtime.
3003 __ AssertNotSmi(a4);
3004 __ GetObjectType(a4, a5, a5);
3005 __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE));
3006 __ dsrl(a4, a3, 32 - kPointerSizeLog2);
3007 __ Daddu(a4, a2, Operand(a4));
3008 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
3009 __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
3010 // We have to update statistics for runtime profiling.
3011 __ ld(a4, FieldMemOperand(a2, with_types_offset));
3012 __ Dsubu(a4, a4, Operand(Smi::FromInt(1)));
3013 __ sd(a4, FieldMemOperand(a2, with_types_offset));
3014 __ ld(a4, FieldMemOperand(a2, generic_offset));
3015 __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
3016 __ Branch(USE_DELAY_SLOT, &slow_start);
3017 __ sd(a4, FieldMemOperand(a2, generic_offset)); // In delay slot.
3019 __ bind(&uninitialized);
3021 // We are going monomorphic, provided we actually have a JSFunction.
3022 __ JumpIfSmi(a1, &miss);
3024 // Goto miss case if we do not have a function.
3025 __ GetObjectType(a1, a4, a4);
3026 __ Branch(&miss, ne, a4, Operand(JS_FUNCTION_TYPE));
3028 // Make sure the function is not the Array() function, which requires special
3029 // behavior on MISS.
3030 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
3031 __ Branch(&miss, eq, a1, Operand(a4));
3034 __ ld(a4, FieldMemOperand(a2, with_types_offset));
3035 __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
3036 __ sd(a4, FieldMemOperand(a2, with_types_offset));
3038 // Store the function. Use a stub since we need a frame for allocation.
3043 FrameScope scope(masm, StackFrame::INTERNAL);
3044 CreateWeakCellStub create_stub(masm->isolate());
__ Push(a1);
3046 __ CallStub(&create_stub);
__ Pop(a1);
3050 __ Branch(&have_js_function);
3052 // We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
3058 __ bind(&slow_start);
3059 // Check that the function is really a JavaScript function.
3060 // a1: pushed function (to be verified)
3061 __ JumpIfSmi(a1, &non_function);
3063 // Goto slow case if we do not have a function.
3064 __ GetObjectType(a1, a4, a4);
3065 __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
3066 __ Branch(&have_js_function);
3070 void CallICStub::GenerateMiss(MacroAssembler* masm) {
3071 FrameScope scope(masm, StackFrame::INTERNAL);
3073 // Push the receiver and the function and feedback info.
3074 __ Push(a1, a2, a3);
3077 IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
3078 : IC::kCallIC_Customization_Miss;
3080 ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
3081 __ CallExternalReference(miss, 3);
3083 // Move result to a1 and exit the internal frame.
__ mov(a1, v0);
3088 void StringCharCodeAtGenerator::GenerateSlow(
3089 MacroAssembler* masm, EmbedMode embed_mode,
3090 const RuntimeCallHelper& call_helper) {
3091 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3093 // Index is not a smi.
3094 __ bind(&index_not_smi_);
3095 // If index is a heap number, try converting it to an integer.
__ CheckMap(index_,
            result_,
3098         Heap::kHeapNumberMapRootIndex,
            index_not_number_,
            DONT_DO_SMI_CHECK);
3101 call_helper.BeforeCall(masm);
3102 // Consumed by runtime conversion function:
3103 if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
3104 __ Push(VectorLoadICDescriptor::VectorRegister(),
3105 VectorLoadICDescriptor::SlotRegister(), object_, index_);
3107 __ Push(object_, index_);
3109 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3110 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3112 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3113 // NumberToSmi discards numbers that are not exact integers.
3114 __ CallRuntime(Runtime::kNumberToSmi, 1);
3117 // Save the conversion result before the pop instructions below
3118 // have a chance to overwrite it.
3120 __ Move(index_, v0);
3121 if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
3122 __ Pop(VectorLoadICDescriptor::SlotRegister(),
3123 VectorLoadICDescriptor::VectorRegister(), object_);
3127 // Reload the instance type.
3128 __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3129 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3130 call_helper.AfterCall(masm);
3131 // If index is still not a smi, it must be out of range.
3132 __ JumpIfNotSmi(index_, index_out_of_range_);
3133 // Otherwise, return to the fast path.
3134 __ Branch(&got_smi_index_);
3136 // Call runtime. We get here when the receiver is a string and the
3137 // index is a number, but the code of getting the actual character
3138 // is too complex (e.g., when the string needs to be flattened).
3139 __ bind(&call_runtime_);
3140 call_helper.BeforeCall(masm);
3142 __ Push(object_, index_);
3143 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
3145 __ Move(result_, v0);
3147 call_helper.AfterCall(masm);
3150 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3154 // -------------------------------------------------------------------------
3155 // StringCharFromCodeGenerator
3157 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3158 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3160 DCHECK(!a4.is(result_));
3161 DCHECK(!a4.is(code_));
3163 STATIC_ASSERT(kSmiTag == 0);
3164 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
__ And(a4, code_,
3167         Operand(kSmiTagMask |
3168                 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3169 __ Branch(&slow_case_, ne, a4, Operand(zero_reg));
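// The combined mask above is meant to test, in one And, that code_ is a smi
// whose untagged value is at most String::kMaxOneByteCharCode. Sketch:
//   ok = (code & (kSmiTagMask | (~String::kMaxOneByteCharCode << kSmiTagSize))) == 0;
// Any bit left set sends us to the slow case.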
3172 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3173 // At this point code register contains smi tagged one_byte char code.
3174 STATIC_ASSERT(kSmiTag == 0);
3175 __ SmiScale(a4, code_, kPointerSizeLog2);
3176 __ Daddu(result_, result_, a4);
3177 __ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3178 __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
3179 __ Branch(&slow_case_, eq, result_, Operand(a4));
3184 void StringCharFromCodeGenerator::GenerateSlow(
3185 MacroAssembler* masm,
3186 const RuntimeCallHelper& call_helper) {
3187 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3189 __ bind(&slow_case_);
3190 call_helper.BeforeCall(masm);
__ push(code_);
3192 __ CallRuntime(Runtime::kCharFromCode, 1);
3193 __ Move(result_, v0);
3195 call_helper.AfterCall(masm);
__ Branch(&exit_);
3198 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3202 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
3205 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
3210 String::Encoding encoding) {
3211 if (FLAG_debug_code) {
3212 // Check that destination is word aligned.
3213 __ And(scratch, dest, Operand(kPointerAlignmentMask));
__ Check(eq,
3215          kDestinationOfCopyNotAligned,
         scratch, Operand(zero_reg));
3220 // Assumes word reads and writes are little endian.
3221 // Nothing to do for zero characters.
Label done;
__ Branch(&done, eq, count, Operand(zero_reg));
3224 if (encoding == String::TWO_BYTE_ENCODING) {
3225 __ Daddu(count, count, count);
3228 Register limit = count; // Read until dest equals this.
3229 __ Daddu(limit, dest, Operand(count));
3231 Label loop_entry, loop;
3232 // Copy bytes from src to dest until dest hits limit.
3233 __ Branch(&loop_entry);
__ bind(&loop);
3235 __ lbu(scratch, MemOperand(src));
3236 __ daddiu(src, src, 1);
3237 __ sb(scratch, MemOperand(dest));
3238 __ daddiu(dest, dest, 1);
3239 __ bind(&loop_entry);
3240 __ Branch(&loop, lt, dest, Operand(limit));

__ bind(&done);
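// Equivalent C sketch of the copy loop above ('limit' is the count register
// after it has been turned into an end pointer):
//   while (dest < limit) { *dest++ = *src++; }   // byte-wise copy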
3246 void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
3248 // Stack frame on entry.
3249 // ra: return address
3254 // This stub is called from the native-call %_SubString(...), so
3255 // nothing can be assumed about the arguments. It is tested that:
3256 // "string" is a sequential string,
3257 // both "from" and "to" are smis, and
3258 // 0 <= from <= to <= string.length.
3259 // If any of these assumptions fail, we call the runtime system.
3261 const int kToOffset = 0 * kPointerSize;
3262 const int kFromOffset = 1 * kPointerSize;
3263 const int kStringOffset = 2 * kPointerSize;
3265 __ ld(a2, MemOperand(sp, kToOffset));
3266 __ ld(a3, MemOperand(sp, kFromOffset));
3268 // STATIC_ASSERT(kFromOffset == kToOffset + 4);
3269 STATIC_ASSERT(kSmiTag == 0);
3271 // STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3273 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
3274 // safe in this case.
3275 __ JumpIfNotSmi(a2, &runtime);
3276 __ JumpIfNotSmi(a3, &runtime);
3277 // Both a2 and a3 are untagged integers.
3279 __ SmiUntag(a2, a2);
3280 __ SmiUntag(a3, a3);
3281 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
3283 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
3284 __ Dsubu(a2, a2, a3);
3286 // Make sure first argument is a string.
3287 __ ld(v0, MemOperand(sp, kStringOffset));
3288 __ JumpIfSmi(v0, &runtime);
3289 __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
3290 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3291 __ And(a4, a1, Operand(kIsNotStringMask));
3293 __ Branch(&runtime, ne, a4, Operand(zero_reg));
3296 __ Branch(&single_char, eq, a2, Operand(1));
3298 // Short-cut for the case of trivial substring.
3300 // v0: original string
3301 // a2: result string length
3302 __ ld(a4, FieldMemOperand(v0, String::kLengthOffset));
3304 // Return original string.
3305 __ Branch(&return_v0, eq, a2, Operand(a4));
3306 // Longer than original string's length or negative: unsafe arguments.
3307 __ Branch(&runtime, hi, a2, Operand(a4));
3308 // Shorter than original string's length: an actual substring.
3310 // Deal with different string types: update the index if necessary
3311 // and put the underlying string into a5.
3312 // v0: original string
3313 // a1: instance type
3315 // a3: from index (untagged)
3316 Label underlying_unpacked, sliced_string, seq_or_external_string;
3317 // If the string is not indirect, it can only be sequential or external.
3318 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3319 STATIC_ASSERT(kIsIndirectStringMask != 0);
3320 __ And(a4, a1, Operand(kIsIndirectStringMask));
3321 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, a4, Operand(zero_reg));
3322 // a4 is used as a scratch register and can be overwritten in either case.
3323 __ And(a4, a1, Operand(kSlicedNotConsMask));
3324 __ Branch(&sliced_string, ne, a4, Operand(zero_reg));
3325 // Cons string. Check whether it is flat, then fetch first part.
3326 __ ld(a5, FieldMemOperand(v0, ConsString::kSecondOffset));
3327 __ LoadRoot(a4, Heap::kempty_stringRootIndex);
3328 __ Branch(&runtime, ne, a5, Operand(a4));
3329 __ ld(a5, FieldMemOperand(v0, ConsString::kFirstOffset));
3330 // Update instance type.
3331 __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
3332 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3333 __ jmp(&underlying_unpacked);
3335 __ bind(&sliced_string);
3336 // Sliced string. Fetch parent and correct start index by offset.
3337 __ ld(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
3338 __ ld(a4, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3339 __ SmiUntag(a4); // Add offset to index.
3340 __ Daddu(a3, a3, a4);
3341 // Update instance type.
3342 __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
3343 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3344 __ jmp(&underlying_unpacked);
3346 __ bind(&seq_or_external_string);
3347 // Sequential or external string. Just move string to the expected register.
__ mov(a5, v0);
3350 __ bind(&underlying_unpacked);
3352 if (FLAG_string_slices) {
Label copy_routine;
3354 // a5: underlying subject string
3355 // a1: instance type of underlying subject string
3357 // a3: adjusted start index (untagged)
3358 // Short slice. Copy instead of slicing.
3359 __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
3360 // Allocate new sliced string. At this point we do not reload the instance
3361 // type including the string encoding because we simply rely on the info
3362 // provided by the original string. It does not matter if the original
3363 // string's encoding is wrong, because we always have to recheck the encoding
3364 // of the newly created string's parent anyway due to externalized strings.
3365 Label two_byte_slice, set_slice_header;
3366 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3367 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3368 __ And(a4, a1, Operand(kStringEncodingMask));
3369 __ Branch(&two_byte_slice, eq, a4, Operand(zero_reg));
3370 __ AllocateOneByteSlicedString(v0, a2, a6, a7, &runtime);
3371 __ jmp(&set_slice_header);
3372 __ bind(&two_byte_slice);
3373 __ AllocateTwoByteSlicedString(v0, a2, a6, a7, &runtime);
3374 __ bind(&set_slice_header);
3376 __ sd(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
3377 __ sd(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
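// The new slice shares the parent's characters; only the SlicedString header
// itself was allocated above.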
3380 __ bind(&copy_routine);
3383 // a5: underlying subject string
3384 // a1: instance type of underlying subject string
3386 // a3: adjusted start index (untagged)
3387 Label two_byte_sequential, sequential_string, allocate_result;
3388 STATIC_ASSERT(kExternalStringTag != 0);
3389 STATIC_ASSERT(kSeqStringTag == 0);
3390 __ And(a4, a1, Operand(kExternalStringTag));
3391 __ Branch(&sequential_string, eq, a4, Operand(zero_reg));
3393 // Handle external string.
3394 // Rule out short external strings.
3395 STATIC_ASSERT(kShortExternalStringTag != 0);
3396 __ And(a4, a1, Operand(kShortExternalStringTag));
3397 __ Branch(&runtime, ne, a4, Operand(zero_reg));
3398 __ ld(a5, FieldMemOperand(a5, ExternalString::kResourceDataOffset));
3399 // a5 already points to the first character of the underlying string.
3400 __ jmp(&allocate_result);
3402 __ bind(&sequential_string);
3403 // Locate first character of underlying subject string.
3404 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3405 __ Daddu(a5, a5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
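// The header size is the same for both encodings (asserted above), so this
// adjustment is valid regardless of the string's encoding.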
3407 __ bind(&allocate_result);
3408 // Check the encoding of the underlying string and allocate the result.
3409 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3410 __ And(a4, a1, Operand(kStringEncodingMask));
3411 __ Branch(&two_byte_sequential, eq, a4, Operand(zero_reg));
3413 // Allocate and copy the resulting one-byte string.
3414 __ AllocateOneByteString(v0, a2, a4, a6, a7, &runtime);
3416 // Locate first character of substring to copy.
3417 __ Daddu(a5, a5, a3);
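// For a one-byte string the byte offset equals the character index, so no
// scaling is needed here.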
3419 // Locate first character of result.
3420 __ Daddu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3422 // v0: result string
3423 // a1: first character of result string
3424 // a2: result string length
3425 // a5: first character of substring to copy
3426 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3427 StringHelper::GenerateCopyCharacters(
3428 masm, a1, a5, a2, a3, String::ONE_BYTE_ENCODING);
3431 // Allocate and copy the resulting two-byte string.
3432 __ bind(&two_byte_sequential);
3433 __ AllocateTwoByteString(v0, a2, a4, a6, a7, &runtime);
3435 // Locate first character of substring to copy.
3436 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3438 __ Daddu(a5, a5, a4);
3439 // Locate first character of result.
3440 __ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3442 // v0: result string.
3443 // a1: first character of result.
3444 // a2: result length.
3445 // a5: first character of substring to copy.
3446 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3447 StringHelper::GenerateCopyCharacters(
3448 masm, a1, a5, a2, a3, String::TWO_BYTE_ENCODING);
3450 __ bind(&return_v0);
3451 Counters* counters = isolate()->counters();
3452 __ IncrementCounter(counters->sub_string_native(), 1, a3, a4);
3455 // Just jump to runtime to create the substring.
3457 __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
3459 __ bind(&single_char);
3460 // v0: original string
3461 // a1: instance type
3463 // a3: from index (untagged)
3464 StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
3465 STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
3466 generator.GenerateFast(masm);
3468 generator.SkipSlow(masm, &runtime);
3472 void ToNumberStub::Generate(MacroAssembler* masm) {
3473 // The ToNumber stub takes one argument in a0.
3475 __ JumpIfNotSmi(a0, &not_smi);
3476 __ Ret(USE_DELAY_SLOT);
3480 Label not_heap_number;
3481 __ ld(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
3482 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3484 // a1: instance type.
3485 __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
3486 __ Ret(USE_DELAY_SLOT);
3488 __ bind(&not_heap_number);
3490 Label not_string, slow_string;
3491 __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
3492 // Check if string has a cached array index.
3493 __ ld(a2, FieldMemOperand(a0, String::kHashFieldOffset));
3494 __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
3495 __ Branch(&slow_string, ne, at, Operand(zero_reg));
3496 __ IndexFromHash(a2, a0);
3497 __ Ret(USE_DELAY_SLOT);
3499 __ bind(&slow_string);
3500 __ push(a0); // Push argument.
3501 __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
3502 __ bind(&not_string);
3505 __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
3506 __ Ret(USE_DELAY_SLOT);
3507 __ ld(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));
3508 __ bind(&not_oddball);
3510 __ push(a0); // Push argument.
3511 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
3515 void StringHelper::GenerateFlatOneByteStringEquals(
3516 MacroAssembler* masm, Register left, Register right, Register scratch1,
3517 Register scratch2, Register scratch3) {
3518 Register length = scratch1;
3521 Label strings_not_equal, check_zero_length;
3522 __ ld(length, FieldMemOperand(left, String::kLengthOffset));
3523 __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
3524 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
3525 __ bind(&strings_not_equal);
3526 // Cannot put li in the delay slot; it expands to multiple instructions.
3527 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
3530 // Check if the length is zero.
3531 Label compare_chars;
3532 __ bind(&check_zero_length);
3533 STATIC_ASSERT(kSmiTag == 0);
3534 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
3535 DCHECK(is_int16((intptr_t)Smi::FromInt(EQUAL)));
3536 __ Ret(USE_DELAY_SLOT);
3537 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3539 // Compare characters.
3540 __ bind(&compare_chars);
3542 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
3543 v0, &strings_not_equal);
3545 // Characters are equal.
3546 __ Ret(USE_DELAY_SLOT);
3547 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3551 void StringHelper::GenerateCompareFlatOneByteStrings(
3552 MacroAssembler* masm, Register left, Register right, Register scratch1,
3553 Register scratch2, Register scratch3, Register scratch4) {
3554 Label result_not_equal, compare_lengths;
3555 // Find minimum length and length difference.
3556 __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
3557 __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
3558 __ Dsubu(scratch3, scratch1, Operand(scratch2));
3559 Register length_delta = scratch3;
3560 __ slt(scratch4, scratch2, scratch1);
3561 __ Movn(scratch1, scratch2, scratch4);
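// scratch4 = (right length < left length); Movn then leaves the smaller of
// the two lengths in scratch1 (min_length).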
3562 Register min_length = scratch1;
3563 STATIC_ASSERT(kSmiTag == 0);
3564 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
3567 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3568 scratch4, v0, &result_not_equal);
3570 // Compare lengths - strings up to min-length are equal.
3571 __ bind(&compare_lengths);
3572 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3573 // Use length_delta as result if it's zero.
3574 __ mov(scratch2, length_delta);
3575 __ mov(scratch4, zero_reg);
3576 __ mov(v0, zero_reg);
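// Fall through into result_not_equal: with scratch2 = length_delta and
// scratch4 = 0, the comparisons below turn the length difference into
// LESS, EQUAL or GREATER.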
3578 __ bind(&result_not_equal);
3579 // Conditionally update the result based on either length_delta or
3580 // the last comparison performed in the loop above.
3582 __ Branch(&ret, eq, scratch2, Operand(scratch4));
3583 __ li(v0, Operand(Smi::FromInt(GREATER)));
3584 __ Branch(&ret, gt, scratch2, Operand(scratch4));
3585 __ li(v0, Operand(Smi::FromInt(LESS)));
3591 void StringHelper::GenerateOneByteCharsCompareLoop(
3592 MacroAssembler* masm, Register left, Register right, Register length,
3593 Register scratch1, Register scratch2, Register scratch3,
3594 Label* chars_not_equal) {
3595 // Change index to run from -length to -1 by adding length to string
3596 // start. This means that the loop ends when index reaches zero, which
3597 // doesn't need an additional compare.
3598 __ SmiUntag(length);
3599 __ Daddu(scratch1, length,
3600 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3601 __ Daddu(left, left, Operand(scratch1));
3602 __ Daddu(right, right, Operand(scratch1));
3603 __ Dsubu(length, zero_reg, length);
3604 Register index = length; // index = -length;
3610 __ Daddu(scratch3, left, index);
3611 __ lbu(scratch1, MemOperand(scratch3));
3612 __ Daddu(scratch3, right, index);
3613 __ lbu(scratch2, MemOperand(scratch3));
3614 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
3615 __ Daddu(index, index, 1);
3616 __ Branch(&loop, ne, index, Operand(zero_reg));
3620 void StringCompareStub::Generate(MacroAssembler* masm) {
3623 Counters* counters = isolate()->counters();
3625 // Stack frame on entry.
3626 // sp[0]: right string
3627 // sp[8]: left string
3628 __ ld(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
3629 __ ld(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
3632 __ Branch(&not_same, ne, a0, Operand(a1));
3633 STATIC_ASSERT(EQUAL == 0);
3634 STATIC_ASSERT(kSmiTag == 0);
3635 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3636 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
3641 // Check that both objects are sequential one-byte strings.
3642 __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
3644 // Compare flat one-byte strings natively. Remove arguments from the stack first.
3645 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
3646 __ Daddu(sp, sp, Operand(2 * kPointerSize));
3647 StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, a4, a5);
3650 __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
3654 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3655 // ----------- S t a t e -------------
3658 // -- ra : return address
3659 // -----------------------------------
3661 // Load a2 with the allocation site. We stick an undefined dummy value here
3662 // and replace it with the real allocation site later when we instantiate this
3663 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3664 __ li(a2, handle(isolate()->heap()->undefined_value()));
3666 // Make sure that we actually patched the allocation site.
3667 if (FLAG_debug_code) {
3668 __ And(at, a2, Operand(kSmiTagMask));
3669 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
3670 __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
3671 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3672 __ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
3675 // Tail call into the stub that handles binary operations with allocation
3677 BinaryOpWithAllocationSiteStub stub(isolate(), state());
3678 __ TailCallStub(&stub);
3682 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3683 DCHECK(state() == CompareICState::SMI);
3686 __ JumpIfNotSmi(a2, &miss);
3688 if (GetCondition() == eq) {
3689 // For equality we do not care about the sign of the result.
3690 __ Ret(USE_DELAY_SLOT);
3691 __ Dsubu(v0, a0, a1);
3693 // Untag before subtracting to avoid handling overflow.
3696 __ Ret(USE_DELAY_SLOT);
3697 __ Dsubu(v0, a1, a0);
3705 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3706 DCHECK(state() == CompareICState::NUMBER);
3709 Label unordered, maybe_undefined1, maybe_undefined2;
3712 if (left() == CompareICState::SMI) {
3713 __ JumpIfNotSmi(a1, &miss);
3715 if (right() == CompareICState::SMI) {
3716 __ JumpIfNotSmi(a0, &miss);
3719 // Inlining the double comparison and falling back to the general compare
3720 // stub if NaN is involved.
3721 // Load left and right operand.
3722 Label done, left, left_smi, right_smi;
3723 __ JumpIfSmi(a0, &right_smi);
3724 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3726 __ Dsubu(a2, a0, Operand(kHeapObjectTag));
3727 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
3729 __ bind(&right_smi);
3730 __ SmiUntag(a2, a0); // Can't clobber a0 yet.
3731 FPURegister single_scratch = f6;
3732 __ mtc1(a2, single_scratch);
3733 __ cvt_d_w(f2, single_scratch);
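// The untagged smi in a2 has been converted to a double in f2 for the FPU
// comparison below.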
3736 __ JumpIfSmi(a1, &left_smi);
3737 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3739 __ Dsubu(a2, a1, Operand(kHeapObjectTag));
3740 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
3743 __ SmiUntag(a2, a1); // Can't clobber a1 yet.
3744 single_scratch = f8;
3745 __ mtc1(a2, single_scratch);
3746 __ cvt_d_w(f0, single_scratch);
3750 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
3751 Label fpu_eq, fpu_lt;
3752 // Test if equal, and also handle the unordered/NaN case.
3753 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
3755 // Test if less (unordered case is already handled).
3756 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
3758 // Otherwise it's greater, so just fall through and return.
3759 DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
3760 __ Ret(USE_DELAY_SLOT);
3761 __ li(v0, Operand(GREATER));
3764 __ Ret(USE_DELAY_SLOT);
3765 __ li(v0, Operand(EQUAL));
3768 __ Ret(USE_DELAY_SLOT);
3769 __ li(v0, Operand(LESS));
3771 __ bind(&unordered);
3772 __ bind(&generic_stub);
3773 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3774 CompareICState::GENERIC, CompareICState::GENERIC);
3775 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3777 __ bind(&maybe_undefined1);
3778 if (Token::IsOrderedRelationalCompareOp(op())) {
3779 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3780 __ Branch(&miss, ne, a0, Operand(at));
3781 __ JumpIfSmi(a1, &unordered);
3782 __ GetObjectType(a1, a2, a2);
3783 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
3787 __ bind(&maybe_undefined2);
3788 if (Token::IsOrderedRelationalCompareOp(op())) {
3789 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3790 __ Branch(&unordered, eq, a1, Operand(at));
3798 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3799 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3802 // Registers containing left and right operands respectively.
3804 Register right = a0;
3808 // Check that both operands are heap objects.
3809 __ JumpIfEitherSmi(left, right, &miss);
3811 // Check that both operands are internalized strings.
3812 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3813 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3814 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3815 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3816 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3817 __ Or(tmp1, tmp1, Operand(tmp2));
3818 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3819 __ Branch(&miss, ne, at, Operand(zero_reg));
3821 // Make sure a0 is non-zero. At this point input operands are
3822 // guaranteed to be non-zero.
3823 DCHECK(right.is(a0));
3824 STATIC_ASSERT(EQUAL == 0);
3825 STATIC_ASSERT(kSmiTag == 0);
3827 // Internalized strings are compared by identity.
3828 __ Ret(ne, left, Operand(right));
3829 DCHECK(is_int16(EQUAL));
3830 __ Ret(USE_DELAY_SLOT);
3831 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3838 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3839 DCHECK(state() == CompareICState::UNIQUE_NAME);
3840 DCHECK(GetCondition() == eq);
3843 // Registers containing left and right operands respectively.
3845 Register right = a0;
3849 // Check that both operands are heap objects.
3850 __ JumpIfEitherSmi(left, right, &miss);
3852 // Check that both operands are unique names. This leaves the instance
3853 // types loaded in tmp1 and tmp2.
3854 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3855 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3856 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3857 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3859 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
3860 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
3865 // Unique names are compared by identity.
3867 __ Branch(&done, ne, left, Operand(right));
3868 // Make sure a0 is non-zero. At this point input operands are
3869 // guaranteed to be non-zero.
3870 DCHECK(right.is(a0));
3871 STATIC_ASSERT(EQUAL == 0);
3872 STATIC_ASSERT(kSmiTag == 0);
3873 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3882 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3883 DCHECK(state() == CompareICState::STRING);
3886 bool equality = Token::IsEqualityOp(op());
3888 // Registers containing left and right operands respectively.
3890 Register right = a0;
3897 // Check that both operands are heap objects.
3898 __ JumpIfEitherSmi(left, right, &miss);
3900 // Check that both operands are strings. This leaves the instance
3901 // types loaded in tmp1 and tmp2.
3902 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3903 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3904 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3905 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3906 STATIC_ASSERT(kNotStringTag != 0);
3907 __ Or(tmp3, tmp1, tmp2);
3908 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
3909 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
3911 // Fast check for identical strings.
3912 Label left_ne_right;
3913 STATIC_ASSERT(EQUAL == 0);
3914 STATIC_ASSERT(kSmiTag == 0);
3915 __ Branch(&left_ne_right, ne, left, Operand(right));
3916 __ Ret(USE_DELAY_SLOT);
3917 __ mov(v0, zero_reg); // In the delay slot.
3918 __ bind(&left_ne_right);
3920 // Handle not identical strings.
3922 // Check that both strings are internalized strings. If they are, we're done
3923 // because we already know they are not identical. We know they are both strings.
3926 DCHECK(GetCondition() == eq);
3927 STATIC_ASSERT(kInternalizedTag == 0);
3928 __ Or(tmp3, tmp1, Operand(tmp2));
3929 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
3931 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
3932 // Make sure a0 is non-zero. At this point input operands are
3933 // guaranteed to be non-zero.
3934 DCHECK(right.is(a0));
3935 __ Ret(USE_DELAY_SLOT);
3936 __ mov(v0, a0); // In the delay slot.
3937 __ bind(&is_symbol);
3940 // Check that both strings are sequential one-byte.
3942 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
3945 // Compare flat one-byte strings. Returns when done.
3947 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
3950 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
3954 // Handle more complex cases in runtime.
3956 __ Push(left, right);
3958 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3960 __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
3968 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3969 DCHECK(state() == CompareICState::OBJECT);
3971 __ And(a2, a1, Operand(a0));
3972 __ JumpIfSmi(a2, &miss);
3974 __ GetObjectType(a0, a2, a2);
3975 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3976 __ GetObjectType(a1, a2, a2);
3977 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
3979 DCHECK(GetCondition() == eq);
3980 __ Ret(USE_DELAY_SLOT);
3981 __ dsubu(v0, a0, a1);
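// For equality the exact value does not matter: v0 is zero (EQUAL) exactly
// when both operands are the same object.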
3988 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3990 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
3992 __ JumpIfSmi(a2, &miss);
3993 __ GetWeakValue(a4, cell);
3994 __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
3995 __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
3996 __ Branch(&miss, ne, a2, Operand(a4));
3997 __ Branch(&miss, ne, a3, Operand(a4));
3999 __ Ret(USE_DELAY_SLOT);
4000 __ dsubu(v0, a0, a1);
4007 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
4009 // Call the runtime system in a fresh internal frame.
4010 ExternalReference miss =
4011 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
4012 FrameScope scope(masm, StackFrame::INTERNAL);
4014 __ Push(ra, a1, a0);
4015 __ li(a4, Operand(Smi::FromInt(op())));
4016 __ daddiu(sp, sp, -kPointerSize);
4017 __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
4018 __ sd(a4, MemOperand(sp)); // In the delay slot.
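// The op() smi stored above completes the three stack arguments
// (left, right, op) expected by the CompareIC miss handler.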
4019 // Compute the entry point of the rewritten stub.
4020 __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
4021 // Restore registers.
4028 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4029 // Make place for arguments to fit C calling convention. Most of the callers
4030 // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
4031 // so they handle stack restoring and we don't have to do that here.
4032 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
4033 // kCArgsSlotsSize stack space after the call.
4034 __ daddiu(sp, sp, -kCArgsSlotsSize);
4035 // Place the return address on the stack, making the call
4036 // GC safe. The RegExp backend also relies on this.
4037 __ sd(ra, MemOperand(sp, kCArgsSlotsSize));
4038 __ Call(t9); // Call the C++ function.
4039 __ ld(t9, MemOperand(sp, kCArgsSlotsSize));
4041 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
4042 // In case of an error the return address may point to a memory area
4043 // filled with kZapValue by the GC.
4044 // Dereference the address and check for this.
4045 __ Uld(a4, MemOperand(t9));
4046 __ Assert(ne, kReceivedInvalidReturnAddress, a4,
4047 Operand(reinterpret_cast<uint64_t>(kZapValue)));
4053 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4056 reinterpret_cast<intptr_t>(GetCode().location());
4057 __ Move(t9, target);
4058 __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
4063 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4067 Register properties,
4069 Register scratch0) {
4070 DCHECK(name->IsUniqueName());
4071 // If the names of the slots probed for the hash value (probes 1 to
4072 // kProbes - 1) are not equal to the name, and the kProbes-th slot is unused
4073 // (its name is the undefined value), then the hash table is guaranteed not
4074 // to contain the property. This holds even if some slots represent deleted
4075 // properties (their names are the hole value).
4076 for (int i = 0; i < kInlinedProbes; i++) {
4077 // scratch0 points to properties hash.
4078 // Compute the masked index: (hash + i + i * i) & mask.
4079 Register index = scratch0;
4080 // Capacity is smi 2^n.
4081 __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset));
4082 __ Dsubu(index, index, Operand(1));
4083 __ And(index, index,
4084 Operand(name->Hash() + NameDictionary::GetProbeOffset(i)));
4086 // Scale the index by multiplying by the entry size.
4087 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
4088 __ dsll(at, index, 1);
4089 __ Daddu(index, index, at); // index *= 3.
4091 Register entity_name = scratch0;
4092 // Having undefined at this place means the name is not contained.
4093 DCHECK_EQ(kSmiTagSize, 1);
4094 Register tmp = properties;
4096 __ dsll(scratch0, index, kPointerSizeLog2);
4097 __ Daddu(tmp, properties, scratch0);
4098 __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4100 DCHECK(!tmp.is(entity_name));
4101 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
4102 __ Branch(done, eq, entity_name, Operand(tmp));
4104 // Load the hole ready for use below:
4105 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
4107 // Stop if the property was found.
4108 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
4111 __ Branch(&good, eq, entity_name, Operand(tmp));
4113 // Check if the entry name is not a unique name.
4114 __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4116 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4117 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
4120 // Restore the properties.
4122 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4125 const int spill_mask =
4126 (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() |
4127 a2.bit() | a1.bit() | a0.bit() | v0.bit());
4129 __ MultiPush(spill_mask);
4130 __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4131 __ li(a1, Operand(Handle<Name>(name)));
4132 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
4135 __ MultiPop(spill_mask);
4137 __ Branch(done, eq, at, Operand(zero_reg));
4138 __ Branch(miss, ne, at, Operand(zero_reg));
4142 // Probe the name dictionary in the |elements| register. Jump to the
4143 // |done| label if a property with the given name is found. Jump to
4144 // the |miss| label otherwise.
4145 // If the lookup was successful |scratch2| will be equal to elements + kPointerSize * index.
4146 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4152 Register scratch2) {
4153 DCHECK(!elements.is(scratch1));
4154 DCHECK(!elements.is(scratch2));
4155 DCHECK(!name.is(scratch1));
4156 DCHECK(!name.is(scratch2));
4158 __ AssertName(name);
4160 // Compute the capacity mask.
4161 __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset));
4162 __ SmiUntag(scratch1);
4163 __ Dsubu(scratch1, scratch1, Operand(1));
4165 // Generate an unrolled loop that performs a few probes before
4166 // giving up. Measurements done on Gmail indicate that 2 probes
4167 // cover ~93% of loads from dictionaries.
4168 for (int i = 0; i < kInlinedProbes; i++) {
4169 // Compute the masked index: (hash + i + i * i) & mask.
4170 __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4172 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4173 // the hash in a separate instruction. The value hash + i + i * i is right
4174 // shifted in the And instruction that follows.
4175 DCHECK(NameDictionary::GetProbeOffset(i) <
4176 1 << (32 - Name::kHashFieldOffset));
4177 __ Daddu(scratch2, scratch2, Operand(
4178 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4180 __ dsrl(scratch2, scratch2, Name::kHashShift);
4181 __ And(scratch2, scratch1, scratch2);
4183 // Scale the index by multiplying by the element size.
4184 DCHECK(NameDictionary::kEntrySize == 3);
4185 // scratch2 = scratch2 * 3.
4187 __ dsll(at, scratch2, 1);
4188 __ Daddu(scratch2, scratch2, at);
4190 // Check if the key is identical to the name.
4191 __ dsll(at, scratch2, kPointerSizeLog2);
4192 __ Daddu(scratch2, elements, at);
4193 __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
4194 __ Branch(done, eq, name, Operand(at));
4197 const int spill_mask =
4198 (ra.bit() | a6.bit() | a5.bit() | a4.bit() |
4199 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
4200 ~(scratch1.bit() | scratch2.bit());
4202 __ MultiPush(spill_mask);
4204 DCHECK(!elements.is(a1));
4206 __ Move(a0, elements);
4208 __ Move(a0, elements);
4211 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4213 __ mov(scratch2, a2);
4215 __ MultiPop(spill_mask);
4217 __ Branch(done, ne, at, Operand(zero_reg));
4218 __ Branch(miss, eq, at, Operand(zero_reg));
4222 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4223 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4224 // we cannot call anything that could cause a GC from this stub.
4226 // result: NameDictionary to probe
4228 // dictionary: NameDictionary to probe.
4229 // index: will hold an index of entry if lookup is successful.
4230 // might alias with result_.
4232 // result_ is zero if the lookup failed, non-zero otherwise.
4234 Register result = v0;
4235 Register dictionary = a0;
4237 Register index = a2;
4240 Register undefined = a5;
4241 Register entry_key = a6;
4243 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4245 __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
4247 __ Dsubu(mask, mask, Operand(1));
4249 __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4251 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4253 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4254 // Compute the masked index: (hash + i + i * i) & mask.
4255 // Capacity is smi 2^n.
4257 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4258 // the hash in a separate instruction. The value hash + i + i * i is right
4259 // shifted in the And instruction that follows.
4260 DCHECK(NameDictionary::GetProbeOffset(i) <
4261 1 << (32 - Name::kHashFieldOffset));
4262 __ Daddu(index, hash, Operand(
4263 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4265 __ mov(index, hash);
4267 __ dsrl(index, index, Name::kHashShift);
4268 __ And(index, mask, index);
4270 // Scale the index by multiplying by the entry size.
4271 DCHECK(NameDictionary::kEntrySize == 3);
4274 __ dsll(index, index, 1);
4275 __ Daddu(index, index, at);
4278 DCHECK_EQ(kSmiTagSize, 1);
4279 __ dsll(index, index, kPointerSizeLog2);
4280 __ Daddu(index, index, dictionary);
4281 __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
4283 // Having undefined at this place means the name is not contained.
4284 __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
4286 // Stop if the property was found.
4287 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
4289 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
4290 // Check if the entry name is not a unique name.
4291 __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4293 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4294 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
4298 __ bind(&maybe_in_dictionary);
4299 // If we are doing negative lookup then probing failure should be
4300 // treated as a lookup success. For positive lookup probing failure
4301 // should be treated as lookup failure.
4302 if (mode() == POSITIVE_LOOKUP) {
4303 __ Ret(USE_DELAY_SLOT);
4304 __ mov(result, zero_reg);
4307 __ bind(&in_dictionary);
4308 __ Ret(USE_DELAY_SLOT);
4311 __ bind(&not_in_dictionary);
4312 __ Ret(USE_DELAY_SLOT);
4313 __ mov(result, zero_reg);
4317 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4319 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4321 // Hydrogen code stubs need stub2 at snapshot time.
4322 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4327 // Takes the input in 3 registers: address_, value_, and object_. A pointer to
4328 // the value has just been written into the object; now this stub makes sure
4329 // we keep the GC informed. The word in the object where the value has been
4330 // written is in the address register.
4331 void RecordWriteStub::Generate(MacroAssembler* masm) {
4332 Label skip_to_incremental_noncompacting;
4333 Label skip_to_incremental_compacting;
4335 // The first two branch+nop instructions are generated with labels so as to
4336 // get the offset fixed up correctly by the bind(Label*) call. We patch it
4337 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
4338 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
4339 // incremental heap marking.
4340 // See RecordWriteStub::Patch for details.
4341 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
4343 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
4346 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4347 __ RememberedSetHelper(object(),
4350 save_fp_regs_mode(),
4351 MacroAssembler::kReturnAtEnd);
4355 __ bind(&skip_to_incremental_noncompacting);
4356 GenerateIncremental(masm, INCREMENTAL);
4358 __ bind(&skip_to_incremental_compacting);
4359 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4361 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4362 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4364 PatchBranchIntoNop(masm, 0);
4365 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
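// Both branches emitted above are turned into nops here, so the stub starts
// out in STORE_BUFFER_ONLY mode; incremental marking patches them back into
// taken branches when it is activated.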
4369 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4372 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4373 Label dont_need_remembered_set;
4375 __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
4376 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4378 &dont_need_remembered_set);
4380 __ CheckPageFlag(regs_.object(),
4382 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4384 &dont_need_remembered_set);
4386 // First notify the incremental marker if necessary, then update the remembered set.
4388 CheckNeedsToInformIncrementalMarker(
4389 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4390 InformIncrementalMarker(masm);
4391 regs_.Restore(masm);
4392 __ RememberedSetHelper(object(),
4395 save_fp_regs_mode(),
4396 MacroAssembler::kReturnAtEnd);
4398 __ bind(&dont_need_remembered_set);
4401 CheckNeedsToInformIncrementalMarker(
4402 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4403 InformIncrementalMarker(masm);
4404 regs_.Restore(masm);
4409 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4410 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
4411 int argument_count = 3;
4412 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4414 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4415 DCHECK(!address.is(regs_.object()));
4416 DCHECK(!address.is(a0));
4417 __ Move(address, regs_.address());
4418 __ Move(a0, regs_.object());
4419 __ Move(a1, address);
4420 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4422 AllowExternalCallThatCantCauseGC scope(masm);
4424 ExternalReference::incremental_marking_record_write_function(isolate()),
4426 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
4430 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4431 MacroAssembler* masm,
4432 OnNoNeedToInformIncrementalMarker on_no_need,
4435 Label need_incremental;
4436 Label need_incremental_pop_scratch;
4438 __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4439 __ ld(regs_.scratch1(),
4440 MemOperand(regs_.scratch0(),
4441 MemoryChunk::kWriteBarrierCounterOffset));
4442 __ Dsubu(regs_.scratch1(), regs_.scratch1(), Operand(1));
4443 __ sd(regs_.scratch1(),
4444 MemOperand(regs_.scratch0(),
4445 MemoryChunk::kWriteBarrierCounterOffset));
4446 __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
4448 // Let's look at the color of the object: If it is not black we don't have
4449 // to inform the incremental marker.
4450 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4452 regs_.Restore(masm);
4453 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4454 __ RememberedSetHelper(object(),
4457 save_fp_regs_mode(),
4458 MacroAssembler::kReturnAtEnd);
4465 // Get the value from the slot.
4466 __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
4468 if (mode == INCREMENTAL_COMPACTION) {
4469 Label ensure_not_white;
4471 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4472 regs_.scratch1(), // Scratch.
4473 MemoryChunk::kEvacuationCandidateMask,
4477 __ CheckPageFlag(regs_.object(),
4478 regs_.scratch1(), // Scratch.
4479 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4483 __ bind(&ensure_not_white);
4486 // We need extra registers for this, so we push the object and the address
4487 // register temporarily.
4488 __ Push(regs_.object(), regs_.address());
4489 __ EnsureNotWhite(regs_.scratch0(), // The value.
4490 regs_.scratch1(), // Scratch.
4491 regs_.object(), // Scratch.
4492 regs_.address(), // Scratch.
4493 &need_incremental_pop_scratch);
4494 __ Pop(regs_.object(), regs_.address());
4496 regs_.Restore(masm);
4497 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4498 __ RememberedSetHelper(object(),
4501 save_fp_regs_mode(),
4502 MacroAssembler::kReturnAtEnd);
4507 __ bind(&need_incremental_pop_scratch);
4508 __ Pop(regs_.object(), regs_.address());
4510 __ bind(&need_incremental);
4512 // Fall through when we need to inform the incremental marker.
4516 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4517 // ----------- S t a t e -------------
4518 // -- a0 : element value to store
4519 // -- a3 : element index as smi
4520 // -- sp[0] : array literal index in function as smi
4521 // -- sp[8] : array literal
4522 // clobbers a1, a2, a4
4523 // -----------------------------------
4526 Label double_elements;
4528 Label slow_elements;
4529 Label fast_elements;
4531 // Get array literal index, array literal and its map.
4532 __ ld(a4, MemOperand(sp, 0 * kPointerSize));
4533 __ ld(a1, MemOperand(sp, 1 * kPointerSize));
4534 __ ld(a2, FieldMemOperand(a1, JSObject::kMapOffset));
4536 __ CheckFastElements(a2, a5, &double_elements);
4537 // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
4538 __ JumpIfSmi(a0, &smi_element);
4539 __ CheckFastSmiElements(a2, a5, &fast_elements);
4541 // Storing into the array literal requires an elements transition. Call into
4543 __ bind(&slow_elements);
4545 __ Push(a1, a3, a0);
4546 __ ld(a5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4547 __ ld(a5, FieldMemOperand(a5, JSFunction::kLiteralsOffset));
4549 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4551 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4552 __ bind(&fast_elements);
4553 __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
4554 __ SmiScale(a6, a3, kPointerSizeLog2);
4555 __ Daddu(a6, a5, a6);
4556 __ Daddu(a6, a6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4557 __ sd(a0, MemOperand(a6, 0));
4558 // Update the write barrier for the array store.
4559 __ RecordWrite(a5, a6, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
4560 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4561 __ Ret(USE_DELAY_SLOT);
4564 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4565 // and value is Smi.
4566 __ bind(&smi_element);
4567 __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
4568 __ SmiScale(a6, a3, kPointerSizeLog2);
4569 __ Daddu(a6, a5, a6);
4570 __ sd(a0, FieldMemOperand(a6, FixedArray::kHeaderSize));
4571 __ Ret(USE_DELAY_SLOT);
4574 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
4575 __ bind(&double_elements);
4576 __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
4577 __ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, &slow_elements);
4578 __ Ret(USE_DELAY_SLOT);
4583 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4584 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4585 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4586 int parameter_count_offset =
4587 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4588 __ ld(a1, MemOperand(fp, parameter_count_offset));
4589 if (function_mode() == JS_FUNCTION_STUB_MODE) {
4590 __ Daddu(a1, a1, Operand(1));
4592 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4593 __ dsll(a1, a1, kPointerSizeLog2);
4594 __ Ret(USE_DELAY_SLOT);
4595 __ Daddu(sp, sp, a1);
4599 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4600 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4601 VectorRawLoadStub stub(isolate(), state());
4602 stub.GenerateForTrampoline(masm);
4606 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4607 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4608 VectorRawKeyedLoadStub stub(isolate());
4609 stub.GenerateForTrampoline(masm);
4613 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
4614 EmitLoadTypeFeedbackVector(masm, a2);
4615 CallICStub stub(isolate(), state());
4616 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4620 void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
4621 EmitLoadTypeFeedbackVector(masm, a2);
4622 CallIC_ArrayStub stub(isolate(), state());
4623 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4627 void VectorRawLoadStub::Generate(MacroAssembler* masm) {
4628 GenerateImpl(masm, false);
4632 void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
4633 GenerateImpl(masm, true);
4637 static void HandleArrayCases(MacroAssembler* masm, Register receiver,
4638 Register key, Register vector, Register slot,
4639 Register feedback, Register scratch1,
4640 Register scratch2, Register scratch3,
4641 bool is_polymorphic, Label* miss) {
4642 // feedback initially contains the feedback array
4643 Label next_loop, prepare_next;
4644 Label load_smi_map, compare_map;
4645 Label start_polymorphic;
4647 Register receiver_map = scratch1;
4648 Register cached_map = scratch2;
4650 // Receiver might not be a heap object.
4651 __ JumpIfSmi(receiver, &load_smi_map);
4652 __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
4653 __ bind(&compare_map);
4655 FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
4656 __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
4657 __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
4658 // Found; now call the handler.
4659 Register handler = feedback;
4660 __ ld(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
4661 __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
4664 Register length = scratch3;
4665 __ bind(&start_polymorphic);
4666 __ ld(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
4667 if (!is_polymorphic) {
4668 // If the IC could be monomorphic we have to make sure we don't go past the
4669 // end of the feedback array.
4670 __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
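// A length of 2 means there is just the one (map, handler) pair already
// checked above, so there is nothing more to try.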
4673 Register too_far = length;
4674 Register pointer_reg = feedback;
4676 // The feedback array is laid out as
4677 //   | map | len | wm0 | h0 | wm1 | h1 | ... | wmN | hN |
4678 // where each wmX is a weak cell holding a map and hX is the matching handler.
4682 // pointer_reg (aka feedback) walks the (wm, h) pairs and too_far (scratch3)
4683 // marks the end of the array.
4684 // We also need receiver_map (aka scratch1) and use cached_map (scratch2) to
4685 // look in the weak map values.
4686 __ SmiScale(too_far, length, kPointerSizeLog2);
4687 __ Daddu(too_far, feedback, Operand(too_far));
4688 __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4689 __ Daddu(pointer_reg, feedback,
4690 Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
4692 __ bind(&next_loop);
4693 __ ld(cached_map, MemOperand(pointer_reg));
4694 __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
4695 __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
4696 __ ld(handler, MemOperand(pointer_reg, kPointerSize));
4697 __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
4700 __ bind(&prepare_next);
4701 __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
4702 __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
4704 // We exhausted our array of map handler pairs.
4707 __ bind(&load_smi_map);
4708 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
4709 __ Branch(&compare_map);
4713 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
4714 Register key, Register vector, Register slot,
4715 Register weak_cell, Register scratch,
4717 // feedback initially contains the feedback array
4718 Label compare_smi_map;
4719 Register receiver_map = scratch;
4720 Register cached_map = weak_cell;
4722 // Move the weak map into the weak_cell register.
4723 __ ld(cached_map, FieldMemOperand(weak_cell, WeakCell::kValueOffset));
4725 // Receiver might not be a heap object.
4726 __ JumpIfSmi(receiver, &compare_smi_map);
4727 __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
4728 __ Branch(miss, ne, cached_map, Operand(receiver_map));
4730 Register handler = weak_cell;
4731 __ SmiScale(handler, slot, kPointerSizeLog2);
4732 __ Daddu(handler, vector, Operand(handler));
4734 FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
4735 __ Daddu(t9, handler, Code::kHeaderSize - kHeapObjectTag);
4738 // In microbenchmarks, it made sense to unroll this code so that the call to
4739 // the handler is duplicated for a HeapObject receiver and a Smi receiver.
4740 // TODO(mvstanton): does this hold on ARM?
4741 __ bind(&compare_smi_map);
4742 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4743 __ Branch(miss, ne, weak_cell, Operand(at));
4744 __ SmiScale(handler, slot, kPointerSizeLog2);
4745 __ Daddu(handler, vector, Operand(handler));
4747 FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
4748 __ Daddu(t9, handler, Code::kHeaderSize - kHeapObjectTag);
4753 void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4754 Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // a1
4755 Register name = VectorLoadICDescriptor::NameRegister(); // a2
4756 Register vector = VectorLoadICDescriptor::VectorRegister(); // a3
4757 Register slot = VectorLoadICDescriptor::SlotRegister(); // a0
4758 Register feedback = a4;
4759 Register scratch1 = a5;
4761 __ SmiScale(feedback, slot, kPointerSizeLog2);
4762 __ Daddu(feedback, vector, Operand(feedback));
4763 __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
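// feedback now holds this slot's entry from the type feedback vector: a
// WeakCell, a FixedArray of (map, handler) pairs, or the megamorphic sentinel.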
4765 // Is it a weak cell?
4767 Label not_array, smi_key, key_okay, miss;
4768 __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
4769 __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
4770 __ Branch(&try_array, ne, scratch1, Operand(at));
4771 HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback, scratch1,
4774 // Is it a fixed array?
4775 __ bind(&try_array);
4776 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4777 __ Branch(&not_array, ne, scratch1, Operand(at));
4778 HandleArrayCases(masm, receiver, name, vector, slot, feedback, scratch1, a6,
4781 __ bind(&not_array);
4782 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
4783 __ Branch(&miss, ne, feedback, Operand(at));
4784 Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
4785 Code::ComputeHandlerFlags(Code::LOAD_IC));
4786 masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
4787 false, receiver, name, feedback,
4791 LoadIC::GenerateMiss(masm);
4795 void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
4796 GenerateImpl(masm, false);
4800 void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
4801 GenerateImpl(masm, true);
4805 void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4806 Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // a1
4807 Register key = VectorLoadICDescriptor::NameRegister(); // a2
4808 Register vector = VectorLoadICDescriptor::VectorRegister(); // a3
4809 Register slot = VectorLoadICDescriptor::SlotRegister(); // a0
4810 Register feedback = a4;
4811 Register scratch1 = a5;
4813 __ SmiScale(feedback, slot, kPointerSizeLog2);
4814 __ Daddu(feedback, vector, Operand(feedback));
4815 __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
4817 // Is it a weak cell?
4819 Label not_array, smi_key, key_okay, miss;
4820 __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
4821 __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
4822 __ Branch(&try_array, ne, scratch1, Operand(at));
4823 __ JumpIfNotSmi(key, &miss);
4824 HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, scratch1,
4827 __ bind(&try_array);
4828 // Is it a fixed array?
4829 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4830 __ Branch(&not_array, ne, scratch1, Operand(at));
4831 // We have a polymorphic element handler.
4832 __ JumpIfNotSmi(key, &miss);
4834 Label polymorphic, try_poly_name;
4835 __ bind(&polymorphic);
4836 HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, a6,
4839 __ bind(&not_array);
4841 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
4842 __ Branch(&try_poly_name, ne, feedback, Operand(at));
4843 Handle<Code> megamorphic_stub =
4844 KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
4845 __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
4847 __ bind(&try_poly_name);
4848 // We might have a name in feedback, and a fixed array in the next slot.
4849 __ Branch(&miss, ne, key, Operand(feedback));
4850 // If the name comparison succeeded, we know we have a fixed array with
4851 // at least one map/handler pair.
4852 __ SmiScale(feedback, slot, kPointerSizeLog2);
4853 __ Daddu(feedback, vector, Operand(feedback));
4855 FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
4856 HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, a6,
4860 KeyedLoadIC::GenerateMiss(masm);
4864 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4865 if (masm->isolate()->function_entry_hook() != NULL) {
4866 ProfileEntryHookStub stub(masm->isolate());
4874 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4875 // The entry hook is a "push ra" instruction, followed by a call.
4876 // Note: on MIPS a "push" takes two instructions.
4877 const int32_t kReturnAddressDistanceFromFunctionStart =
4878 Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
4880 // This should contain all kJSCallerSaved registers.
4881 const RegList kSavedRegs =
4882 kJSCallerSaved | // Caller saved registers.
4883 s5.bit(); // Saved stack pointer.
4885 // We also save ra, so the count here is one higher than the mask indicates.
4886 const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
4888 // Save all caller-save registers as this may be called from anywhere.
4889 __ MultiPush(kSavedRegs | ra.bit());
4891 // Compute the function's address for the first argument.
4892 __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
4894 // The caller's return address is above the saved temporaries.
4895 // Grab that for the second argument to the hook.
4896 __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
4898 // Align the stack if necessary.
4899 int frame_alignment = masm->ActivationFrameAlignment();
4900 if (frame_alignment > kPointerSize) {
4902 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4903 __ And(sp, sp, Operand(-frame_alignment));
4906 __ Dsubu(sp, sp, kCArgsSlotsSize);
4907 #if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
4908 int64_t entry_hook =
4909 reinterpret_cast<int64_t>(isolate()->function_entry_hook());
4910 __ li(t9, Operand(entry_hook));
4912 // Under the simulator we need to indirect the entry hook through a
4913 // trampoline function at a known address.
4914 // It additionally takes an isolate as a third parameter.
4915 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4917 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4918 __ li(t9, Operand(ExternalReference(&dispatcher,
4919 ExternalReference::BUILTIN_CALL,
4922 // Call the C function through t9 to conform to the ABI for PIC.
4925 // Restore the stack pointer if needed.
4926 if (frame_alignment > kPointerSize) {
4929 __ Daddu(sp, sp, kCArgsSlotsSize);
4932 // Also pop ra to get Ret(0).
4933 __ MultiPop(kSavedRegs | ra.bit());
4939 static void CreateArrayDispatch(MacroAssembler* masm,
4940 AllocationSiteOverrideMode mode) {
4941 if (mode == DISABLE_ALLOCATION_SITES) {
4942 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4943 __ TailCallStub(&stub);
4944 } else if (mode == DONT_OVERRIDE) {
4945 int last_index = GetSequenceIndexFromFastElementsKind(
4946 TERMINAL_FAST_ELEMENTS_KIND);
4947 for (int i = 0; i <= last_index; ++i) {
4948 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4949 T stub(masm->isolate(), kind);
4950 __ TailCallStub(&stub, eq, a3, Operand(kind));
4953 // If we reached this point there is a problem.
4954 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4961 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4962 AllocationSiteOverrideMode mode) {
4963 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4964 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4965 // a0 - number of arguments
4966 // a1 - constructor?
4967 // sp[0] - last argument
4968 Label normal_sequence;
4969 if (mode == DONT_OVERRIDE) {
4970 DCHECK(FAST_SMI_ELEMENTS == 0);
4971 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
4972 DCHECK(FAST_ELEMENTS == 2);
4973 DCHECK(FAST_HOLEY_ELEMENTS == 3);
4974 DCHECK(FAST_DOUBLE_ELEMENTS == 4);
4975 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4977 // Is the low bit set? If so, we are holey and that is good.
4978 __ And(at, a3, Operand(1));
4979 __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
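// Holey kinds have odd values (see the DCHECKs above), so if the low bit is
// set the kind is already holey and no transition is needed.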
4981 // Look at the first argument.
4982 __ ld(a5, MemOperand(sp, 0));
4983 __ Branch(&normal_sequence, eq, a5, Operand(zero_reg));
4985 if (mode == DISABLE_ALLOCATION_SITES) {
4986 ElementsKind initial = GetInitialFastElementsKind();
4987 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4989 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4991 DISABLE_ALLOCATION_SITES);
4992 __ TailCallStub(&stub_holey);
4994 __ bind(&normal_sequence);
4995 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4997 DISABLE_ALLOCATION_SITES);
4998 __ TailCallStub(&stub);
4999 } else if (mode == DONT_OVERRIDE) {
5000 // We are going to create a holey array, but our kind is non-holey.
5001 // Fix kind and retry (only if we have an allocation site in the slot).
5002 __ Daddu(a3, a3, Operand(1));
5004 if (FLAG_debug_code) {
5005 __ ld(a5, FieldMemOperand(a2, 0));
5006 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
5007 __ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
5010 // Save the resulting elements kind in type info. We can't just store a3
5011 // in the AllocationSite::transition_info field because elements kind is
5012 // restricted to a portion of the field; the upper bits need to be left alone.
5013 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5014 __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5015 __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
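// Adding kFastElementsKindPackedToHoley as a smi bumps the packed kind kept
// in the low bits of the transition info to its holey counterpart without
// disturbing the upper bits.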
5016 __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5019 __ bind(&normal_sequence);
5020 int last_index = GetSequenceIndexFromFastElementsKind(
5021 TERMINAL_FAST_ELEMENTS_KIND);
5022 for (int i = 0; i <= last_index; ++i) {
5023 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5024 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
5025 __ TailCallStub(&stub, eq, a3, Operand(kind));
5028 // If we reached this point there is a problem.
5029 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5037 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
5038 int to_index = GetSequenceIndexFromFastElementsKind(
5039 TERMINAL_FAST_ELEMENTS_KIND);
5040 for (int i = 0; i <= to_index; ++i) {
5041 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5042 T stub(isolate, kind);
5044 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
5045 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
5052 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
5053 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5055 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5057 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5062 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
5064 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
5065 for (int i = 0; i < 2; i++) {
5066 // For internal arrays we only need a few things.
5067 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
5069 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
5071 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
5077 void ArrayConstructorStub::GenerateDispatchToArrayStub(
5078 MacroAssembler* masm,
5079 AllocationSiteOverrideMode mode) {
5080 if (argument_count() == ANY) {
5081 Label not_zero_case, not_one_case;
5083 __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
5084 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5086 __ bind(&not_zero_case);
5087 __ Branch(&not_one_case, gt, a0, Operand(1));
5088 CreateArrayDispatchOneArgument(masm, mode);
5090 __ bind(&not_one_case);
5091 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5092 } else if (argument_count() == NONE) {
5093 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5094 } else if (argument_count() == ONE) {
5095 CreateArrayDispatchOneArgument(masm, mode);
5096 } else if (argument_count() == MORE_THAN_ONE) {
5097 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5104 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5105 // ----------- S t a t e -------------
5106 // -- a0 : argc (only if argument_count() == ANY)
5107 // -- a1 : constructor
5108 // -- a2 : AllocationSite or undefined
5109 // -- a3 : original constructor
5110 // -- sp[0] : last argument
5111 // -----------------------------------
5113 if (FLAG_debug_code) {
5114 // The array construct code is only set for the global and natives
5115 // builtin Array functions which always have maps.
5117 // Initial map for the builtin Array function should be a map.
5118 __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5119 // Will both indicate a NULL and a Smi.
5120 __ SmiTst(a4, at);
5121 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
5122 at, Operand(zero_reg));
5123 __ GetObjectType(a4, a4, a5);
5124 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
5125 a5, Operand(MAP_TYPE));
5127 // We should either have undefined in a2 or a valid AllocationSite
5128 __ AssertUndefinedOrAllocationSite(a2, a4);
5132 __ Branch(&subclassing, ne, a1, Operand(a3));
5135 // Get the elements kind and case on that.
5136 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5137 __ Branch(&no_info, eq, a2, Operand(at));
5139 __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5140 __ SmiUntag(a3);
5141 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5142 __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
5143 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5145 __ bind(&no_info);
5146 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5149 __ bind(&subclassing);
5154 switch (argument_count()) {
5155 case ANY:
5156 case MORE_THAN_ONE:
5157 __ li(at, Operand(2));
5158 __ addu(a0, a0, at);
5159 break;
5160 case NONE:
5161 __ li(a0, Operand(2));
5162 break;
5163 case ONE:
5164 __ li(a0, Operand(3));
5165 break;
5166 }
5168 __ JumpToExternalReference(
5169 ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
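// Recap of the dispatch above (illustration, not generated code): when a2
// holds a real AllocationSite, the elements kind recorded in its
// transition_info is read back, untagged, masked with
// AllocationSite::ElementsKindBits::kMask, and used to select a per-kind
// stub. When a2 is undefined there is no feedback, so the
// DISABLE_ALLOCATION_SITES variants are used, and when the original
// constructor in a3 differs from the Array function in a1 the construction
// is handed off to Runtime::kArrayConstructorWithSubclassing instead.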
5173 void InternalArrayConstructorStub::GenerateCase(
5174 MacroAssembler* masm, ElementsKind kind) {
5176 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
5177 __ TailCallStub(&stub0, lo, a0, Operand(1));
5179 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
5180 __ TailCallStub(&stubN, hi, a0, Operand(1));
5182 if (IsFastPackedElementsKind(kind)) {
5183 // We might need to create a holey array;
5184 // look at the first argument.
5185 __ ld(at, MemOperand(sp, 0));
5187 InternalArraySingleArgumentConstructorStub
5188 stub1_holey(isolate(), GetHoleyElementsKind(kind));
5189 __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
5192 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
5193 __ TailCallStub(&stub1);
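// The dispatch above, as ordinary control flow (illustration only):
//
//   if (argc < 1) use InternalArrayNoArgumentConstructorStub(kind)
//   if (argc > 1) use InternalArrayNArgumentsConstructorStub(kind)
//   // argc == 1:
//   if (kind is packed && the single length argument is non-zero)
//                 use InternalArraySingleArgumentConstructorStub(holey kind)
//   else          use InternalArraySingleArgumentConstructorStub(kind)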
5197 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5198 // ----------- S t a t e -------------
5199 // -- a0 : argc
5200 // -- a1 : constructor
5201 // -- sp[0] : return address
5202 // -- sp[4] : last argument
5203 // -----------------------------------
5205 if (FLAG_debug_code) {
5206 // The array construct code is only set for the global and natives
5207 // builtin Array functions which always have maps.
5209 // Initial map for the builtin Array function should be a map.
5210 __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5211 // Will both indicate a NULL and a Smi.
5212 __ SmiTst(a3, at);
5213 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
5214 at, Operand(zero_reg));
5215 __ GetObjectType(a3, a3, a4);
5216 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
5217 a4, Operand(MAP_TYPE));
5220 // Figure out the right elements kind.
5221 __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5223 // Load the map's "bit field 2" into a3. We only need the first byte,
5224 // but the following bit field extraction takes care of that anyway.
5225 __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
5226 // Retrieve elements_kind from bit field 2.
5227 __ DecodeField<Map::ElementsKindBits>(a3);
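// DecodeField<Map::ElementsKindBits> above is the usual BitField extraction;
// over an already-loaded bit_field2 byte it is equivalent to (sketch):
//
//   int kind = (bit_field2 & Map::ElementsKindBits::kMask) >>
//              Map::ElementsKindBits::kShift;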
5229 if (FLAG_debug_code) {
5230 Label done;
5231 __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
5232 __ Assert(
5233 eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
5234 a3, Operand(FAST_HOLEY_ELEMENTS));
5235 __ bind(&done);
5238 Label fast_elements_case;
5239 __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
5240 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
5242 __ bind(&fast_elements_case);
5243 GenerateCase(masm, FAST_ELEMENTS);
5247 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
5248 int64_t offset = (ref0.address() - ref1.address());
5249 DCHECK(static_cast<int>(offset) == offset);
5250 return static_cast<int>(offset);
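// The offsets computed this way let the code below reach several isolate
// fields from a single base register, e.g. (sketch, mirroring the loads
// further down):
//
//   __ li(s3, Operand(next_address));
//   __ ld(s1, MemOperand(s3, kLimitOffset));  // handle_scope_limit_address
//
// The DCHECK above only guards that the distance between the two external
// references still fits in the int offset that MemOperand takes.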
5254 // Calls an API function. Allocates a HandleScope, extracts the returned value
5255 // from the handle, and propagates exceptions. Restores the context. stack_space
5256 // - space to be unwound on exit (includes the call JS arguments space and
5257 // the additional space allocated for the fast call).
5258 static void CallApiFunctionAndReturn(
5259 MacroAssembler* masm, Register function_address,
5260 ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
5261 MemOperand return_value_operand, MemOperand* context_restore_operand) {
5262 Isolate* isolate = masm->isolate();
5263 ExternalReference next_address =
5264 ExternalReference::handle_scope_next_address(isolate);
5265 const int kNextOffset = 0;
5266 const int kLimitOffset = AddressOffset(
5267 ExternalReference::handle_scope_limit_address(isolate), next_address);
5268 const int kLevelOffset = AddressOffset(
5269 ExternalReference::handle_scope_level_address(isolate), next_address);
5271 DCHECK(function_address.is(a1) || function_address.is(a2));
5273 Label profiler_disabled;
5274 Label end_profiler_check;
5275 __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
5276 __ lb(t9, MemOperand(t9, 0));
5277 __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
5279 // Additional parameter is the address of the actual callback.
5280 __ li(t9, Operand(thunk_ref));
5281 __ jmp(&end_profiler_check);
5283 __ bind(&profiler_disabled);
5284 __ mov(t9, function_address);
5285 __ bind(&end_profiler_check);
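// Net effect of the profiler check above (sketch): the call target becomes
//
//   t9 = *is_profiling_address ? thunk_ref         // thunk wraps the callback,
//                                                  // which it gets as the extra
//                                                  // parameter noted above
//                              : function_address; // call the callback directly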
5287 // Allocate HandleScope in callee-save registers.
5288 __ li(s3, Operand(next_address));
5289 __ ld(s0, MemOperand(s3, kNextOffset));
5290 __ ld(s1, MemOperand(s3, kLimitOffset));
5291 __ ld(s2, MemOperand(s3, kLevelOffset));
5292 __ Daddu(s2, s2, Operand(1));
5293 __ sd(s2, MemOperand(s3, kLevelOffset));
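// s0/s1/s2 now mirror the isolate's HandleScope data: next, limit, and the
// incremented level. Conceptually (sketch, not the real HandleScope code):
//
//   saved_next = next; saved_limit = limit; level++;   // entry (here)
//   ... call the API function ...
//   next = saved_next; level--;                        // exit
//   if (limit != saved_limit) DeleteExtensions();      // the scope grew
//
// which is exactly the restore sequence emitted after the call below.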
5295 if (FLAG_log_timer_events) {
5296 FrameScope frame(masm, StackFrame::MANUAL);
5297 __ PushSafepointRegisters();
5298 __ PrepareCallCFunction(1, a0);
5299 __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
5300 __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
5301 1);
5302 __ PopSafepointRegisters();
5305 // Native call returns to the DirectCEntry stub, which redirects to the
5306 // return address pushed on the stack (it could have moved after a GC).
5307 // The DirectCEntry stub itself is generated early and never moves.
5308 DirectCEntryStub stub(isolate);
5309 stub.GenerateCall(masm, t9);
5311 if (FLAG_log_timer_events) {
5312 FrameScope frame(masm, StackFrame::MANUAL);
5313 __ PushSafepointRegisters();
5314 __ PrepareCallCFunction(1, a0);
5315 __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
5316 __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
5317 1);
5318 __ PopSafepointRegisters();
5321 Label promote_scheduled_exception;
5322 Label delete_allocated_handles;
5323 Label leave_exit_frame;
5324 Label return_value_loaded;
5326 // Load value from ReturnValue.
5327 __ ld(v0, return_value_operand);
5328 __ bind(&return_value_loaded);
5330 // No more valid handles (the result handle was the last one). Restore
5331 // previous handle scope.
5332 __ sd(s0, MemOperand(s3, kNextOffset));
5333 if (__ emit_debug_code()) {
5334 __ ld(a1, MemOperand(s3, kLevelOffset));
5335 __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
5337 __ Dsubu(s2, s2, Operand(1));
5338 __ sd(s2, MemOperand(s3, kLevelOffset));
5339 __ ld(at, MemOperand(s3, kLimitOffset));
5340 __ Branch(&delete_allocated_handles, ne, s1, Operand(at));
5342 // Leave the API exit frame.
5343 __ bind(&leave_exit_frame);
5345 bool restore_context = context_restore_operand != NULL;
5346 if (restore_context) {
5347 __ ld(cp, *context_restore_operand);
5349 if (stack_space_offset != kInvalidStackOffset) {
5350 DCHECK(kCArgsSlotsSize == 0);
5351 __ ld(s0, MemOperand(sp, stack_space_offset));
5352 } else {
5353 __ li(s0, Operand(stack_space));
5355 __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
5356 stack_space_offset != kInvalidStackOffset);
5358 // Check if the function scheduled an exception.
5359 __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
5360 __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
5361 __ ld(a5, MemOperand(at));
5362 __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
5364 __ Ret();
5366 // Re-throw by promoting a scheduled exception.
5367 __ bind(&promote_scheduled_exception);
5368 __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
5370 // HandleScope limit has changed. Delete allocated extensions.
5371 __ bind(&delete_allocated_handles);
5372 __ sd(s1, MemOperand(s3, kLimitOffset));
5375 __ PrepareCallCFunction(1, s1);
5376 __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
5377 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
5378 1);
5380 __ jmp(&leave_exit_frame);
5384 static void CallApiFunctionStubHelper(MacroAssembler* masm,
5385 const ParameterCount& argc,
5386 bool return_first_arg,
5387 bool call_data_undefined) {
5388 // ----------- S t a t e -------------
5390 // -- a4 : call_data
5392 // -- a1 : api_function_address
5393 // -- a3 : number of arguments if argc is a register
5396 // -- sp[0] : last argument
5398 // -- sp[(argc - 1) * 8] : first argument
5399 // -- sp[argc * 8] : receiver
5400 // -----------------------------------
5402 Register callee = a0;
5403 Register call_data = a4;
5404 Register holder = a2;
5405 Register api_function_address = a1;
5406 Register context = cp;
5408 typedef FunctionCallbackArguments FCA;
5410 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5411 STATIC_ASSERT(FCA::kCalleeIndex == 5);
5412 STATIC_ASSERT(FCA::kDataIndex == 4);
5413 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5414 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5415 STATIC_ASSERT(FCA::kIsolateIndex == 1);
5416 STATIC_ASSERT(FCA::kHolderIndex == 0);
5417 STATIC_ASSERT(FCA::kArgsLength == 7);
5419 DCHECK(argc.is_immediate() || a3.is(argc.reg()));
5421 // Save context, callee and call data.
5422 __ Push(context, callee, call_data);
5423 // Load context from callee.
5424 __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5426 Register scratch = call_data;
5427 if (!call_data_undefined) {
5428 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5430 // Push return value and default return value.
5431 __ Push(scratch, scratch);
5432 __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
5433 // Push isolate and holder.
5434 __ Push(scratch, holder);
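// Assuming the Push helpers store their first operand at the highest address
// (which is what the FCA STATIC_ASSERTs above rely on), the implicit
// arguments now sit on the stack as:
//
//   sp + 0 * kPointerSize : holder                (FCA::kHolderIndex)
//   sp + 1 * kPointerSize : isolate               (FCA::kIsolateIndex)
//   sp + 2 * kPointerSize : return value default  (FCA::kReturnValueDefaultValueIndex)
//   sp + 3 * kPointerSize : return value          (FCA::kReturnValueOffset)
//   sp + 4 * kPointerSize : call data             (FCA::kDataIndex)
//   sp + 5 * kPointerSize : callee                (FCA::kCalleeIndex)
//   sp + 6 * kPointerSize : saved context         (FCA::kContextSaveIndex)
//
// and the next instruction captures this sp as
// FunctionCallbackInfo::implicit_args_.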
5436 // Prepare arguments.
5437 __ mov(scratch, sp);
5439 // Allocate the FunctionCallbackInfo structure in the arguments' space, since
5440 // it's not controlled by the GC.
5441 const int kApiStackSpace = 4;
5443 FrameScope frame_scope(masm, StackFrame::MANUAL);
5444 __ EnterExitFrame(false, kApiStackSpace);
5446 DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
5447 // a0 = FunctionCallbackInfo&
5448 // Arguments is after the return address.
5449 __ Daddu(a0, sp, Operand(1 * kPointerSize));
5450 // FunctionCallbackInfo::implicit_args_
5451 __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
5452 if (argc.is_immediate()) {
5453 // FunctionCallbackInfo::values_
5454 __ Daddu(at, scratch,
5455 Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
5456 __ sd(at, MemOperand(a0, 1 * kPointerSize));
5457 // FunctionCallbackInfo::length_ = argc
5458 __ li(at, Operand(argc.immediate()));
5459 __ sd(at, MemOperand(a0, 2 * kPointerSize));
5460 // FunctionCallbackInfo::is_construct_call_ = 0
5461 __ sd(zero_reg, MemOperand(a0, 3 * kPointerSize));
5462 } else {
5463 // FunctionCallbackInfo::values_
5464 __ dsll(at, argc.reg(), kPointerSizeLog2);
5465 __ Daddu(at, at, scratch);
5466 __ Daddu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
5467 __ sd(at, MemOperand(a0, 1 * kPointerSize));
5468 // FunctionCallbackInfo::length_ = argc
5469 __ sd(argc.reg(), MemOperand(a0, 2 * kPointerSize));
5470 // FunctionCallbackInfo::is_construct_call_
5471 __ Daddu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
5472 __ dsll(at, argc.reg(), kPointerSizeLog2);
5473 __ sd(at, MemOperand(a0, 3 * kPointerSize));
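// Note on the register-argc path above: the value written into the
// is_construct_call_ slot is (argc + FCA::kArgsLength + 1) * kPointerSize,
// with the +1 covering the receiver. That slot ends up at
// sp + 4 * kPointerSize inside this exit frame, which appears to be exactly
// the stack_space_offset handed to CallApiFunctionAndReturn at the end of
// this helper, so the number of bytes to unwind is read back from the frame
// instead of being a compile-time constant.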
5476 ExternalReference thunk_ref =
5477 ExternalReference::invoke_function_callback(masm->isolate());
5479 AllowExternalCallThatCantCauseGC scope(masm);
5480 MemOperand context_restore_operand(
5481 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5482 // Store callbacks return the first JS argument.
5483 int return_value_offset = 0;
5484 if (return_first_arg) {
5485 return_value_offset = 2 + FCA::kArgsLength;
5486 } else {
5487 return_value_offset = 2 + FCA::kReturnValueOffset;
5489 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5490 int stack_space = 0;
5491 int32_t stack_space_offset = 4 * kPointerSize;
5492 if (argc.is_immediate()) {
5493 stack_space = argc.immediate() + FCA::kArgsLength + 1;
5494 stack_space_offset = kInvalidStackOffset;
5496 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
5497 stack_space_offset, return_value_operand,
5498 &context_restore_operand);
5502 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5503 bool call_data_undefined = this->call_data_undefined();
5504 CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
5505 call_data_undefined);
5509 void CallApiAccessorStub::Generate(MacroAssembler* masm) {
5510 bool is_store = this->is_store();
5511 int argc = this->argc();
5512 bool call_data_undefined = this->call_data_undefined();
5513 CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
5514 call_data_undefined);
5518 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5519 // ----------- S t a t e -------------
5520 // -- sp[0] : name
5521 // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
5523 // -- a2 : api_function_address
5524 // -----------------------------------
5526 Register api_function_address = ApiGetterDescriptor::function_address();
5527 DCHECK(api_function_address.is(a2));
5529 __ mov(a0, sp); // a0 = Handle<Name>
5530 __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
5532 const int kApiStackSpace = 1;
5533 FrameScope frame_scope(masm, StackFrame::MANUAL);
5534 __ EnterExitFrame(false, kApiStackSpace);
5536 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5537 // a1 (internal::Object** args_) as the data.
5538 __ sd(a1, MemOperand(sp, 1 * kPointerSize));
5539 __ Daddu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
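// Layout assumed by the two address computations above (sketch): before the
// exit frame was entered the property name sat at sp[0], so a0 (the old sp)
// doubles as a Handle<Name>, and the PropertyCallbackArguments array starts
// one slot above it. That pointer is then stored on the new frame and a1 is
// re-pointed at the slot holding it, which serves as the AccessorInfo whose
// only payload here is args_.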
5541 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5543 ExternalReference thunk_ref =
5544 ExternalReference::invoke_accessor_getter_callback(isolate());
5545 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
5546 kStackUnwindSpace, kInvalidStackOffset,
5547 MemOperand(fp, 6 * kPointerSize), NULL);
5553 } } // namespace v8::internal
5555 #endif // V8_TARGET_ARCH_MIPS64