1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #if V8_TARGET_ARCH_MIPS
9 #include "src/bootstrapper.h"
10 #include "src/code-stubs.h"
11 #include "src/codegen.h"
12 #include "src/regexp-macro-assembler.h"
13 #include "src/stub-cache.h"
19 void FastNewClosureStub::InitializeInterfaceDescriptor(
20 CodeStubInterfaceDescriptor* descriptor) {
21 Register registers[] = { cp, a2 };
22 descriptor->Initialize(
23 MajorKey(), ARRAY_SIZE(registers), registers,
24 Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
28 void FastNewContextStub::InitializeInterfaceDescriptor(
29 CodeStubInterfaceDescriptor* descriptor) {
30 Register registers[] = { cp, a1 };
31 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
35 void ToNumberStub::InitializeInterfaceDescriptor(
36 CodeStubInterfaceDescriptor* descriptor) {
37 Register registers[] = { cp, a0 };
38 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
42 void NumberToStringStub::InitializeInterfaceDescriptor(
43 CodeStubInterfaceDescriptor* descriptor) {
44 Register registers[] = { cp, a0 };
45 descriptor->Initialize(
46 MajorKey(), ARRAY_SIZE(registers), registers,
47 Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
51 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
52 CodeStubInterfaceDescriptor* descriptor) {
53 Register registers[] = { cp, a3, a2, a1 };
54 Representation representations[] = {
55 Representation::Tagged(),
56 Representation::Tagged(),
57 Representation::Smi(),
58 Representation::Tagged() };
59 descriptor->Initialize(
60 MajorKey(), ARRAY_SIZE(registers), registers,
61 Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
66 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
67 CodeStubInterfaceDescriptor* descriptor) {
68 Register registers[] = { cp, a3, a2, a1, a0 };
69 descriptor->Initialize(
70 MajorKey(), ARRAY_SIZE(registers), registers,
71 Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
75 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
76 CodeStubInterfaceDescriptor* descriptor) {
77 Register registers[] = { cp, a2, a3 };
78 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
82 void CallFunctionStub::InitializeInterfaceDescriptor(
83 CodeStubInterfaceDescriptor* descriptor) {
88 void CallConstructStub::InitializeInterfaceDescriptor(
89 CodeStubInterfaceDescriptor* descriptor) {
94 void RegExpConstructResultStub::InitializeInterfaceDescriptor(
95 CodeStubInterfaceDescriptor* descriptor) {
96 Register registers[] = { cp, a2, a1, a0 };
97 descriptor->Initialize(
98 MajorKey(), ARRAY_SIZE(registers), registers,
99 Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
103 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
104 CodeStubInterfaceDescriptor* descriptor) {
105 Register registers[] = { cp, a0, a1 };
107 Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
108 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
109 FUNCTION_ADDR(entry));
113 void CompareNilICStub::InitializeInterfaceDescriptor(
114 CodeStubInterfaceDescriptor* descriptor) {
115 Register registers[] = { cp, a0 };
116 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
117 FUNCTION_ADDR(CompareNilIC_Miss));
118 descriptor->SetMissHandler(
119 ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
123 const Register InterfaceDescriptor::ContextRegister() { return cp; }
126 static void InitializeArrayConstructorDescriptor(
127 CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
128 int constant_stack_parameter_count) {
131 // a0 -- number of arguments
133 // a2 -- allocation site with elements kind
134 Address deopt_handler = Runtime::FunctionForId(
135 Runtime::kArrayConstructor)->entry;
137 if (constant_stack_parameter_count == 0) {
138 Register registers[] = { cp, a1, a2 };
139 descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
140 deopt_handler, NULL, constant_stack_parameter_count,
141 JS_FUNCTION_STUB_MODE);
143 // The stack param count includes the constructor pointer and a single argument.
144 Register registers[] = { cp, a1, a2, a0 };
145 Representation representations[] = {
146 Representation::Tagged(),
147 Representation::Tagged(),
148 Representation::Tagged(),
149 Representation::Integer32() };
150 descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0,
151 deopt_handler, representations,
152 constant_stack_parameter_count,
153 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
158 static void InitializeInternalArrayConstructorDescriptor(
159 CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
160 int constant_stack_parameter_count) {
163 // a0 -- number of arguments
164 // a1 -- constructor function
165 Address deopt_handler = Runtime::FunctionForId(
166 Runtime::kInternalArrayConstructor)->entry;
168 if (constant_stack_parameter_count == 0) {
169 Register registers[] = { cp, a1 };
170 descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
171 deopt_handler, NULL, constant_stack_parameter_count,
172 JS_FUNCTION_STUB_MODE);
174 // The stack param count includes the constructor pointer and a single argument.
175 Register registers[] = { cp, a1, a0 };
176 Representation representations[] = {
177 Representation::Tagged(),
178 Representation::Tagged(),
179 Representation::Integer32() };
180 descriptor->Initialize(major, ARRAY_SIZE(registers), registers, a0,
181 deopt_handler, representations,
182 constant_stack_parameter_count,
183 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
188 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
189 CodeStubInterfaceDescriptor* descriptor) {
190 InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
194 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
195 CodeStubInterfaceDescriptor* descriptor) {
196 InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
200 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
201 CodeStubInterfaceDescriptor* descriptor) {
202 InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
206 void ToBooleanStub::InitializeInterfaceDescriptor(
207 CodeStubInterfaceDescriptor* descriptor) {
208 Register registers[] = { cp, a0 };
209 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
210 FUNCTION_ADDR(ToBooleanIC_Miss));
211 descriptor->SetMissHandler(
212 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
216 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
217 CodeStubInterfaceDescriptor* descriptor) {
218 InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
222 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
223 CodeStubInterfaceDescriptor* descriptor) {
224 InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
228 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
229 CodeStubInterfaceDescriptor* descriptor) {
230 InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
234 void BinaryOpICStub::InitializeInterfaceDescriptor(
235 CodeStubInterfaceDescriptor* descriptor) {
236 Register registers[] = { cp, a1, a0 };
237 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
238 FUNCTION_ADDR(BinaryOpIC_Miss));
239 descriptor->SetMissHandler(
240 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
244 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
245 CodeStubInterfaceDescriptor* descriptor) {
246 Register registers[] = { cp, a2, a1, a0 };
247 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
248 FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
252 void StringAddStub::InitializeInterfaceDescriptor(
253 CodeStubInterfaceDescriptor* descriptor) {
254 Register registers[] = { cp, a1, a0 };
255 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
256 Runtime::FunctionForId(Runtime::kStringAdd)->entry);
260 void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
262 CallInterfaceDescriptor* descriptor =
263 isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
264 Register registers[] = { cp, // context,
266 a0, // actual number of arguments
267 a2, // expected number of arguments
269 Representation representations[] = {
270 Representation::Tagged(), // context
271 Representation::Tagged(), // JSFunction
272 Representation::Integer32(), // actual number of arguments
273 Representation::Integer32(), // expected number of arguments
275 descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
278 CallInterfaceDescriptor* descriptor =
279 isolate->call_descriptor(Isolate::KeyedCall);
280 Register registers[] = { cp, // context
283 Representation representations[] = {
284 Representation::Tagged(), // context
285 Representation::Tagged(), // key
287 descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
290 CallInterfaceDescriptor* descriptor =
291 isolate->call_descriptor(Isolate::NamedCall);
292 Register registers[] = { cp, // context
295 Representation representations[] = {
296 Representation::Tagged(), // context
297 Representation::Tagged(), // name
299 descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
302 CallInterfaceDescriptor* descriptor =
303 isolate->call_descriptor(Isolate::CallHandler);
304 Register registers[] = { cp, // context
307 Representation representations[] = {
308 Representation::Tagged(), // context
309 Representation::Tagged(), // receiver
311 descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
314 CallInterfaceDescriptor* descriptor =
315 isolate->call_descriptor(Isolate::ApiFunctionCall);
316 Register registers[] = { cp, // context
320 a1, // api_function_address
322 Representation representations[] = {
323 Representation::Tagged(), // context
324 Representation::Tagged(), // callee
325 Representation::Tagged(), // call_data
326 Representation::Tagged(), // holder
327 Representation::External(), // api_function_address
329 descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
334 #define __ ACCESS_MASM(masm)
337 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
340 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
346 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
351 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
352 // Update the static counter each time a new code stub is generated.
353 isolate()->counters()->code_stubs()->Increment();
355 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
356 int param_count = descriptor->GetEnvironmentParameterCount();
358 // Call the runtime system in a fresh internal frame.
359 FrameScope scope(masm, StackFrame::INTERNAL);
360 DCHECK(param_count == 0 ||
361 a0.is(descriptor->GetEnvironmentParameterRegister(
363 // Push arguments, adjust sp.
364 __ Subu(sp, sp, Operand(param_count * kPointerSize));
365 for (int i = 0; i < param_count; ++i) {
366 // Store argument to stack.
367 __ sw(descriptor->GetEnvironmentParameterRegister(i),
368 MemOperand(sp, (param_count-1-i) * kPointerSize));
370 ExternalReference miss = descriptor->miss_handler();
371 __ CallExternalReference(miss, param_count);
378 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
379 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
380 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
381 // scratch register. Destroys the source register. No GC occurs during this
382 // stub so you don't have to set up the frame.
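// For example, converting the (untagged) value 3 produces the double 3.0:
// high word 0x40080000 (sign 0, biased exponent 1024, top mantissa bits
// 0x80000) and low word 0.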
383 class ConvertToDoubleStub : public PlatformCodeStub {
385 ConvertToDoubleStub(Isolate* isolate,
386 Register result_reg_1,
387 Register result_reg_2,
389 Register scratch_reg)
390 : PlatformCodeStub(isolate),
391 result1_(result_reg_1),
392 result2_(result_reg_2),
394 zeros_(scratch_reg) { }
402 // Minor key encoding in 16 bits.
403 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
404 class OpBits: public BitField<Token::Value, 2, 14> {};
406 Major MajorKey() const { return ConvertToDouble; }
407 int MinorKey() const {
408 // Encode the parameters in a unique 16 bit value.
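// Each register code occupies a 4-bit field, which assumes that every
// register passed to this stub has a code below 16.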
409 return result1_.code() +
410 (result2_.code() << 4) +
411 (source_.code() << 8) +
412 (zeros_.code() << 12);
415 void Generate(MacroAssembler* masm);
419 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
420 Register exponent, mantissa;
421 if (kArchEndian == kLittle) {
429 // Convert from Smi to integer.
430 __ sra(source_, source_, kSmiTagSize);
431 // Move sign bit from source to destination. This works because the sign bit
432 // in the exponent word of the double has the same position and polarity as
433 // the 2's complement sign bit in a Smi.
434 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
435 __ And(exponent, source_, Operand(HeapNumber::kSignMask));
436 // Subtract from 0 if source was negative.
437 __ subu(at, zero_reg, source_);
438 __ Movn(source_, at, exponent);
440 // We have -1, 0 or 1, which we treat specially. Register source_ contains
441 // absolute value: it is either equal to 1 (special case of -1 and 1),
442 // greater than 1 (not a special case) or less than 1 (special case of 0).
443 __ Branch(&not_special, gt, source_, Operand(1));
445 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
446 const uint32_t exponent_word_for_1 =
447 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
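// (This is 1023 << 20 = 0x3FF00000, i.e. the upper word of the double 1.0.)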
448 // Safe to use 'at' as dest reg here.
449 __ Or(at, exponent, Operand(exponent_word_for_1));
450 __ Movn(exponent, at, source_); // Write exp when source not 0.
451 // 1, 0 and -1 all have 0 for the second word.
452 __ Ret(USE_DELAY_SLOT);
453 __ mov(mantissa, zero_reg);
455 __ bind(&not_special);
456 // Count leading zeros.
457 // Gets the wrong answer for 0, but we already checked for that case above.
458 __ Clz(zeros_, source_);
459 // Compute exponent and or it into the exponent register.
460 // We use mantissa as a scratch register here.
461 __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
462 __ subu(mantissa, mantissa, zeros_);
463 __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
464 __ Or(exponent, exponent, mantissa);
466 // Shift up the source chopping the top bit off.
467 __ Addu(zeros_, zeros_, Operand(1));
468 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
469 __ sllv(source_, source_, zeros_);
470 // Compute lower part of fraction (last 12 bits).
471 __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
472 // And the top (top 20 bits).
473 __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
475 __ Ret(USE_DELAY_SLOT);
476 __ or_(exponent, exponent, source_);
480 void DoubleToIStub::Generate(MacroAssembler* masm) {
481 Label out_of_range, only_low, negate, done;
482 Register input_reg = source();
483 Register result_reg = destination();
485 int double_offset = offset();
486 // Account for saved regs if input is sp.
487 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
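// (Three registers are pushed below, so doubles addressed relative to sp
// move by 3 * kPointerSize.)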
490 GetRegisterThatIsNotOneOf(input_reg, result_reg);
492 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
494 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
495 DoubleRegister double_scratch = kLithiumScratchDouble;
497 __ Push(scratch, scratch2, scratch3);
499 if (!skip_fastpath()) {
500 // Load double input.
501 __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
503 // Clear cumulative exception flags and save the FCSR.
504 __ cfc1(scratch2, FCSR);
505 __ ctc1(zero_reg, FCSR);
507 // Try a conversion to a signed integer.
508 __ Trunc_w_d(double_scratch, double_scratch);
509 // Move the converted value into the result register.
510 __ mfc1(scratch3, double_scratch);
512 // Retrieve and restore the FCSR.
513 __ cfc1(scratch, FCSR);
514 __ ctc1(scratch2, FCSR);
516 // Check for overflow and NaNs.
519 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
520 | kFCSRInvalidOpFlagMask);
521 // If we had no exceptions then set result_reg and we are done.
523 __ Branch(&error, ne, scratch, Operand(zero_reg));
524 __ Move(result_reg, scratch3);
529 // Load the double value and perform a manual truncation.
530 Register input_high = scratch2;
531 Register input_low = scratch3;
534 MemOperand(input_reg, double_offset + Register::kMantissaOffset));
536 MemOperand(input_reg, double_offset + Register::kExponentOffset));
538 Label normal_exponent, restore_sign;
539 // Extract the biased exponent in result.
542 HeapNumber::kExponentShift,
543 HeapNumber::kExponentBits);
545 // Check for Infinity and NaNs, which should return 0.
546 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
547 __ Movz(result_reg, zero_reg, scratch);
548 __ Branch(&done, eq, scratch, Operand(zero_reg));
550 // Express exponent as delta to (number of mantissa bits + 31).
553 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
555 // If the delta is strictly positive, all bits would be shifted away,
556 // which means that we can return 0.
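// (A positive delta means the unbiased exponent exceeds kMantissaBits + 31,
// so the integer part is a multiple of 2^32 and its low 32 bits are zero.)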
557 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
558 __ mov(result_reg, zero_reg);
561 __ bind(&normal_exponent);
562 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
564 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
567 Register sign = result_reg;
569 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
571 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
572 // to check for this specific case.
573 Label high_shift_needed, high_shift_done;
574 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
575 __ mov(input_high, zero_reg);
576 __ Branch(&high_shift_done);
577 __ bind(&high_shift_needed);
579 // Set the implicit 1 before the mantissa part in input_high.
582 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
583 // Shift the mantissa bits to the correct position.
584 // We don't need to clear non-mantissa bits as they will be shifted away.
585 // If they weren't, it would mean that the answer is in the 32-bit range.
586 __ sllv(input_high, input_high, scratch);
588 __ bind(&high_shift_done);
590 // Replace the shifted bits with bits from the lower mantissa word.
591 Label pos_shift, shift_done;
593 __ subu(scratch, at, scratch);
594 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
597 __ Subu(scratch, zero_reg, scratch);
598 __ sllv(input_low, input_low, scratch);
599 __ Branch(&shift_done);
602 __ srlv(input_low, input_low, scratch);
604 __ bind(&shift_done);
605 __ Or(input_high, input_high, Operand(input_low));
606 // Restore sign if necessary.
607 __ mov(scratch, sign);
610 __ Subu(result_reg, zero_reg, input_high);
611 __ Movz(result_reg, input_high, scratch);
615 __ Pop(scratch, scratch2, scratch3);
620 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
622 WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
623 WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
629 // See comment for class, this does NOT work for int32's that are in Smi range.
630 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
631 Label max_negative_int;
632 // the_int_ has the answer which is a signed int32 but not a Smi.
633 // We test for the special value that has a different exponent.
634 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
635 // Test sign, and save for later conditionals.
636 __ And(sign_, the_int_, Operand(0x80000000u));
637 __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
639 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
640 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
641 uint32_t non_smi_exponent =
642 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
643 __ li(scratch_, Operand(non_smi_exponent));
644 // Set the sign bit in scratch_ if the value was negative.
645 __ or_(scratch_, scratch_, sign_);
646 // Subtract from 0 if the value was negative.
647 __ subu(at, zero_reg, the_int_);
648 __ Movn(the_int_, at, sign_);
649 // We should be masking the implicit first digit of the mantissa away here,
650 // but it just ends up combining harmlessly with the last digit of the
651 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
652 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
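// (With an exponent of 30 the biased exponent is 1023 + 30 = 1053, whose
// lowest bit is already 1, so OR-ing the implicit mantissa bit in is a no-op.)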
653 DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
654 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
655 __ srl(at, the_int_, shift_distance);
656 __ or_(scratch_, scratch_, at);
657 __ sw(scratch_, FieldMemOperand(the_heap_number_,
658 HeapNumber::kExponentOffset));
659 __ sll(scratch_, the_int_, 32 - shift_distance);
660 __ Ret(USE_DELAY_SLOT);
661 __ sw(scratch_, FieldMemOperand(the_heap_number_,
662 HeapNumber::kMantissaOffset));
664 __ bind(&max_negative_int);
665 // The max negative int32 is stored as a positive number in the mantissa of
666 // a double because it uses a sign bit instead of using two's complement.
667 // The actual mantissa bits stored are all 0 because the implicit most
668 // significant 1 bit is not stored.
669 non_smi_exponent += 1 << HeapNumber::kExponentShift;
670 __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
672 FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
673 __ mov(scratch_, zero_reg);
674 __ Ret(USE_DELAY_SLOT);
676 FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
680 // Handle the case where the lhs and rhs are the same object.
681 // Equality is almost reflexive (everything but NaN), so this is a test
682 // for "identity and not NaN".
683 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
687 Label heap_number, return_equal;
688 Register exp_mask_reg = t5;
690 __ Branch(&not_identical, ne, a0, Operand(a1));
692 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
694 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
695 // so we do the second best thing - test it ourselves.
696 // The two operands are identical and we know they are not both Smis, so
697 // neither of them is a Smi. If it's not a heap number, then return equal.
698 if (cc == less || cc == greater) {
699 __ GetObjectType(a0, t4, t4);
700 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
702 __ GetObjectType(a0, t4, t4);
703 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
704 // Comparing JS objects with <=, >= is complicated.
706 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
707 // Normally here we fall through to return_equal, but undefined is
708 // special: (undefined == undefined) == true, but
709 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
710 if (cc == less_equal || cc == greater_equal) {
711 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
712 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
713 __ Branch(&return_equal, ne, a0, Operand(t2));
714 DCHECK(is_int16(GREATER) && is_int16(LESS));
715 __ Ret(USE_DELAY_SLOT);
717 // undefined <= undefined should fail.
718 __ li(v0, Operand(GREATER));
720 // undefined >= undefined should fail.
721 __ li(v0, Operand(LESS));
727 __ bind(&return_equal);
728 DCHECK(is_int16(GREATER) && is_int16(LESS));
729 __ Ret(USE_DELAY_SLOT);
731 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
732 } else if (cc == greater) {
733 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
735 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
738 // For less and greater we don't have to check for NaN since the result of
739 // x < x is false regardless. For the others here is some code to check
741 if (cc != lt && cc != gt) {
742 __ bind(&heap_number);
743 // It is a heap number, so return non-equal if it's NaN and equal if it's
746 // The representation of NaN values has all exponent bits (52..62) set,
747 // and not all mantissa bits (0..51) clear.
748 // Read top bits of double representation (second word of value).
749 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
750 // Test that exponent bits are all set.
751 __ And(t3, t2, Operand(exp_mask_reg));
752 // If all bits not set (ne cond), then not a NaN, objects are equal.
753 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
755 // Shift out flag and all exponent bits, retaining only mantissa.
756 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
757 // Or with all low-bits of mantissa.
758 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
759 __ Or(v0, t3, Operand(t2));
760 // For equal we already have the right value in v0: Return zero (equal)
761 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
762 // not (it's a NaN). For <= and >= we need to load v0 with the failing
763 // value if it's a NaN.
765 // All-zero means Infinity means equal.
766 __ Ret(eq, v0, Operand(zero_reg));
767 DCHECK(is_int16(GREATER) && is_int16(LESS));
768 __ Ret(USE_DELAY_SLOT);
770 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
772 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
776 // No fall through here.
778 __ bind(&not_identical);
782 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
785 Label* both_loaded_as_doubles,
788 DCHECK((lhs.is(a0) && rhs.is(a1)) ||
789 (lhs.is(a1) && rhs.is(a0)));
792 __ JumpIfSmi(lhs, &lhs_is_smi);
794 // Check whether the non-smi is a heap number.
795 __ GetObjectType(lhs, t4, t4);
797 // If lhs was not a number and rhs was a Smi then strict equality cannot
798 // succeed. Return non-equal (lhs is already not zero).
799 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
802 // Smi compared non-strictly with a non-Smi non-heap-number. Call
804 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
807 // Rhs is a smi, lhs is a number.
808 // Convert smi rhs to double.
809 __ sra(at, rhs, kSmiTagSize);
811 __ cvt_d_w(f14, f14);
812 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
814 // We now have both loaded as doubles.
815 __ jmp(both_loaded_as_doubles);
817 __ bind(&lhs_is_smi);
818 // Lhs is a Smi. Check whether the non-smi is a heap number.
819 __ GetObjectType(rhs, t4, t4);
821 // If rhs was not a number and lhs was a Smi then strict equality cannot
822 // succeed. Return non-equal.
823 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
824 __ li(v0, Operand(1));
826 // Smi compared non-strictly with a non-Smi non-heap-number. Call
828 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
831 // Lhs is a smi, rhs is a number.
832 // Convert smi lhs to double.
833 __ sra(at, lhs, kSmiTagSize);
835 __ cvt_d_w(f12, f12);
836 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
837 // Fall through to both_loaded_as_doubles.
841 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
844 // If either operand is a JS object or an oddball value, then they are
845 // not equal since their pointers are different.
846 // There is no test for undetectability in strict equality.
847 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
848 Label first_non_object;
849 // Get the type of the first operand into a2 and compare it with
850 // FIRST_SPEC_OBJECT_TYPE.
851 __ GetObjectType(lhs, a2, a2);
852 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
855 Label return_not_equal;
856 __ bind(&return_not_equal);
857 __ Ret(USE_DELAY_SLOT);
858 __ li(v0, Operand(1));
860 __ bind(&first_non_object);
861 // Check for oddballs: true, false, null, undefined.
862 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
864 __ GetObjectType(rhs, a3, a3);
865 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
867 // Check for oddballs: true, false, null, undefined.
868 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
870 // Now that we have the types we might as well check for
871 // internalized-internalized.
872 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
873 __ Or(a2, a2, Operand(a3));
874 __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
875 __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
879 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
882 Label* both_loaded_as_doubles,
883 Label* not_heap_numbers,
885 __ GetObjectType(lhs, a3, a2);
886 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
887 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
888 // If first was a heap number & second wasn't, go to slow case.
889 __ Branch(slow, ne, a3, Operand(a2));
891 // Both are heap numbers. Load them up then jump to the code we have
893 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
894 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
896 __ jmp(both_loaded_as_doubles);
900 // Fast negative check for internalized-to-internalized equality.
901 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
904 Label* possible_strings,
905 Label* not_both_strings) {
906 DCHECK((lhs.is(a0) && rhs.is(a1)) ||
907 (lhs.is(a1) && rhs.is(a0)));
909 // a2 is object type of lhs.
911 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
912 __ And(at, a2, Operand(kIsNotStringMask));
913 __ Branch(&object_test, ne, at, Operand(zero_reg));
914 __ And(at, a2, Operand(kIsNotInternalizedMask));
915 __ Branch(possible_strings, ne, at, Operand(zero_reg));
916 __ GetObjectType(rhs, a3, a3);
917 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
918 __ And(at, a3, Operand(kIsNotInternalizedMask));
919 __ Branch(possible_strings, ne, at, Operand(zero_reg));
921 // Both are internalized strings. We already checked they weren't the same
922 // pointer so they are not equal.
923 __ Ret(USE_DELAY_SLOT);
924 __ li(v0, Operand(1)); // Non-zero indicates not equal.
926 __ bind(&object_test);
927 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
928 __ GetObjectType(rhs, a2, a3);
929 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
931 // If both objects are undetectable, they are equal. Otherwise, they
932 // are not equal, since they are different objects and an object is not
933 // equal to undefined.
934 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
935 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
936 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
938 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
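// The xori in the Ret delay slot below yields 0 (equal) exactly when both
// undetectable bits are set, and a non-zero value (not equal) otherwise.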
939 __ Ret(USE_DELAY_SLOT);
940 __ xori(v0, a0, 1 << Map::kIsUndetectable);
944 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
947 CompareIC::State expected,
950 if (expected == CompareIC::SMI) {
951 __ JumpIfNotSmi(input, fail);
952 } else if (expected == CompareIC::NUMBER) {
953 __ JumpIfSmi(input, &ok);
954 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
957 // We could be strict about internalized/string here, but as long as
958 // hydrogen doesn't care, the stub doesn't have to care either.
963 // On entry a1 and a0 are the values to be compared.
964 // On exit v0 is 0, positive or negative to indicate the result of
966 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
969 Condition cc = GetCondition();
972 ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
973 ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
975 Label slow; // Call builtin.
976 Label not_smis, both_loaded_as_doubles;
978 Label not_two_smis, smi_done;
980 __ JumpIfNotSmi(a2, &not_two_smis);
983 __ Ret(USE_DELAY_SLOT);
985 __ bind(&not_two_smis);
987 // NOTICE! This code is only reached after a smi-fast-case check, so
988 // it is certain that at least one operand isn't a smi.
990 // Handle the case where the objects are identical. Either returns the answer
991 // or goes to slow. Only falls through if the objects were not identical.
992 EmitIdenticalObjectComparison(masm, &slow, cc);
994 // If either is a Smi (we know that not both are), then they can only
995 // be strictly equal if the other is a HeapNumber.
996 STATIC_ASSERT(kSmiTag == 0);
997 DCHECK_EQ(0, Smi::FromInt(0));
998 __ And(t2, lhs, Operand(rhs));
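// With a zero Smi tag, the AND has its tag bit set only when both values are
// heap objects, so the jump below is taken only if neither operand is a Smi.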
999 __ JumpIfNotSmi(t2, &not_smis, t0);
1000 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1001 // 1) Return the answer.
1003 // 3) Fall through to both_loaded_as_doubles.
1004 // 4) Jump to rhs_not_nan.
1005 // In cases 3 and 4 we have found out we were dealing with a number-number
1006 // comparison and the numbers have been loaded into f12 and f14 as doubles,
1007 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1008 EmitSmiNonsmiComparison(masm, lhs, rhs,
1009 &both_loaded_as_doubles, &slow, strict());
1011 __ bind(&both_loaded_as_doubles);
1012 // f12, f14 are the double representations of the left hand side
1013 // and the right hand side if we have FPU. Otherwise a2, a3 represent
1014 // left hand side and a0, a1 represent right hand side.
1016 __ li(t0, Operand(LESS));
1017 __ li(t1, Operand(GREATER));
1018 __ li(t2, Operand(EQUAL));
1020 // Check if either rhs or lhs is NaN.
1021 __ BranchF(NULL, &nan, eq, f12, f14);
1023 // Check if LESS condition is satisfied. If true, move conditionally
1025 __ c(OLT, D, f12, f14);
1027 // Use the previous check to conditionally store to v0 the opposite condition
1028 // (GREATER). If rhs is equal to lhs, this will be corrected in the next
1031 // Check if EQUAL condition is satisfied. If true, move conditionally
1033 __ c(EQ, D, f12, f14);
1039 // NaN comparisons always fail.
1040 // Load whatever we need in v0 to make the comparison fail.
1041 DCHECK(is_int16(GREATER) && is_int16(LESS));
1042 __ Ret(USE_DELAY_SLOT);
1043 if (cc == lt || cc == le) {
1044 __ li(v0, Operand(GREATER));
1046 __ li(v0, Operand(LESS));
1051 // At this point we know we are dealing with two different objects,
1052 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1054 // This returns non-equal for some object types, or falls through if it
1056 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
1059 Label check_for_internalized_strings;
1060 Label flat_string_check;
1061 // Check for heap-number-heap-number comparison. Can jump to slow case,
1062 // or load both doubles and jump to the code that handles
1063 // that case. If the inputs are not doubles then jumps to
1064 // check_for_internalized_strings.
1065 // In this case a2 will contain the type of lhs_.
1066 EmitCheckForTwoHeapNumbers(masm,
1069 &both_loaded_as_doubles,
1070 &check_for_internalized_strings,
1071 &flat_string_check);
1073 __ bind(&check_for_internalized_strings);
1074 if (cc == eq && !strict()) {
1075 // Returns an answer for two internalized strings or two
1076 // detectable objects.
1077 // Otherwise jumps to string case or not both strings case.
1078 // Assumes that a2 is the type of lhs_ on entry.
1079 EmitCheckForInternalizedStringsOrObjects(
1080 masm, lhs, rhs, &flat_string_check, &slow);
1083 // Check for both being sequential ASCII strings, and inline if that is the
1085 __ bind(&flat_string_check);
1087 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
1089 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
1092 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1099 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1107 // Never falls through to here.
1110 // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1113 // Figure out which native to call and setup the arguments.
1114 Builtins::JavaScript native;
1116 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1118 native = Builtins::COMPARE;
1119 int ncr; // NaN compare result.
1120 if (cc == lt || cc == le) {
1123 DCHECK(cc == gt || cc == ge); // Remaining cases.
1126 __ li(a0, Operand(Smi::FromInt(ncr)));
1130 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1131 // tagged as a small integer.
1132 __ InvokeBuiltin(native, JUMP_FUNCTION);
1139 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
1142 __ PushSafepointRegisters();
1147 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
1150 __ PopSafepointRegisters();
1155 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1156 // We don't allow a GC during a store buffer overflow so there is no need to
1157 // store the registers in any particular way, but we do have to store and
1159 __ MultiPush(kJSCallerSaved | ra.bit());
1160 if (save_doubles_ == kSaveFPRegs) {
1161 __ MultiPushFPU(kCallerSavedFPU);
1163 const int argument_count = 1;
1164 const int fp_argument_count = 0;
1165 const Register scratch = a1;
1167 AllowExternalCallThatCantCauseGC scope(masm);
1168 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1169 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
1171 ExternalReference::store_buffer_overflow_function(isolate()),
1173 if (save_doubles_ == kSaveFPRegs) {
1174 __ MultiPopFPU(kCallerSavedFPU);
1177 __ MultiPop(kJSCallerSaved | ra.bit());
1182 void MathPowStub::Generate(MacroAssembler* masm) {
1183 const Register base = a1;
1184 const Register exponent = a2;
1185 const Register heapnumbermap = t1;
1186 const Register heapnumber = v0;
1187 const DoubleRegister double_base = f2;
1188 const DoubleRegister double_exponent = f4;
1189 const DoubleRegister double_result = f0;
1190 const DoubleRegister double_scratch = f6;
1191 const FPURegister single_scratch = f8;
1192 const Register scratch = t5;
1193 const Register scratch2 = t3;
1195 Label call_runtime, done, int_exponent;
1196 if (exponent_type_ == ON_STACK) {
1197 Label base_is_smi, unpack_exponent;
1198 // The exponent and base are supplied as arguments on the stack.
1199 // This can only happen if the stub is called from non-optimized code.
1200 // Load input parameters from stack to double registers.
1201 __ lw(base, MemOperand(sp, 1 * kPointerSize));
1202 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
1204 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
1206 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
1207 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
1208 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1210 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
1211 __ jmp(&unpack_exponent);
1213 __ bind(&base_is_smi);
1214 __ mtc1(scratch, single_scratch);
1215 __ cvt_d_w(double_base, single_scratch);
1216 __ bind(&unpack_exponent);
1218 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1220 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
1221 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1222 __ ldc1(double_exponent,
1223 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1224 } else if (exponent_type_ == TAGGED) {
1225 // Base is already in double_base.
1226 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1228 __ ldc1(double_exponent,
1229 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1232 if (exponent_type_ != INTEGER) {
1233 Label int_exponent_convert;
1234 // Detect integer exponents stored as double.
1235 __ EmitFPUTruncate(kRoundToMinusInf,
1241 kCheckForInexactConversion);
1242 // scratch2 == 0 means there was no conversion error.
1243 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
1245 if (exponent_type_ == ON_STACK) {
1246 // Detect square root case. Crankshaft detects constant +/-0.5 at
1247 // compile time and uses DoMathPowHalf instead. We then skip this check
1248 // for non-constant cases of +/-0.5 as these hardly occur.
1249 Label not_plus_half;
1252 __ Move(double_scratch, 0.5);
1253 __ BranchF(USE_DELAY_SLOT,
1259 // double_scratch can be overwritten in the delay slot.
1260 // Calculates square root of base. Check for the special case of
1261 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
1262 __ Move(double_scratch, -V8_INFINITY);
1263 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1264 __ neg_d(double_result, double_scratch);
1266 // Add +0 to convert -0 to +0.
1267 __ add_d(double_scratch, double_base, kDoubleRegZero);
1268 __ sqrt_d(double_result, double_scratch);
1271 __ bind(&not_plus_half);
1272 __ Move(double_scratch, -0.5);
1273 __ BranchF(USE_DELAY_SLOT,
1279 // double_scratch can be overwritten in the delay slot.
1280 // Calculates the reciprocal of the square root of base. Check for the special
1281 // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
1282 __ Move(double_scratch, -V8_INFINITY);
1283 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1284 __ Move(double_result, kDoubleRegZero);
1286 // Add +0 to convert -0 to +0.
1287 __ add_d(double_scratch, double_base, kDoubleRegZero);
1288 __ Move(double_result, 1);
1289 __ sqrt_d(double_scratch, double_scratch);
1290 __ div_d(double_result, double_result, double_scratch);
1296 AllowExternalCallThatCantCauseGC scope(masm);
1297 __ PrepareCallCFunction(0, 2, scratch2);
1298 __ MovToFloatParameters(double_base, double_exponent);
1300 ExternalReference::power_double_double_function(isolate()),
1304 __ MovFromFloatResult(double_result);
1307 __ bind(&int_exponent_convert);
1310 // Calculate power with integer exponent.
1311 __ bind(&int_exponent);
1313 // Get two copies of exponent in the registers scratch and exponent.
1314 if (exponent_type_ == INTEGER) {
1315 __ mov(scratch, exponent);
1317 // Exponent has previously been stored into scratch as untagged integer.
1318 __ mov(exponent, scratch);
1321 __ mov_d(double_scratch, double_base); // Back up base.
1322 __ Move(double_result, 1.0);
1324 // Get absolute value of exponent.
1325 Label positive_exponent;
1326 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
1327 __ Subu(scratch, zero_reg, scratch);
1328 __ bind(&positive_exponent);
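// Exponentiate by squaring: multiply the result by the current base whenever
// the low bit of the exponent is set, then square the base and halve the
// exponent, e.g. x^5 = x * x^4 (exponent bits 101).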
1330 Label while_true, no_carry, loop_end;
1331 __ bind(&while_true);
1333 __ And(scratch2, scratch, 1);
1335 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
1336 __ mul_d(double_result, double_result, double_scratch);
1339 __ sra(scratch, scratch, 1);
1341 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
1342 __ mul_d(double_scratch, double_scratch, double_scratch);
1344 __ Branch(&while_true);
1348 __ Branch(&done, ge, exponent, Operand(zero_reg));
1349 __ Move(double_scratch, 1.0);
1350 __ div_d(double_result, double_scratch, double_result);
1351 // Test whether result is zero. Bail out to check for subnormal result.
1352 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1353 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
1355 // double_exponent may not contain the exponent value if the input was a
1356 // smi. Set it from the untagged exponent value before bailing out.
1357 __ mtc1(exponent, single_scratch);
1358 __ cvt_d_w(double_exponent, single_scratch);
1360 // Returning or bailing out.
1361 Counters* counters = isolate()->counters();
1362 if (exponent_type_ == ON_STACK) {
1363 // The arguments are still on the stack.
1364 __ bind(&call_runtime);
1365 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
1367 // The stub is called from non-optimized code, which expects the result
1368 // as a heap number in v0.
1370 __ AllocateHeapNumber(
1371 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1372 __ sdc1(double_result,
1373 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1374 DCHECK(heapnumber.is(v0));
1375 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1380 AllowExternalCallThatCantCauseGC scope(masm);
1381 __ PrepareCallCFunction(0, 2, scratch);
1382 __ MovToFloatParameters(double_base, double_exponent);
1384 ExternalReference::power_double_double_function(isolate()),
1388 __ MovFromFloatResult(double_result);
1391 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1397 bool CEntryStub::NeedsImmovableCode() {
1402 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1403 CEntryStub::GenerateAheadOfTime(isolate);
1404 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
1405 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1406 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1407 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1408 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1409 BinaryOpICStub::GenerateAheadOfTime(isolate);
1410 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
1411 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
1412 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1416 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1417 StoreRegistersStateStub stub(isolate);
1422 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1423 RestoreRegistersStateStub stub(isolate);
1428 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1429 SaveFPRegsMode mode = kSaveFPRegs;
1430 CEntryStub save_doubles(isolate, 1, mode);
1431 StoreBufferOverflowStub stub(isolate, mode);
1432 // These stubs might already be in the snapshot, detect that and don't
1433 // regenerate, which would lead to code stub initialization state being messed
1435 Code* save_doubles_code;
1436 if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
1437 save_doubles_code = *save_doubles.GetCode();
1439 Code* store_buffer_overflow_code;
1440 if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
1441 store_buffer_overflow_code = *stub.GetCode();
1443 isolate->set_fp_stubs_generated(true);
1447 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1448 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1453 void CEntryStub::Generate(MacroAssembler* masm) {
1454 // Called from JavaScript; parameters are on stack as if calling JS function
1455 // s0: number of arguments including receiver
1456 // s1: size of arguments excluding receiver
1457 // s2: pointer to builtin function
1458 // fp: frame pointer (restored after C call)
1459 // sp: stack pointer (restored as callee's sp after C call)
1460 // cp: current context (C callee-saved)
1462 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1464 // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
1465 // The reason for this is that these arguments would need to be saved anyway
1466 // so it's faster to set them up directly.
1467 // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
1469 // Compute the argv pointer in a callee-saved register.
1470 __ Addu(s1, sp, s1);
1472 // Enter the exit frame that transitions from JavaScript to C++.
1473 FrameScope scope(masm, StackFrame::MANUAL);
1474 __ EnterExitFrame(save_doubles_);
1476 // s0: number of arguments including receiver (C callee-saved)
1477 // s1: pointer to first argument (C callee-saved)
1478 // s2: pointer to builtin function (C callee-saved)
1480 // Prepare arguments for C routine.
1483 // a1 = argv (set in the delay slot after find_ra below).
1485 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1486 // also need to reserve the 4 argument slots on the stack.
1488 __ AssertStackIsAligned();
1490 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1492 // To let the GC traverse the return address of the exit frames, we need to
1493 // know where the return address is. The CEntryStub is unmovable, so
1494 // we can store the address on the stack to be able to find it again and
1495 // we never have to restore it, because it will not change.
1496 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1497 // This branch-and-link sequence is needed to find the current PC on mips,
1498 // saved to the ra register.
1499 // Use masm-> here instead of the double-underscore macro since extra
1500 // coverage code can interfere with the proper calculation of ra.
1502 masm->bal(&find_ra); // bal exposes branch delay slot.
1504 masm->bind(&find_ra);
1506 // Adjust the value in ra to point to the correct return location, 2nd
1507 // instruction past the real call into C code (the jalr(t9)), and push it.
1508 // This is the return address of the exit frame.
1509 const int kNumInstructionsToJump = 5;
1510 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
1511 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
1512 // Stack space reservation moved to the branch delay slot below.
1513 // Stack is still aligned.
1515 // Call the C routine.
1516 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
1518 // Set up sp in the delay slot.
1519 masm->addiu(sp, sp, -kCArgsSlotsSize);
1520 // Make sure the stored 'ra' points to this position.
1521 DCHECK_EQ(kNumInstructionsToJump,
1522 masm->InstructionsGeneratedSince(&find_ra));
1526 // Runtime functions should not return 'the hole'. Allowing it to escape may
1527 // lead to crashes in the IC code later.
1528 if (FLAG_debug_code) {
1530 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1531 __ Branch(&okay, ne, v0, Operand(t0));
1532 __ stop("The hole escaped");
1536 // Check result for exception sentinel.
1537 Label exception_returned;
1538 __ LoadRoot(t0, Heap::kExceptionRootIndex);
1539 __ Branch(&exception_returned, eq, t0, Operand(v0));
1541 ExternalReference pending_exception_address(
1542 Isolate::kPendingExceptionAddress, isolate());
1544 // Check that there is no pending exception, otherwise we
1545 // should have returned the exception sentinel.
1546 if (FLAG_debug_code) {
1548 __ li(a2, Operand(pending_exception_address));
1549 __ lw(a2, MemOperand(a2));
1550 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1551 // Cannot use check here as it attempts to generate call into runtime.
1552 __ Branch(&okay, eq, t0, Operand(a2));
1553 __ stop("Unexpected pending exception");
1557 // Exit C frame and return.
1559 // sp: stack pointer
1560 // fp: frame pointer
1561 // s0: still holds argc (callee-saved).
1562 __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
1564 // Handling of exception.
1565 __ bind(&exception_returned);
1567 // Retrieve the pending exception.
1568 __ li(a2, Operand(pending_exception_address));
1569 __ lw(v0, MemOperand(a2));
1571 // Clear the pending exception.
1572 __ li(a3, Operand(isolate()->factory()->the_hole_value()));
1573 __ sw(a3, MemOperand(a2));
1575 // Special handling of termination exceptions which are uncatchable
1576 // by javascript code.
1577 Label throw_termination_exception;
1578 __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
1579 __ Branch(&throw_termination_exception, eq, v0, Operand(t0));
1581 // Handle normal exception.
1584 __ bind(&throw_termination_exception);
1585 __ ThrowUncatchable(v0);
1589 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
1590 Label invoke, handler_entry, exit;
1591 Isolate* isolate = masm->isolate();
1594 // a0: entry address
1603 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1605 // Save callee saved registers on the stack.
1606 __ MultiPush(kCalleeSaved | ra.bit());
1608 // Save callee-saved FPU registers.
1609 __ MultiPushFPU(kCalleeSavedFPU);
1610 // Set up the reserved register for 0.0.
1611 __ Move(kDoubleRegZero, 0.0);
1614 // Load argv in s0 register.
1615 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1616 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
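// The offset skips the callee-saved GPRs (plus ra) and FPU registers pushed
// above; adding kCArgsSlotsSize below then reaches argv in the caller's frame.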
1618 __ InitializeRootRegister();
1619 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
1621 // We build an EntryFrame.
1622 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1623 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
1624 __ li(t2, Operand(Smi::FromInt(marker)));
1625 __ li(t1, Operand(Smi::FromInt(marker)));
1626 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1628 __ lw(t0, MemOperand(t0));
1629 __ Push(t3, t2, t1, t0);
1630 // Set up frame pointer for the frame to be pushed.
1631 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
1634 // a0: entry_address
1636 // a2: receiver_pointer
1642 // function slot | entry frame
1644 // bad fp (0xff...f) |
1645 // callee saved registers + ra
1649 // If this is the outermost JS call, set js_entry_sp value.
1650 Label non_outermost_js;
1651 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1652 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1653 __ lw(t2, MemOperand(t1));
1654 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
1655 __ sw(fp, MemOperand(t1));
1656 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1659 __ nop(); // Branch delay slot nop.
1660 __ bind(&non_outermost_js);
1661 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1665 // Jump to a faked try block that does the invoke, with a faked catch
1666 // block that sets the pending exception.
1668 __ bind(&handler_entry);
1669 handler_offset_ = handler_entry.pos();
1670 // Caught exception: Store result (exception) in the pending exception
1671 // field in the JSEnv and return a failure sentinel. Coming in here the
1672 // fp will be invalid because the PushTryHandler below sets it to 0 to
1673 // signal the existence of the JSEntry frame.
1674 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1676 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
1677 __ LoadRoot(v0, Heap::kExceptionRootIndex);
1678 __ b(&exit); // b exposes branch delay slot.
1679 __ nop(); // Branch delay slot nop.
1681 // Invoke: Link this frame into the handler chain. There's only one
1682 // handler block in this code object, so its index is 0.
1684 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1685 // If an exception not caught by another handler occurs, this handler
1686 // returns control to the code after the bal(&invoke) above, which
1687 // restores all kCalleeSaved registers (including cp and fp) to their
1688 // saved values before returning a failure to C.
1690 // Clear any pending exceptions.
1691 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
1692 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1694 __ sw(t1, MemOperand(t0));
1696 // Invoke the function by calling through JS entry trampoline builtin.
1697 // Notice that we cannot store a reference to the trampoline code directly in
1698 // this stub, because runtime stubs are not traversed when doing GC.
1701 // a0: entry_address
1703 // a2: receiver_pointer
1710 // callee saved registers + ra
1715 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1717 __ li(t0, Operand(construct_entry));
1719 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1720 __ li(t0, Operand(entry));
1722 __ lw(t9, MemOperand(t0)); // Deref address.
1724 // Call JSEntryTrampoline.
1725 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1728 // Unlink this frame from the handler chain.
1731 __ bind(&exit); // v0 holds result
1732 // Check if the current stack frame is marked as the outermost JS frame.
1733 Label non_outermost_js_2;
1735 __ Branch(&non_outermost_js_2,
1738 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1739 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1740 __ sw(zero_reg, MemOperand(t1));
1741 __ bind(&non_outermost_js_2);
1743 // Restore the top frame descriptors from the stack.
1745 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1747 __ sw(t1, MemOperand(t0));
1749 // Reset the stack to the callee saved registers.
1750 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
1752 // Restore callee-saved fpu registers.
1753 __ MultiPopFPU(kCalleeSavedFPU);
1755 // Restore callee saved registers from the stack.
1756 __ MultiPop(kCalleeSaved | ra.bit());
1762 // Uses registers a0 to t0.
1763 // Expected input (depending on whether args are in registers or on the stack):
1764 // * object: a0 or at sp + 1 * kPointerSize.
1765 // * function: a1 or at sp.
1767 // An inlined call site may have been generated before calling this stub.
1768 // In this case the offset to the inline site to patch is passed on the stack,
1769 // in the safepoint slot for register t0.
1770 void InstanceofStub::Generate(MacroAssembler* masm) {
1771 // Call site inlining and patching implies arguments in registers.
1772 DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1773 // ReturnTrueFalse is only implemented for inlined call sites.
1774 DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
1776 // Fixed register usage throughout the stub:
1777 const Register object = a0; // Object (lhs).
1778 Register map = a3; // Map of the object.
1779 const Register function = a1; // Function (rhs).
1780 const Register prototype = t0; // Prototype of the function.
1781 const Register inline_site = t5;
1782 const Register scratch = a2;
1784 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
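// kDeltaToLoadBoolResult is the distance from the inlined map-check load to
// the instruction sequence that materializes the boolean result; both are
// patched when the check is inlined at the call site.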
1786 Label slow, loop, is_instance, is_not_instance, not_js_object;
1788 if (!HasArgsInRegisters()) {
1789 __ lw(object, MemOperand(sp, 1 * kPointerSize));
1790 __ lw(function, MemOperand(sp, 0));
1793 // Check that the left hand is a JS object and load map.
1794 __ JumpIfSmi(object, &not_js_object);
1795 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
1797 // If there is a call site cache don't look in the global cache, but do the
1798 // real lookup and update the call site cache.
1799 if (!HasCallSiteInlineCheck()) {
1801 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
1802 __ Branch(&miss, ne, function, Operand(at));
1803 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
1804 __ Branch(&miss, ne, map, Operand(at));
1805 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1806 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1811 // Get the prototype of the function.
1812 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1814 // Check that the function prototype is a JS object.
1815 __ JumpIfSmi(prototype, &slow);
1816 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1818 // Update the global instanceof or call site inlined cache with the current
1819 // map and function. The cached answer will be set when it is known below.
1820 if (!HasCallSiteInlineCheck()) {
1821 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1822 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1824 DCHECK(HasArgsInRegisters());
1825 // Patch the (relocated) inlined map check.
1827 // The offset was stored in t0 safepoint slot.
1828 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1829 __ LoadFromSafepointRegisterSlot(scratch, t0);
1830 __ Subu(inline_site, ra, scratch);
1831 // Get the map location in scratch and patch it.
1832 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
1833 __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
1836 // Register mapping: a3 is object map and t0 is function prototype.
1837 // Get prototype of object into a2.
1838 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1840 // We don't need map any more. Use it as a scratch register.
1841 Register scratch2 = map;
1844 // Loop through the prototype chain looking for the function prototype.
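// A hedged sketch (not code from the tree) of the walk performed below, with
// 'scratch' starting at the object's prototype:
//   while (true) {
//     if (scratch == prototype) goto is_instance;
//     if (scratch == null) goto is_not_instance;
//     scratch = scratch->map()->prototype();
//   }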
1845 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1847 __ Branch(&is_instance, eq, scratch, Operand(prototype));
1848 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
1849 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1850 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1853 __ bind(&is_instance);
1854 DCHECK(Smi::FromInt(0) == 0);
1855 if (!HasCallSiteInlineCheck()) {
1856 __ mov(v0, zero_reg);
1857 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1859 // Patch the call site to return true.
1860 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1861 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1862 // Get the boolean result location in scratch and patch it.
1863 __ PatchRelocatedValue(inline_site, scratch, v0);
1865 if (!ReturnTrueFalseObject()) {
1866 DCHECK_EQ(Smi::FromInt(0), 0);
1867 __ mov(v0, zero_reg);
1870 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1872 __ bind(&is_not_instance);
1873 if (!HasCallSiteInlineCheck()) {
1874 __ li(v0, Operand(Smi::FromInt(1)));
1875 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1877 // Patch the call site to return false.
1878 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1879 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1880 // Get the boolean result location in scratch and patch it.
1881 __ PatchRelocatedValue(inline_site, scratch, v0);
1883 if (!ReturnTrueFalseObject()) {
1884 __ li(v0, Operand(Smi::FromInt(1)));
1888 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1890 Label object_not_null, object_not_null_or_smi;
1891 __ bind(&not_js_object);
1892 // Before the null, smi and string value checks, check that the rhs is a
1893 // function, because an exception needs to be thrown for a non-function rhs.
1894 __ JumpIfSmi(function, &slow);
1895 __ GetObjectType(function, scratch2, scratch);
1896 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
1898 // Null is not instance of anything.
1899 __ Branch(&object_not_null,
1902 Operand(isolate()->factory()->null_value()));
1903 __ li(v0, Operand(Smi::FromInt(1)));
1904 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1906 __ bind(&object_not_null);
1907 // Smi values are not instances of anything.
1908 __ JumpIfNotSmi(object, &object_not_null_or_smi);
1909 __ li(v0, Operand(Smi::FromInt(1)));
1910 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1912 __ bind(&object_not_null_or_smi);
1913 // String values are not instances of anything.
1914 __ IsObjectJSStringType(object, scratch, &slow);
1915 __ li(v0, Operand(Smi::FromInt(1)));
1916 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1918 // Slow-case. Tail call builtin.
1920 if (!ReturnTrueFalseObject()) {
1921 if (HasArgsInRegisters()) {
1924 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
1927 FrameScope scope(masm, StackFrame::INTERNAL);
1929 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1932 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1933 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
1934 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1935 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1940 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1942 Register receiver = LoadIC::ReceiverRegister();
1943 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
1946 PropertyAccessCompiler::TailCallBuiltin(
1947 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1951 Register InstanceofStub::left() { return a0; }
1954 Register InstanceofStub::right() { return a1; }
1957 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1958 // The displacement is the offset of the last parameter (if any)
1959 // relative to the frame pointer.
1960 const int kDisplacement =
1961 StandardFrameConstants::kCallerSPOffset - kPointerSize;
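// As a rough formula, the lookup below computes (index and count are smis, so
// the shift by kPointerSizeLog2 - kSmiTagSize also untags them):
//   element = *(frame_base + kDisplacement
//               + (parameter_count - index) * kPointerSize)
// where frame_base is fp for an ordinary frame and the adaptor frame pointer
// in the arguments-adaptor case handled further down.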
1963 // Check that the key is a smi.
1965 __ JumpIfNotSmi(a1, &slow);
1967 // Check if the calling frame is an arguments adaptor frame.
1969 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1970 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
1974 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1976 // Check index (a1) against formal parameters count limit passed in
1977 // through register a0. Use unsigned comparison to get negative
1979 __ Branch(&slow, hs, a1, Operand(a0));
1981 // Read the argument from the stack and return it.
1982 __ subu(a3, a0, a1);
1983 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
1984 __ Addu(a3, fp, Operand(t3));
1985 __ Ret(USE_DELAY_SLOT);
1986 __ lw(v0, MemOperand(a3, kDisplacement));
1988 // Arguments adaptor case: Check index (a1) against actual arguments
1989 // limit found in the arguments adaptor frame. Use unsigned
1990 // comparison to get negative check for free.
1992 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1993 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
1995 // Read the argument from the adaptor frame and return it.
1996 __ subu(a3, a0, a1);
1997 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
1998 __ Addu(a3, a2, Operand(t3));
1999 __ Ret(USE_DELAY_SLOT);
2000 __ lw(v0, MemOperand(a3, kDisplacement));
2002 // Slow-case: Handle non-smi or out-of-bounds access to arguments
2003 // by calling the runtime system.
2006 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2010 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
2011 // sp[0] : number of parameters
2012 // sp[4] : receiver displacement
2014 // Check if the calling frame is an arguments adaptor frame.
2016 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2017 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
2021 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2023 // Patch the arguments.length and the parameters pointer in the current frame.
2024 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2025 __ sw(a2, MemOperand(sp, 0 * kPointerSize));
2027 __ Addu(a3, a3, Operand(t3));
2028 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
2029 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2032 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
2036 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
2038 // sp[0] : number of parameters (tagged)
2039 // sp[4] : address of receiver argument
2041 // Registers used over whole function:
2042 // t2 : allocated object (tagged)
2043 // t5 : mapped parameter count (tagged)
2045 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2046 // a1 = parameter count (tagged)
2048 // Check if the calling frame is an arguments adaptor frame.
2050 Label adaptor_frame, try_allocate;
2051 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2052 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
2053 __ Branch(&adaptor_frame,
2056 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2058 // No adaptor, parameter count = argument count.
2060 __ b(&try_allocate);
2061 __ nop(); // Branch delay slot nop.
2063 // We have an adaptor frame. Patch the parameters pointer.
2064 __ bind(&adaptor_frame);
2065 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2067 __ Addu(a3, a3, Operand(t6));
2068 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2069 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2071 // a1 = parameter count (tagged)
2072 // a2 = argument count (tagged)
2073 // Compute the mapped parameter count = min(a1, a2) in a1.
2075 __ Branch(&skip_min, lt, a1, Operand(a2));
2079 __ bind(&try_allocate);
2081 // Compute the sizes of backing store, parameter map, and arguments object.
2082 // 1. Parameter map, has 2 extra words containing context and backing store.
2083 const int kParameterMapHeaderSize =
2084 FixedArray::kHeaderSize + 2 * kPointerSize;
2085 // If there are no mapped parameters, we do not need the parameter_map.
2086 Label param_map_size;
2087 DCHECK_EQ(0, Smi::FromInt(0));
2088 __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
2089 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
2091 __ addiu(t5, t5, kParameterMapHeaderSize);
2092 __ bind(&param_map_size);
2094 // 2. Backing store.
2096 __ Addu(t5, t5, Operand(t6));
2097 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
2099 // 3. Arguments object.
2100 __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
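// Summarizing the size computation above (an informal restatement, not code
// from the tree):
//   size = (mapped_count == 0 ? 0 : kParameterMapHeaderSize
//                                   + mapped_count * kPointerSize)
//        + FixedArray::kHeaderSize + argument_count * kPointerSize
//        + Heap::kSloppyArgumentsObjectSize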
2102 // Do the allocation of all three objects in one go.
2103 __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
2105 // v0 = address of new object(s) (tagged)
2106 // a2 = argument count (smi-tagged)
2107 // Get the arguments boilerplate from the current native context into t0.
2108 const int kNormalOffset =
2109 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
2110 const int kAliasedOffset =
2111 Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
2113 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2114 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
2115 Label skip2_ne, skip2_eq;
2116 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
2117 __ lw(t0, MemOperand(t0, kNormalOffset));
2120 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
2121 __ lw(t0, MemOperand(t0, kAliasedOffset));
2124 // v0 = address of new object (tagged)
2125 // a1 = mapped parameter count (tagged)
2126 // a2 = argument count (smi-tagged)
2127 // t0 = address of arguments map (tagged)
2128 __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
2129 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
2130 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
2131 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
2133 // Set up the callee in-object property.
2134 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2135 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
2136 __ AssertNotSmi(a3);
2137 const int kCalleeOffset = JSObject::kHeaderSize +
2138 Heap::kArgumentsCalleeIndex * kPointerSize;
2139 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
2141 // Use the length (smi tagged) and set that as an in-object property too.
2143 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2144 const int kLengthOffset = JSObject::kHeaderSize +
2145 Heap::kArgumentsLengthIndex * kPointerSize;
2146 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
2148 // Set up the elements pointer in the allocated arguments object.
2149 // If we allocated a parameter map, t0 will point there, otherwise
2150 // it will point to the backing store.
2151 __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
2152 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
2154 // v0 = address of new object (tagged)
2155 // a1 = mapped parameter count (tagged)
2156 // a2 = argument count (tagged)
2157 // t0 = address of parameter map or backing store (tagged)
2158 // Initialize parameter map. If there are no mapped arguments, we're done.
2159 Label skip_parameter_map;
2161 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
2162 // Move backing store address to a3, because it is
2163 // expected there when filling in the unmapped arguments.
2167 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
2169 __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
2170 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
2171 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
2172 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
2173 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
2175 __ Addu(t2, t0, Operand(t6));
2176 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
2177 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
2179 // Copy the parameter slots and the holes in the arguments.
2180 // We need to fill in mapped_parameter_count slots. They index the context,
2181 // where parameters are stored in reverse order, at
2182 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2183 // The mapped parameters thus need to get indices
2184 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
2185 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2186 // We loop from right to left.
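// Informally, the loop below produces (the loop counter and the stored
// context index are kept as smis in the generated code):
//   for (i = mapped_count - 1; i >= 0; i--) {
//     parameter_map[i] = <context slot index of parameter i, see above>;
//     backing_store[i] = the_hole;  // marks the slot as mapped
//   }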
2187 Label parameters_loop, parameters_test;
2189 __ lw(t5, MemOperand(sp, 0 * kPointerSize));
2190 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
2191 __ Subu(t5, t5, Operand(a1));
2192 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
2194 __ Addu(a3, t0, Operand(t6));
2195 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
2197 // t2 = loop variable (tagged)
2198 // a1 = mapping index (tagged)
2199 // a3 = address of backing store (tagged)
2200 // t0 = address of parameter map (tagged)
2201 // t1 = temporary scratch (a.o., for address calculation)
2202 // t3 = the hole value
2203 __ jmp(&parameters_test);
2205 __ bind(&parameters_loop);
2206 __ Subu(t2, t2, Operand(Smi::FromInt(1)));
2208 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
2209 __ Addu(t6, t0, t1);
2210 __ sw(t5, MemOperand(t6));
2211 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
2212 __ Addu(t6, a3, t1);
2213 __ sw(t3, MemOperand(t6));
2214 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
2215 __ bind(&parameters_test);
2216 __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
2218 __ bind(&skip_parameter_map);
2219 // a2 = argument count (tagged)
2220 // a3 = address of backing store (tagged)
2222 // Copy arguments header and remaining slots (if there are any).
2223 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
2224 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
2225 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
2227 Label arguments_loop, arguments_test;
2229 __ lw(t0, MemOperand(sp, 1 * kPointerSize));
2231 __ Subu(t0, t0, Operand(t6));
2232 __ jmp(&arguments_test);
2234 __ bind(&arguments_loop);
2235 __ Subu(t0, t0, Operand(kPointerSize));
2236 __ lw(t2, MemOperand(t0, 0));
2238 __ Addu(t1, a3, Operand(t6));
2239 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
2240 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
2242 __ bind(&arguments_test);
2243 __ Branch(&arguments_loop, lt, t5, Operand(a2));
2245 // Return and remove the on-stack parameters.
2248 // Do the runtime call to allocate the arguments object.
2249 // a2 = argument count (tagged)
2251 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
2252 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
2256 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2257 // sp[0] : number of parameters
2258 // sp[4] : receiver displacement
2260 // Check if the calling frame is an arguments adaptor frame.
2261 Label adaptor_frame, try_allocate, runtime;
2262 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2263 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
2264 __ Branch(&adaptor_frame,
2267 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2269 // Get the length from the frame.
2270 __ lw(a1, MemOperand(sp, 0));
2271 __ Branch(&try_allocate);
2273 // Patch the arguments.length and the parameters pointer.
2274 __ bind(&adaptor_frame);
2275 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2276 __ sw(a1, MemOperand(sp, 0));
2277 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
2278 __ Addu(a3, a2, Operand(at));
2280 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2281 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2283 // Try the new space allocation. Start out with computing the size
2284 // of the arguments object and the elements array in words.
2285 Label add_arguments_object;
2286 __ bind(&try_allocate);
2287 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
2288 __ srl(a1, a1, kSmiTagSize);
2290 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
2291 __ bind(&add_arguments_object);
2292 __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
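// In words (SIZE_IN_WORDS is passed to Allocate below), the size is roughly:
//   size = (argc == 0 ? 0 : argc + FixedArray::kHeaderSize / kPointerSize)
//        + Heap::kStrictArgumentsObjectSize / kPointerSize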
2294 // Do the allocation of both objects in one go.
2295 __ Allocate(a1, v0, a2, a3, &runtime,
2296 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2298 // Get the arguments boilerplate from the current native context.
2299 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2300 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
2301 __ lw(t0, MemOperand(
2302 t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
2304 __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
2305 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
2306 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
2307 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
2309 // Get the length (smi tagged) and set that as an in-object property too.
2310 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2311 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2313 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
2314 Heap::kArgumentsLengthIndex * kPointerSize));
2317 __ Branch(&done, eq, a1, Operand(zero_reg));
2319 // Get the parameters pointer from the stack.
2320 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
2322 // Set up the elements pointer in the allocated arguments object and
2323 // initialize the header in the elements fixed array.
2324 __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
2325 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
2326 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
2327 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
2328 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
2329 // Untag the length for the loop.
2330 __ srl(a1, a1, kSmiTagSize);
2332 // Copy the fixed array slots.
2334 // Set up t0 to point to the first array slot.
2335 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2337 // Pre-decrement a2 with kPointerSize on each iteration.
2338 // Pre-decrement in order to skip receiver.
2339 __ Addu(a2, a2, Operand(-kPointerSize));
2340 __ lw(a3, MemOperand(a2));
2341 // Post-increment t0 with kPointerSize on each iteration.
2342 __ sw(a3, MemOperand(t0));
2343 __ Addu(t0, t0, Operand(kPointerSize));
2344 __ Subu(a1, a1, Operand(1));
2345 __ Branch(&loop, ne, a1, Operand(zero_reg));
2347 // Return and remove the on-stack parameters.
2351 // Do the runtime call to allocate the arguments object.
2353 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
2357 void RegExpExecStub::Generate(MacroAssembler* masm) {
2358 // Just jump directly to the runtime if native RegExp is not selected at compile
2359 // time, or if regexp entry in generated code is turned off by a runtime switch or
2361 #ifdef V8_INTERPRETED_REGEXP
2362 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2363 #else // V8_INTERPRETED_REGEXP
2365 // Stack frame on entry.
2366 // sp[0]: last_match_info (expected JSArray)
2367 // sp[4]: previous index
2368 // sp[8]: subject string
2369 // sp[12]: JSRegExp object
2371 const int kLastMatchInfoOffset = 0 * kPointerSize;
2372 const int kPreviousIndexOffset = 1 * kPointerSize;
2373 const int kSubjectOffset = 2 * kPointerSize;
2374 const int kJSRegExpOffset = 3 * kPointerSize;
2377 // Allocation of registers for this function. These are in callee save
2378 // registers and will be preserved by the call to the native RegExp code, as
2379 // this code is called using the normal C calling convention. When calling
2380 // directly from generated code the native RegExp code will not do a GC and
2381 // therefore the content of these registers is safe to use after the call.
2382 // MIPS - using s0..s2, since we are not using CEntry Stub.
2383 Register subject = s0;
2384 Register regexp_data = s1;
2385 Register last_match_info_elements = s2;
2387 // Ensure that a RegExp stack is allocated.
2388 ExternalReference address_of_regexp_stack_memory_address =
2389 ExternalReference::address_of_regexp_stack_memory_address(
2391 ExternalReference address_of_regexp_stack_memory_size =
2392 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2393 __ li(a0, Operand(address_of_regexp_stack_memory_size));
2394 __ lw(a0, MemOperand(a0, 0));
2395 __ Branch(&runtime, eq, a0, Operand(zero_reg));
2397 // Check that the first argument is a JSRegExp object.
2398 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
2399 STATIC_ASSERT(kSmiTag == 0);
2400 __ JumpIfSmi(a0, &runtime);
2401 __ GetObjectType(a0, a1, a1);
2402 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2404 // Check that the RegExp has been compiled (data contains a fixed array).
2405 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2406 if (FLAG_debug_code) {
2407 __ SmiTst(regexp_data, t0);
2409 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2412 __ GetObjectType(regexp_data, a0, a0);
2414 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2416 Operand(FIXED_ARRAY_TYPE));
2419 // regexp_data: RegExp data (FixedArray)
2420 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2421 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2422 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2424 // regexp_data: RegExp data (FixedArray)
2425 // Check that the number of captures fit in the static offsets vector buffer.
2427 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2428 // Check (number_of_captures + 1) * 2 <= offsets vector size
2429 // Or number_of_captures * 2 <= offsets vector size - 2
2430 // Multiplying by 2 comes for free since a2 is smi-tagged.
2431 STATIC_ASSERT(kSmiTag == 0);
2432 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2433 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2435 &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2437 // Reset offset for possibly sliced string.
2438 __ mov(t0, zero_reg);
2439 __ lw(subject, MemOperand(sp, kSubjectOffset));
2440 __ JumpIfSmi(subject, &runtime);
2441 __ mov(a3, subject); // Make a copy of the original subject string.
2442 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2443 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2444 // subject: subject string
2445 // a3: subject string
2446 // a0: subject string instance type
2447 // regexp_data: RegExp data (FixedArray)
2448 // Handle subject string according to its encoding and representation:
2449 // (1) Sequential string? If yes, go to (5).
2450 // (2) Anything but sequential or cons? If yes, go to (6).
2451 // (3) Cons string. If the string is flat, replace subject with first string.
2452 // Otherwise bailout.
2453 // (4) Is subject external? If yes, go to (7).
2454 // (5) Sequential string. Load regexp code according to encoding.
2458 // Deferred code at the end of the stub:
2459 // (6) Not a long external string? If yes, go to (8).
2460 // (7) External string. Make it, offset-wise, look like a sequential string.
2462 // (8) Short external string or not a string? If yes, bail out to runtime.
2463 // (9) Sliced string. Replace subject with parent. Go to (4).
2465 Label seq_string /* 5 */, external_string /* 7 */,
2466 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2467 not_long_external /* 8 */;
2469 // (1) Sequential string? If yes, go to (5).
2472 Operand(kIsNotStringMask |
2473 kStringRepresentationMask |
2474 kShortExternalStringMask));
2475 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2476 __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
2478 // (2) Anything but sequential or cons? If yes, go to (6).
2479 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2480 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2481 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2482 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2484 __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2486 // (3) Cons string. Check that it's flat.
2487 // Replace subject with first string and reload instance type.
2488 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2489 __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2490 __ Branch(&runtime, ne, a0, Operand(a1));
2491 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2493 // (4) Is subject external? If yes, go to (7).
2494 __ bind(&check_underlying);
2495 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2496 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2497 STATIC_ASSERT(kSeqStringTag == 0);
2498 __ And(at, a0, Operand(kStringRepresentationMask));
2499 // The underlying external string is never a short external string.
2500 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2501 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2502 __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
2504 // (5) Sequential string. Load regexp code according to encoding.
2505 __ bind(&seq_string);
2506 // subject: sequential subject string (or look-alike, external string)
2507 // a3: original subject string
2508 // Load previous index and check range before a3 is overwritten. We have to
2509 // use a3 instead of subject here because subject might have been only made
2510 // to look like a sequential string when it actually is an external string.
2511 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
2512 __ JumpIfNotSmi(a1, &runtime);
2513 __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
2514 __ Branch(&runtime, ls, a3, Operand(a1));
2515 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
2517 STATIC_ASSERT(kStringEncodingMask == 4);
2518 STATIC_ASSERT(kOneByteStringTag == 4);
2519 STATIC_ASSERT(kTwoByteStringTag == 0);
2520 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
2521 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
2522 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
2523 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2524 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
2526 // (E) Carry on. String handling is done.
2527 // t9: irregexp code
2528 // Check that the irregexp code has been generated for the actual string
2529 // encoding. If it has, the field contains a code object; otherwise it contains
2530 // a smi (code flushing support).
2531 __ JumpIfSmi(t9, &runtime);
2533 // a1: previous index
2534 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
2536 // subject: Subject string
2537 // regexp_data: RegExp data (FixedArray)
2538 // All checks done. Now push arguments for native regexp code.
2539 __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
2542 // Isolates: note we add an additional parameter here (isolate pointer).
2543 const int kRegExpExecuteArguments = 9;
2544 const int kParameterRegisters = 4;
2545 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2547 // Stack pointer now points to cell where return address is to be written.
2548 // Arguments are before that on the stack or in registers, meaning we
2549 // treat the return address as argument 5. Thus every argument after that
2550 // needs to be shifted back by 1. Since DirectCEntryStub will handle
2551 // allocating space for the c argument slots, we don't need to calculate
2552 // that into the argument positions on the stack. This is how the stack will
2553 // look (sp meaning the value of sp at this moment):
2554 // [sp + 5] - Argument 9
2555 // [sp + 4] - Argument 8
2556 // [sp + 3] - Argument 7
2557 // [sp + 2] - Argument 6
2558 // [sp + 1] - Argument 5
2559 // [sp + 0] - saved ra
2561 // Argument 9: Pass current isolate address.
2562 // CFunctionArgumentOperand handles MIPS stack argument slots.
2563 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2564 __ sw(a0, MemOperand(sp, 5 * kPointerSize));
2566 // Argument 8: Indicate that this is a direct call from JavaScript.
2567 __ li(a0, Operand(1));
2568 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
2570 // Argument 7: Start (high end) of backtracking stack memory area.
2571 __ li(a0, Operand(address_of_regexp_stack_memory_address));
2572 __ lw(a0, MemOperand(a0, 0));
2573 __ li(a2, Operand(address_of_regexp_stack_memory_size));
2574 __ lw(a2, MemOperand(a2, 0));
2575 __ addu(a0, a0, a2);
2576 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
2578 // Argument 6: Set the number of capture registers to zero to force global
2579 // regexps to behave as non-global. This does not affect non-global regexps.
2580 __ mov(a0, zero_reg);
2581 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
2583 // Argument 5: static offsets vector buffer.
2585 ExternalReference::address_of_static_offsets_vector(isolate())));
2586 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
2588 // For arguments 4 and 3 get string length, calculate start of string data
2589 // and calculate the shift of the index (0 for ASCII and 1 for two byte).
2590 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2591 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
2592 // Load the length from the original subject string from the previous stack
2593 // frame. Therefore we have to use fp, which points exactly to two pointer
2594 // sizes below the previous sp. (Because creating a new stack frame pushes
2595 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2596 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2597 // If slice offset is not 0, load the length from the original sliced string.
2598 // Argument 4, a3: End of string data
2599 // Argument 3, a2: Start of string data
2600 // Prepare start and end index of the input.
2601 __ sllv(t1, t0, a3);
2602 __ addu(t0, t2, t1);
2603 __ sllv(t1, a1, a3);
2604 __ addu(a2, t0, t1);
2606 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
2607 __ sra(t2, t2, kSmiTagSize);
2608 __ sllv(t1, t2, a3);
2609 __ addu(a3, t0, t1);
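// A hedged summary of the pointer arithmetic above, where char_shift is 0 for
// one-byte and 1 for two-byte strings:
//   start (a2) = string_data + ((slice_offset + previous_index) << char_shift)
//   end   (a3) = string_data + ((slice_offset + original_subject_length) << char_shift)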
2610 // Argument 2 (a1): Previous index.
2613 // Argument 1 (a0): Subject string.
2614 __ mov(a0, subject);
2616 // Locate the code entry and call it.
2617 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
2618 DirectCEntryStub stub(isolate());
2619 stub.GenerateCall(masm, t9);
2621 __ LeaveExitFrame(false, no_reg, true);
2624 // subject: subject string (callee saved)
2625 // regexp_data: RegExp data (callee saved)
2626 // last_match_info_elements: Last match info elements (callee saved)
2627 // Check the result.
2629 __ Branch(&success, eq, v0, Operand(1));
2630 // We expect exactly one result since we force the called regexp to behave
2633 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
2634 // If not exception it can only be retry. Handle that in the runtime system.
2635 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2636 // Result must now be exception. If there is no pending exception already, a
2637 // stack overflow (on the backtrack stack) was detected in RegExp code, but
2638 // the exception has not been created yet. Handle that in the runtime system.
2639 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2640 __ li(a1, Operand(isolate()->factory()->the_hole_value()));
2641 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2643 __ lw(v0, MemOperand(a2, 0));
2644 __ Branch(&runtime, eq, v0, Operand(a1));
2646 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
2648 // Check if the exception is a termination. If so, throw as uncatchable.
2649 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
2650 Label termination_exception;
2651 __ Branch(&termination_exception, eq, v0, Operand(a0));
2655 __ bind(&termination_exception);
2656 __ ThrowUncatchable(v0);
2659 // For failure and exception return null.
2660 __ li(v0, Operand(isolate()->factory()->null_value()));
2663 // Process the result from the native regexp code.
2666 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2667 // Calculate number of capture registers (number_of_captures + 1) * 2.
2668 // Multiplying by 2 comes for free since a1 is smi-tagged.
2669 STATIC_ASSERT(kSmiTag == 0);
2670 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2671 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
2673 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
2674 __ JumpIfSmi(a0, &runtime);
2675 __ GetObjectType(a0, a2, a2);
2676 __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
2677 // Check that the JSArray is in fast case.
2678 __ lw(last_match_info_elements,
2679 FieldMemOperand(a0, JSArray::kElementsOffset));
2680 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2681 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2682 __ Branch(&runtime, ne, a0, Operand(at));
2683 // Check that the last match info has space for the capture registers and the
2684 // additional information.
2686 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2687 __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
2688 __ sra(at, a0, kSmiTagSize);
2689 __ Branch(&runtime, gt, a2, Operand(at));
2691 // a1: number of capture registers
2692 // subject: subject string
2693 // Store the capture count.
2694 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
2695 __ sw(a2, FieldMemOperand(last_match_info_elements,
2696 RegExpImpl::kLastCaptureCountOffset));
2697 // Store last subject and last input.
2699 FieldMemOperand(last_match_info_elements,
2700 RegExpImpl::kLastSubjectOffset));
2701 __ mov(a2, subject);
2702 __ RecordWriteField(last_match_info_elements,
2703 RegExpImpl::kLastSubjectOffset,
2708 __ mov(subject, a2);
2710 FieldMemOperand(last_match_info_elements,
2711 RegExpImpl::kLastInputOffset));
2712 __ RecordWriteField(last_match_info_elements,
2713 RegExpImpl::kLastInputOffset,
2719 // Get the static offsets vector filled by the native regexp code.
2720 ExternalReference address_of_static_offsets_vector =
2721 ExternalReference::address_of_static_offsets_vector(isolate());
2722 __ li(a2, Operand(address_of_static_offsets_vector));
2724 // a1: number of capture registers
2725 // a2: offsets vector
2726 Label next_capture, done;
2727 // Capture register counter starts from number of capture registers and
2728 // counts down until wrapping after zero.
2730 last_match_info_elements,
2731 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
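// Informal sketch of the copy loop below (names are descriptive only): it
// walks the static offsets vector and the last-match-info elements forward
// while a1 counts down, i.e.
//   for (i = 0; i < number_of_capture_registers; i++)
//     last_match_info_captures[i] = Smi::FromInt(offsets_vector[i]);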
2732 __ bind(&next_capture);
2733 __ Subu(a1, a1, Operand(1));
2734 __ Branch(&done, lt, a1, Operand(zero_reg));
2735 // Read the value from the static offsets vector buffer.
2736 __ lw(a3, MemOperand(a2, 0));
2737 __ addiu(a2, a2, kPointerSize);
2738 // Store the smi value in the last match info.
2739 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
2740 __ sw(a3, MemOperand(a0, 0));
2741 __ Branch(&next_capture, USE_DELAY_SLOT);
2742 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
2746 // Return last match info.
2747 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
2750 // Do the runtime call to execute the regexp.
2752 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2754 // Deferred code for string handling.
2755 // (6) Not a long external string? If yes, go to (8).
2756 __ bind(&not_seq_nor_cons);
2758 __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
2760 // (7) External string. Make it, offset-wise, look like a sequential string.
2761 __ bind(&external_string);
2762 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2763 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2764 if (FLAG_debug_code) {
2765 // Assert that we do not have a cons or slice (indirect strings) here.
2766 // Sequential strings have already been ruled out.
2767 __ And(at, a0, Operand(kIsIndirectStringMask));
2769 kExternalStringExpectedButNotFound,
2774 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2775 // Move the pointer so that offset-wise, it looks like a sequential string.
2776 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2779 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
2780 __ jmp(&seq_string); // Go to (5).
2782 // (8) Short external string or not a string? If yes, bail out to runtime.
2783 __ bind(&not_long_external);
2784 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
2785 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
2786 __ Branch(&runtime, ne, at, Operand(zero_reg));
2788 // (9) Sliced string. Replace subject with parent. Go to (4).
2789 // Load offset into t0 and replace subject string with parent.
2790 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2791 __ sra(t0, t0, kSmiTagSize);
2792 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2793 __ jmp(&check_underlying); // Go to (4).
2794 #endif // V8_INTERPRETED_REGEXP
2798 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2799 // Cache the called function in a feedback vector slot. Cache states
2800 // are uninitialized, monomorphic (indicated by a JSFunction), and
2802 // a0 : number of arguments to the construct function
2803 // a1 : the function to call
2804 // a2 : Feedback vector
2805 // a3 : slot in feedback vector (Smi)
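// Informally, the slot behaves as a small state machine:
//   uninitialized_symbol --> JSFunction (monomorphic), or an AllocationSite
//                            when the callee is the Array function
//   monomorphic, different callee --> megamorphic_symbol
// A megamorphic slot stays megamorphic.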
2806 Label initialize, done, miss, megamorphic, not_array_function;
2808 DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
2809 masm->isolate()->heap()->megamorphic_symbol());
2810 DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
2811 masm->isolate()->heap()->uninitialized_symbol());
2813 // Load the cache state into t0.
2814 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2815 __ Addu(t0, a2, Operand(t0));
2816 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
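// The three instructions above amount to (a3 is a smi, hence the reduced
// shift):
//   t0 = *(a2 + (a3 >> kSmiTagSize) * kPointerSize
//          + FixedArray::kHeaderSize - kHeapObjectTag)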
2818 // A monomorphic cache hit or an already megamorphic state: invoke the
2819 // function without changing the state.
2820 __ Branch(&done, eq, t0, Operand(a1));
2822 if (!FLAG_pretenuring_call_new) {
2823 // If we came here, we need to see if we are the array function.
2824 // If we didn't have a matching function, and we didn't find the megamorphic
2825 // sentinel, then we have in the slot either some other function or an
2826 // AllocationSite. Do a map check on the object in t0.
2827 __ lw(t1, FieldMemOperand(t0, 0));
2828 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2829 __ Branch(&miss, ne, t1, Operand(at));
2831 // Make sure the function is the Array() function
2832 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2833 __ Branch(&megamorphic, ne, a1, Operand(t0));
2839 // A monomorphic miss (i.e., here the cache is not uninitialized) goes
2841 __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
2842 __ Branch(&initialize, eq, t0, Operand(at));
2843 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2844 // write-barrier is needed.
2845 __ bind(&megamorphic);
2846 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2847 __ Addu(t0, a2, Operand(t0));
2848 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
2849 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
2852 // An uninitialized cache is patched with the function.
2853 __ bind(&initialize);
2854 if (!FLAG_pretenuring_call_new) {
2855 // Make sure the function is the Array() function.
2856 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2857 __ Branch(&not_array_function, ne, a1, Operand(t0));
2859 // The target function is the Array constructor.
2860 // Create an AllocationSite if we don't already have it, store it in the
2863 FrameScope scope(masm, StackFrame::INTERNAL);
2864 const RegList kSavedRegs =
2870 // Arguments register must be smi-tagged to call out.
2872 __ MultiPush(kSavedRegs);
2874 CreateAllocationSiteStub create_stub(masm->isolate());
2875 __ CallStub(&create_stub);
2877 __ MultiPop(kSavedRegs);
2882 __ bind(&not_array_function);
2885 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2886 __ Addu(t0, a2, Operand(t0));
2887 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2888 __ sw(a1, MemOperand(t0, 0));
2890 __ Push(t0, a2, a1);
2891 __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
2892 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
2899 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2900 __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2901 __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
2903 // Do not transform the receiver for strict mode functions.
2904 int32_t strict_mode_function_mask =
2905 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
2906 // Do not transform the receiver for native (compiler hints already in t0).
2907 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
2908 __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
2909 __ Branch(cont, ne, at, Operand(zero_reg));
2913 static void EmitSlowCase(MacroAssembler* masm,
2915 Label* non_function) {
2916 // Check for function proxy.
2917 __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
2918 __ push(a1); // put proxy as additional argument
2919 __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
2920 __ mov(a2, zero_reg);
2921 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
2923 Handle<Code> adaptor =
2924 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2925 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2928 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2929 // of the original receiver from the call site).
2930 __ bind(non_function);
2931 __ sw(a1, MemOperand(sp, argc * kPointerSize));
2932 __ li(a0, Operand(argc)); // Set up the number of arguments.
2933 __ mov(a2, zero_reg);
2934 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
2935 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2936 RelocInfo::CODE_TARGET);
2940 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2941 // Wrap the receiver and patch it back onto the stack.
2942 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
2944 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2947 __ Branch(USE_DELAY_SLOT, cont);
2948 __ sw(v0, MemOperand(sp, argc * kPointerSize));
2952 static void CallFunctionNoFeedback(MacroAssembler* masm,
2953 int argc, bool needs_checks,
2954 bool call_as_method) {
2955 // a1 : the function to call
2956 Label slow, non_function, wrap, cont;
2959 // Check that the function is really a JavaScript function.
2960 // a1: pushed function (to be verified)
2961 __ JumpIfSmi(a1, &non_function);
2963 // Goto slow case if we do not have a function.
2964 __ GetObjectType(a1, t0, t0);
2965 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
2968 // Fast-case: Invoke the function now.
2969 // a1: pushed function
2970 ParameterCount actual(argc);
2972 if (call_as_method) {
2974 EmitContinueIfStrictOrNative(masm, &cont);
2977 // Compute the receiver in sloppy mode.
2978 __ lw(a3, MemOperand(sp, argc * kPointerSize));
2981 __ JumpIfSmi(a3, &wrap);
2982 __ GetObjectType(a3, t0, t0);
2983 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
2991 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
2994 // Slow-case: Non-function called.
2996 EmitSlowCase(masm, argc, &non_function);
2999 if (call_as_method) {
3001 // Wrap the receiver and patch it back onto the stack.
3002 EmitWrapCase(masm, argc, &cont);
3007 void CallFunctionStub::Generate(MacroAssembler* masm) {
3008 CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
3012 void CallConstructStub::Generate(MacroAssembler* masm) {
3013 // a0 : number of arguments
3014 // a1 : the function to call
3015 // a2 : feedback vector
3016 // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
3017 Label slow, non_function_call;
3019 // Check that the function is not a smi.
3020 __ JumpIfSmi(a1, &non_function_call);
3021 // Check that the function is a JSFunction.
3022 __ GetObjectType(a1, t0, t0);
3023 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
3025 if (RecordCallTarget()) {
3026 GenerateRecordCallTarget(masm);
3028 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
3029 __ Addu(t1, a2, at);
3030 if (FLAG_pretenuring_call_new) {
3031 // Put the AllocationSite from the feedback vector into a2.
3032 // By adding kPointerSize we encode that we know the AllocationSite
3033 // entry is at the feedback vector slot given by a3 + 1.
3034 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
3036 Label feedback_register_initialized;
3037 // Put the AllocationSite from the feedback vector into a2, or undefined.
3038 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
3039 __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
3040 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3041 __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
3042 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3043 __ bind(&feedback_register_initialized);
3046 __ AssertUndefinedOrAllocationSite(a2, t1);
3049 // Jump to the function-specific construct stub.
3050 Register jmp_reg = t0;
3051 __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3052 __ lw(jmp_reg, FieldMemOperand(jmp_reg,
3053 SharedFunctionInfo::kConstructStubOffset));
3054 __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
3057 // a0: number of arguments
3058 // a1: called object
3062 __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
3063 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3066 __ bind(&non_function_call);
3067 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3069 // Set expected number of arguments to zero (not changing a0).
3070 __ li(a2, Operand(0, RelocInfo::NONE32));
3071 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3072 RelocInfo::CODE_TARGET);
3076 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
3077 __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3078 __ lw(vector, FieldMemOperand(vector,
3079 JSFunction::kSharedFunctionInfoOffset));
3080 __ lw(vector, FieldMemOperand(vector,
3081 SharedFunctionInfo::kFeedbackVectorOffset));
3085 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
3090 EmitLoadTypeFeedbackVector(masm, a2);
3092 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
3093 __ Branch(&miss, ne, a1, Operand(at));
3095 __ li(a0, Operand(arg_count()));
3096 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
3097 __ Addu(at, a2, Operand(at));
3098 __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize));
3100 // Verify that t0 contains an AllocationSite
3101 __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
3102 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3103 __ Branch(&miss, ne, t1, Operand(at));
3106 ArrayConstructorStub stub(masm->isolate(), arg_count());
3107 __ TailCallStub(&stub);
3110 GenerateMiss(masm, IC::kCallIC_Customization_Miss);
3112 // The slow case: we need this no matter what to complete a call after a miss.
3113 CallFunctionNoFeedback(masm,
3119 __ stop("Unexpected code address");
3123 void CallICStub::Generate(MacroAssembler* masm) {
3125 // a3 - slot id (Smi)
3126 Label extra_checks_or_miss, slow_start;
3127 Label slow, non_function, wrap, cont;
3128 Label have_js_function;
3129 int argc = state_.arg_count();
3130 ParameterCount actual(argc);
3132 EmitLoadTypeFeedbackVector(masm, a2);
3134 // The checks. First, does a1 match the recorded monomorphic target?
3135 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
3136 __ Addu(t0, a2, Operand(t0));
3137 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
3138 __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0));
3140 __ bind(&have_js_function);
3141 if (state_.CallAsMethod()) {
3142 EmitContinueIfStrictOrNative(masm, &cont);
3143 // Compute the receiver in sloppy mode.
3144 __ lw(a3, MemOperand(sp, argc * kPointerSize));
3146 __ JumpIfSmi(a3, &wrap);
3147 __ GetObjectType(a3, t0, t0);
3148 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
3153 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
3156 EmitSlowCase(masm, argc, &non_function);
3158 if (state_.CallAsMethod()) {
3160 EmitWrapCase(masm, argc, &cont);
3163 __ bind(&extra_checks_or_miss);
3166 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
3167 __ Branch(&slow_start, eq, t0, Operand(at));
3168 __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
3169 __ Branch(&miss, eq, t0, Operand(at));
3171 if (!FLAG_trace_ic) {
3172 // We are going megamorphic. If the feedback is a JSFunction, it is fine
3173 // to handle it here. More complex cases are dealt with in the runtime.
3174 __ AssertNotSmi(t0);
3175 __ GetObjectType(t0, t1, t1);
3176 __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
3177 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
3178 __ Addu(t0, a2, Operand(t0));
3179 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
3180 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
3181 __ Branch(&slow_start);
3184 // We are here because tracing is on or we are going monomorphic.
3186 GenerateMiss(masm, IC::kCallIC_Miss);
3189 __ bind(&slow_start);
3190 // Check that the function is really a JavaScript function.
3191 // a1: pushed function (to be verified)
3192 __ JumpIfSmi(a1, &non_function);
3194 // Goto slow case if we do not have a function.
3195 __ GetObjectType(a1, t0, t0);
3196 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
3197 __ Branch(&have_js_function);
3201 void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
3202 // Get the receiver of the function from the stack; 1 ~ return address.
3203 __ lw(t0, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
3206 FrameScope scope(masm, StackFrame::INTERNAL);
3208 // Push the receiver and the function and feedback info.
3209 __ Push(t0, a1, a2, a3);
3212 ExternalReference miss = ExternalReference(IC_Utility(id),
3214 __ CallExternalReference(miss, 4);
3216 // Move result to a1 and exit the internal frame.
3222 // StringCharCodeAtGenerator.
3223 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3226 Label got_char_code;
3227 Label sliced_string;
3229 DCHECK(!t0.is(index_));
3230 DCHECK(!t0.is(result_));
3231 DCHECK(!t0.is(object_));
3233 // If the receiver is a smi trigger the non-string case.
3234 __ JumpIfSmi(object_, receiver_not_string_);
3236 // Fetch the instance type of the receiver into result register.
3237 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3238 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3239 // If the receiver is not a string trigger the non-string case.
3240 __ And(t0, result_, Operand(kIsNotStringMask));
3241 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
3243 // If the index is non-smi trigger the non-smi case.
3244 __ JumpIfNotSmi(index_, &index_not_smi_);
3246 __ bind(&got_smi_index_);
3248 // Check for index out of range.
3249 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
3250 __ Branch(index_out_of_range_, ls, t0, Operand(index_));
3252 __ sra(index_, index_, kSmiTagSize);
3254 StringCharLoadGenerator::Generate(masm,
3260 __ sll(result_, result_, kSmiTagSize);
3265 void StringCharCodeAtGenerator::GenerateSlow(
3266 MacroAssembler* masm,
3267 const RuntimeCallHelper& call_helper) {
3268 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3270 // Index is not a smi.
3271 __ bind(&index_not_smi_);
3272 // If index is a heap number, try converting it to an integer.
3275 Heap::kHeapNumberMapRootIndex,
3278 call_helper.BeforeCall(masm);
3279 // Consumed by runtime conversion function:
3280 __ Push(object_, index_);
3281 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3282 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3284 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3285 // NumberToSmi discards numbers that are not exact integers.
3286 __ CallRuntime(Runtime::kNumberToSmi, 1);
3289 // Save the conversion result before the pop instructions below
3290 // have a chance to overwrite it.
3292 __ Move(index_, v0);
3294 // Reload the instance type.
3295 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3296 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3297 call_helper.AfterCall(masm);
3298 // If index is still not a smi, it must be out of range.
3299 __ JumpIfNotSmi(index_, index_out_of_range_);
3300 // Otherwise, return to the fast path.
3301 __ Branch(&got_smi_index_);
3303 // Call runtime. We get here when the receiver is a string and the
3304 // index is a number, but the code for getting the actual character
3305 // is too complex (e.g., when the string needs to be flattened).
3306 __ bind(&call_runtime_);
3307 call_helper.BeforeCall(masm);
3308 __ sll(index_, index_, kSmiTagSize);
3309 __ Push(object_, index_);
3310 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
3312 __ Move(result_, v0);
3314 call_helper.AfterCall(masm);
3317 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3321 // -------------------------------------------------------------------------
3322 // StringCharFromCodeGenerator
3324 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3325 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3327 DCHECK(!t0.is(result_));
3328 DCHECK(!t0.is(code_));
3330 STATIC_ASSERT(kSmiTag == 0);
3331 STATIC_ASSERT(kSmiShiftSize == 0);
3332 DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1));
3335 Operand(kSmiTagMask |
3336 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3337 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
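// The single mask test above covers both bail-out conditions at once: a set
// kSmiTagMask bit means code_ is not a smi, and any bit above
// String::kMaxOneByteCharCode (shifted past the smi tag) means the character
// code is outside the one-byte cache's range.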
3339 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3340 // At this point code register contains smi tagged ASCII char code.
3341 STATIC_ASSERT(kSmiTag == 0);
3342 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
3343 __ Addu(result_, result_, t0);
3344 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3345 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3346 __ Branch(&slow_case_, eq, result_, Operand(t0));
3351 void StringCharFromCodeGenerator::GenerateSlow(
3352 MacroAssembler* masm,
3353 const RuntimeCallHelper& call_helper) {
3354 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3356 __ bind(&slow_case_);
3357 call_helper.BeforeCall(masm);
3359 __ CallRuntime(Runtime::kCharFromCode, 1);
3360 __ Move(result_, v0);
3362 call_helper.AfterCall(masm);
3365 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3369 enum CopyCharactersFlags {
3371 DEST_ALWAYS_ALIGNED = 2
3375 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3380 String::Encoding encoding) {
3381 if (FLAG_debug_code) {
3382 // Check that destination is word aligned.
3383 __ And(scratch, dest, Operand(kPointerAlignmentMask));
3385 kDestinationOfCopyNotAligned,
3390 // Assumes word reads and writes are little endian.
3391 // Nothing to do for zero characters.
3394 if (encoding == String::TWO_BYTE_ENCODING) {
3395 __ Addu(count, count, count);
3398 Register limit = count; // Read until dest equals this.
3399 __ Addu(limit, dest, Operand(count));
3401 Label loop_entry, loop;
3402 // Copy bytes from src to dest until dest hits limit.
3403 __ Branch(&loop_entry);
3405 __ lbu(scratch, MemOperand(src));
3406 __ Addu(src, src, Operand(1));
3407 __ sb(scratch, MemOperand(dest));
3408 __ Addu(dest, dest, Operand(1));
3409 __ bind(&loop_entry);
3410 __ Branch(&loop, lt, dest, Operand(limit));
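// The loop above is a plain byte copy: a two-byte (UC16) character count is
// first doubled so that both encodings copy "count" bytes, and copying runs
// until the destination cursor reaches the precomputed limit. A minimal C++
// sketch of the same logic (illustrative only; not part of V8):
static void CopyCharactersSketch(uint8_t* dest, const uint8_t* src,
                                 size_t count, String::Encoding encoding) {
  if (encoding == String::TWO_BYTE_ENCODING) count += count;  // chars -> bytes
  uint8_t* limit = dest + count;  // copy until dest reaches this address
  while (dest < limit) {
    *dest++ = *src++;
  }
}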
3416 void StringHelper::GenerateHashInit(MacroAssembler* masm,
3418 Register character) {
3419 // hash = seed + character + ((seed + character) << 10);
3420 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
3421 // Untag smi seed and add the character.
3423 __ addu(hash, hash, character);
3424 __ sll(at, hash, 10);
3425 __ addu(hash, hash, at);
3426 // hash ^= hash >> 6;
3427 __ srl(at, hash, 6);
3428 __ xor_(hash, hash, at);
3432 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3434 Register character) {
3435 // hash += character;
3436 __ addu(hash, hash, character);
3437 // hash += hash << 10;
3438 __ sll(at, hash, 10);
3439 __ addu(hash, hash, at);
3440 // hash ^= hash >> 6;
3441 __ srl(at, hash, 6);
3442 __ xor_(hash, hash, at);
3446 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
3448 // hash += hash << 3;
3449 __ sll(at, hash, 3);
3450 __ addu(hash, hash, at);
3451 // hash ^= hash >> 11;
3452 __ srl(at, hash, 11);
3453 __ xor_(hash, hash, at);
3454 // hash += hash << 15;
3455 __ sll(at, hash, 15);
3456 __ addu(hash, hash, at);
3458 __ li(at, Operand(String::kHashBitMask));
3459 __ and_(hash, hash, at);
3461 // if (hash == 0) hash = 27;
3462 __ ori(at, zero_reg, StringHasher::kZeroHash);
3463 __ Movz(hash, at, hash);
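// Taken together, GenerateHashInit, GenerateHashAddCharacter and
// GenerateHashGetHash implement a Jenkins-style one-at-a-time hash over the
// character codes, masked to the hash-field width and mapped away from zero.
// A scalar C++ sketch of the same computation (illustrative only; the helper
// name and signature are not part of V8):
static uint32_t StringHashSketch(uint32_t untagged_seed, const uint8_t* chars,
                                 int length) {
  uint32_t hash = untagged_seed;
  for (int i = 0; i < length; i++) {
    hash += chars[i];        // GenerateHashInit / GenerateHashAddCharacter
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;         // GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= String::kHashBitMask;
  return hash == 0 ? StringHasher::kZeroHash : hash;
}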
3467 void SubStringStub::Generate(MacroAssembler* masm) {
3469 // Stack frame on entry.
3470 // ra: return address
3475 // This stub is called from the native-call %_SubString(...), so
3476 // nothing can be assumed about the arguments. It is tested that:
3477 // "string" is a sequential string,
3478 // both "from" and "to" are smis, and
3479 // 0 <= from <= to <= string.length.
3480 // If any of these assumptions fail, we call the runtime system.
3482 const int kToOffset = 0 * kPointerSize;
3483 const int kFromOffset = 1 * kPointerSize;
3484 const int kStringOffset = 2 * kPointerSize;
3486 __ lw(a2, MemOperand(sp, kToOffset));
3487 __ lw(a3, MemOperand(sp, kFromOffset));
3488 STATIC_ASSERT(kFromOffset == kToOffset + 4);
3489 STATIC_ASSERT(kSmiTag == 0);
3490 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3492 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
3493 // safe in this case.
3494 __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
3495 __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
3496 // Both a2 and a3 are untagged integers.
3498 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
3500 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
3501 __ Subu(a2, a2, a3);
3503 // Make sure first argument is a string.
3504 __ lw(v0, MemOperand(sp, kStringOffset));
3505 __ JumpIfSmi(v0, &runtime);
3506 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
3507 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3508 __ And(t0, a1, Operand(kIsNotStringMask));
3510 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3513 __ Branch(&single_char, eq, a2, Operand(1));
3515 // Short-cut for the case of trivial substring.
3517 // v0: original string
3518 // a2: result string length
3519 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
3521 // Return original string.
3522 __ Branch(&return_v0, eq, a2, Operand(t0));
3523 // Longer than original string's length or negative: unsafe arguments.
3524 __ Branch(&runtime, hi, a2, Operand(t0));
3525 // Shorter than original string's length: an actual substring.
3527 // Deal with different string types: update the index if necessary
3528 // and put the underlying string into t1.
3529 // v0: original string
3530 // a1: instance type
3532 // a3: from index (untagged)
3533 Label underlying_unpacked, sliced_string, seq_or_external_string;
3534 // If the string is not indirect, it can only be sequential or external.
3535 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3536 STATIC_ASSERT(kIsIndirectStringMask != 0);
3537 __ And(t0, a1, Operand(kIsIndirectStringMask));
3538 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
3539 // t0 is used as a scratch register and can be overwritten in either case.
3540 __ And(t0, a1, Operand(kSlicedNotConsMask));
3541 __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
3542 // Cons string. Check whether it is flat, then fetch first part.
3543 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
3544 __ LoadRoot(t0, Heap::kempty_stringRootIndex);
3545 __ Branch(&runtime, ne, t1, Operand(t0));
3546 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
3547 // Update instance type.
3548 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3549 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3550 __ jmp(&underlying_unpacked);
3552 __ bind(&sliced_string);
3553 // Sliced string. Fetch parent and correct start index by offset.
3554 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3555 __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3556 __ sra(t0, t0, 1); // Add offset to index.
3557 __ Addu(a3, a3, t0);
3558 // Update instance type.
3559 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3560 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3561 __ jmp(&underlying_unpacked);
3563 __ bind(&seq_or_external_string);
3564 // Sequential or external string. Just move string to the expected register.
3567 __ bind(&underlying_unpacked);
3569 if (FLAG_string_slices) {
3571 // t1: underlying subject string
3572 // a1: instance type of underlying subject string
3574 // a3: adjusted start index (untagged)
3575 // Short slice. Copy instead of slicing.
3576 __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
3577 // Allocate new sliced string. At this point we do not reload the instance
3578 // type including the string encoding because we simply rely on the info
3579 // provided by the original string. It does not matter if the original
3580 // string's encoding is wrong because we always have to recheck encoding of
3581 // the newly created string's parent anyway due to externalized strings.
3582 Label two_byte_slice, set_slice_header;
3583 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3584 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3585 __ And(t0, a1, Operand(kStringEncodingMask));
3586 __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
3587 __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
3588 __ jmp(&set_slice_header);
3589 __ bind(&two_byte_slice);
3590 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
3591 __ bind(&set_slice_header);
3593 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3594 __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3597 __ bind(&copy_routine);
3600 // t1: underlying subject string
3601 // a1: instance type of underlying subject string
3603 // a3: adjusted start index (untagged)
3604 Label two_byte_sequential, sequential_string, allocate_result;
3605 STATIC_ASSERT(kExternalStringTag != 0);
3606 STATIC_ASSERT(kSeqStringTag == 0);
3607 __ And(t0, a1, Operand(kExternalStringTag));
3608 __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
3610 // Handle external string.
3611 // Rule out short external strings.
3612 STATIC_ASSERT(kShortExternalStringTag != 0);
3613 __ And(t0, a1, Operand(kShortExternalStringTag));
3614 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3615 __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
3616 // t1 already points to the first character of the underlying string.
3617 __ jmp(&allocate_result);
3619 __ bind(&sequential_string);
3620 // Locate first character of underlying subject string.
3621 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3622 __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3624 __ bind(&allocate_result);
3625 // Allocate the result string.
3626 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3627 __ And(t0, a1, Operand(kStringEncodingMask));
3628 __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
3630 // Allocate and copy the resulting ASCII string.
3631 __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
3633 // Locate first character of substring to copy.
3634 __ Addu(t1, t1, a3);
3636 // Locate first character of result.
3637 __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3639 // v0: result string
3640 // a1: first character of result string
3641 // a2: result string length
3642 // t1: first character of substring to copy
3643 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3644 StringHelper::GenerateCopyCharacters(
3645 masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
3648 // Allocate and copy the resulting two-byte string.
3649 __ bind(&two_byte_sequential);
3650 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
3652 // Locate first character of substring to copy.
3653 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3655 __ Addu(t1, t1, t0);
3656 // Locate first character of result.
3657 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3659 // v0: result string.
3660 // a1: first character of result.
3661 // a2: result length.
3662 // t1: first character of substring to copy.
3663 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3664 StringHelper::GenerateCopyCharacters(
3665 masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
3667 __ bind(&return_v0);
3668 Counters* counters = isolate()->counters();
3669 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
3672 // Just jump to runtime to create the sub string.
3674 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3676 __ bind(&single_char);
3677 // v0: original string
3678 // a1: instance type
3680 // a3: from index (untagged)
3682 StringCharAtGenerator generator(
3683 v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3684 generator.GenerateFast(masm);
3686 generator.SkipSlow(masm, &runtime);
3690 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
3695 Register scratch3) {
3696 Register length = scratch1;
3699 Label strings_not_equal, check_zero_length;
3700 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
3701 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3702 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
3703 __ bind(&strings_not_equal);
3704 DCHECK(is_int16(NOT_EQUAL));
3705 __ Ret(USE_DELAY_SLOT);
3706 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
3708 // Check if the length is zero.
3709 Label compare_chars;
3710 __ bind(&check_zero_length);
3711 STATIC_ASSERT(kSmiTag == 0);
3712 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
3713 DCHECK(is_int16(EQUAL));
3714 __ Ret(USE_DELAY_SLOT);
3715 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3717 // Compare characters.
3718 __ bind(&compare_chars);
3720 GenerateAsciiCharsCompareLoop(masm,
3721 left, right, length, scratch2, scratch3, v0,
3722 &strings_not_equal);
3724 // Characters are equal.
3725 __ Ret(USE_DELAY_SLOT);
3726 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3730 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
3736 Register scratch4) {
3737 Label result_not_equal, compare_lengths;
3738 // Find minimum length and length difference.
3739 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
3740 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3741 __ Subu(scratch3, scratch1, Operand(scratch2));
3742 Register length_delta = scratch3;
3743 __ slt(scratch4, scratch2, scratch1);
3744 __ Movn(scratch1, scratch2, scratch4);
3745 Register min_length = scratch1;
3746 STATIC_ASSERT(kSmiTag == 0);
3747 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
3750 GenerateAsciiCharsCompareLoop(masm,
3751 left, right, min_length, scratch2, scratch4, v0,
3754 // Compare lengths - strings up to min-length are equal.
3755 __ bind(&compare_lengths);
3756 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3757 // Use length_delta as result if it's zero.
3758 __ mov(scratch2, length_delta);
3759 __ mov(scratch4, zero_reg);
3760 __ mov(v0, zero_reg);
3762 __ bind(&result_not_equal);
3763 // Conditionally update the result based either on length_delta or
3764 // the last comparison performed in the loop above.
3766 __ Branch(&ret, eq, scratch2, Operand(scratch4));
3767 __ li(v0, Operand(Smi::FromInt(GREATER)));
3768 __ Branch(&ret, gt, scratch2, Operand(scratch4));
3769 __ li(v0, Operand(Smi::FromInt(LESS)));
3775 void StringCompareStub::GenerateAsciiCharsCompareLoop(
3776 MacroAssembler* masm,
3783 Label* chars_not_equal) {
3784 // Change index to run from -length to -1 by adding length to string
3785 // start. This means that the loop ends when index reaches zero, which
3786 // doesn't need an additional compare.
3787 __ SmiUntag(length);
3788 __ Addu(scratch1, length,
3789 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3790 __ Addu(left, left, Operand(scratch1));
3791 __ Addu(right, right, Operand(scratch1));
3792 __ Subu(length, zero_reg, length);
3793 Register index = length; // index = -length;
3799 __ Addu(scratch3, left, index);
3800 __ lbu(scratch1, MemOperand(scratch3));
3801 __ Addu(scratch3, right, index);
3802 __ lbu(scratch2, MemOperand(scratch3));
3803 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
3804 __ Addu(index, index, 1);
3805 __ Branch(&loop, ne, index, Operand(zero_reg));
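// Sketch of the negative-index trick used above (illustrative C++ only; the
// helper name is not part of V8): both string cursors are advanced to the end
// of the compared range, and the index counts from -length up to zero, so the
// loop terminates without a separate bounds compare.
static bool OneByteCharsEqualSketch(const uint8_t* left_end,
                                    const uint8_t* right_end, int length) {
  for (int index = -length; index != 0; index++) {
    if (left_end[index] != right_end[index]) return false;
  }
  return true;
}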
3809 void StringCompareStub::Generate(MacroAssembler* masm) {
3812 Counters* counters = isolate()->counters();
3814 // Stack frame on entry.
3815 // sp[0]: right string
3816 // sp[4]: left string
3817 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
3818 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
3821 __ Branch(&not_same, ne, a0, Operand(a1));
3822 STATIC_ASSERT(EQUAL == 0);
3823 STATIC_ASSERT(kSmiTag == 0);
3824 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3825 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
3830 // Check that both objects are sequential ASCII strings.
3831 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
3833 // Compare flat ASCII strings natively. Remove arguments from stack first.
3834 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
3835 __ Addu(sp, sp, Operand(2 * kPointerSize));
3836 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
3839 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3843 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3844 // ----------- S t a t e -------------
3847 // -- ra : return address
3848 // -----------------------------------
3850 // Load a2 with the allocation site. We stick an undefined dummy value here
3851 // and replace it with the real allocation site later when we instantiate this
3852 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3853 __ li(a2, handle(isolate()->heap()->undefined_value()));
3855 // Make sure that we actually patched the allocation site.
3856 if (FLAG_debug_code) {
3857 __ And(at, a2, Operand(kSmiTagMask));
3858 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
3859 __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
3860 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3861 __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
3864 // Tail call into the stub that handles binary operations with allocation
3866 BinaryOpWithAllocationSiteStub stub(isolate(), state_);
3867 __ TailCallStub(&stub);
3871 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
3872 DCHECK(state_ == CompareIC::SMI);
3875 __ JumpIfNotSmi(a2, &miss);
3877 if (GetCondition() == eq) {
3878 // For equality we do not care about the sign of the result.
3879 __ Ret(USE_DELAY_SLOT);
3880 __ Subu(v0, a0, a1);
3882 // Untag before subtracting to avoid handling overflow.
3885 __ Ret(USE_DELAY_SLOT);
3886 __ Subu(v0, a1, a0);
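// Why subtraction is enough here (sketch, illustrative only; not part of V8):
// for equality the raw difference of two smis is zero exactly when they are
// equal, and for ordering the operands are untagged first so the 32-bit
// difference cannot overflow; its sign then encodes LESS/EQUAL/GREATER.
static int32_t CompareSmisSketch(int32_t lhs_smi, int32_t rhs_smi) {
  int32_t lhs = lhs_smi >> kSmiTagSize;  // untag
  int32_t rhs = rhs_smi >> kSmiTagSize;
  return lhs - rhs;  // < 0: LESS, 0: EQUAL, > 0: GREATER
}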
3894 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
3895 DCHECK(state_ == CompareIC::NUMBER);
3898 Label unordered, maybe_undefined1, maybe_undefined2;
3901 if (left_ == CompareIC::SMI) {
3902 __ JumpIfNotSmi(a1, &miss);
3904 if (right_ == CompareIC::SMI) {
3905 __ JumpIfNotSmi(a0, &miss);
3908 // Inlining the double comparison and falling back to the general compare
3909 // stub if NaN is involved.
3910 // Load left and right operand.
3911 Label done, left, left_smi, right_smi;
3912 __ JumpIfSmi(a0, &right_smi);
3913 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3915 __ Subu(a2, a0, Operand(kHeapObjectTag));
3916 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
3918 __ bind(&right_smi);
3919 __ SmiUntag(a2, a0); // Can't clobber a0 yet.
3920 FPURegister single_scratch = f6;
3921 __ mtc1(a2, single_scratch);
3922 __ cvt_d_w(f2, single_scratch);
3925 __ JumpIfSmi(a1, &left_smi);
3926 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3928 __ Subu(a2, a1, Operand(kHeapObjectTag));
3929 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
3932 __ SmiUntag(a2, a1); // Can't clobber a1 yet.
3933 single_scratch = f8;
3934 __ mtc1(a2, single_scratch);
3935 __ cvt_d_w(f0, single_scratch);
3939 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
3940 Label fpu_eq, fpu_lt;
3941 // Test if equal, and also handle the unordered/NaN case.
3942 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
3944 // Test if less (unordered case is already handled).
3945 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
3947 // Otherwise it's greater, so just fall thru, and return.
3948 DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
3949 __ Ret(USE_DELAY_SLOT);
3950 __ li(v0, Operand(GREATER));
3953 __ Ret(USE_DELAY_SLOT);
3954 __ li(v0, Operand(EQUAL));
3957 __ Ret(USE_DELAY_SLOT);
3958 __ li(v0, Operand(LESS));
3960 __ bind(&unordered);
3961 __ bind(&generic_stub);
3962 ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
3963 CompareIC::GENERIC);
3964 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3966 __ bind(&maybe_undefined1);
3967 if (Token::IsOrderedRelationalCompareOp(op_)) {
3968 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3969 __ Branch(&miss, ne, a0, Operand(at));
3970 __ JumpIfSmi(a1, &unordered);
3971 __ GetObjectType(a1, a2, a2);
3972 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
3976 __ bind(&maybe_undefined2);
3977 if (Token::IsOrderedRelationalCompareOp(op_)) {
3978 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3979 __ Branch(&unordered, eq, a1, Operand(at));
3987 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3988 DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
3991 // Registers containing left and right operands respectively.
3993 Register right = a0;
3997 // Check that both operands are heap objects.
3998 __ JumpIfEitherSmi(left, right, &miss);
4000 // Check that both operands are internalized strings.
4001 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4002 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4003 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4004 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4005 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4006 __ Or(tmp1, tmp1, Operand(tmp2));
4007 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4008 __ Branch(&miss, ne, at, Operand(zero_reg));
4010 // Make sure a0 is non-zero. At this point input operands are
4011 // guaranteed to be non-zero.
4012 DCHECK(right.is(a0));
4013 STATIC_ASSERT(EQUAL == 0);
4014 STATIC_ASSERT(kSmiTag == 0);
4016 // Internalized strings are compared by identity.
4017 __ Ret(ne, left, Operand(right));
4018 DCHECK(is_int16(EQUAL));
4019 __ Ret(USE_DELAY_SLOT);
4020 __ li(v0, Operand(Smi::FromInt(EQUAL)));
4027 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
4028 DCHECK(state_ == CompareIC::UNIQUE_NAME);
4029 DCHECK(GetCondition() == eq);
4032 // Registers containing left and right operands respectively.
4034 Register right = a0;
4038 // Check that both operands are heap objects.
4039 __ JumpIfEitherSmi(left, right, &miss);
4041 // Check that both operands are unique names. This leaves the instance
4042 // types loaded in tmp1 and tmp2.
4043 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4044 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4045 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4046 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4048 __ JumpIfNotUniqueName(tmp1, &miss);
4049 __ JumpIfNotUniqueName(tmp2, &miss);
4054 // Unique names are compared by identity.
4056 __ Branch(&done, ne, left, Operand(right));
4057 // Make sure a0 is non-zero. At this point input operands are
4058 // guaranteed to be non-zero.
4059 DCHECK(right.is(a0));
4060 STATIC_ASSERT(EQUAL == 0);
4061 STATIC_ASSERT(kSmiTag == 0);
4062 __ li(v0, Operand(Smi::FromInt(EQUAL)));
4071 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
4072 DCHECK(state_ == CompareIC::STRING);
4075 bool equality = Token::IsEqualityOp(op_);
4077 // Registers containing left and right operands respectively.
4079 Register right = a0;
4086 // Check that both operands are heap objects.
4087 __ JumpIfEitherSmi(left, right, &miss);
4089 // Check that both operands are strings. This leaves the instance
4090 // types loaded in tmp1 and tmp2.
4091 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4092 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4093 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4094 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4095 STATIC_ASSERT(kNotStringTag != 0);
4096 __ Or(tmp3, tmp1, tmp2);
4097 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
4098 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
4100 // Fast check for identical strings.
4101 Label left_ne_right;
4102 STATIC_ASSERT(EQUAL == 0);
4103 STATIC_ASSERT(kSmiTag == 0);
4104 __ Branch(&left_ne_right, ne, left, Operand(right));
4105 __ Ret(USE_DELAY_SLOT);
4106 __ mov(v0, zero_reg); // In the delay slot.
4107 __ bind(&left_ne_right);
4109 // Handle not identical strings.
4111 // Check that both strings are internalized strings. If they are, we're done
4112 // because we already know they are not identical. We know they are both
4115 DCHECK(GetCondition() == eq);
4116 STATIC_ASSERT(kInternalizedTag == 0);
4117 __ Or(tmp3, tmp1, Operand(tmp2));
4118 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
4120 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
4121 // Make sure a0 is non-zero. At this point input operands are
4122 // guaranteed to be non-zero.
4123 DCHECK(right.is(a0));
4124 __ Ret(USE_DELAY_SLOT);
4125 __ mov(v0, a0); // In the delay slot.
4126 __ bind(&is_symbol);
4129 // Check that both strings are sequential ASCII.
4131 __ JumpIfBothInstanceTypesAreNotSequentialAscii(
4132 tmp1, tmp2, tmp3, tmp4, &runtime);
4134 // Compare flat ASCII strings. Returns when done.
4136 StringCompareStub::GenerateFlatAsciiStringEquals(
4137 masm, left, right, tmp1, tmp2, tmp3);
4139 StringCompareStub::GenerateCompareFlatAsciiStrings(
4140 masm, left, right, tmp1, tmp2, tmp3, tmp4);
4143 // Handle more complex cases in runtime.
4145 __ Push(left, right);
4147 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
4149 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
4157 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
4158 DCHECK(state_ == CompareIC::OBJECT);
4160 __ And(a2, a1, Operand(a0));
4161 __ JumpIfSmi(a2, &miss);
4163 __ GetObjectType(a0, a2, a2);
4164 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
4165 __ GetObjectType(a1, a2, a2);
4166 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
4168 DCHECK(GetCondition() == eq);
4169 __ Ret(USE_DELAY_SLOT);
4170 __ subu(v0, a0, a1);
4177 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
4180 __ JumpIfSmi(a2, &miss);
4181 __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
4182 __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
4183 __ Branch(&miss, ne, a2, Operand(known_map_));
4184 __ Branch(&miss, ne, a3, Operand(known_map_));
4186 __ Ret(USE_DELAY_SLOT);
4187 __ subu(v0, a0, a1);
4194 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
4196 // Call the runtime system in a fresh internal frame.
4197 ExternalReference miss =
4198 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
4199 FrameScope scope(masm, StackFrame::INTERNAL);
4201 __ Push(ra, a1, a0);
4202 __ li(t0, Operand(Smi::FromInt(op_)));
4203 __ addiu(sp, sp, -kPointerSize);
4204 __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
4205 __ sw(t0, MemOperand(sp)); // In the delay slot.
4206 // Compute the entry point of the rewritten stub.
4207 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
4208 // Restore registers.
4215 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4216 // Make room for the arguments to fit the C calling convention. Most of the callers
4217 // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
4218 // so they handle stack restoring and we don't have to do that here.
4219 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
4220 // kCArgsSlotsSize stack space after the call.
4221 __ Subu(sp, sp, Operand(kCArgsSlotsSize));
4222 // Place the return address on the stack, making the call
4223 // GC safe. The RegExp backend also relies on this.
4224 __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
4225 __ Call(t9); // Call the C++ function.
4226 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
4228 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
4229 // In case of an error the return address may point to a memory area
4230 // filled with kZapValue by the GC.
4231 // Dereference the address and check for this.
4232 __ lw(t0, MemOperand(t9));
4233 __ Assert(ne, kReceivedInvalidReturnAddress, t0,
4234 Operand(reinterpret_cast<uint32_t>(kZapValue)));
4240 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
4243 intptr_t loc = reinterpret_cast<intptr_t>(GetCode().location());
4244 __ Move(t9, target);
4245 __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
4250 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4254 Register properties,
4256 Register scratch0) {
4257 DCHECK(name->IsUniqueName());
4258 // If the names in the slots probed for this hash value (probes 1 to
4259 // kProbes - 1) are not equal to the name, and the kProbes-th slot is
4260 // unused (its name is the undefined value), the hash table is guaranteed
4261 // not to contain the property. This holds even if some slots hold
4262 // deleted properties (their names are the hole value).
4263 for (int i = 0; i < kInlinedProbes; i++) {
4264 // scratch0 points to properties hash.
4265 // Compute the masked index: (hash + i + i * i) & mask.
4266 Register index = scratch0;
4267 // Capacity is smi 2^n.
4268 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
4269 __ Subu(index, index, Operand(1));
4270 __ And(index, index, Operand(
4271 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
4273 // Scale the index by multiplying by the entry size.
4274 DCHECK(NameDictionary::kEntrySize == 3);
4275 __ sll(at, index, 1);
4276 __ Addu(index, index, at);
4278 Register entity_name = scratch0;
4279 // Having undefined at this place means the name is not contained.
4280 DCHECK_EQ(kSmiTagSize, 1);
4281 Register tmp = properties;
4282 __ sll(scratch0, index, 1);
4283 __ Addu(tmp, properties, scratch0);
4284 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4286 DCHECK(!tmp.is(entity_name));
4287 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
4288 __ Branch(done, eq, entity_name, Operand(tmp));
4290 // Load the hole ready for use below:
4291 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
4293 // Stop if found the property.
4294 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
4297 __ Branch(&good, eq, entity_name, Operand(tmp));
4299 // Check if the entry name is not a unique name.
4300 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4302 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4303 __ JumpIfNotUniqueName(entity_name, miss);
4306 // Restore the properties.
4308 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4311 const int spill_mask =
4312 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
4313 a2.bit() | a1.bit() | a0.bit() | v0.bit());
4315 __ MultiPush(spill_mask);
4316 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4317 __ li(a1, Operand(Handle<Name>(name)));
4318 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
4321 __ MultiPop(spill_mask);
4323 __ Branch(done, eq, at, Operand(zero_reg));
4324 __ Branch(miss, ne, at, Operand(zero_reg));
4328 // Probe the name dictionary in the |elements| register. Jump to the
4329 // |done| label if a property with the given name is found. Jump to
4330 // the |miss| label otherwise.
4331 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
4332 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4338 Register scratch2) {
4339 DCHECK(!elements.is(scratch1));
4340 DCHECK(!elements.is(scratch2));
4341 DCHECK(!name.is(scratch1));
4342 DCHECK(!name.is(scratch2));
4344 __ AssertName(name);
4346 // Compute the capacity mask.
4347 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
4348 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
4349 __ Subu(scratch1, scratch1, Operand(1));
4351 // Generate an unrolled loop that performs a few probes before
4352 // giving up. Measurements done on Gmail indicate that 2 probes
4353 // cover ~93% of loads from dictionaries.
4354 for (int i = 0; i < kInlinedProbes; i++) {
4355 // Compute the masked index: (hash + i + i * i) & mask.
4356 __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4358 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4359 // the hash in a separate instruction. The value hash + i + i * i is right
4360 // shifted by the srl below and masked by the And instruction.
4361 DCHECK(NameDictionary::GetProbeOffset(i) <
4362 1 << (32 - Name::kHashFieldOffset));
4363 __ Addu(scratch2, scratch2, Operand(
4364 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4366 __ srl(scratch2, scratch2, Name::kHashShift);
4367 __ And(scratch2, scratch1, scratch2);
4369 // Scale the index by multiplying by the element size.
4370 DCHECK(NameDictionary::kEntrySize == 3);
4371 // scratch2 = scratch2 * 3.
4373 __ sll(at, scratch2, 1);
4374 __ Addu(scratch2, scratch2, at);
4376 // Check if the key is identical to the name.
4377 __ sll(at, scratch2, 2);
4378 __ Addu(scratch2, elements, at);
4379 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
4380 __ Branch(done, eq, name, Operand(at));
4383 const int spill_mask =
4384 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
4385 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
4386 ~(scratch1.bit() | scratch2.bit());
4388 __ MultiPush(spill_mask);
4390 DCHECK(!elements.is(a1));
4392 __ Move(a0, elements);
4394 __ Move(a0, elements);
4397 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4399 __ mov(scratch2, a2);
4401 __ MultiPop(spill_mask);
4403 __ Branch(done, ne, at, Operand(zero_reg));
4404 __ Branch(miss, eq, at, Operand(zero_reg));
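// Sketch of the probing scheme the inlined probes above follow (illustrative
// C++ only, mirroring the comments above; not part of V8): quadratic probing
// over a power-of-two table, with each entry occupying kEntrySize (== 3) words.
static uint32_t ProbeEntryIndexSketch(uint32_t hash, uint32_t probe,
                                      uint32_t capacity_mask) {
  uint32_t index = (hash + probe + probe * probe) & capacity_mask;
  return index * 3;  // scale by the entry size: index * 3 == (index << 1) + index
}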
4408 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4409 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4410 // we cannot call anything that could cause a GC from this stub.
4412 // result: NameDictionary to probe
4414 // dictionary: NameDictionary to probe.
4415 // index: will hold an index of entry if lookup is successful.
4416 // might alias with result_.
4418 // result_ is zero if lookup failed, non zero otherwise.
4420 Register result = v0;
4421 Register dictionary = a0;
4423 Register index = a2;
4426 Register undefined = t1;
4427 Register entry_key = t2;
4429 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4431 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
4432 __ sra(mask, mask, kSmiTagSize);
4433 __ Subu(mask, mask, Operand(1));
4435 __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4437 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4439 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4440 // Compute the masked index: (hash + i + i * i) & mask.
4441 // Capacity is smi 2^n.
4443 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4444 // the hash in a separate instruction. The value hash + i + i * i is right
4445 // shifted by the srl below and masked by the And instruction.
4446 DCHECK(NameDictionary::GetProbeOffset(i) <
4447 1 << (32 - Name::kHashFieldOffset));
4448 __ Addu(index, hash, Operand(
4449 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4451 __ mov(index, hash);
4453 __ srl(index, index, Name::kHashShift);
4454 __ And(index, mask, index);
4456 // Scale the index by multiplying by the entry size.
4457 DCHECK(NameDictionary::kEntrySize == 3);
4460 __ sll(index, index, 1);
4461 __ Addu(index, index, at);
4464 DCHECK_EQ(kSmiTagSize, 1);
4465 __ sll(index, index, 2);
4466 __ Addu(index, index, dictionary);
4467 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
4469 // Having undefined at this place means the name is not contained.
4470 __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
4472 // Stop if found the property.
4473 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
4475 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
4476 // Check if the entry name is not a unique name.
4477 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4479 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4480 __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
4484 __ bind(&maybe_in_dictionary);
4485 // If we are doing negative lookup then probing failure should be
4486 // treated as a lookup success. For positive lookup probing failure
4487 // should be treated as lookup failure.
4488 if (mode_ == POSITIVE_LOOKUP) {
4489 __ Ret(USE_DELAY_SLOT);
4490 __ mov(result, zero_reg);
4493 __ bind(&in_dictionary);
4494 __ Ret(USE_DELAY_SLOT);
4497 __ bind(&not_in_dictionary);
4498 __ Ret(USE_DELAY_SLOT);
4499 __ mov(result, zero_reg);
4503 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4505 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4507 // Hydrogen code stubs need stub2 at snapshot time.
4508 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4513 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
4514 // the value has just been written into the object; this stub now makes sure
4515 // the GC is kept informed. The word in the object where the value has been
4516 // written is in the address register.
4517 void RecordWriteStub::Generate(MacroAssembler* masm) {
4518 Label skip_to_incremental_noncompacting;
4519 Label skip_to_incremental_compacting;
4521 // The first two branch+nop instructions are generated with labels so as to
4522 // get the offset fixed up correctly by the bind(Label*) call. We patch it
4523 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
4524 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
4525 // incremental heap marking.
4526 // See RecordWriteStub::Patch for details.
4527 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
4529 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
4532 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4533 __ RememberedSetHelper(object_,
4537 MacroAssembler::kReturnAtEnd);
4541 __ bind(&skip_to_incremental_noncompacting);
4542 GenerateIncremental(masm, INCREMENTAL);
4544 __ bind(&skip_to_incremental_compacting);
4545 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4547 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4548 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4550 PatchBranchIntoNop(masm, 0);
4551 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
4555 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4558 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4559 Label dont_need_remembered_set;
4561 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4562 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4564 &dont_need_remembered_set);
4566 __ CheckPageFlag(regs_.object(),
4568 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4570 &dont_need_remembered_set);
4572 // First notify the incremental marker if necessary, then update the
4574 CheckNeedsToInformIncrementalMarker(
4575 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4576 InformIncrementalMarker(masm);
4577 regs_.Restore(masm);
4578 __ RememberedSetHelper(object_,
4582 MacroAssembler::kReturnAtEnd);
4584 __ bind(&dont_need_remembered_set);
4587 CheckNeedsToInformIncrementalMarker(
4588 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4589 InformIncrementalMarker(masm);
4590 regs_.Restore(masm);
4595 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4596 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
4597 int argument_count = 3;
4598 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4600 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4601 DCHECK(!address.is(regs_.object()));
4602 DCHECK(!address.is(a0));
4603 __ Move(address, regs_.address());
4604 __ Move(a0, regs_.object());
4605 __ Move(a1, address);
4606 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4608 AllowExternalCallThatCantCauseGC scope(masm);
4610 ExternalReference::incremental_marking_record_write_function(isolate()),
4612 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
4616 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4617 MacroAssembler* masm,
4618 OnNoNeedToInformIncrementalMarker on_no_need,
4621 Label need_incremental;
4622 Label need_incremental_pop_scratch;
4624 __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4625 __ lw(regs_.scratch1(),
4626 MemOperand(regs_.scratch0(),
4627 MemoryChunk::kWriteBarrierCounterOffset));
4628 __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
4629 __ sw(regs_.scratch1(),
4630 MemOperand(regs_.scratch0(),
4631 MemoryChunk::kWriteBarrierCounterOffset));
4632 __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
4634 // Let's look at the color of the object: If it is not black we don't have
4635 // to inform the incremental marker.
4636 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4638 regs_.Restore(masm);
4639 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4640 __ RememberedSetHelper(object_,
4644 MacroAssembler::kReturnAtEnd);
4651 // Get the value from the slot.
4652 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4654 if (mode == INCREMENTAL_COMPACTION) {
4655 Label ensure_not_white;
4657 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4658 regs_.scratch1(), // Scratch.
4659 MemoryChunk::kEvacuationCandidateMask,
4663 __ CheckPageFlag(regs_.object(),
4664 regs_.scratch1(), // Scratch.
4665 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4669 __ bind(&ensure_not_white);
4672 // We need extra registers for this, so we push the object and the address
4673 // register temporarily.
4674 __ Push(regs_.object(), regs_.address());
4675 __ EnsureNotWhite(regs_.scratch0(), // The value.
4676 regs_.scratch1(), // Scratch.
4677 regs_.object(), // Scratch.
4678 regs_.address(), // Scratch.
4679 &need_incremental_pop_scratch);
4680 __ Pop(regs_.object(), regs_.address());
4682 regs_.Restore(masm);
4683 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4684 __ RememberedSetHelper(object_,
4688 MacroAssembler::kReturnAtEnd);
4693 __ bind(&need_incremental_pop_scratch);
4694 __ Pop(regs_.object(), regs_.address());
4696 __ bind(&need_incremental);
4698 // Fall through when we need to inform the incremental marker.
4702 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4703 // ----------- S t a t e -------------
4704 // -- a0 : element value to store
4705 // -- a3 : element index as smi
4706 // -- sp[0] : array literal index in function as smi
4707 // -- sp[4] : array literal
4708 // clobbers a1, a2, t0
4709 // -----------------------------------
4712 Label double_elements;
4714 Label slow_elements;
4715 Label fast_elements;
4717 // Get array literal index, array literal and its map.
4718 __ lw(t0, MemOperand(sp, 0 * kPointerSize));
4719 __ lw(a1, MemOperand(sp, 1 * kPointerSize));
4720 __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
4722 __ CheckFastElements(a2, t1, &double_elements);
4723 // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
4724 __ JumpIfSmi(a0, &smi_element);
4725 __ CheckFastSmiElements(a2, t1, &fast_elements);
4727 // Storing into the array literal requires an elements transition. Call into
4729 __ bind(&slow_elements);
4731 __ Push(a1, a3, a0);
4732 __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4733 __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
4735 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4737 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4738 __ bind(&fast_elements);
4739 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4740 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4741 __ Addu(t2, t1, t2);
4742 __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4743 __ sw(a0, MemOperand(t2, 0));
4744 // Update the write barrier for the array store.
4745 __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
4746 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4747 __ Ret(USE_DELAY_SLOT);
4750 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4751 // and value is Smi.
4752 __ bind(&smi_element);
4753 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4754 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4755 __ Addu(t2, t1, t2);
4756 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
4757 __ Ret(USE_DELAY_SLOT);
4760 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
4761 __ bind(&double_elements);
4762 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4763 __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
4764 __ Ret(USE_DELAY_SLOT);
4769 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4770 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4771 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4772 int parameter_count_offset =
4773 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4774 __ lw(a1, MemOperand(fp, parameter_count_offset));
4775 if (function_mode_ == JS_FUNCTION_STUB_MODE) {
4776 __ Addu(a1, a1, Operand(1));
4778 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4779 __ sll(a1, a1, kPointerSizeLog2);
4780 __ Ret(USE_DELAY_SLOT);
4781 __ Addu(sp, sp, a1);
4785 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4786 if (masm->isolate()->function_entry_hook() != NULL) {
4787 ProfileEntryHookStub stub(masm->isolate());
4795 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4796 // The entry hook is a "push ra" instruction, followed by a call.
4797 // Note: on MIPS a "push" is 2 instructions.
4798 const int32_t kReturnAddressDistanceFromFunctionStart =
4799 Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
4801 // This should contain all kJSCallerSaved registers.
4802 const RegList kSavedRegs =
4803 kJSCallerSaved | // Caller saved registers.
4804 s5.bit(); // Saved stack pointer.
4806 // We also save ra, so the count here is one higher than the mask indicates.
4807 const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
4809 // Save all caller-save registers as this may be called from anywhere.
4810 __ MultiPush(kSavedRegs | ra.bit());
4812 // Compute the function's address for the first argument.
4813 __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
4815 // The caller's return address is above the saved temporaries.
4816 // Grab that for the second argument to the hook.
4817 __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
4819 // Align the stack if necessary.
4820 int frame_alignment = masm->ActivationFrameAlignment();
4821 if (frame_alignment > kPointerSize) {
4823 DCHECK(IsPowerOf2(frame_alignment));
4824 __ And(sp, sp, Operand(-frame_alignment));
4826 __ Subu(sp, sp, kCArgsSlotsSize);
4827 #if defined(V8_HOST_ARCH_MIPS)
4828 int32_t entry_hook =
4829 reinterpret_cast<int32_t>(isolate()->function_entry_hook());
4830 __ li(t9, Operand(entry_hook));
4832 // Under the simulator we need to indirect the entry hook through a
4833 // trampoline function at a known address.
4834 // It additionally takes an isolate as a third parameter.
4835 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4837 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4838 __ li(t9, Operand(ExternalReference(&dispatcher,
4839 ExternalReference::BUILTIN_CALL,
4842 // Call the C function through t9 to conform to the ABI for PIC.
4845 // Restore the stack pointer if needed.
4846 if (frame_alignment > kPointerSize) {
4849 __ Addu(sp, sp, kCArgsSlotsSize);
4852 // Also pop ra to get Ret(0).
4853 __ MultiPop(kSavedRegs | ra.bit());
4859 static void CreateArrayDispatch(MacroAssembler* masm,
4860 AllocationSiteOverrideMode mode) {
4861 if (mode == DISABLE_ALLOCATION_SITES) {
4862 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4863 __ TailCallStub(&stub);
4864 } else if (mode == DONT_OVERRIDE) {
4865 int last_index = GetSequenceIndexFromFastElementsKind(
4866 TERMINAL_FAST_ELEMENTS_KIND);
4867 for (int i = 0; i <= last_index; ++i) {
4868 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4869 T stub(masm->isolate(), kind);
4870 __ TailCallStub(&stub, eq, a3, Operand(kind));
4873 // If we reached this point there is a problem.
4874 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4881 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4882 AllocationSiteOverrideMode mode) {
4883 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4884 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4885 // a0 - number of arguments
4886 // a1 - constructor?
4887 // sp[0] - last argument
4888 Label normal_sequence;
4889 if (mode == DONT_OVERRIDE) {
4890 DCHECK(FAST_SMI_ELEMENTS == 0);
4891 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
4892 DCHECK(FAST_ELEMENTS == 2);
4893 DCHECK(FAST_HOLEY_ELEMENTS == 3);
4894 DCHECK(FAST_DOUBLE_ELEMENTS == 4);
4895 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4897 // is the low bit set? If so, we are holey and that is good.
4898 __ And(at, a3, Operand(1));
4899 __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
4902 // look at the first argument
4903 __ lw(t1, MemOperand(sp, 0));
4904 __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
4906 if (mode == DISABLE_ALLOCATION_SITES) {
4907 ElementsKind initial = GetInitialFastElementsKind();
4908 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4910 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4912 DISABLE_ALLOCATION_SITES);
4913 __ TailCallStub(&stub_holey);
4915 __ bind(&normal_sequence);
4916 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4918 DISABLE_ALLOCATION_SITES);
4919 __ TailCallStub(&stub);
4920 } else if (mode == DONT_OVERRIDE) {
4921 // We are going to create a holey array, but our kind is non-holey.
4922 // Fix kind and retry (only if we have an allocation site in the slot).
4923 __ Addu(a3, a3, Operand(1));
4925 if (FLAG_debug_code) {
4926 __ lw(t1, FieldMemOperand(a2, 0));
4927 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
4928 __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
4931 // Save the resulting elements kind in type info. We can't just store a3
4932 // in the AllocationSite::transition_info field because elements kind is
4933 // restricted to a portion of the field...upper bits need to be left alone.
4934 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4935 __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4936 __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
4937 __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4940 __ bind(&normal_sequence);
4941 int last_index = GetSequenceIndexFromFastElementsKind(
4942 TERMINAL_FAST_ELEMENTS_KIND);
4943 for (int i = 0; i <= last_index; ++i) {
4944 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4945 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4946 __ TailCallStub(&stub, eq, a3, Operand(kind));
4949 // If we reached this point there is a problem.
4950 __ Abort(kUnexpectedElementsKindInArrayConstructor);
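// Sketch of the packed-to-holey fix-up performed above (illustrative C++
// only; not part of V8): the fast ElementsKinds alternate packed/holey (see
// the DCHECKs above), so the holey variant of a packed fast kind is simply
// the next enum value, and the AllocationSite's transition info is bumped by
// the matching smi-tagged delta.
static ElementsKind PackedToHoleySketch(ElementsKind packed_kind) {
  DCHECK((static_cast<int>(packed_kind) & 1) == 0);  // packed kinds are even
  return static_cast<ElementsKind>(static_cast<int>(packed_kind) + 1);
}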
4958 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4959 int to_index = GetSequenceIndexFromFastElementsKind(
4960 TERMINAL_FAST_ELEMENTS_KIND);
4961 for (int i = 0; i <= to_index; ++i) {
4962 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4963 T stub(isolate, kind);
4965 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4966 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4973 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4974 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4976 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4978 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4983 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4985 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4986 for (int i = 0; i < 2; i++) {
4987 // For internal arrays we only need a few things.
4988 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4990 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4992 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4998 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4999 MacroAssembler* masm,
5000 AllocationSiteOverrideMode mode) {
5001 if (argument_count_ == ANY) {
5002 Label not_zero_case, not_one_case;
5004 __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
5005 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5007 __ bind(&not_zero_case);
5008 __ Branch(&not_one_case, gt, a0, Operand(1));
5009 CreateArrayDispatchOneArgument(masm, mode);
5011 __ bind(&not_one_case);
5012 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5013 } else if (argument_count_ == NONE) {
5014 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5015 } else if (argument_count_ == ONE) {
5016 CreateArrayDispatchOneArgument(masm, mode);
5017 } else if (argument_count_ == MORE_THAN_ONE) {
5018 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5025 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5026 // ----------- S t a t e -------------
5027 // -- a0 : argc (only if argument_count_ == ANY)
5028 // -- a1 : constructor
5029 // -- a2 : AllocationSite or undefined
5030 // -- sp[0] : return address
5031 // -- sp[4] : last argument
5032 // -----------------------------------
5034 if (FLAG_debug_code) {
5035 // The array construct code is only set for the global and natives
5036 // builtin Array functions which always have maps.
5038 // Initial map for the builtin Array function should be a map.
5039 __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5040 // Will both indicate a NULL and a Smi.
5042 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
5043 at, Operand(zero_reg));
5044 __ GetObjectType(t0, t0, t1);
5045 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
5046 t1, Operand(MAP_TYPE));
5048 // We should either have undefined in a2 or a valid AllocationSite
5049 __ AssertUndefinedOrAllocationSite(a2, t0);
5053 // Get the elements kind and case on that.
5054 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5055 __ Branch(&no_info, eq, a2, Operand(at));
5057 __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5059 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5060 __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
5061 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5064 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5068 void InternalArrayConstructorStub::GenerateCase(
5069 MacroAssembler* masm, ElementsKind kind) {
5071 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
5072 __ TailCallStub(&stub0, lo, a0, Operand(1));
5074 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
5075 __ TailCallStub(&stubN, hi, a0, Operand(1));
5077 if (IsFastPackedElementsKind(kind)) {
5078 // We might need to create a holey array;
5079 // look at the first argument.
5080 __ lw(at, MemOperand(sp, 0));
5082 InternalArraySingleArgumentConstructorStub
5083 stub1_holey(isolate(), GetHoleyElementsKind(kind));
5084 __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
5087 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
5088 __ TailCallStub(&stub1);
5092 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5093 // ----------- S t a t e -------------
5095 // -- a1 : constructor
5096 // -- sp[0] : return address
5097 // -- sp[4] : last argument
5098 // -----------------------------------
5100 if (FLAG_debug_code) {
5101 // The array construct code is only set for the global and natives
5102 // builtin Array functions which always have maps.
5104 // Initial map for the builtin Array function should be a map.
5105 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5106 // Will both indicate a NULL and a Smi.
5108 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
5109 at, Operand(zero_reg));
5110 __ GetObjectType(a3, a3, t0);
5111 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
5112 t0, Operand(MAP_TYPE));
5115 // Figure out the right elements kind.
5116 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5118 // Load the map's "bit field 2" into a3. We only need the first byte,
5119 // but the following bit field extraction takes care of that anyway.
5120 __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
5121 // Retrieve elements_kind from bit field 2.
5122 __ DecodeField<Map::ElementsKindBits>(a3);
5124 if (FLAG_debug_code) {
5126 __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
5128 eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
5129 a3, Operand(FAST_HOLEY_ELEMENTS));
5133 Label fast_elements_case;
5134 __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
5135 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
5137 __ bind(&fast_elements_case);
5138 GenerateCase(masm, FAST_ELEMENTS);
5142 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5143 // ----------- S t a t e -------------
5145 // -- t0 : call_data
5147 // -- a1 : api_function_address
5150 // -- sp[0] : last argument
5152 // -- sp[(argc - 1)* 4] : first argument
5153 // -- sp[argc * 4] : receiver
5154 // -----------------------------------
5156 Register callee = a0;
5157 Register call_data = t0;
5158 Register holder = a2;
5159 Register api_function_address = a1;
5160 Register context = cp;
5162 int argc = ArgumentBits::decode(bit_field_);
5163 bool is_store = IsStoreBits::decode(bit_field_);
5164 bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
5166 typedef FunctionCallbackArguments FCA;
5168 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5169 STATIC_ASSERT(FCA::kCalleeIndex == 5);
5170 STATIC_ASSERT(FCA::kDataIndex == 4);
5171 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5172 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5173 STATIC_ASSERT(FCA::kIsolateIndex == 1);
5174 STATIC_ASSERT(FCA::kHolderIndex == 0);
5175 STATIC_ASSERT(FCA::kArgsLength == 7);
5177 // Save context, callee and call data.
5178 __ Push(context, callee, call_data);
5179 // Load context from callee.
5180 __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5182 Register scratch = call_data;
5183 if (!call_data_undefined) {
5184 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5186 // Push return value and default return value.
5187 __ Push(scratch, scratch);
5189 Operand(ExternalReference::isolate_address(isolate())));
5190 // Push isolate and holder.
5191 __ Push(scratch, holder);
5193 // Prepare arguments.
5194 __ mov(scratch, sp);
5196 // Allocate the v8::Arguments structure in the arguments' space since
5197 // it's not controlled by GC.
5198 const int kApiStackSpace = 4;
5200 FrameScope frame_scope(masm, StackFrame::MANUAL);
5201 __ EnterExitFrame(false, kApiStackSpace);
5203 DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
5204 // a0 = FunctionCallbackInfo&
5205 // Arguments is after the return address.
5206 __ Addu(a0, sp, Operand(1 * kPointerSize));
5207 // FunctionCallbackInfo::implicit_args_
5208 __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
5209 // FunctionCallbackInfo::values_
5210 __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
5211 __ sw(at, MemOperand(a0, 1 * kPointerSize));
5212 // FunctionCallbackInfo::length_ = argc
5213 __ li(at, Operand(argc));
5214 __ sw(at, MemOperand(a0, 2 * kPointerSize));
5215 // FunctionCallbackInfo::is_construct_call = 0
5216 __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
5218 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
5219 ExternalReference thunk_ref =
5220 ExternalReference::invoke_function_callback(isolate());
5222 AllowExternalCallThatCantCauseGC scope(masm);
5223 MemOperand context_restore_operand(
5224 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5225 // Stores return the first js argument.
5226 int return_value_offset = 0;
5228 return_value_offset = 2 + FCA::kArgsLength;
5230 return_value_offset = 2 + FCA::kReturnValueOffset;
5232 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5234 __ CallApiFunctionAndReturn(api_function_address,
5237 return_value_operand,
5238 &context_restore_operand);
5242 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5243 // ----------- S t a t e -------------
5245 // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
5247 // -- a2 : api_function_address
5248 // -----------------------------------
5250 Register api_function_address = a2;
5252 __ mov(a0, sp); // a0 = Handle<Name>
5253 __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
5255 const int kApiStackSpace = 1;
5256 FrameScope frame_scope(masm, StackFrame::MANUAL);
5257 __ EnterExitFrame(false, kApiStackSpace);
5259 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5260 // a1 (internal::Object** args_) as the data.
5261 __ sw(a1, MemOperand(sp, 1 * kPointerSize));
5262 __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
5264 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5266 ExternalReference thunk_ref =
5267 ExternalReference::invoke_accessor_getter_callback(isolate());
5268 __ CallApiFunctionAndReturn(api_function_address,
5271 MemOperand(fp, 6 * kPointerSize),
5278 } } // namespace v8::internal
5280 #endif // V8_TARGET_ARCH_MIPS