1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #if V8_TARGET_ARCH_MIPS
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
35 #include "regexp-macro-assembler.h"
36 #include "stub-cache.h"
42 void FastNewClosureStub::InitializeInterfaceDescriptor(
44 CodeStubInterfaceDescriptor* descriptor) {
45 static Register registers[] = { a2 };
46 descriptor->register_param_count_ = 1;
47 descriptor->register_params_ = registers;
48 descriptor->deoptimization_handler_ =
49 Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
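// The InitializeInterfaceDescriptor functions below all follow the same
// pattern: name the registers the stub expects its parameters in, record how
// many there are, and (optionally) point deoptimization_handler_ at the
// runtime entry or IC miss handler that the generated code falls back to.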
53 void FastNewContextStub::InitializeInterfaceDescriptor(
55 CodeStubInterfaceDescriptor* descriptor) {
56 static Register registers[] = { a1 };
57 descriptor->register_param_count_ = 1;
58 descriptor->register_params_ = registers;
59 descriptor->deoptimization_handler_ = NULL;
63 void ToNumberStub::InitializeInterfaceDescriptor(
65 CodeStubInterfaceDescriptor* descriptor) {
66 static Register registers[] = { a0 };
67 descriptor->register_param_count_ = 1;
68 descriptor->register_params_ = registers;
69 descriptor->deoptimization_handler_ = NULL;
73 void NumberToStringStub::InitializeInterfaceDescriptor(
75 CodeStubInterfaceDescriptor* descriptor) {
76 static Register registers[] = { a0 };
77 descriptor->register_param_count_ = 1;
78 descriptor->register_params_ = registers;
79 descriptor->deoptimization_handler_ =
80 Runtime::FunctionForId(Runtime::kNumberToString)->entry;
84 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
86 CodeStubInterfaceDescriptor* descriptor) {
87 static Register registers[] = { a3, a2, a1 };
88 descriptor->register_param_count_ = 3;
89 descriptor->register_params_ = registers;
90 descriptor->deoptimization_handler_ =
91 Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
95 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
97 CodeStubInterfaceDescriptor* descriptor) {
98 static Register registers[] = { a3, a2, a1, a0 };
99 descriptor->register_param_count_ = 4;
100 descriptor->register_params_ = registers;
101 descriptor->deoptimization_handler_ =
102 Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
106 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
108 CodeStubInterfaceDescriptor* descriptor) {
109 static Register registers[] = { a2 };
110 descriptor->register_param_count_ = 1;
111 descriptor->register_params_ = registers;
112 descriptor->deoptimization_handler_ = NULL;
116 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
118 CodeStubInterfaceDescriptor* descriptor) {
119 static Register registers[] = { a1, a0 };
120 descriptor->register_param_count_ = 2;
121 descriptor->register_params_ = registers;
122 descriptor->deoptimization_handler_ =
123 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
127 void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
129 CodeStubInterfaceDescriptor* descriptor) {
130 static Register registers[] = { a1, a0 };
131 descriptor->register_param_count_ = 2;
132 descriptor->register_params_ = registers;
133 descriptor->deoptimization_handler_ =
134 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
138 void LoadFieldStub::InitializeInterfaceDescriptor(
140 CodeStubInterfaceDescriptor* descriptor) {
141 static Register registers[] = { a0 };
142 descriptor->register_param_count_ = 1;
143 descriptor->register_params_ = registers;
144 descriptor->deoptimization_handler_ = NULL;
148 void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
150 CodeStubInterfaceDescriptor* descriptor) {
151 static Register registers[] = { a1 };
152 descriptor->register_param_count_ = 1;
153 descriptor->register_params_ = registers;
154 descriptor->deoptimization_handler_ = NULL;
158 void KeyedArrayCallStub::InitializeInterfaceDescriptor(
160 CodeStubInterfaceDescriptor* descriptor) {
161 static Register registers[] = { a2 };
162 descriptor->register_param_count_ = 1;
163 descriptor->register_params_ = registers;
164 descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
165 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
166 descriptor->deoptimization_handler_ =
167 FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
171 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
173 CodeStubInterfaceDescriptor* descriptor) {
174 static Register registers[] = { a2, a1, a0 };
175 descriptor->register_param_count_ = 3;
176 descriptor->register_params_ = registers;
177 descriptor->deoptimization_handler_ =
178 FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
182 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
184 CodeStubInterfaceDescriptor* descriptor) {
185 static Register registers[] = { a0, a1 };
186 descriptor->register_param_count_ = 2;
187 descriptor->register_params_ = registers;
189 Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
190 descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
194 void CompareNilICStub::InitializeInterfaceDescriptor(
196 CodeStubInterfaceDescriptor* descriptor) {
197 static Register registers[] = { a0 };
198 descriptor->register_param_count_ = 1;
199 descriptor->register_params_ = registers;
200 descriptor->deoptimization_handler_ =
201 FUNCTION_ADDR(CompareNilIC_Miss);
202 descriptor->SetMissHandler(
203 ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
207 static void InitializeArrayConstructorDescriptor(
209 CodeStubInterfaceDescriptor* descriptor,
210 int constant_stack_parameter_count) {
212 // a0 -- number of arguments
214 // a2 -- allocation site with elements kind
215 static Register registers_variable_args[] = { a1, a2, a0 };
216 static Register registers_no_args[] = { a1, a2 };
218 if (constant_stack_parameter_count == 0) {
219 descriptor->register_param_count_ = 2;
220 descriptor->register_params_ = registers_no_args;
222 // stack param count needs (constructor pointer, and single argument)
223 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
224 descriptor->stack_parameter_count_ = a0;
225 descriptor->register_param_count_ = 3;
226 descriptor->register_params_ = registers_variable_args;
229 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
230 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
231 descriptor->deoptimization_handler_ =
232 Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
236 static void InitializeInternalArrayConstructorDescriptor(
238 CodeStubInterfaceDescriptor* descriptor,
239 int constant_stack_parameter_count) {
241 // a0 -- number of arguments
242 // a1 -- constructor function
243 static Register registers_variable_args[] = { a1, a0 };
244 static Register registers_no_args[] = { a1 };
246 if (constant_stack_parameter_count == 0) {
247 descriptor->register_param_count_ = 1;
248 descriptor->register_params_ = registers_no_args;
250 // stack param count needs (constructor pointer, and single argument)
251 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
252 descriptor->stack_parameter_count_ = a0;
253 descriptor->register_param_count_ = 2;
254 descriptor->register_params_ = registers_variable_args;
257 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
258 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
259 descriptor->deoptimization_handler_ =
260 Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
264 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
266 CodeStubInterfaceDescriptor* descriptor) {
267 InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
271 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
273 CodeStubInterfaceDescriptor* descriptor) {
274 InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
278 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
280 CodeStubInterfaceDescriptor* descriptor) {
281 InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
285 void ToBooleanStub::InitializeInterfaceDescriptor(
287 CodeStubInterfaceDescriptor* descriptor) {
288 static Register registers[] = { a0 };
289 descriptor->register_param_count_ = 1;
290 descriptor->register_params_ = registers;
291 descriptor->deoptimization_handler_ =
292 FUNCTION_ADDR(ToBooleanIC_Miss);
293 descriptor->SetMissHandler(
294 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
298 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
300 CodeStubInterfaceDescriptor* descriptor) {
301 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
305 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
307 CodeStubInterfaceDescriptor* descriptor) {
308 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
312 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
314 CodeStubInterfaceDescriptor* descriptor) {
315 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
319 void StoreGlobalStub::InitializeInterfaceDescriptor(
321 CodeStubInterfaceDescriptor* descriptor) {
322 static Register registers[] = { a1, a2, a0 };
323 descriptor->register_param_count_ = 3;
324 descriptor->register_params_ = registers;
325 descriptor->deoptimization_handler_ =
326 FUNCTION_ADDR(StoreIC_MissFromStubFailure);
330 void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
332 CodeStubInterfaceDescriptor* descriptor) {
333 static Register registers[] = { a0, a3, a1, a2 };
334 descriptor->register_param_count_ = 4;
335 descriptor->register_params_ = registers;
336 descriptor->deoptimization_handler_ =
337 FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
341 void BinaryOpICStub::InitializeInterfaceDescriptor(
343 CodeStubInterfaceDescriptor* descriptor) {
344 static Register registers[] = { a1, a0 };
345 descriptor->register_param_count_ = 2;
346 descriptor->register_params_ = registers;
347 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
348 descriptor->SetMissHandler(
349 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
353 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
355 CodeStubInterfaceDescriptor* descriptor) {
356 static Register registers[] = { a2, a1, a0 };
357 descriptor->register_param_count_ = 3;
358 descriptor->register_params_ = registers;
359 descriptor->deoptimization_handler_ =
360 FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
364 void StringAddStub::InitializeInterfaceDescriptor(
366 CodeStubInterfaceDescriptor* descriptor) {
367 static Register registers[] = { a1, a0 };
368 descriptor->register_param_count_ = 2;
369 descriptor->register_params_ = registers;
370 descriptor->deoptimization_handler_ =
371 Runtime::FunctionForId(Runtime::kStringAdd)->entry;
375 void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
377 CallInterfaceDescriptor* descriptor =
378 isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
379 static Register registers[] = { a1, // JSFunction
381 a0, // actual number of arguments
382 a2, // expected number of arguments
384 static Representation representations[] = {
385 Representation::Tagged(), // JSFunction
386 Representation::Tagged(), // context
387 Representation::Integer32(), // actual number of arguments
388 Representation::Integer32(), // expected number of arguments
390 descriptor->register_param_count_ = 4;
391 descriptor->register_params_ = registers;
392 descriptor->param_representations_ = representations;
395 CallInterfaceDescriptor* descriptor =
396 isolate->call_descriptor(Isolate::KeyedCall);
397 static Register registers[] = { cp, // context
400 static Representation representations[] = {
401 Representation::Tagged(), // context
402 Representation::Tagged(), // key
404 descriptor->register_param_count_ = 2;
405 descriptor->register_params_ = registers;
406 descriptor->param_representations_ = representations;
409 CallInterfaceDescriptor* descriptor =
410 isolate->call_descriptor(Isolate::NamedCall);
411 static Register registers[] = { cp, // context
414 static Representation representations[] = {
415 Representation::Tagged(), // context
416 Representation::Tagged(), // name
418 descriptor->register_param_count_ = 2;
419 descriptor->register_params_ = registers;
420 descriptor->param_representations_ = representations;
423 CallInterfaceDescriptor* descriptor =
424 isolate->call_descriptor(Isolate::CallHandler);
425 static Register registers[] = { cp, // context
428 static Representation representations[] = {
429 Representation::Tagged(), // context
430 Representation::Tagged(), // receiver
432 descriptor->register_param_count_ = 2;
433 descriptor->register_params_ = registers;
434 descriptor->param_representations_ = representations;
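// Each block above registers one calling convention with the isolate: the
// registers a call site passes its parameters in and, in parallel, the
// representation of each parameter (tagged pointer or raw 32-bit integer).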
439 #define __ ACCESS_MASM(masm)
442 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
445 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
451 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
456 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
457 // Update the static counter each time a new code stub is generated.
458 Isolate* isolate = masm->isolate();
459 isolate->counters()->code_stubs()->Increment();
461 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
462 int param_count = descriptor->register_param_count_;
464 // Call the runtime system in a fresh internal frame.
465 FrameScope scope(masm, StackFrame::INTERNAL);
466 ASSERT(descriptor->register_param_count_ == 0 ||
467 a0.is(descriptor->register_params_[param_count - 1]));
469 for (int i = 0; i < param_count; ++i) {
470 __ push(descriptor->register_params_[i]);
472 ExternalReference miss = descriptor->miss_handler();
473 __ CallExternalReference(miss, descriptor->register_param_count_);
480 void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
481 // Stack layout on entry:
484 // [sp + kPointerSize]: serialized scope info
486 // Try to allocate the context in new space.
488 int length = slots_ + Context::MIN_CONTEXT_SLOTS;
489 __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
491 // Load the function from the stack.
492 __ lw(a3, MemOperand(sp, 0));
494 // Load the serialized scope info from the stack.
495 __ lw(a1, MemOperand(sp, 1 * kPointerSize));
497 // Set up the object header.
498 __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
499 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
500 __ li(a2, Operand(Smi::FromInt(length)));
501 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
503 // If this block context is nested in the native context we get a smi
504 // sentinel instead of a function. The block context should get the
505 // canonical empty function of the native context as its closure which
506 // we still have to look up.
507 Label after_sentinel;
508 __ JumpIfNotSmi(a3, &after_sentinel);
509 if (FLAG_debug_code) {
510 __ Assert(eq, kExpected0AsASmiSentinel, a3, Operand(zero_reg));
512 __ lw(a3, GlobalObjectOperand());
513 __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
514 __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
515 __ bind(&after_sentinel);
517 // Set up the fixed slots, copy the global object from the previous context.
518 __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
519 __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
520 __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
521 __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
522 __ sw(a2, ContextOperand(v0, Context::GLOBAL_OBJECT_INDEX));
524 // Initialize the rest of the slots to the hole value.
525 __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
526 for (int i = 0; i < slots_; i++) {
527 __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
530 // Remove the on-stack argument and return.
534 // Need to collect. Call into runtime system.
536 __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
540 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
541 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
542 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
543 // scratch register. Destroys the source register. No GC occurs during this
544 // stub so you don't have to set up the frame.
545 class ConvertToDoubleStub : public PlatformCodeStub {
547 ConvertToDoubleStub(Register result_reg_1,
548 Register result_reg_2,
550 Register scratch_reg)
551 : result1_(result_reg_1),
552 result2_(result_reg_2),
554 zeros_(scratch_reg) { }
562 // Minor key encoding in 16 bits.
563 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
564 class OpBits: public BitField<Token::Value, 2, 14> {};
566 Major MajorKey() { return ConvertToDouble; }
568 // Encode the parameters in a unique 16 bit value.
569 return result1_.code() +
570 (result2_.code() << 4) +
571 (source_.code() << 8) +
572 (zeros_.code() << 12);
575 void Generate(MacroAssembler* masm);
579 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
580 #ifndef BIG_ENDIAN_FLOATING_POINT
581 Register exponent = result1_;
582 Register mantissa = result2_;
584 Register exponent = result2_;
585 Register mantissa = result1_;
588 // Convert from Smi to integer.
589 __ sra(source_, source_, kSmiTagSize);
590 // Move sign bit from source to destination. This works because the sign bit
591 // in the exponent word of the double has the same position and polarity as
592 // the 2's complement sign bit in a Smi.
593 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
594 __ And(exponent, source_, Operand(HeapNumber::kSignMask));
595 // Subtract from 0 if source was negative.
596 __ subu(at, zero_reg, source_);
597 __ Movn(source_, at, exponent);
599 // We have -1, 0 or 1, which we treat specially. Register source_ contains
600 // absolute value: it is either equal to 1 (special case of -1 and 1),
601 // greater than 1 (not a special case) or less than 1 (special case of 0).
602 __ Branch(&not_special, gt, source_, Operand(1));
604 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
605 const uint32_t exponent_word_for_1 =
606 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
607 // Safe to use 'at' as dest reg here.
608 __ Or(at, exponent, Operand(exponent_word_for_1));
609 __ Movn(exponent, at, source_); // Write exp when source not 0.
610 // 1, 0 and -1 all have 0 for the second word.
611 __ Ret(USE_DELAY_SLOT);
612 __ mov(mantissa, zero_reg);
614 __ bind(&not_special);
615 // Count leading zeros.
616 // Gets the wrong answer for 0, but we already checked for that case above.
617 __ Clz(zeros_, source_);
618 // Compute exponent and or it into the exponent register.
619 // We use mantissa as a scratch register here.
620 __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
621 __ subu(mantissa, mantissa, zeros_);
622 __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
623 __ Or(exponent, exponent, mantissa);
625 // Shift up the source chopping the top bit off.
626 __ Addu(zeros_, zeros_, Operand(1));
627 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
628 __ sllv(source_, source_, zeros_);
629 // Compute lower part of fraction (last 12 bits).
630 __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
631 // And the top (top 20 bits).
632 __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
634 __ Ret(USE_DELAY_SLOT);
635 __ or_(exponent, exponent, source_);
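// Illustrative-only C++ sketch of the conversion the stub above performs (not
// part of V8; the helper name and the use of fixed-width integer types are
// assumptions): build the two 32-bit words of an IEEE 754 double from a
// Smi-range (31-bit) signed integer with the same sign / leading-zero-count /
// shift steps as the hand-written assembly.
static inline void SketchSmiRangeIntToDoubleWords(int32_t value,
                                                  uint32_t* exponent_word,
                                                  uint32_t* mantissa_word) {
  uint32_t sign = static_cast<uint32_t>(value) & 0x80000000u;
  uint32_t magnitude =
      sign ? static_cast<uint32_t>(-value) : static_cast<uint32_t>(value);
  if (magnitude <= 1) {
    // -1, 0 and 1: or in the exponent for 2^0 (unless the value is 0) and
    // leave both mantissa halves zero, mirroring the special case above.
    *exponent_word = sign | (magnitude != 0 ? 1023u << 20 : 0u);
    *mantissa_word = 0;
    return;
  }
  int zeros = 0;  // Leading zero count, as computed by the Clz instruction.
  for (uint32_t probe = 0x80000000u; (magnitude & probe) == 0; probe >>= 1) {
    zeros++;
  }
  uint32_t biased_exponent = (31 - zeros) + 1023;  // 31 + kExponentBias - zeros.
  uint32_t fraction = magnitude << (zeros + 1);    // Chop off the implicit 1.
  *exponent_word = sign | (biased_exponent << 20) | (fraction >> 12);
  *mantissa_word = fraction << 20;                 // Low 32 bits of the mantissa.
}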
639 void DoubleToIStub::Generate(MacroAssembler* masm) {
640 Label out_of_range, only_low, negate, done;
641 Register input_reg = source();
642 Register result_reg = destination();
644 int double_offset = offset();
645 // Account for saved regs if input is sp.
646 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
649 GetRegisterThatIsNotOneOf(input_reg, result_reg);
651 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
653 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
654 DoubleRegister double_scratch = kLithiumScratchDouble;
656 __ Push(scratch, scratch2, scratch3);
658 if (!skip_fastpath()) {
659 // Load double input.
660 __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
662 // Clear cumulative exception flags and save the FCSR.
663 __ cfc1(scratch2, FCSR);
664 __ ctc1(zero_reg, FCSR);
666 // Try a conversion to a signed integer.
667 __ Trunc_w_d(double_scratch, double_scratch);
668 // Move the converted value into the result register.
669 __ mfc1(scratch3, double_scratch);
671 // Retrieve and restore the FCSR.
672 __ cfc1(scratch, FCSR);
673 __ ctc1(scratch2, FCSR);
675 // Check for overflow and NaNs.
678 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
679 | kFCSRInvalidOpFlagMask);
680 // If we had no exceptions then set result_reg and we are done.
682 __ Branch(&error, ne, scratch, Operand(zero_reg));
683 __ Move(result_reg, scratch3);
688 // Load the double value and perform a manual truncation.
689 Register input_high = scratch2;
690 Register input_low = scratch3;
692 __ lw(input_low, MemOperand(input_reg, double_offset));
693 __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
695 Label normal_exponent, restore_sign;
696 // Extract the biased exponent in result.
699 HeapNumber::kExponentShift,
700 HeapNumber::kExponentBits);
702 // Check for Infinity and NaNs, which should return 0.
703 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
704 __ Movz(result_reg, zero_reg, scratch);
705 __ Branch(&done, eq, scratch, Operand(zero_reg));
707 // Express exponent as delta to (number of mantissa bits + 31).
710 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
712 // If the delta is strictly positive, all bits would be shifted away,
713 // which means that we can return 0.
714 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
715 __ mov(result_reg, zero_reg);
718 __ bind(&normal_exponent);
719 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
721 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
724 Register sign = result_reg;
726 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
728 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
729 // to check for this specific case.
730 Label high_shift_needed, high_shift_done;
731 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
732 __ mov(input_high, zero_reg);
733 __ Branch(&high_shift_done);
734 __ bind(&high_shift_needed);
736 // Set the implicit 1 before the mantissa part in input_high.
739 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
740 // Shift the mantissa bits to the correct position.
741 // We don't need to clear non-mantissa bits as they will be shifted away.
742 // If they weren't, it would mean that the answer is in the 32-bit range.
743 __ sllv(input_high, input_high, scratch);
745 __ bind(&high_shift_done);
747 // Replace the shifted bits with bits from the lower mantissa word.
748 Label pos_shift, shift_done;
750 __ subu(scratch, at, scratch);
751 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
754 __ Subu(scratch, zero_reg, scratch);
755 __ sllv(input_low, input_low, scratch);
756 __ Branch(&shift_done);
759 __ srlv(input_low, input_low, scratch);
761 __ bind(&shift_done);
762 __ Or(input_high, input_high, Operand(input_low));
763 // Restore sign if necessary.
764 __ mov(scratch, sign);
767 __ Subu(result_reg, zero_reg, input_high);
768 __ Movz(result_reg, input_high, scratch);
772 __ Pop(scratch, scratch2, scratch3);
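// Illustrative-only C++ sketch of the manual truncation path above (not part
// of V8; the helper name and fixed-width types are assumptions). It extracts
// the biased exponent from the high word, rebuilds the 53-bit mantissa, shifts
// it so the integer bits land in the low 32 bits, and reapplies the sign,
// ignoring the FPU fast path and the MIPS-specific handling of shifts >= 32.
static inline int32_t SketchTruncateDoubleWordsToInt32(uint32_t hi, uint32_t lo) {
  int32_t exponent = static_cast<int32_t>((hi >> 20) & 0x7FF) - 1023;
  if (exponent < 0) return 0;   // |value| < 1 truncates to 0.
  if (exponent > 83) return 0;  // Low 32 integer bits all shifted out (also Inf/NaN).
  uint64_t mantissa = (static_cast<uint64_t>(hi & 0xFFFFFu) << 32) | lo;
  mantissa |= static_cast<uint64_t>(1) << 52;  // Set the implicit leading 1.
  uint32_t magnitude = (exponent <= 52)
      ? static_cast<uint32_t>(mantissa >> (52 - exponent))
      : static_cast<uint32_t>(mantissa << (exponent - 52));
  uint32_t result = (hi & 0x80000000u) ? 0u - magnitude : magnitude;
  return static_cast<int32_t>(result);
}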
777 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
779 WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
780 WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
781 stub1.GetCode(isolate);
782 stub2.GetCode(isolate);
786 // See comment for class, this does NOT work for int32's that are in Smi range.
787 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
788 Label max_negative_int;
789 // the_int_ has the answer which is a signed int32 but not a Smi.
790 // We test for the special value that has a different exponent.
791 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
792 // Test sign, and save for later conditionals.
793 __ And(sign_, the_int_, Operand(0x80000000u));
794 __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
796 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
797 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
798 uint32_t non_smi_exponent =
799 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
800 __ li(scratch_, Operand(non_smi_exponent));
801 // Set the sign bit in scratch_ if the value was negative.
802 __ or_(scratch_, scratch_, sign_);
803 // Subtract from 0 if the value was negative.
804 __ subu(at, zero_reg, the_int_);
805 __ Movn(the_int_, at, sign_);
806 // We should be masking the implicit first digit of the mantissa away here,
807 // but it just ends up combining harmlessly with the last digit of the
808 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
809 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
810 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
811 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
812 __ srl(at, the_int_, shift_distance);
813 __ or_(scratch_, scratch_, at);
814 __ sw(scratch_, FieldMemOperand(the_heap_number_,
815 HeapNumber::kExponentOffset));
816 __ sll(scratch_, the_int_, 32 - shift_distance);
817 __ Ret(USE_DELAY_SLOT);
818 __ sw(scratch_, FieldMemOperand(the_heap_number_,
819 HeapNumber::kMantissaOffset));
821 __ bind(&max_negative_int);
822 // The max negative int32 is stored as a positive number in the mantissa of
823 // a double because it uses a sign bit instead of using two's complement.
824 // The actual mantissa bits stored are all 0 because the implicit most
825 // significant 1 bit is not stored.
826 non_smi_exponent += 1 << HeapNumber::kExponentShift;
827 __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
829 FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
830 __ mov(scratch_, zero_reg);
831 __ Ret(USE_DELAY_SLOT);
833 FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
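// Note (illustrative, not part of the original source): -2^31 is the one
// int32 whose absolute value is not an int32. As a double its stored mantissa
// is all zeros (high word 0xC1E00000, low word 0x00000000), which is why the
// max_negative_int path above stores a bare sign-plus-exponent word and a
// zero mantissa word.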
837 // Handle the case where the lhs and rhs are the same object.
838 // Equality is almost reflexive (everything but NaN), so this is a test
839 // for "identity and not NaN".
840 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
844 Label heap_number, return_equal;
845 Register exp_mask_reg = t5;
847 __ Branch(&not_identical, ne, a0, Operand(a1));
849 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
851 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
852 // so we do the second best thing - test it ourselves.
853 // They are both equal and they are not both Smis so both of them are not
854 // Smis. If it's not a heap number, then return equal.
855 if (cc == less || cc == greater) {
856 __ GetObjectType(a0, t4, t4);
857 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
859 __ GetObjectType(a0, t4, t4);
860 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
861 // Comparing JS objects with <=, >= is complicated.
863 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
864 // Normally here we fall through to return_equal, but undefined is
865 // special: (undefined == undefined) == true, but
866 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
867 if (cc == less_equal || cc == greater_equal) {
868 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
869 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
870 __ Branch(&return_equal, ne, a0, Operand(t2));
871 ASSERT(is_int16(GREATER) && is_int16(LESS));
872 __ Ret(USE_DELAY_SLOT);
874 // undefined <= undefined should fail.
875 __ li(v0, Operand(GREATER));
877 // undefined >= undefined should fail.
878 __ li(v0, Operand(LESS));
884 __ bind(&return_equal);
885 ASSERT(is_int16(GREATER) && is_int16(LESS));
886 __ Ret(USE_DELAY_SLOT);
888 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
889 } else if (cc == greater) {
890 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
892 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
895 // For less and greater we don't have to check for NaN since the result of
896 // x < x is false regardless. For the others here is some code to check
898 if (cc != lt && cc != gt) {
899 __ bind(&heap_number);
900 // It is a heap number, so return non-equal if it's NaN and equal if it's
903 // The representation of NaN values has all exponent bits (52..62) set,
904 // and not all mantissa bits (0..51) clear.
905 // Read top bits of double representation (second word of value).
906 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
907 // Test that exponent bits are all set.
908 __ And(t3, t2, Operand(exp_mask_reg));
909 // If all bits not set (ne cond), then not a NaN, objects are equal.
910 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
912 // Shift out flag and all exponent bits, retaining only mantissa.
913 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
914 // Or with all low-bits of mantissa.
915 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
916 __ Or(v0, t3, Operand(t2));
917 // For equal we already have the right value in v0: Return zero (equal)
918 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
919 // not (it's a NaN). For <= and >= we need to load v0 with the failing
920 // value if it's a NaN.
922 // All-zero means Infinity means equal.
923 __ Ret(eq, v0, Operand(zero_reg));
924 ASSERT(is_int16(GREATER) && is_int16(LESS));
925 __ Ret(USE_DELAY_SLOT);
927 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
929 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
933 // No fall through here.
935 __ bind(&not_identical);
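// Illustrative-only sketch of the NaN test performed above (not part of V8;
// the helper name is an assumption): a double is NaN exactly when all eleven
// exponent bits are set and the 52-bit mantissa is non-zero.
static inline bool SketchDoubleWordsAreNaN(uint32_t hi, uint32_t lo) {
  bool exponent_all_ones = ((hi >> 20) & 0x7FF) == 0x7FF;
  bool mantissa_non_zero = ((hi & 0xFFFFFu) | lo) != 0;
  return exponent_all_ones && mantissa_non_zero;
}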
939 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
942 Label* both_loaded_as_doubles,
945 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
946 (lhs.is(a1) && rhs.is(a0)));
949 __ JumpIfSmi(lhs, &lhs_is_smi);
951 // Check whether the non-smi is a heap number.
952 __ GetObjectType(lhs, t4, t4);
954 // If lhs was not a number and rhs was a Smi then strict equality cannot
955 // succeed. Return non-equal (lhs is already not zero).
956 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
959 // Smi compared non-strictly with a non-Smi non-heap-number. Call
961 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
964 // Rhs is a smi, lhs is a number.
965 // Convert smi rhs to double.
966 __ sra(at, rhs, kSmiTagSize);
968 __ cvt_d_w(f14, f14);
969 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
971 // We now have both loaded as doubles.
972 __ jmp(both_loaded_as_doubles);
974 __ bind(&lhs_is_smi);
975 // Lhs is a Smi. Check whether the non-smi is a heap number.
976 __ GetObjectType(rhs, t4, t4);
978 // If lhs was not a number and rhs was a Smi then strict equality cannot
979 // succeed. Return non-equal.
980 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
981 __ li(v0, Operand(1));
983 // Smi compared non-strictly with a non-Smi non-heap-number. Call
985 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
988 // Lhs is a smi, rhs is a number.
989 // Convert smi lhs to double.
990 __ sra(at, lhs, kSmiTagSize);
992 __ cvt_d_w(f12, f12);
993 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
994 // Fall through to both_loaded_as_doubles.
998 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1001 // If either operand is a JS object or an oddball value, then they are
1002 // not equal since their pointers are different.
1003 // There is no test for undetectability in strict equality.
1004 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
1005 Label first_non_object;
1006 // Get the type of the first operand into a2 and compare it with
1007 // FIRST_SPEC_OBJECT_TYPE.
1008 __ GetObjectType(lhs, a2, a2);
1009 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1012 Label return_not_equal;
1013 __ bind(&return_not_equal);
1014 __ Ret(USE_DELAY_SLOT);
1015 __ li(v0, Operand(1));
1017 __ bind(&first_non_object);
1018 // Check for oddballs: true, false, null, undefined.
1019 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
1021 __ GetObjectType(rhs, a3, a3);
1022 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1024 // Check for oddballs: true, false, null, undefined.
1025 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
1027 // Now that we have the types we might as well check for
1028 // internalized-internalized.
1029 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
1030 __ Or(a2, a2, Operand(a3));
1031 __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
1032 __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
1036 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1039 Label* both_loaded_as_doubles,
1040 Label* not_heap_numbers,
1042 __ GetObjectType(lhs, a3, a2);
1043 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
1044 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
1045 // If first was a heap number & second wasn't, go to slow case.
1046 __ Branch(slow, ne, a3, Operand(a2));
1048 // Both are heap numbers. Load them up then jump to the code we have
1050 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1051 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1053 __ jmp(both_loaded_as_doubles);
1057 // Fast negative check for internalized-to-internalized equality.
1058 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
1061 Label* possible_strings,
1062 Label* not_both_strings) {
1063 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1064 (lhs.is(a1) && rhs.is(a0)));
1066 // a2 is object type of rhs.
1068 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
1069 __ And(at, a2, Operand(kIsNotStringMask));
1070 __ Branch(&object_test, ne, at, Operand(zero_reg));
1071 __ And(at, a2, Operand(kIsNotInternalizedMask));
1072 __ Branch(possible_strings, ne, at, Operand(zero_reg));
1073 __ GetObjectType(rhs, a3, a3);
1074 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1075 __ And(at, a3, Operand(kIsNotInternalizedMask));
1076 __ Branch(possible_strings, ne, at, Operand(zero_reg));
1078 // Both are internalized strings. We already checked they weren't the same
1079 // pointer so they are not equal.
1080 __ Ret(USE_DELAY_SLOT);
1081 __ li(v0, Operand(1)); // Non-zero indicates not equal.
1083 __ bind(&object_test);
1084 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1085 __ GetObjectType(rhs, a2, a3);
1086 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1088 // If both objects are undetectable, they are equal. Otherwise, they
1089 // are not equal, since they are different objects and an object is not
1090 // equal to undefined.
1091 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1092 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1093 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1094 __ and_(a0, a2, a3);
1095 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1096 __ Ret(USE_DELAY_SLOT);
1097 __ xori(v0, a0, 1 << Map::kIsUndetectable);
1101 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
1104 CompareIC::State expected,
1107 if (expected == CompareIC::SMI) {
1108 __ JumpIfNotSmi(input, fail);
1109 } else if (expected == CompareIC::NUMBER) {
1110 __ JumpIfSmi(input, &ok);
1111 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
1114 // We could be strict about internalized/string here, but as long as
1115 // hydrogen doesn't care, the stub doesn't have to care either.
1120 // On entry a1 and a2 are the values to be compared.
1121 // On exit a0 is 0, positive or negative to indicate the result of
1123 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
1126 Condition cc = GetCondition();
1129 ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
1130 ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
1132 Label slow; // Call builtin.
1133 Label not_smis, both_loaded_as_doubles;
1135 Label not_two_smis, smi_done;
1137 __ JumpIfNotSmi(a2, &not_two_smis);
1140 __ Ret(USE_DELAY_SLOT);
1141 __ subu(v0, a1, a0);
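// Illustrative note (not from the original source): both operands are known
// to be Smis here and have been untagged, so each value fits in 31 bits and
// their difference cannot overflow 32 bits. The raw subtraction result is
// therefore already a valid comparison result: negative for LESS, zero for
// EQUAL, positive for GREATER.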
1142 __ bind(&not_two_smis);
1144 // NOTICE! This code is only reached after a smi-fast-case check, so
1145 // it is certain that at least one operand isn't a smi.
1147 // Handle the case where the objects are identical. Either returns the answer
1148 // or goes to slow. Only falls through if the objects were not identical.
1149 EmitIdenticalObjectComparison(masm, &slow, cc);
1151 // If either is a Smi (we know that not both are), then they can only
1152 // be strictly equal if the other is a HeapNumber.
1153 STATIC_ASSERT(kSmiTag == 0);
1154 ASSERT_EQ(0, Smi::FromInt(0));
1155 __ And(t2, lhs, Operand(rhs));
1156 __ JumpIfNotSmi(t2, &not_smis, t0);
1157 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1158 // 1) Return the answer.
1160 // 3) Fall through to both_loaded_as_doubles.
1161 // 4) Jump to rhs_not_nan.
1162 // In cases 3 and 4 we have found out we were dealing with a number-number
1163 // comparison and the numbers have been loaded into f12 and f14 as doubles,
1164 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1165 EmitSmiNonsmiComparison(masm, lhs, rhs,
1166 &both_loaded_as_doubles, &slow, strict());
1168 __ bind(&both_loaded_as_doubles);
1169 // f12, f14 are the double representations of the left hand side
1170 // and the right hand side if we have FPU. Otherwise a2, a3 represent
1171 // left hand side and a0, a1 represent right hand side.
1173 Isolate* isolate = masm->isolate();
1175 __ li(t0, Operand(LESS));
1176 __ li(t1, Operand(GREATER));
1177 __ li(t2, Operand(EQUAL));
1179 // Check if either rhs or lhs is NaN.
1180 __ BranchF(NULL, &nan, eq, f12, f14);
1182 // Check if LESS condition is satisfied. If true, move conditionally
1184 __ c(OLT, D, f12, f14);
1186 // Use previous check to store conditionally to v0 the opposite condition
1187 // (GREATER). If rhs is equal to lhs, this will be corrected in next
1190 // Check if EQUAL condition is satisfied. If true, move conditionally
1192 __ c(EQ, D, f12, f14);
1198 // NaN comparisons always fail.
1199 // Load whatever we need in v0 to make the comparison fail.
1200 ASSERT(is_int16(GREATER) && is_int16(LESS));
1201 __ Ret(USE_DELAY_SLOT);
1202 if (cc == lt || cc == le) {
1203 __ li(v0, Operand(GREATER));
1205 __ li(v0, Operand(LESS));
1210 // At this point we know we are dealing with two different objects,
1211 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1213 // This returns non-equal for some object types, or falls through if it
1215 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
1218 Label check_for_internalized_strings;
1219 Label flat_string_check;
1220 // Check for heap-number-heap-number comparison. Can jump to slow case,
1221 // or load both doubles and jump to the code that handles
1222 // that case. If the inputs are not doubles then jumps to
1223 // check_for_internalized_strings.
1224 // In this case a2 will contain the type of lhs_.
1225 EmitCheckForTwoHeapNumbers(masm,
1228 &both_loaded_as_doubles,
1229 &check_for_internalized_strings,
1230 &flat_string_check);
1232 __ bind(&check_for_internalized_strings);
1233 if (cc == eq && !strict()) {
1234 // Returns an answer for two internalized strings or two
1235 // detectable objects.
1236 // Otherwise jumps to string case or not both strings case.
1237 // Assumes that a2 is the type of lhs_ on entry.
1238 EmitCheckForInternalizedStringsOrObjects(
1239 masm, lhs, rhs, &flat_string_check, &slow);
1242 // Check for both being sequential ASCII strings, and inline if that is the
1244 __ bind(&flat_string_check);
1246 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
1248 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1250 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1257 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1265 // Never falls through to here.
1268 // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1271 // Figure out which native to call and setup the arguments.
1272 Builtins::JavaScript native;
1274 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1276 native = Builtins::COMPARE;
1277 int ncr; // NaN compare result.
1278 if (cc == lt || cc == le) {
1281 ASSERT(cc == gt || cc == ge); // Remaining cases.
1284 __ li(a0, Operand(Smi::FromInt(ncr)));
1288 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1289 // tagged as a small integer.
1290 __ InvokeBuiltin(native, JUMP_FUNCTION);
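// Illustrative-only restatement of the ncr ("NaN compare result") selection
// above (not part of V8; the helper name is an assumption): any ordered
// comparison involving NaN must come out false, so the builtin is handed a
// fallback value whose sign fails the caller's test: positive (GREATER) for
// < and <=, negative (LESS) for > and >=.
static inline int SketchNaNCompareResult(bool comparison_is_less_or_less_equal) {
  return comparison_is_less_or_less_equal ? +1 : -1;
}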
1297 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
1300 if (save_doubles_ == kSaveFPRegs) {
1301 __ PushSafepointRegistersAndDoubles();
1303 __ PushSafepointRegisters();
1309 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
1312 __ StoreToSafepointRegisterSlot(t9, t9);
1313 if (save_doubles_ == kSaveFPRegs) {
1314 __ PopSafepointRegistersAndDoubles();
1316 __ PopSafepointRegisters();
1322 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1323 // We don't allow a GC during a store buffer overflow so there is no need to
1324 // store the registers in any particular way, but we do have to store and
1326 __ MultiPush(kJSCallerSaved | ra.bit());
1327 if (save_doubles_ == kSaveFPRegs) {
1328 __ MultiPushFPU(kCallerSavedFPU);
1330 const int argument_count = 1;
1331 const int fp_argument_count = 0;
1332 const Register scratch = a1;
1334 AllowExternalCallThatCantCauseGC scope(masm);
1335 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1336 __ li(a0, Operand(ExternalReference::isolate_address(masm->isolate())));
1338 ExternalReference::store_buffer_overflow_function(masm->isolate()),
1340 if (save_doubles_ == kSaveFPRegs) {
1341 __ MultiPopFPU(kCallerSavedFPU);
1344 __ MultiPop(kJSCallerSaved | ra.bit());
1349 void MathPowStub::Generate(MacroAssembler* masm) {
1350 const Register base = a1;
1351 const Register exponent = a2;
1352 const Register heapnumbermap = t1;
1353 const Register heapnumber = v0;
1354 const DoubleRegister double_base = f2;
1355 const DoubleRegister double_exponent = f4;
1356 const DoubleRegister double_result = f0;
1357 const DoubleRegister double_scratch = f6;
1358 const FPURegister single_scratch = f8;
1359 const Register scratch = t5;
1360 const Register scratch2 = t3;
1362 Label call_runtime, done, int_exponent;
1363 if (exponent_type_ == ON_STACK) {
1364 Label base_is_smi, unpack_exponent;
1365 // The exponent and base are supplied as arguments on the stack.
1366 // This can only happen if the stub is called from non-optimized code.
1367 // Load input parameters from stack to double registers.
1368 __ lw(base, MemOperand(sp, 1 * kPointerSize));
1369 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
1371 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
1373 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
1374 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
1375 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1377 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
1378 __ jmp(&unpack_exponent);
1380 __ bind(&base_is_smi);
1381 __ mtc1(scratch, single_scratch);
1382 __ cvt_d_w(double_base, single_scratch);
1383 __ bind(&unpack_exponent);
1385 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1387 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
1388 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1389 __ ldc1(double_exponent,
1390 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1391 } else if (exponent_type_ == TAGGED) {
1392 // Base is already in double_base.
1393 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1395 __ ldc1(double_exponent,
1396 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1399 if (exponent_type_ != INTEGER) {
1400 Label int_exponent_convert;
1401 // Detect integer exponents stored as double.
1402 __ EmitFPUTruncate(kRoundToMinusInf,
1408 kCheckForInexactConversion);
1409 // scratch2 == 0 means there was no conversion error.
1410 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
1412 if (exponent_type_ == ON_STACK) {
1413 // Detect square root case. Crankshaft detects constant +/-0.5 at
1414 // compile time and uses DoMathPowHalf instead. We then skip this check
1415 // for non-constant cases of +/-0.5 as these hardly occur.
1416 Label not_plus_half;
1419 __ Move(double_scratch, 0.5);
1420 __ BranchF(USE_DELAY_SLOT,
1426 // double_scratch can be overwritten in the delay slot.
1427 // Calculates square root of base. Check for the special case of
1428 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
1429 __ Move(double_scratch, -V8_INFINITY);
1430 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1431 __ neg_d(double_result, double_scratch);
1433 // Add +0 to convert -0 to +0.
1434 __ add_d(double_scratch, double_base, kDoubleRegZero);
1435 __ sqrt_d(double_result, double_scratch);
1438 __ bind(&not_plus_half);
1439 __ Move(double_scratch, -0.5);
1440 __ BranchF(USE_DELAY_SLOT,
1446 // double_scratch can be overwritten in the delay slot.
1447 // Calculates square root of base. Check for the special case of
1448 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
1449 __ Move(double_scratch, -V8_INFINITY);
1450 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1451 __ Move(double_result, kDoubleRegZero);
1453 // Add +0 to convert -0 to +0.
1454 __ add_d(double_scratch, double_base, kDoubleRegZero);
1455 __ Move(double_result, 1);
1456 __ sqrt_d(double_scratch, double_scratch);
1457 __ div_d(double_result, double_result, double_scratch);
1463 AllowExternalCallThatCantCauseGC scope(masm);
1464 __ PrepareCallCFunction(0, 2, scratch2);
1465 __ MovToFloatParameters(double_base, double_exponent);
1467 ExternalReference::power_double_double_function(masm->isolate()),
1471 __ MovFromFloatResult(double_result);
1474 __ bind(&int_exponent_convert);
1477 // Calculate power with integer exponent.
1478 __ bind(&int_exponent);
1480 // Get two copies of exponent in the registers scratch and exponent.
1481 if (exponent_type_ == INTEGER) {
1482 __ mov(scratch, exponent);
1484 // Exponent has previously been stored into scratch as untagged integer.
1485 __ mov(exponent, scratch);
1488 __ mov_d(double_scratch, double_base); // Back up base.
1489 __ Move(double_result, 1.0);
1491 // Get absolute value of exponent.
1492 Label positive_exponent;
1493 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
1494 __ Subu(scratch, zero_reg, scratch);
1495 __ bind(&positive_exponent);
1497 Label while_true, no_carry, loop_end;
1498 __ bind(&while_true);
1500 __ And(scratch2, scratch, 1);
1502 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
1503 __ mul_d(double_result, double_result, double_scratch);
1506 __ sra(scratch, scratch, 1);
1508 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
1509 __ mul_d(double_scratch, double_scratch, double_scratch);
1511 __ Branch(&while_true);
1515 __ Branch(&done, ge, exponent, Operand(zero_reg));
1516 __ Move(double_scratch, 1.0);
1517 __ div_d(double_result, double_scratch, double_result);
1518 // Test whether result is zero. Bail out to check for subnormal result.
1519 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1520 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
1522 // double_exponent may not contain the exponent value if the input was a
1523 // smi. We set it with exponent value before bailing out.
1524 __ mtc1(exponent, single_scratch);
1525 __ cvt_d_w(double_exponent, single_scratch);
1527 // Returning or bailing out.
1528 Counters* counters = masm->isolate()->counters();
1529 if (exponent_type_ == ON_STACK) {
1530 // The arguments are still on the stack.
1531 __ bind(&call_runtime);
1532 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
1534 // The stub is called from non-optimized code, which expects the result
1535 // as heap number in exponent.
1537 __ AllocateHeapNumber(
1538 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1539 __ sdc1(double_result,
1540 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1541 ASSERT(heapnumber.is(v0));
1542 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1547 AllowExternalCallThatCantCauseGC scope(masm);
1548 __ PrepareCallCFunction(0, 2, scratch);
1549 __ MovToFloatParameters(double_base, double_exponent);
1551 ExternalReference::power_double_double_function(masm->isolate()),
1555 __ MovFromFloatResult(double_result);
1558 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
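// Illustrative-only C++ sketch of the integer-exponent loop above (not part
// of V8; the helper name is an assumption): square-and-multiply over the bits
// of the exponent, with a final reciprocal for negative exponents. The real
// stub additionally bails out to the C library when the result underflows to
// zero, which this sketch ignores.
static inline double SketchPowIntegerExponent(double base, int exponent) {
  double result = 1.0;
  double running_square = base;
  int bits = exponent < 0 ? -exponent : exponent;
  while (bits != 0) {
    if ((bits & 1) != 0) result *= running_square;  // Fold in this bit's power.
    running_square *= running_square;               // Square for the next bit.
    bits >>= 1;
  }
  return exponent < 0 ? 1.0 / result : result;
}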
1564 bool CEntryStub::NeedsImmovableCode() {
1569 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1570 CEntryStub::GenerateAheadOfTime(isolate);
1571 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
1572 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1573 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1574 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1575 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1576 BinaryOpICStub::GenerateAheadOfTime(isolate);
1577 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
1578 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
1579 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1583 void StoreRegistersStateStub::GenerateAheadOfTime(
1585 StoreRegistersStateStub stub1(kDontSaveFPRegs);
1586 stub1.GetCode(isolate);
1587 // Hydrogen code stubs need stub2 at snapshot time.
1588 StoreRegistersStateStub stub2(kSaveFPRegs);
1589 stub2.GetCode(isolate);
1593 void RestoreRegistersStateStub::GenerateAheadOfTime(
1595 RestoreRegistersStateStub stub1(kDontSaveFPRegs);
1596 stub1.GetCode(isolate);
1597 // Hydrogen code stubs need stub2 at snapshot time.
1598 RestoreRegistersStateStub stub2(kSaveFPRegs);
1599 stub2.GetCode(isolate);
1603 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1604 SaveFPRegsMode mode = kSaveFPRegs;
1605 CEntryStub save_doubles(1, mode);
1606 StoreBufferOverflowStub stub(mode);
1607 // These stubs might already be in the snapshot, detect that and don't
1608 // regenerate, which would lead to code stub initialization state being messed
1610 Code* save_doubles_code;
1611 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
1612 save_doubles_code = *save_doubles.GetCode(isolate);
1614 Code* store_buffer_overflow_code;
1615 if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
1616 store_buffer_overflow_code = *stub.GetCode(isolate);
1618 isolate->set_fp_stubs_generated(true);
1622 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1623 CEntryStub stub(1, kDontSaveFPRegs);
1624 stub.GetCode(isolate);
1628 static void JumpIfOOM(MacroAssembler* masm,
1632 STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
1633 STATIC_ASSERT(kFailureTag == 3);
1634 __ andi(scratch, value, 0xf);
1635 __ Branch(oom_label, eq, scratch, Operand(0xf));
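// Illustrative-only restatement of the tag test in JumpIfOOM above (not part
// of V8; the helper name is an assumption): a Failure pointer carries a 2-bit
// failure tag (value 3) in its low bits and the failure type just above it,
// so an out-of-memory failure (type 3) is exactly a value whose low nibble is
// 0xF.
static inline bool SketchIsOutOfMemoryFailure(uint32_t value) {
  return (value & 0xFu) == 0xFu;
}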
1639 void CEntryStub::GenerateCore(MacroAssembler* masm,
1640 Label* throw_normal_exception,
1641 Label* throw_termination_exception,
1642 Label* throw_out_of_memory_exception,
1644 bool always_allocate) {
1645 // v0: result parameter for PerformGC, if any
1646 // s0: number of arguments including receiver (C callee-saved)
1647 // s1: pointer to the first argument (C callee-saved)
1648 // s2: pointer to builtin function (C callee-saved)
1650 Isolate* isolate = masm->isolate();
1653 // Move result passed in v0 into a0 to call PerformGC.
1655 __ PrepareCallCFunction(2, 0, a1);
1656 __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
1657 __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
1660 ExternalReference scope_depth =
1661 ExternalReference::heap_always_allocate_scope_depth(isolate);
1662 if (always_allocate) {
1663 __ li(a0, Operand(scope_depth));
1664 __ lw(a1, MemOperand(a0));
1665 __ Addu(a1, a1, Operand(1));
1666 __ sw(a1, MemOperand(a0));
1669 // Prepare arguments for C routine.
1672 // a1 = argv (set in the delay slot after find_ra below).
1674 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1675 // also need to reserve the 4 argument slots on the stack.
1677 __ AssertStackIsAligned();
1679 __ li(a2, Operand(ExternalReference::isolate_address(isolate)));
1681 // To let the GC traverse the return address of the exit frames, we need to
1682 // know where the return address is. The CEntryStub is unmovable, so
1683 // we can store the address on the stack to be able to find it again and
1684 // we never have to restore it, because it will not change.
1685 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1686 // This branch-and-link sequence is needed to find the current PC on mips,
1687 // saved to the ra register.
1688 // Use masm-> here instead of the double-underscore macro since extra
1689 // coverage code can interfere with the proper calculation of ra.
1691 masm->bal(&find_ra); // bal exposes branch delay slot.
1693 masm->bind(&find_ra);
1695 // Adjust the value in ra to point to the correct return location, 2nd
1696 // instruction past the real call into C code (the jalr(t9)), and push it.
1697 // This is the return address of the exit frame.
1698 const int kNumInstructionsToJump = 5;
1699 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
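// Note (descriptive): on 32-bit MIPS every instruction is 4 bytes, so
// kPointerSize doubles as the instruction size in this return-address
// adjustment.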
1700 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
1701 // Stack space reservation moved to the branch delay slot below.
1702 // Stack is still aligned.
1704 // Call the C routine.
1705 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
1707 // Set up sp in the delay slot.
1708 masm->addiu(sp, sp, -kCArgsSlotsSize);
1709 // Make sure the stored 'ra' points to this position.
1710 ASSERT_EQ(kNumInstructionsToJump,
1711 masm->InstructionsGeneratedSince(&find_ra));
1714 if (always_allocate) {
1715 // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
1716 __ li(a2, Operand(scope_depth));
1717 __ lw(a3, MemOperand(a2));
1718 __ Subu(a3, a3, Operand(1));
1719 __ sw(a3, MemOperand(a2));
1722 // Check for failure result.
1723 Label failure_returned;
1724 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
1725 __ addiu(a2, v0, 1);
1726 __ andi(t0, a2, kFailureTagMask);
1727 __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
1728 // Restore stack (remove arg slots) in branch delay slot.
1729 __ addiu(sp, sp, kCArgsSlotsSize);
1732 // Exit C frame and return.
1734 // sp: stack pointer
1735 // fp: frame pointer
1736 __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
1738 // Check if we should retry or throw exception.
1740 __ bind(&failure_returned);
1741 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
1742 __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
1743 __ Branch(&retry, eq, t0, Operand(zero_reg));
1745 // Special handling of out of memory exceptions.
1746 JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
1748 // Retrieve the pending exception.
1749 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1751 __ lw(v0, MemOperand(t0));
1753 // See if we just retrieved an OOM exception.
1754 JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
1756 // Clear the pending exception.
1757 __ li(a3, Operand(isolate->factory()->the_hole_value()));
1758 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1760 __ sw(a3, MemOperand(t0));
1762 // Special handling of termination exceptions which are uncatchable
1763 // by javascript code.
1764 __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
1765 __ Branch(throw_termination_exception, eq, v0, Operand(t0));
1767 // Handle normal exception.
1768 __ jmp(throw_normal_exception);
1771 // Last failure (v0) will be moved to (a0) for parameter when retrying.
1775 void CEntryStub::Generate(MacroAssembler* masm) {
1776 // Called from JavaScript; parameters are on stack as if calling JS function
1777 // s0: number of arguments including receiver
1778 // s1: size of arguments excluding receiver
1779 // s2: pointer to builtin function
1780 // fp: frame pointer (restored after C call)
1781 // sp: stack pointer (restored as callee's sp after C call)
1782 // cp: current context (C callee-saved)
1784 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1786 // NOTE: Invocations of builtins may return failure objects
1787 // instead of a proper result. The builtin entry handles
1788 // this by performing a garbage collection and retrying the builtin once.
1791 // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
1792 // The reason for this is that these arguments would need to be saved anyway
1793 // so it's faster to set them up directly.
1794 // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
1796 // Compute the argv pointer in a callee-saved register.
1797 __ Addu(s1, sp, s1);
1799 // Enter the exit frame that transitions from JavaScript to C++.
1800 FrameScope scope(masm, StackFrame::MANUAL);
1801 __ EnterExitFrame(save_doubles_);
1803 // s0: number of arguments (C callee-saved)
1804 // s1: pointer to first argument (C callee-saved)
1805 // s2: pointer to builtin function (C callee-saved)
1807 Label throw_normal_exception;
1808 Label throw_termination_exception;
1809 Label throw_out_of_memory_exception;
1811 // Call into the runtime system.
1813 &throw_normal_exception,
1814 &throw_termination_exception,
1815 &throw_out_of_memory_exception,
1819 // Do space-specific GC and retry runtime call.
1821 &throw_normal_exception,
1822 &throw_termination_exception,
1823 &throw_out_of_memory_exception,
1827 // Do full GC and retry runtime call one final time.
1828 Failure* failure = Failure::InternalError();
1829 __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
1831 &throw_normal_exception,
1832 &throw_termination_exception,
1833 &throw_out_of_memory_exception,
1837 __ bind(&throw_out_of_memory_exception);
1838 // Set external caught exception to false.
1839 Isolate* isolate = masm->isolate();
1840 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
1842 __ li(a0, Operand(false, RelocInfo::NONE32));
1843 __ li(a2, Operand(external_caught));
1844 __ sw(a0, MemOperand(a2));
1846 // Set pending exception and v0 to out of memory exception.
1847 Label already_have_failure;
1848 JumpIfOOM(masm, v0, t0, &already_have_failure);
1849 Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
1850 __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
1851 __ bind(&already_have_failure);
1852 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1854 __ sw(v0, MemOperand(a2));
1855 // Fall through to the next label.
1857 __ bind(&throw_termination_exception);
1858 __ ThrowUncatchable(v0);
1860 __ bind(&throw_normal_exception);
1865 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
1866 Label invoke, handler_entry, exit;
1867 Isolate* isolate = masm->isolate();
1870 // a0: entry address
1879 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1881 // Save callee saved registers on the stack.
1882 __ MultiPush(kCalleeSaved | ra.bit());
1884 // Save callee-saved FPU registers.
1885 __ MultiPushFPU(kCalleeSavedFPU);
1886 // Set up the reserved register for 0.0.
1887 __ Move(kDoubleRegZero, 0.0);
1890 // Load argv in s0 register.
1891 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1892 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
1894 __ InitializeRootRegister();
1895 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
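// argv is passed on the stack by the caller, just above the four reserved
// argument slots. The code above pushed kNumCalleeSaved GPRs plus ra and
// the callee-saved FPU registers, moving sp down by offset_to_argv bytes,
// so argv is now found at sp + offset_to_argv + kCArgsSlotsSize.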
1897 // We build an EntryFrame.
1898 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1899 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
1900 __ li(t2, Operand(Smi::FromInt(marker)));
1901 __ li(t1, Operand(Smi::FromInt(marker)));
1902 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1904 __ lw(t0, MemOperand(t0));
1905 __ Push(t3, t2, t1, t0);
1906 // Set up frame pointer for the frame to be pushed.
1907 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
1910 // a0: entry_address
1912 // a2: receiver_pointer
1918 // function slot | entry frame
1920 // bad fp (0xff...f) |
1921 // callee saved registers + ra
1925 // If this is the outermost JS call, set js_entry_sp value.
1926 Label non_outermost_js;
1927 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1928 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1929 __ lw(t2, MemOperand(t1));
1930 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
1931 __ sw(fp, MemOperand(t1));
1932 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1935 __ nop(); // Branch delay slot nop.
1936 __ bind(&non_outermost_js);
1937 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1941 // Jump to a faked try block that does the invoke, with a faked catch
1942 // block that sets the pending exception.
1944 __ bind(&handler_entry);
1945 handler_offset_ = handler_entry.pos();
1946 // Caught exception: Store result (exception) in the pending exception
1947 // field in the JSEnv and return a failure sentinel. Coming in here the
1948 // fp will be invalid because the PushTryHandler below sets it to 0 to
1949 // signal the existence of the JSEntry frame.
1950 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1952 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
1953 __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
1954 __ b(&exit); // b exposes branch delay slot.
1955 __ nop(); // Branch delay slot nop.
1957 // Invoke: Link this frame into the handler chain. There's only one
1958 // handler block in this code object, so its index is 0.
1960 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1961 // If an exception not caught by another handler occurs, this handler
1962 // returns control to the code after the bal(&invoke) above, which
1963 // restores all kCalleeSaved registers (including cp and fp) to their
1964 // saved values before returning a failure to C.
1966 // Clear any pending exceptions.
1967 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
1968 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1970 __ sw(t1, MemOperand(t0));
1972 // Invoke the function by calling through JS entry trampoline builtin.
1973 // Notice that we cannot store a reference to the trampoline code directly in
1974 // this stub, because runtime stubs are not traversed when doing GC.
1977 // a0: entry_address
1979 // a2: receiver_pointer
1986 // callee saved registers + ra
1991 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1993 __ li(t0, Operand(construct_entry));
1995 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1996 __ li(t0, Operand(entry));
1998 __ lw(t9, MemOperand(t0)); // Deref address.
2000 // Call JSEntryTrampoline.
2001 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
2004 // Unlink this frame from the handler chain.
2007 __ bind(&exit); // v0 holds result
2008 // Check if the current stack frame is marked as the outermost JS frame.
2009 Label non_outermost_js_2;
2011 __ Branch(&non_outermost_js_2,
2014 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
2015 __ li(t1, Operand(ExternalReference(js_entry_sp)));
2016 __ sw(zero_reg, MemOperand(t1));
2017 __ bind(&non_outermost_js_2);
2019 // Restore the top frame descriptors from the stack.
2021 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2023 __ sw(t1, MemOperand(t0));
2025 // Reset the stack to the callee saved registers.
2026 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
2028 // Restore callee-saved fpu registers.
2029 __ MultiPopFPU(kCalleeSavedFPU);
2031 // Restore callee saved registers from the stack.
2032 __ MultiPop(kCalleeSaved | ra.bit());
2038 // Uses registers a0 to t0.
2039 // Expected input (depending on whether args are in registers or on the stack):
2040 // * object: a0 or at sp + 1 * kPointerSize.
2041 // * function: a1 or at sp.
2043 // An inlined call site may have been generated before calling this stub.
2044 // In this case the offset to the inline site to patch is passed on the stack,
2045 // in the safepoint slot for register t0.
2046 void InstanceofStub::Generate(MacroAssembler* masm) {
2047 // Call site inlining and patching implies arguments in registers.
2048 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
2049 // ReturnTrueFalse is only implemented for inlined call sites.
2050 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
2052 // Fixed register usage throughout the stub:
2053 const Register object = a0; // Object (lhs).
2054 Register map = a3; // Map of the object.
2055 const Register function = a1; // Function (rhs).
2056 const Register prototype = t0; // Prototype of the function.
2057 const Register inline_site = t5;
2058 const Register scratch = a2;
2060 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
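// kDeltaToLoadBoolResult is the byte distance from the patched map-cell
// load at the inlined call site to the instruction whose embedded value
// holds the boolean result; it has to stay in sync with the inlined
// instanceof sequence that this stub patches below.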
2062 Label slow, loop, is_instance, is_not_instance, not_js_object;
2064 if (!HasArgsInRegisters()) {
2065 __ lw(object, MemOperand(sp, 1 * kPointerSize));
2066 __ lw(function, MemOperand(sp, 0));
2069 // Check that the left hand is a JS object and load map.
2070 __ JumpIfSmi(object, ¬_js_object);
2071 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
2073 // If there is a call site cache don't look in the global cache, but do the
2074 // real lookup and update the call site cache.
2075 if (!HasCallSiteInlineCheck()) {
2077 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
2078 __ Branch(&miss, ne, function, Operand(at));
2079 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
2080 __ Branch(&miss, ne, map, Operand(at));
2081 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
2082 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2087 // Get the prototype of the function.
2088 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
2090 // Check that the function prototype is a JS object.
2091 __ JumpIfSmi(prototype, &slow);
2092 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
2094 // Update the global instanceof or call site inlined cache with the current
2095 // map and function. The cached answer will be set when it is known below.
2096 if (!HasCallSiteInlineCheck()) {
2097 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2098 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
2100 ASSERT(HasArgsInRegisters());
2101 // Patch the (relocated) inlined map check.
2103 // The offset was stored in t0 safepoint slot.
2104 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
2105 __ LoadFromSafepointRegisterSlot(scratch, t0);
2106 __ Subu(inline_site, ra, scratch);
2107 // Get the map location in scratch and patch it.
2108 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
2109 __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
2112 // Register mapping: a3 is object map and t0 is function prototype.
2113 // Get prototype of object into a2.
2114 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
2116 // We don't need map any more. Use it as a scratch register.
2117 Register scratch2 = map;
2120 // Loop through the prototype chain looking for the function prototype.
2121 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
2123 __ Branch(&is_instance, eq, scratch, Operand(prototype));
2124 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
2125 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
2126 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
2129 __ bind(&is_instance);
2130 ASSERT(Smi::FromInt(0) == 0);
2131 if (!HasCallSiteInlineCheck()) {
2132 __ mov(v0, zero_reg);
2133 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
2135 // Patch the call site to return true.
2136 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
2137 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
2138 // Get the boolean result location in scratch and patch it.
2139 __ PatchRelocatedValue(inline_site, scratch, v0);
2141 if (!ReturnTrueFalseObject()) {
2142 ASSERT_EQ(Smi::FromInt(0), 0);
2143 __ mov(v0, zero_reg);
2146 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2148 __ bind(&is_not_instance);
2149 if (!HasCallSiteInlineCheck()) {
2150 __ li(v0, Operand(Smi::FromInt(1)));
2151 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
2153 // Patch the call site to return false.
2154 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
2155 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
2156 // Get the boolean result location in scratch and patch it.
2157 __ PatchRelocatedValue(inline_site, scratch, v0);
2159 if (!ReturnTrueFalseObject()) {
2160 __ li(v0, Operand(Smi::FromInt(1)));
2164 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2166 Label object_not_null, object_not_null_or_smi;
2167 __ bind(¬_js_object);
2168 // Before null, smi and string value checks, check that the rhs is a function
2169 // as for a non-function rhs an exception needs to be thrown.
2170 __ JumpIfSmi(function, &slow);
2171 __ GetObjectType(function, scratch2, scratch);
2172 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
2174 // Null is not instance of anything.
2175 __ Branch(&object_not_null,
2178 Operand(masm->isolate()->factory()->null_value()));
2179 __ li(v0, Operand(Smi::FromInt(1)));
2180 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2182 __ bind(&object_not_null);
2183 // Smi values are not instances of anything.
2184 __ JumpIfNotSmi(object, &object_not_null_or_smi);
2185 __ li(v0, Operand(Smi::FromInt(1)));
2186 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2188 __ bind(&object_not_null_or_smi);
2189 // String values are not instances of anything.
2190 __ IsObjectJSStringType(object, scratch, &slow);
2191 __ li(v0, Operand(Smi::FromInt(1)));
2192 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2194 // Slow-case. Tail call builtin.
2196 if (!ReturnTrueFalseObject()) {
2197 if (HasArgsInRegisters()) {
2200 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
2203 FrameScope scope(masm, StackFrame::INTERNAL);
2205 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2208 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
2209 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
2210 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
2211 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2216 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
2219 if (kind() == Code::KEYED_LOAD_IC) {
2220 // ----------- S t a t e -------------
2221 // -- ra : return address
2224 // -----------------------------------
2225 __ Branch(&miss, ne, a0,
2226 Operand(masm->isolate()->factory()->prototype_string()));
2229 ASSERT(kind() == Code::LOAD_IC);
2230 // ----------- S t a t e -------------
2232 // -- ra : return address
2234 // -- sp[0] : receiver
2235 // -----------------------------------
2239 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
2241 StubCompiler::TailCallBuiltin(
2242 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2246 void StringLengthStub::Generate(MacroAssembler* masm) {
2249 if (kind() == Code::KEYED_LOAD_IC) {
2250 // ----------- S t a t e -------------
2251 // -- ra : return address
2254 // -----------------------------------
2255 __ Branch(&miss, ne, a0,
2256 Operand(masm->isolate()->factory()->length_string()));
2259 ASSERT(kind() == Code::LOAD_IC);
2260 // ----------- S t a t e -------------
2262 // -- ra : return address
2264 // -- sp[0] : receiver
2265 // -----------------------------------
2269 StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
2272 StubCompiler::TailCallBuiltin(
2273 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2277 void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
2278 // This accepts as a receiver anything JSArray::SetElementsLength accepts
2279 // (currently anything except for external arrays which means anything with
2280 // elements of FixedArray type). Value must be a number, but only smis are
2281 // accepted as the most common case.
2286 if (kind() == Code::KEYED_STORE_IC) {
2287 // ----------- S t a t e -------------
2288 // -- ra : return address
2292 // -----------------------------------
2293 __ Branch(&miss, ne, a1,
2294 Operand(masm->isolate()->factory()->length_string()));
2298 ASSERT(kind() == Code::STORE_IC);
2299 // ----------- S t a t e -------------
2300 // -- ra : return address
2304 // -----------------------------------
2308 Register scratch = a3;
2310 // Check that the receiver isn't a smi.
2311 __ JumpIfSmi(receiver, &miss);
2313 // Check that the object is a JS array.
2314 __ GetObjectType(receiver, scratch, scratch);
2315 __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
2317 // Check that elements are FixedArray.
2318 // We rely on StoreIC_ArrayLength below to deal with all types of
2319 // fast elements (including COW).
2320 __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
2321 __ GetObjectType(scratch, scratch, scratch);
2322 __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
2324 // Check that the array has fast properties, otherwise the length
2325 // property might have been redefined.
2326 __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
2327 __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
2328 __ LoadRoot(at, Heap::kHashTableMapRootIndex);
2329 __ Branch(&miss, eq, scratch, Operand(at));
2331 // Check that value is a smi.
2332 __ JumpIfNotSmi(value, &miss);
2334 // Prepare tail call to StoreIC_ArrayLength.
2335 __ Push(receiver, value);
2337 ExternalReference ref =
2338 ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
2339 __ TailCallExternalReference(ref, 2, 1);
2343 StubCompiler::TailCallBuiltin(
2344 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2348 Register InstanceofStub::left() { return a0; }
2351 Register InstanceofStub::right() { return a1; }
2354 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2355 // The displacement is the offset of the last parameter (if any)
2356 // relative to the frame pointer.
2357 const int kDisplacement =
2358 StandardFrameConstants::kCallerSPOffset - kPointerSize;
2360 // Check that the key is a smi.
2362 __ JumpIfNotSmi(a1, &slow);
2364 // Check if the calling frame is an arguments adaptor frame.
2366 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2367 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
2371 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2373 // Check index (a1) against formal parameters count limit passed in
2374 // through register a0. Use unsigned comparison to get negative check for free.
2376 __ Branch(&slow, hs, a1, Operand(a0));
2378 // Read the argument from the stack and return it.
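// Both a0 (the parameter count) and a1 (the key) are smis, so their
// difference is again a smi. Shifting it left by
// kPointerSizeLog2 - kSmiTagSize (2 - 1 = 1 on mips32) converts the
// smi-tagged word count straight into a byte offset without a separate
// untagging step; the adaptor-frame path below uses the same trick.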
2379 __ subu(a3, a0, a1);
2380 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
2381 __ Addu(a3, fp, Operand(t3));
2382 __ Ret(USE_DELAY_SLOT);
2383 __ lw(v0, MemOperand(a3, kDisplacement));
2385 // Arguments adaptor case: Check index (a1) against actual arguments
2386 // limit found in the arguments adaptor frame. Use unsigned
2387 // comparison to get negative check for free.
2389 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2390 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
2392 // Read the argument from the adaptor frame and return it.
2393 __ subu(a3, a0, a1);
2394 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
2395 __ Addu(a3, a2, Operand(t3));
2396 __ Ret(USE_DELAY_SLOT);
2397 __ lw(v0, MemOperand(a3, kDisplacement));
2399 // Slow-case: Handle non-smi or out-of-bounds access to arguments
2400 // by calling the runtime system.
2403 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2407 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
2408 // sp[0] : number of parameters
2409 // sp[4] : receiver displacement
2411 // Check if the calling frame is an arguments adaptor frame.
2413 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2414 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
2418 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2420 // Patch the arguments.length and the parameters pointer in the current frame.
2421 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2422 __ sw(a2, MemOperand(sp, 0 * kPointerSize));
2424 __ Addu(a3, a3, Operand(t3));
2425 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
2426 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2429 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2433 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
2435 // sp[0] : number of parameters (tagged)
2436 // sp[4] : address of receiver argument
2438 // Registers used over whole function:
2439 // t2 : allocated object (tagged)
2440 // t5 : mapped parameter count (tagged)
2442 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2443 // a1 = parameter count (tagged)
2445 // Check if the calling frame is an arguments adaptor frame.
2447 Label adaptor_frame, try_allocate;
2448 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2449 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
2450 __ Branch(&adaptor_frame,
2453 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2455 // No adaptor, parameter count = argument count.
2457 __ b(&try_allocate);
2458 __ nop(); // Branch delay slot nop.
2460 // We have an adaptor frame. Patch the parameters pointer.
2461 __ bind(&adaptor_frame);
2462 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2464 __ Addu(a3, a3, Operand(t6));
2465 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2466 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2468 // a1 = parameter count (tagged)
2469 // a2 = argument count (tagged)
2470 // Compute the mapped parameter count = min(a1, a2) in a1.
2472 __ Branch(&skip_min, lt, a1, Operand(a2));
2476 __ bind(&try_allocate);
2478 // Compute the sizes of backing store, parameter map, and arguments object.
2479 // 1. Parameter map, has 2 extra words containing context and backing store.
2480 const int kParameterMapHeaderSize =
2481 FixedArray::kHeaderSize + 2 * kPointerSize;
2482 // If there are no mapped parameters, we do not need the parameter_map.
2483 Label param_map_size;
2484 ASSERT_EQ(0, Smi::FromInt(0));
2485 __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg));
2486 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
2488 __ addiu(t5, t5, kParameterMapHeaderSize);
2489 __ bind(¶m_map_size);
2491 // 2. Backing store.
2493 __ Addu(t5, t5, Operand(t6));
2494 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
2496 // 3. Arguments object.
2497 __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
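// t5 now holds the total size to allocate, in bytes: the optional parameter
// map (one slot per mapped parameter plus its two-word header), the backing
// store (one slot per argument plus the FixedArray header) and the
// arguments object itself.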
2499 // Do the allocation of all three objects in one go.
2500 __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
2502 // v0 = address of new object(s) (tagged)
2503 // a2 = argument count (tagged)
2504 // Get the arguments boilerplate from the current native context into t0.
2505 const int kNormalOffset =
2506 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
2507 const int kAliasedOffset =
2508 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
2510 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2511 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
2512 Label skip2_ne, skip2_eq;
2513 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
2514 __ lw(t0, MemOperand(t0, kNormalOffset));
2517 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
2518 __ lw(t0, MemOperand(t0, kAliasedOffset));
2521 // v0 = address of new object (tagged)
2522 // a1 = mapped parameter count (tagged)
2523 // a2 = argument count (tagged)
2524 // t0 = address of boilerplate object (tagged)
2525 // Copy the JS object part.
2526 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2527 __ lw(a3, FieldMemOperand(t0, i));
2528 __ sw(a3, FieldMemOperand(v0, i));
2531 // Set up the callee in-object property.
2532 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2533 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
2534 const int kCalleeOffset = JSObject::kHeaderSize +
2535 Heap::kArgumentsCalleeIndex * kPointerSize;
2536 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
2538 // Use the length (smi tagged) and set that as an in-object property too.
2539 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2540 const int kLengthOffset = JSObject::kHeaderSize +
2541 Heap::kArgumentsLengthIndex * kPointerSize;
2542 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
2544 // Set up the elements pointer in the allocated arguments object.
2545 // If we allocated a parameter map, t0 will point there, otherwise
2546 // it will point to the backing store.
2547 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
2548 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
2550 // v0 = address of new object (tagged)
2551 // a1 = mapped parameter count (tagged)
2552 // a2 = argument count (tagged)
2553 // t0 = address of parameter map or backing store (tagged)
2554 // Initialize parameter map. If there are no mapped arguments, we're done.
2555 Label skip_parameter_map;
2557 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
2558 // Move backing store address to a3, because it is
2559 // expected there when filling in the unmapped arguments.
2563 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
2565 __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
2566 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
2567 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
2568 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
2569 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
2571 __ Addu(t2, t0, Operand(t6));
2572 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
2573 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
2575 // Copy the parameter slots and the holes in the arguments.
2576 // We need to fill in mapped_parameter_count slots. They index the context,
2577 // where parameters are stored in reverse order, at
2578 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2579 // The mapped parameters thus need to get indices
2580 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
2581 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2582 // We loop from right to left.
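// Worked example with hypothetical counts: for parameter_count == 3 and
// mapped_parameter_count == 2, t5 starts at MIN_CONTEXT_SLOTS + 1, so
// parameter-map slot 1 receives context index MIN_CONTEXT_SLOTS + 1 and
// slot 0 receives MIN_CONTEXT_SLOTS + 2; parameter 0 thus maps to the
// highest index because parameters live in the context in reverse order.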
2583 Label parameters_loop, parameters_test;
2585 __ lw(t5, MemOperand(sp, 0 * kPointerSize));
2586 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
2587 __ Subu(t5, t5, Operand(a1));
2588 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
2590 __ Addu(a3, t0, Operand(t6));
2591 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
2593 // t2 = loop variable (tagged)
2594 // a1 = mapping index (tagged)
2595 // a3 = address of backing store (tagged)
2596 // t0 = address of parameter map (tagged)
2597 // t1 = temporary scratch (a.o., for address calculation)
2598 // t3 = the hole value
2599 __ jmp(¶meters_test);
2601 __ bind(¶meters_loop);
2602 __ Subu(t2, t2, Operand(Smi::FromInt(1)));
2604 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
2605 __ Addu(t6, t0, t1);
2606 __ sw(t5, MemOperand(t6));
2607 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
2608 __ Addu(t6, a3, t1);
2609 __ sw(t3, MemOperand(t6));
2610 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
2611 __ bind(¶meters_test);
2612 __ Branch(¶meters_loop, ne, t2, Operand(Smi::FromInt(0)));
2614 __ bind(&skip_parameter_map);
2615 // a2 = argument count (tagged)
2616 // a3 = address of backing store (tagged)
2618 // Copy arguments header and remaining slots (if there are any).
2619 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
2620 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
2621 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
2623 Label arguments_loop, arguments_test;
2625 __ lw(t0, MemOperand(sp, 1 * kPointerSize));
2627 __ Subu(t0, t0, Operand(t6));
2628 __ jmp(&arguments_test);
2630 __ bind(&arguments_loop);
2631 __ Subu(t0, t0, Operand(kPointerSize));
2632 __ lw(t2, MemOperand(t0, 0));
2634 __ Addu(t1, a3, Operand(t6));
2635 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
2636 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
2638 __ bind(&arguments_test);
2639 __ Branch(&arguments_loop, lt, t5, Operand(a2));
2641 // Return and remove the on-stack parameters.
2644 // Do the runtime call to allocate the arguments object.
2645 // a2 = argument count (tagged)
2647 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
2648 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2652 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2653 // sp[0] : number of parameters
2654 // sp[4] : receiver displacement
2656 // Check if the calling frame is an arguments adaptor frame.
2657 Label adaptor_frame, try_allocate, runtime;
2658 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2659 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
2660 __ Branch(&adaptor_frame,
2663 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2665 // Get the length from the frame.
2666 __ lw(a1, MemOperand(sp, 0));
2667 __ Branch(&try_allocate);
2669 // Patch the arguments.length and the parameters pointer.
2670 __ bind(&adaptor_frame);
2671 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2672 __ sw(a1, MemOperand(sp, 0));
2673 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
2674 __ Addu(a3, a2, Operand(at));
2676 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2677 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2679 // Try the new space allocation. Start out with computing the size
2680 // of the arguments object and the elements array in words.
2681 Label add_arguments_object;
2682 __ bind(&try_allocate);
2683 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
2684 __ srl(a1, a1, kSmiTagSize);
2686 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
2687 __ bind(&add_arguments_object);
2688 __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
2690 // Do the allocation of both objects in one go.
2691 __ Allocate(a1, v0, a2, a3, &runtime,
2692 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2694 // Get the arguments boilerplate from the current native context.
2695 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2696 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
2697 __ lw(t0, MemOperand(t0, Context::SlotOffset(
2698 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
2700 // Copy the JS object part.
2701 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
2703 // Get the length (smi tagged) and set that as an in-object property too.
2704 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2705 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2706 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
2707 Heap::kArgumentsLengthIndex * kPointerSize));
2710 __ Branch(&done, eq, a1, Operand(zero_reg));
2712 // Get the parameters pointer from the stack.
2713 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
2715 // Set up the elements pointer in the allocated arguments object and
2716 // initialize the header in the elements fixed array.
2717 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
2718 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
2719 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
2720 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
2721 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
2722 // Untag the length for the loop.
2723 __ srl(a1, a1, kSmiTagSize);
2725 // Copy the fixed array slots.
2727 // Set up t0 to point to the first array slot.
2728 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2730 // Pre-decrement a2 with kPointerSize on each iteration.
2731 // Pre-decrement in order to skip receiver.
2732 __ Addu(a2, a2, Operand(-kPointerSize));
2733 __ lw(a3, MemOperand(a2));
2734 // Post-increment t0 with kPointerSize on each iteration.
2735 __ sw(a3, MemOperand(t0));
2736 __ Addu(t0, t0, Operand(kPointerSize));
2737 __ Subu(a1, a1, Operand(1));
2738 __ Branch(&loop, ne, a1, Operand(zero_reg));
2740 // Return and remove the on-stack parameters.
2744 // Do the runtime call to allocate the arguments object.
2746 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
2750 void RegExpExecStub::Generate(MacroAssembler* masm) {
2751 // Just jump directly to runtime if native RegExp is not selected at compile
2752 // time, or if regexp entry in generated code is turned off by a runtime switch, or at compilation.
2754 #ifdef V8_INTERPRETED_REGEXP
2755 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2756 #else // V8_INTERPRETED_REGEXP
2758 // Stack frame on entry.
2759 // sp[0]: last_match_info (expected JSArray)
2760 // sp[4]: previous index
2761 // sp[8]: subject string
2762 // sp[12]: JSRegExp object
2764 const int kLastMatchInfoOffset = 0 * kPointerSize;
2765 const int kPreviousIndexOffset = 1 * kPointerSize;
2766 const int kSubjectOffset = 2 * kPointerSize;
2767 const int kJSRegExpOffset = 3 * kPointerSize;
2769 Isolate* isolate = masm->isolate();
2772 // Allocation of registers for this function. These are in callee save
2773 // registers and will be preserved by the call to the native RegExp code, as
2774 // this code is called using the normal C calling convention. When calling
2775 // directly from generated code the native RegExp code will not do a GC and
2776 // therefore the content of these registers are safe to use after the call.
2777 // MIPS - using s0..s2, since we are not using CEntry Stub.
2778 Register subject = s0;
2779 Register regexp_data = s1;
2780 Register last_match_info_elements = s2;
2782 // Ensure that a RegExp stack is allocated.
2783 ExternalReference address_of_regexp_stack_memory_address =
2784 ExternalReference::address_of_regexp_stack_memory_address(
2786 ExternalReference address_of_regexp_stack_memory_size =
2787 ExternalReference::address_of_regexp_stack_memory_size(isolate);
2788 __ li(a0, Operand(address_of_regexp_stack_memory_size));
2789 __ lw(a0, MemOperand(a0, 0));
2790 __ Branch(&runtime, eq, a0, Operand(zero_reg));
2792 // Check that the first argument is a JSRegExp object.
2793 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
2794 STATIC_ASSERT(kSmiTag == 0);
2795 __ JumpIfSmi(a0, &runtime);
2796 __ GetObjectType(a0, a1, a1);
2797 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2799 // Check that the RegExp has been compiled (data contains a fixed array).
2800 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2801 if (FLAG_debug_code) {
2802 __ SmiTst(regexp_data, t0);
2804 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2807 __ GetObjectType(regexp_data, a0, a0);
2809 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2811 Operand(FIXED_ARRAY_TYPE));
2814 // regexp_data: RegExp data (FixedArray)
2815 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2816 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2817 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2819 // regexp_data: RegExp data (FixedArray)
2820 // Check that the number of captures fit in the static offsets vector buffer.
2822 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2823 // Check (number_of_captures + 1) * 2 <= offsets vector size
2824 // Or number_of_captures * 2 <= offsets vector size - 2
2825 // Multiplying by 2 comes for free since a2 is smi-tagged.
2826 STATIC_ASSERT(kSmiTag == 0);
2827 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2828 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2830 &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
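// Because a2 is still smi-tagged it already equals number_of_captures * 2,
// so the unsigned comparison against (vector size - 2) above implements the
// (number_of_captures + 1) * 2 <= vector size check without untagging.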
2832 // Reset offset for possibly sliced string.
2833 __ mov(t0, zero_reg);
2834 __ lw(subject, MemOperand(sp, kSubjectOffset));
2835 __ JumpIfSmi(subject, &runtime);
2836 __ mov(a3, subject); // Make a copy of the original subject string.
2837 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2838 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2839 // subject: subject string
2840 // a3: subject string
2841 // a0: subject string instance type
2842 // regexp_data: RegExp data (FixedArray)
2843 // Handle subject string according to its encoding and representation:
2844 // (1) Sequential string? If yes, go to (5).
2845 // (2) Anything but sequential or cons? If yes, go to (6).
2846 // (3) Cons string. If the string is flat, replace subject with first string.
2847 // Otherwise bailout.
2848 // (4) Is subject external? If yes, go to (7).
2849 // (5) Sequential string. Load regexp code according to encoding.
2853 // Deferred code at the end of the stub:
2854 // (6) Not a long external string? If yes, go to (8).
2855 // (7) External string. Make it, offset-wise, look like a sequential string.
2857 // (8) Short external string or not a string? If yes, bail out to runtime.
2858 // (9) Sliced string. Replace subject with parent. Go to (4).
2860 Label seq_string /* 5 */, external_string /* 7 */,
2861 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2862 not_long_external /* 8 */;
2864 // (1) Sequential string? If yes, go to (5).
2867 Operand(kIsNotStringMask |
2868 kStringRepresentationMask |
2869 kShortExternalStringMask));
2870 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2871 __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
2873 // (2) Anything but sequential or cons? If yes, go to (6).
2874 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2875 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2876 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2877 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2879 __ Branch(¬_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2881 // (3) Cons string. Check that it's flat.
2882 // Replace subject with first string and reload instance type.
2883 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2884 __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2885 __ Branch(&runtime, ne, a0, Operand(a1));
2886 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2888 // (4) Is subject external? If yes, go to (7).
2889 __ bind(&check_underlying);
2890 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2891 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2892 STATIC_ASSERT(kSeqStringTag == 0);
2893 __ And(at, a0, Operand(kStringRepresentationMask));
2894 // The underlying external string is never a short external string.
2895 STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
2896 STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2897 __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
2899 // (5) Sequential string. Load regexp code according to encoding.
2900 __ bind(&seq_string);
2901 // subject: sequential subject string (or look-alike, external string)
2902 // a3: original subject string
2903 // Load previous index and check range before a3 is overwritten. We have to
2904 // use a3 instead of subject here because subject might have been only made
2905 // to look like a sequential string when it actually is an external string.
2906 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
2907 __ JumpIfNotSmi(a1, &runtime);
2908 __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
2909 __ Branch(&runtime, ls, a3, Operand(a1));
2910 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
2912 STATIC_ASSERT(kStringEncodingMask == 4);
2913 STATIC_ASSERT(kOneByteStringTag == 4);
2914 STATIC_ASSERT(kTwoByteStringTag == 0);
2915 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
2916 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
2917 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
2918 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2919 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
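// At this point t9 holds the irregexp code object matching the subject's
// encoding, and a3 is 1 for one-byte (ASCII) and 0 for two-byte subjects;
// a3 is later flipped (Xor with 1) into the character-size shift used when
// computing the string data pointers.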
2921 // (E) Carry on. String handling is done.
2922 // t9: irregexp code
2923 // Check that the irregexp code has been generated for the actual string
2924 // encoding. If it has, the field contains a code object; otherwise it contains
2925 // a smi (code flushing support).
2926 __ JumpIfSmi(t9, &runtime);
2928 // a1: previous index
2929 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
2931 // subject: Subject string
2932 // regexp_data: RegExp data (FixedArray)
2933 // All checks done. Now push arguments for native regexp code.
2934 __ IncrementCounter(isolate->counters()->regexp_entry_native(),
2937 // Isolates: note we add an additional parameter here (isolate pointer).
2938 const int kRegExpExecuteArguments = 9;
2939 const int kParameterRegisters = 4;
2940 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2942 // Stack pointer now points to cell where return address is to be written.
2943 // Arguments are before that on the stack or in registers, meaning we
2944 // treat the return address as argument 5. Thus every argument after that
2945 // needs to be shifted back by 1. Since DirectCEntryStub will handle
2946 // allocating space for the c argument slots, we don't need to calculate
2947 // that into the argument positions on the stack. This is how the stack will
2948 // look (sp meaning the value of sp at this moment):
2949 // [sp + 5] - Argument 9
2950 // [sp + 4] - Argument 8
2951 // [sp + 3] - Argument 7
2952 // [sp + 2] - Argument 6
2953 // [sp + 1] - Argument 5
2954 // [sp + 0] - saved ra
2956 // Argument 9: Pass current isolate address.
2957 // CFunctionArgumentOperand handles MIPS stack argument slots.
2958 __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
2959 __ sw(a0, MemOperand(sp, 5 * kPointerSize));
2961 // Argument 8: Indicate that this is a direct call from JavaScript.
2962 __ li(a0, Operand(1));
2963 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
2965 // Argument 7: Start (high end) of backtracking stack memory area.
2966 __ li(a0, Operand(address_of_regexp_stack_memory_address));
2967 __ lw(a0, MemOperand(a0, 0));
2968 __ li(a2, Operand(address_of_regexp_stack_memory_size));
2969 __ lw(a2, MemOperand(a2, 0));
2970 __ addu(a0, a0, a2);
2971 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
2973 // Argument 6: Set the number of capture registers to zero to force global
2974 // regexps to behave as non-global. This does not affect non-global regexps.
2975 __ mov(a0, zero_reg);
2976 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
2978 // Argument 5: static offsets vector buffer.
2980 ExternalReference::address_of_static_offsets_vector(isolate)));
2981 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
2983 // For arguments 4 and 3 get string length, calculate start of string data
2984 // and calculate the shift of the index (0 for ASCII and 1 for two byte).
2985 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2986 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
2987 // Load the length from the original subject string from the previous stack
2988 // frame. Therefore we have to use fp, which points exactly to two pointer
2989 // sizes below the previous sp. (Because creating a new stack frame pushes
2990 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2991 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2992 // If slice offset is not 0, load the length from the original sliced string.
2993 // Argument 4, a3: End of string data
2994 // Argument 3, a2: Start of string data
2995 // Prepare start and end index of the input.
2996 __ sllv(t1, t0, a3);
2997 __ addu(t0, t2, t1);
2998 __ sllv(t1, a1, a3);
2999 __ addu(a2, t0, t1);
3001 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
3002 __ sra(t2, t2, kSmiTagSize);
3003 __ sllv(t1, t2, a3);
3004 __ addu(a3, t0, t1);
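// t0 now points at the first character of the (possibly sliced) subject
// data, a2 adds the scaled previous index to that (argument 3), and a3 adds
// the scaled string length (argument 4), so a2..a3 bound the data handed to
// the native regexp code.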
3005 // Argument 2 (a1): Previous index.
3008 // Argument 1 (a0): Subject string.
3009 __ mov(a0, subject);
3011 // Locate the code entry and call it.
3012 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
3013 DirectCEntryStub stub;
3014 stub.GenerateCall(masm, t9);
3016 __ LeaveExitFrame(false, no_reg, true);
3019 // subject: subject string (callee saved)
3020 // regexp_data: RegExp data (callee saved)
3021 // last_match_info_elements: Last match info elements (callee saved)
3022 // Check the result.
3024 __ Branch(&success, eq, v0, Operand(1));
3025 // We expect exactly one result since we force the called regexp to behave as non-global.
3028 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
3029 // If not exception it can only be retry. Handle that in the runtime system.
3030 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
3031 // Result must now be exception. If there is no pending exception already, a
3032 // stack overflow (on the backtrack stack) was detected in RegExp code, but
3033 // the exception has not been created yet. Handle that in the runtime system.
3034 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
3035 __ li(a1, Operand(isolate->factory()->the_hole_value()));
3036 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3038 __ lw(v0, MemOperand(a2, 0));
3039 __ Branch(&runtime, eq, v0, Operand(a1));
3041 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
3043 // Check if the exception is a termination. If so, throw as uncatchable.
3044 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
3045 Label termination_exception;
3046 __ Branch(&termination_exception, eq, v0, Operand(a0));
3050 __ bind(&termination_exception);
3051 __ ThrowUncatchable(v0);
3054 // For failure and exception return null.
3055 __ li(v0, Operand(isolate->factory()->null_value()));
3058 // Process the result from the native regexp code.
3061 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
3062 // Calculate number of capture registers (number_of_captures + 1) * 2.
3063 // Multiplying by 2 comes for free since a1 is smi-tagged.
3064 STATIC_ASSERT(kSmiTag == 0);
3065 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3066 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
3068 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
3069 __ JumpIfSmi(a0, &runtime);
3070 __ GetObjectType(a0, a2, a2);
3071 __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
3072 // Check that the JSArray is in fast case.
3073 __ lw(last_match_info_elements,
3074 FieldMemOperand(a0, JSArray::kElementsOffset));
3075 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
3076 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
3077 __ Branch(&runtime, ne, a0, Operand(at));
3078 // Check that the last match info has space for the capture registers and the
3079 // additional information.
3081 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
3082 __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
3083 __ sra(at, a0, kSmiTagSize);
3084 __ Branch(&runtime, gt, a2, Operand(at));
3086 // a1: number of capture registers
3087 // subject: subject string
3088 // Store the capture count.
3089 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
3090 __ sw(a2, FieldMemOperand(last_match_info_elements,
3091 RegExpImpl::kLastCaptureCountOffset));
3092 // Store last subject and last input.
3094 FieldMemOperand(last_match_info_elements,
3095 RegExpImpl::kLastSubjectOffset));
3096 __ mov(a2, subject);
3097 __ RecordWriteField(last_match_info_elements,
3098 RegExpImpl::kLastSubjectOffset,
3103 __ mov(subject, a2);
3105 FieldMemOperand(last_match_info_elements,
3106 RegExpImpl::kLastInputOffset));
3107 __ RecordWriteField(last_match_info_elements,
3108 RegExpImpl::kLastInputOffset,
3114 // Get the static offsets vector filled by the native regexp code.
3115 ExternalReference address_of_static_offsets_vector =
3116 ExternalReference::address_of_static_offsets_vector(isolate);
3117 __ li(a2, Operand(address_of_static_offsets_vector));
3119 // a1: number of capture registers
3120 // a2: offsets vector
3121 Label next_capture, done;
3122 // Capture register counter starts from number of capture registers and
3123 // counts down until wrapping after zero.
3125 last_match_info_elements,
3126 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
3127 __ bind(&next_capture);
3128 __ Subu(a1, a1, Operand(1));
3129 __ Branch(&done, lt, a1, Operand(zero_reg));
3130 // Read the value from the static offsets vector buffer.
3131 __ lw(a3, MemOperand(a2, 0));
3132 __ addiu(a2, a2, kPointerSize);
3133 // Store the smi value in the last match info.
3134 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
3135 __ sw(a3, MemOperand(a0, 0));
3136 __ Branch(&next_capture, USE_DELAY_SLOT);
3137 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
3141 // Return last match info.
3142 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
3145 // Do the runtime call to execute the regexp.
3147 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3149 // Deferred code for string handling.
3150 // (6) Not a long external string? If yes, go to (8).
3151 __ bind(¬_seq_nor_cons);
3153 __ Branch(¬_long_external, gt, a1, Operand(kExternalStringTag));
3155 // (7) External string. Make it, offset-wise, look like a sequential string.
3156 __ bind(&external_string);
3157 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
3158 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
3159 if (FLAG_debug_code) {
3160 // Assert that we do not have a cons or slice (indirect strings) here.
3161 // Sequential strings have already been ruled out.
3162 __ And(at, a0, Operand(kIsIndirectStringMask));
3164 kExternalStringExpectedButNotFound,
3169 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
3170 // Move the pointer so that offset-wise, it looks like a sequential string.
3171 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3174 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
3175 __ jmp(&seq_string); // Go to (5).
3177 // (8) Short external string or not a string? If yes, bail out to runtime.
3178 __ bind(¬_long_external);
3179 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
3180 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
3181 __ Branch(&runtime, ne, at, Operand(zero_reg));
3183 // (9) Sliced string. Replace subject with parent. Go to (4).
3184 // Load offset into t0 and replace subject string with parent.
3185 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
3186 __ sra(t0, t0, kSmiTagSize);
3187 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
3188 __ jmp(&check_underlying); // Go to (4).
3189 #endif // V8_INTERPRETED_REGEXP
3193 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
3194 const int kMaxInlineLength = 100;
3197 __ lw(a1, MemOperand(sp, kPointerSize * 2));
3198 STATIC_ASSERT(kSmiTag == 0);
3199 STATIC_ASSERT(kSmiTagSize == 1);
3200 __ JumpIfNotSmi(a1, &slowcase);
3201 __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
3202 // Smi-tagging is equivalent to multiplying by 2.
3203 // Allocate RegExpResult followed by FixedArray with size in a2.
3204 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
3205 // Elements: [Map][Length][..elements..]
3206 // Size of JSArray with two in-object properties and the header of a
3209 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
3210 __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
3211 __ Addu(a2, t1, Operand(objects_size));
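// a2 = number of words to allocate: the untagged element count plus the
// JSRegExpResult object and the FixedArray header (the allocation below is
// done with SIZE_IN_WORDS).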
3213 a2, // In: Size, in words.
3214 v0, // Out: Start of allocation (tagged).
3215 a3, // Scratch register.
3216 t0, // Scratch register.
3218 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
3219 // v0: Start of allocated area, object-tagged.
3220 // a1: Number of elements in array, as smi.
3221 // t1: Number of elements, untagged.
3223 // Set JSArray map to global.regexp_result_map().
3224 // Set empty properties FixedArray.
3225 // Set elements to point to FixedArray allocated right after the JSArray.
3226 // Interleave operations for better latency.
3227 __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
3228 __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
3229 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
3230 __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
3231 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
3232 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
3233 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
3234 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
3236 // Set input, index and length fields from arguments.
3237 __ lw(a1, MemOperand(sp, kPointerSize * 0));
3238 __ lw(a2, MemOperand(sp, kPointerSize * 1));
3239 __ lw(t2, MemOperand(sp, kPointerSize * 2));
3240 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
3241 __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
3242 __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
3244 // Fill out the elements FixedArray.
3245 // v0: JSArray, tagged.
3246 // a3: FixedArray, tagged.
3247 // t1: Number of elements in array, untagged.
3250 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
3251 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
3252 // Set FixedArray length.
3253 __ sll(t2, t1, kSmiTagSize);
3254 __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
3255 // Fill contents of fixed-array with undefined.
3256 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3257 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3258 // Fill fixed array elements with undefined.
3259 // v0: JSArray, tagged.
3261 // a3: Start of elements in FixedArray.
3262 // t1: Number of elements to fill.
3264 __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
3265 __ addu(t1, t1, a3); // Point past last element to store.
3267 __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
3268 __ sw(a2, MemOperand(a3));
3269 __ Branch(&loop, USE_DELAY_SLOT);
3270 __ addiu(a3, a3, kPointerSize); // In branch delay slot.
3276 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
3280 static void GenerateRecordCallTarget(MacroAssembler* masm) {
3281 // Cache the called function in a global property cell. Cache states
3282 // are uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
3284 // a0 : number of arguments to the construct function
3285 // a1 : the function to call
3286 // a2 : cache cell for call target
3287 Label initialize, done, miss, megamorphic, not_array_function;
3289 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
3290 masm->isolate()->heap()->undefined_value());
3291 ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
3292 masm->isolate()->heap()->the_hole_value());
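// The cell therefore encodes its state as: the hole means uninitialized,
// undefined means megamorphic, a JSFunction means monomorphic for that
// function, and an AllocationSite means monomorphic for the Array function
// (see the map check below).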
3294 // Load the cache state into a3.
3295 __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
3297 // A monomorphic cache hit or an already megamorphic state: invoke the
3298 // function without changing the state.
3299 __ Branch(&done, eq, a3, Operand(a1));
3301 // If we came here, we need to see if we are the array function.
3302 // If we didn't have a matching function, and we didn't find the megamorph
3303 // sentinel, then we have in the cell either some other function or an
3304 // AllocationSite. Do a map check on the object in a3.
3305 __ lw(t1, FieldMemOperand(a3, 0));
3306 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3307 __ Branch(&miss, ne, t1, Operand(at));
3309 // Make sure the function is the Array() function
3310 __ LoadArrayFunction(a3);
3311 __ Branch(&megamorphic, ne, a1, Operand(a3));
3316 // A monomorphic miss (i.e., here the cache is not uninitialized) goes megamorphic.
3318 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
3319 __ Branch(&initialize, eq, a3, Operand(at));
3320 // MegamorphicSentinel is an immortal immovable object (undefined) so no
3321 // write-barrier is needed.
3322 __ bind(&megamorphic);
3323 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3324 __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
3327 // An uninitialized cache is patched with the function or sentinel to
3328 // indicate the ElementsKind if function is the Array constructor.
3329 __ bind(&initialize);
3330 // Make sure the function is the Array() function
3331 __ LoadArrayFunction(a3);
3332 __ Branch(&not_array_function, ne, a1, Operand(a3));
3334 // The target function is the Array constructor.
3335 // Create an AllocationSite if we don't already have it, store it in the cell.
3337 FrameScope scope(masm, StackFrame::INTERNAL);
3338 const RegList kSavedRegs =
3343 // Arguments register must be smi-tagged to call out.
3345 __ MultiPush(kSavedRegs);
3347 CreateAllocationSiteStub create_stub;
3348 __ CallStub(&create_stub);
3350 __ MultiPop(kSavedRegs);
3355 __ bind(&not_array_function);
3356 __ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
3357 // No need for a write barrier here - cells are rescanned.
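// Illustrative summary (not part of the stub) of the cache-cell state machine
// handled above. The state names are descriptive only; the cell itself simply
// holds the hole, a JSFunction, an AllocationSite, or undefined:
//   uninitialized (the hole)  --first call-->        monomorphic (the JSFunction,
//                                                    or an AllocationSite when
//                                                    the callee is Array())
//   monomorphic               --different callee-->  megamorphic (undefined)
//   megamorphic               --any further call-->  megamorphic (unchanged)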
3363 void CallFunctionStub::Generate(MacroAssembler* masm) {
3364 // a1 : the function to call
3365 // a2 : cache cell for call target
3366 Label slow, non_function;
3368 // Check that the function is really a JavaScript function.
3369 // a1: pushed function (to be verified)
3370 __ JumpIfSmi(a1, &non_function);
3372 // Goto slow case if we do not have a function.
3373 __ GetObjectType(a1, a3, a3);
3374 __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
3376 if (RecordCallTarget()) {
3377 GenerateRecordCallTarget(masm);
3380 // Fast-case: Invoke the function now.
3381 // a1: pushed function
3382 ParameterCount actual(argc_);
3384 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
3386 // Slow-case: Non-function called.
3388 if (RecordCallTarget()) {
3389 // If there is a call target cache, mark it megamorphic in the
3390 // non-function case. MegamorphicSentinel is an immortal immovable
3391 // object (undefined) so no write barrier is needed.
3392 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
3393 masm->isolate()->heap()->undefined_value());
3394 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3395 __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
3397 // Check for function proxy.
3398 __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
3399 __ push(a1); // Put proxy as additional argument.
3400 __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
3401 __ li(a2, Operand(0, RelocInfo::NONE32));
3402 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
3404 Handle<Code> adaptor =
3405 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3406 __ Jump(adaptor, RelocInfo::CODE_TARGET);
3409 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3410 // of the original receiver from the call site).
3411 __ bind(&non_function);
3412 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
3413 __ li(a0, Operand(argc_)); // Set up the number of arguments.
3414 __ mov(a2, zero_reg);
3415 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
3416 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3417 RelocInfo::CODE_TARGET);
3421 void CallConstructStub::Generate(MacroAssembler* masm) {
3422 // a0 : number of arguments
3423 // a1 : the function to call
3424 // a2 : cache cell for call target
3425 Label slow, non_function_call;
3427 // Check that the function is not a smi.
3428 __ JumpIfSmi(a1, &non_function_call);
3429 // Check that the function is a JSFunction.
3430 __ GetObjectType(a1, a3, a3);
3431 __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
3433 if (RecordCallTarget()) {
3434 GenerateRecordCallTarget(masm);
3437 // Jump to the function-specific construct stub.
3438 Register jmp_reg = a3;
3439 __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3440 __ lw(jmp_reg, FieldMemOperand(jmp_reg,
3441 SharedFunctionInfo::kConstructStubOffset));
3442 __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
3445 // a0: number of arguments
3446 // a1: called object
3450 __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
3451 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3454 __ bind(&non_function_call);
3455 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3457 // Set expected number of arguments to zero (not changing a0).
3458 __ li(a2, Operand(0, RelocInfo::NONE32));
3459 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3460 RelocInfo::CODE_TARGET);
3464 // StringCharCodeAtGenerator.
3465 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3468 Label got_char_code;
3469 Label sliced_string;
3471 ASSERT(!t0.is(index_));
3472 ASSERT(!t0.is(result_));
3473 ASSERT(!t0.is(object_));
3475 // If the receiver is a smi trigger the non-string case.
3476 __ JumpIfSmi(object_, receiver_not_string_);
3478 // Fetch the instance type of the receiver into result register.
3479 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3480 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3481 // If the receiver is not a string trigger the non-string case.
3482 __ And(t0, result_, Operand(kIsNotStringMask));
3483 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
3485 // If the index is non-smi trigger the non-smi case.
3486 __ JumpIfNotSmi(index_, &index_not_smi_);
3488 __ bind(&got_smi_index_);
3490 // Check for index out of range.
3491 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
3492 __ Branch(index_out_of_range_, ls, t0, Operand(index_));
3494 __ sra(index_, index_, kSmiTagSize);
3496 StringCharLoadGenerator::Generate(masm,
3502 __ sll(result_, result_, kSmiTagSize);
3507 void StringCharCodeAtGenerator::GenerateSlow(
3508 MacroAssembler* masm,
3509 const RuntimeCallHelper& call_helper) {
3510 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3512 // Index is not a smi.
3513 __ bind(&index_not_smi_);
3514 // If index is a heap number, try converting it to an integer.
3517 Heap::kHeapNumberMapRootIndex,
3520 call_helper.BeforeCall(masm);
3521 // Consumed by runtime conversion function:
3522 __ Push(object_, index_);
3523 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3524 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3526 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3527 // NumberToSmi discards numbers that are not exact integers.
3528 __ CallRuntime(Runtime::kNumberToSmi, 1);
3531 // Save the conversion result before the pop instructions below
3532 // have a chance to overwrite it.
3534 __ Move(index_, v0);
3536 // Reload the instance type.
3537 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3538 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3539 call_helper.AfterCall(masm);
3540 // If index is still not a smi, it must be out of range.
3541 __ JumpIfNotSmi(index_, index_out_of_range_);
3542 // Otherwise, return to the fast path.
3543 __ Branch(&got_smi_index_);
3545 // Call runtime. We get here when the receiver is a string and the
3546 // index is a number, but the code of getting the actual character
3547 // is too complex (e.g., when the string needs to be flattened).
3548 __ bind(&call_runtime_);
3549 call_helper.BeforeCall(masm);
3550 __ sll(index_, index_, kSmiTagSize);
3551 __ Push(object_, index_);
3552 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
3554 __ Move(result_, v0);
3556 call_helper.AfterCall(masm);
3559 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3563 // -------------------------------------------------------------------------
3564 // StringCharFromCodeGenerator
3566 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3567 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3569 ASSERT(!t0.is(result_));
3570 ASSERT(!t0.is(code_));
3572 STATIC_ASSERT(kSmiTag == 0);
3573 STATIC_ASSERT(kSmiShiftSize == 0);
3574 ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
3577 Operand(kSmiTagMask |
3578 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3579 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
3581 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3582 // At this point code register contains smi tagged ASCII char code.
3583 STATIC_ASSERT(kSmiTag == 0);
3584 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
3585 __ Addu(result_, result_, t0);
3586 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3587 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3588 __ Branch(&slow_case_, eq, result_, Operand(t0));
3593 void StringCharFromCodeGenerator::GenerateSlow(
3594 MacroAssembler* masm,
3595 const RuntimeCallHelper& call_helper) {
3596 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3598 __ bind(&slow_case_);
3599 call_helper.BeforeCall(masm);
3601 __ CallRuntime(Runtime::kCharFromCode, 1);
3602 __ Move(result_, v0);
3604 call_helper.AfterCall(masm);
3607 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3611 enum CopyCharactersFlags {
3612 COPY_ASCII = 1,
3613 DEST_ALWAYS_ALIGNED = 2
3617 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
3627 bool ascii = (flags & COPY_ASCII) != 0;
3628 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
3630 if (dest_always_aligned && FLAG_debug_code) {
3631 // Check that destination is actually word aligned if the flag says it is.
3633 __ And(scratch4, dest, Operand(kPointerAlignmentMask));
3634 __ Check(eq, kDestinationOfCopyNotAligned, scratch4, Operand(zero_reg));
3640 const int kReadAlignment = 4;
3641 const int kReadAlignmentMask = kReadAlignment - 1;
3642 // Ensure that reading an entire aligned word containing the last character
3643 // of a string will not read outside the allocated area (because we pad up
3644 // to kObjectAlignment).
3645 STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
3646 // Assumes word reads and writes are little endian.
3647 // Nothing to do for zero characters.
3651 __ addu(count, count, count);
3653 __ Branch(&done, eq, count, Operand(zero_reg));
3656 // Must copy at least eight bytes, otherwise just do it one byte at a time.
3657 __ Subu(scratch1, count, Operand(8));
3658 __ Addu(count, dest, Operand(count));
3659 Register limit = count; // Read until src equals this.
3660 __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
3662 if (!dest_always_aligned) {
3663 // Align dest by byte copying. Copies between zero and three bytes.
3664 __ And(scratch4, dest, Operand(kReadAlignmentMask));
3666 __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
3668 __ bind(&aligned_loop);
3669 __ lbu(scratch1, MemOperand(src));
3670 __ addiu(src, src, 1);
3671 __ sb(scratch1, MemOperand(dest));
3672 __ addiu(dest, dest, 1);
3673 __ addiu(scratch4, scratch4, 1);
3674 __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
3675 __ bind(&dest_aligned);
3680 __ And(scratch4, src, Operand(kReadAlignmentMask));
3681 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
3683 // Loop for src/dst that are not aligned the same way.
3684 // This loop uses lwl and lwr instructions. These instructions
3685 // depend on the endianness, and the implementation assumes little-endian.
3689 __ lwr(scratch1, MemOperand(src));
3690 __ Addu(src, src, Operand(kReadAlignment));
3691 __ lwl(scratch1, MemOperand(src, -1));
3692 __ sw(scratch1, MemOperand(dest));
3693 __ Addu(dest, dest, Operand(kReadAlignment));
3694 __ Subu(scratch2, limit, dest);
3695 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
3698 __ Branch(&byte_loop);
3701 // Copy words from src to dest, until less than four bytes left.
3702 // Both src and dest are word aligned.
3703 __ bind(&simple_loop);
3707 __ lw(scratch1, MemOperand(src));
3708 __ Addu(src, src, Operand(kReadAlignment));
3709 __ sw(scratch1, MemOperand(dest));
3710 __ Addu(dest, dest, Operand(kReadAlignment));
3711 __ Subu(scratch2, limit, dest);
3712 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
3715 // Copy bytes from src to dest until dest hits limit.
3716 __ bind(&byte_loop);
3717 // Test if dest has already reached the limit.
3718 __ Branch(&done, ge, dest, Operand(limit));
3719 __ lbu(scratch1, MemOperand(src));
3720 __ addiu(src, src, 1);
3721 __ sb(scratch1, MemOperand(dest));
3722 __ addiu(dest, dest, 1);
3723 __ Branch(&byte_loop);
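// A rough C++ sketch of the copy strategy above, purely illustrative and not
// used by the stub: byte-copy until the destination is word aligned, move
// whole words while at least a word remains (memcpy stands in here for the
// lw/sw and lwl/lwr pairs), then finish byte by byte. Like the stub, it treats
// copies shorter than eight bytes as cheapest done one byte at a time.
#include <string.h>  // memcpy, for the illustrative word moves only.

static void IllustrativeCopyBytes(unsigned char* dest,
                                  const unsigned char* src,
                                  int byte_count) {
  unsigned char* limit = dest + byte_count;
  if (byte_count >= 8) {
    // Align the destination by copying single bytes.
    while ((reinterpret_cast<unsigned long>(dest) & 3) != 0) {
      *dest++ = *src++;
    }
    // Copy whole words; unaligned source reads hide inside memcpy here, where
    // the stub uses lwl/lwr.
    while (limit - dest >= 4) {
      memcpy(dest, src, 4);
      src += 4;
      dest += 4;
    }
  }
  // Copy any remaining bytes.
  while (dest < limit) {
    *dest++ = *src++;
  }
}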
3729 void StringHelper::GenerateHashInit(MacroAssembler* masm,
3731 Register character) {
3732 // hash = seed + character + ((seed + character) << 10);
3733 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
3734 // Untag smi seed and add the character.
3736 __ addu(hash, hash, character);
3737 __ sll(at, hash, 10);
3738 __ addu(hash, hash, at);
3739 // hash ^= hash >> 6;
3740 __ srl(at, hash, 6);
3741 __ xor_(hash, hash, at);
3745 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3747 Register character) {
3748 // hash += character;
3749 __ addu(hash, hash, character);
3750 // hash += hash << 10;
3751 __ sll(at, hash, 10);
3752 __ addu(hash, hash, at);
3753 // hash ^= hash >> 6;
3754 __ srl(at, hash, 6);
3755 __ xor_(hash, hash, at);
3759 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
3761 // hash += hash << 3;
3762 __ sll(at, hash, 3);
3763 __ addu(hash, hash, at);
3764 // hash ^= hash >> 11;
3765 __ srl(at, hash, 11);
3766 __ xor_(hash, hash, at);
3767 // hash += hash << 15;
3768 __ sll(at, hash, 15);
3769 __ addu(hash, hash, at);
3771 __ li(at, Operand(String::kHashBitMask));
3772 __ and_(hash, hash, at);
3774 // if (hash == 0) hash = 27;
3775 __ ori(at, zero_reg, StringHasher::kZeroHash);
3776 __ Movz(hash, at, hash);
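// Illustrative plain-C++ version of the running hash built by the three
// helpers above; it is not used by the stub. The 30-bit mask width is an
// assumption standing in for String::kHashBitMask; the seeded first step and
// the zero-hash fallback of 27 follow the comments above.
static unsigned IllustrativeStringHash(const unsigned char* chars,
                                       int length,
                                       unsigned seed) {
  unsigned hash = seed;
  for (int i = 0; i < length; i++) {
    // GenerateHashInit (first character) / GenerateHashAddCharacter (rest).
    hash += chars[i];
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  // GenerateHashGetHash.
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= (1u << 30) - 1;    // Assumed width of String::kHashBitMask.
  if (hash == 0) hash = 27;  // StringHasher::kZeroHash, per the comment above.
  return hash;
}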
3780 void SubStringStub::Generate(MacroAssembler* masm) {
3782 // Stack frame on entry.
3783 // ra: return address
3788 // This stub is called from the native-call %_SubString(...), so
3789 // nothing can be assumed about the arguments. It is tested that:
3790 // "string" is a sequential string,
3791 // both "from" and "to" are smis, and
3792 // 0 <= from <= to <= string.length.
3793 // If any of these assumptions fail, we call the runtime system.
3795 const int kToOffset = 0 * kPointerSize;
3796 const int kFromOffset = 1 * kPointerSize;
3797 const int kStringOffset = 2 * kPointerSize;
3799 __ lw(a2, MemOperand(sp, kToOffset));
3800 __ lw(a3, MemOperand(sp, kFromOffset));
3801 STATIC_ASSERT(kFromOffset == kToOffset + 4);
3802 STATIC_ASSERT(kSmiTag == 0);
3803 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3805 // Utilize delay slots. SmiUntag doesn't emit a jump; everything else is
3806 // safe in this case.
3807 __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
3808 __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
3809 // Both a2 and a3 are untagged integers.
3811 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
3813 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
3814 __ Subu(a2, a2, a3);
3816 // Make sure first argument is a string.
3817 __ lw(v0, MemOperand(sp, kStringOffset));
3818 __ JumpIfSmi(v0, &runtime);
3819 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
3820 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3821 __ And(t0, a1, Operand(kIsNotStringMask));
3823 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3826 __ Branch(&single_char, eq, a2, Operand(1));
3828 // Short-cut for the case of trivial substring.
3830 // v0: original string
3831 // a2: result string length
3832 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
3834 // Return original string.
3835 __ Branch(&return_v0, eq, a2, Operand(t0));
3836 // Longer than original string's length or negative: unsafe arguments.
3837 __ Branch(&runtime, hi, a2, Operand(t0));
3838 // Shorter than original string's length: an actual substring.
3840 // Deal with different string types: update the index if necessary
3841 // and put the underlying string into t1.
3842 // v0: original string
3843 // a1: instance type
3845 // a3: from index (untagged)
3846 Label underlying_unpacked, sliced_string, seq_or_external_string;
3847 // If the string is not indirect, it can only be sequential or external.
3848 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3849 STATIC_ASSERT(kIsIndirectStringMask != 0);
3850 __ And(t0, a1, Operand(kIsIndirectStringMask));
3851 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
3852 // t0 is used as a scratch register and can be overwritten in either case.
3853 __ And(t0, a1, Operand(kSlicedNotConsMask));
3854 __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
3855 // Cons string. Check whether it is flat, then fetch first part.
3856 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
3857 __ LoadRoot(t0, Heap::kempty_stringRootIndex);
3858 __ Branch(&runtime, ne, t1, Operand(t0));
3859 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
3860 // Update instance type.
3861 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3862 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3863 __ jmp(&underlying_unpacked);
3865 __ bind(&sliced_string);
3866 // Sliced string. Fetch parent and correct start index by offset.
3867 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3868 __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3869 __ sra(t0, t0, 1); // Add offset to index.
3870 __ Addu(a3, a3, t0);
3871 // Update instance type.
3872 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3873 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3874 __ jmp(&underlying_unpacked);
3876 __ bind(&seq_or_external_string);
3877 // Sequential or external string. Just move string to the expected register.
3880 __ bind(&underlying_unpacked);
3882 if (FLAG_string_slices) {
3884 // t1: underlying subject string
3885 // a1: instance type of underlying subject string
3887 // a3: adjusted start index (untagged)
3888 // Short slice. Copy instead of slicing.
3889 __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
3890 // Allocate new sliced string. At this point we do not reload the instance
3891 // type including the string encoding because we simply rely on the info
3892 // provided by the original string. It does not matter if the original
3893 // string's encoding is wrong because we always have to recheck encoding of
3894 // the newly created string's parent anyway due to externalized strings.
3895 Label two_byte_slice, set_slice_header;
3896 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3897 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3898 __ And(t0, a1, Operand(kStringEncodingMask));
3899 __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
3900 __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
3901 __ jmp(&set_slice_header);
3902 __ bind(&two_byte_slice);
3903 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
3904 __ bind(&set_slice_header);
3906 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3907 __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3910 __ bind(&copy_routine);
3913 // t1: underlying subject string
3914 // a1: instance type of underlying subject string
3916 // a3: adjusted start index (untagged)
3917 Label two_byte_sequential, sequential_string, allocate_result;
3918 STATIC_ASSERT(kExternalStringTag != 0);
3919 STATIC_ASSERT(kSeqStringTag == 0);
3920 __ And(t0, a1, Operand(kExternalStringTag));
3921 __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
3923 // Handle external string.
3924 // Rule out short external strings.
3925 STATIC_CHECK(kShortExternalStringTag != 0);
3926 __ And(t0, a1, Operand(kShortExternalStringTag));
3927 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3928 __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
3929 // t1 already points to the first character of underlying string.
3930 __ jmp(&allocate_result);
3932 __ bind(&sequential_string);
3933 // Locate first character of underlying subject string.
3934 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3935 __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3937 __ bind(&allocate_result);
3938 // Sequential ASCII string. Allocate the result.
3939 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3940 __ And(t0, a1, Operand(kStringEncodingMask));
3941 __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
3943 // Allocate and copy the resulting ASCII string.
3944 __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
3946 // Locate first character of substring to copy.
3947 __ Addu(t1, t1, a3);
3949 // Locate first character of result.
3950 __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3952 // v0: result string
3953 // a1: first character of result string
3954 // a2: result string length
3955 // t1: first character of substring to copy
3956 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3957 StringHelper::GenerateCopyCharactersLong(
3958 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
3961 // Allocate and copy the resulting two-byte string.
3962 __ bind(&two_byte_sequential);
3963 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
3965 // Locate first character of substring to copy.
3966 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3968 __ Addu(t1, t1, t0);
3969 // Locate first character of result.
3970 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3972 // v0: result string.
3973 // a1: first character of result.
3974 // a2: result length.
3975 // t1: first character of substring to copy.
3976 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3977 StringHelper::GenerateCopyCharactersLong(
3978 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
3980 __ bind(&return_v0);
3981 Counters* counters = masm->isolate()->counters();
3982 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
3985 // Just jump to runtime to create the substring.
3987 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3989 __ bind(&single_char);
3990 // v0: original string
3991 // a1: instance type
3993 // a3: from index (untagged)
3995 StringCharAtGenerator generator(
3996 v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3997 generator.GenerateFast(masm);
3999 generator.SkipSlow(masm, &runtime);
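// Summary of the paths chosen above, for reference only: when string slices
// are enabled and the requested length is at least SlicedString::kMinLength,
// the stub allocates a SlicedString that merely records the unpacked parent
// and the adjusted start offset; shorter results, and all results when slices
// are disabled, get a freshly allocated sequential string whose characters
// are copied with GenerateCopyCharactersLong. Irregular inputs (non-flat cons
// strings, short external strings, out-of-range indices, non-smi arguments)
// fall through to Runtime::kSubString.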
4003 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
4008 Register scratch3) {
4009 Register length = scratch1;
4012 Label strings_not_equal, check_zero_length;
4013 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
4014 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
4015 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
4016 __ bind(&strings_not_equal);
4017 ASSERT(is_int16(NOT_EQUAL));
4018 __ Ret(USE_DELAY_SLOT);
4019 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
4021 // Check if the length is zero.
4022 Label compare_chars;
4023 __ bind(&check_zero_length);
4024 STATIC_ASSERT(kSmiTag == 0);
4025 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
4026 ASSERT(is_int16(EQUAL));
4027 __ Ret(USE_DELAY_SLOT);
4028 __ li(v0, Operand(Smi::FromInt(EQUAL)));
4030 // Compare characters.
4031 __ bind(&compare_chars);
4033 GenerateAsciiCharsCompareLoop(masm,
4034 left, right, length, scratch2, scratch3, v0,
4035 &strings_not_equal);
4037 // Characters are equal.
4038 __ Ret(USE_DELAY_SLOT);
4039 __ li(v0, Operand(Smi::FromInt(EQUAL)));
4043 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
4049 Register scratch4) {
4050 Label result_not_equal, compare_lengths;
4051 // Find minimum length and length difference.
4052 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
4053 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
4054 __ Subu(scratch3, scratch1, Operand(scratch2));
4055 Register length_delta = scratch3;
4056 __ slt(scratch4, scratch2, scratch1);
4057 __ Movn(scratch1, scratch2, scratch4);
4058 Register min_length = scratch1;
4059 STATIC_ASSERT(kSmiTag == 0);
4060 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
4063 GenerateAsciiCharsCompareLoop(masm,
4064 left, right, min_length, scratch2, scratch4, v0,
4067 // Compare lengths - strings up to min-length are equal.
4068 __ bind(&compare_lengths);
4069 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
4070 // Use length_delta as result if it's zero.
4071 __ mov(scratch2, length_delta);
4072 __ mov(scratch4, zero_reg);
4073 __ mov(v0, zero_reg);
4075 __ bind(&result_not_equal);
4076 // Conditionally update the result based on either length_delta or
4077 // the last comparison performed in the loop above.
4079 __ Branch(&ret, eq, scratch2, Operand(scratch4));
4080 __ li(v0, Operand(Smi::FromInt(GREATER)));
4081 __ Branch(&ret, gt, scratch2, Operand(scratch4));
4082 __ li(v0, Operand(Smi::FromInt(LESS)));
4088 void StringCompareStub::GenerateAsciiCharsCompareLoop(
4089 MacroAssembler* masm,
4096 Label* chars_not_equal) {
4097 // Change index to run from -length to -1 by adding length to string
4098 // start. This means that loop ends when index reaches zero, which
4099 // doesn't need an additional compare.
4100 __ SmiUntag(length);
4101 __ Addu(scratch1, length,
4102 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
4103 __ Addu(left, left, Operand(scratch1));
4104 __ Addu(right, right, Operand(scratch1));
4105 __ Subu(length, zero_reg, length);
4106 Register index = length; // index = -length;
4112 __ Addu(scratch3, left, index);
4113 __ lbu(scratch1, MemOperand(scratch3));
4114 __ Addu(scratch3, right, index);
4115 __ lbu(scratch2, MemOperand(scratch3));
4116 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
4117 __ Addu(index, index, 1);
4118 __ Branch(&loop, ne, index, Operand(zero_reg));
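// Minimal C++ sketch of the negative-index loop above (illustrative only,
// header offsets omitted): both pointers are advanced past the compared
// region and a single counter runs from -length up to 0, so the loop needs
// no separate end-of-range compare.
static bool IllustrativeCharsEqual(const unsigned char* left,
                                   const unsigned char* right,
                                   int length) {
  left += length;
  right += length;
  for (int index = -length; index != 0; index++) {
    if (left[index] != right[index]) return false;
  }
  return true;
}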
4122 void StringCompareStub::Generate(MacroAssembler* masm) {
4125 Counters* counters = masm->isolate()->counters();
4127 // Stack frame on entry.
4128 // sp[0]: right string
4129 // sp[4]: left string
4130 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
4131 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
4134 __ Branch(&not_same, ne, a0, Operand(a1));
4135 STATIC_ASSERT(EQUAL == 0);
4136 STATIC_ASSERT(kSmiTag == 0);
4137 __ li(v0, Operand(Smi::FromInt(EQUAL)));
4138 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
4143 // Check that both objects are sequential ASCII strings.
4144 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
4146 // Compare flat ASCII strings natively. Remove arguments from stack first.
4147 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
4148 __ Addu(sp, sp, Operand(2 * kPointerSize));
4149 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
4152 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
4156 void ArrayPushStub::Generate(MacroAssembler* masm) {
4157 Register receiver = a0;
4158 Register scratch = a1;
4160 int argc = arguments_count();
4163 // Nothing to do, just return the length.
4164 __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
4165 __ DropAndRet(argc + 1);
4169 Isolate* isolate = masm->isolate();
4172 __ TailCallExternalReference(
4173 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4177 Label call_builtin, attempt_to_grow_elements, with_write_barrier;
4179 Register elements = t2;
4180 Register end_elements = t1;
4181 // Get the elements array of the object.
4182 __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
4184 if (IsFastSmiOrObjectElementsKind(elements_kind())) {
4185 // Check that the elements are in fast mode and writable.
4186 __ CheckMap(elements,
4188 Heap::kFixedArrayMapRootIndex,
4193 // Get the array's length into scratch and calculate new length.
4194 __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
4195 __ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));
4197 // Get the elements' length.
4198 __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
4200 const int kEndElementsOffset =
4201 FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
4203 if (IsFastSmiOrObjectElementsKind(elements_kind())) {
4204 // Check if we could survive without allocation.
4205 __ Branch(&attempt_to_grow_elements, gt, scratch, Operand(t0));
4207 // Check if value is a smi.
4208 __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
4209 __ JumpIfNotSmi(t0, &with_write_barrier);
4212 // We may need a register containing the address end_elements below,
4213 // so write back the value in end_elements.
4214 __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
4215 __ Addu(end_elements, elements, end_elements);
4216 __ Addu(end_elements, end_elements, kEndElementsOffset);
4217 __ sw(t0, MemOperand(end_elements));
4219 // Check if we could survive without allocation.
4220 __ Branch(&call_builtin, gt, scratch, Operand(t0));
4222 __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
4223 __ StoreNumberToDoubleElements(t0, scratch, elements, a3, t1, a2,
4224 &call_builtin, argc * kDoubleSize);
4228 __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
4229 __ mov(v0, scratch);
4230 __ DropAndRet(argc + 1);
4232 if (IsFastDoubleElementsKind(elements_kind())) {
4233 __ bind(&call_builtin);
4234 __ TailCallExternalReference(
4235 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4239 __ bind(&with_write_barrier);
4241 if (IsFastSmiElementsKind(elements_kind())) {
4242 if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
4244 __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
4245 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4246 __ Branch(&call_builtin, eq, t3, Operand(at));
4248 ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
4249 ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4250 __ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
4251 __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
4252 __ lw(a3, ContextOperand(a3, Context::JS_ARRAY_MAPS_INDEX));
4253 const int header_size = FixedArrayBase::kHeaderSize;
4254 // Verify that the object can be transitioned in place.
4255 const int origin_offset = header_size + elements_kind() * kPointerSize;
4256 __ lw(a2, FieldMemOperand(receiver, origin_offset));
4257 __ lw(at, FieldMemOperand(a3, HeapObject::kMapOffset));
4258 __ Branch(&call_builtin, ne, a2, Operand(at));
4261 const int target_offset = header_size + target_kind * kPointerSize;
4262 __ lw(a3, FieldMemOperand(a3, target_offset));
4263 __ mov(a2, receiver);
4264 ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
4265 masm, DONT_TRACK_ALLOCATION_SITE, NULL);
4269 __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
4272 // We may need a register containing the address end_elements below, so write
4273 // back the value in end_elements.
4274 __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
4275 __ Addu(end_elements, elements, end_elements);
4276 __ Addu(end_elements, end_elements, kEndElementsOffset);
4277 __ sw(t0, MemOperand(end_elements));
4279 __ RecordWrite(elements,
4284 EMIT_REMEMBERED_SET,
4286 __ mov(v0, scratch);
4287 __ DropAndRet(argc + 1);
4289 __ bind(&attempt_to_grow_elements);
4290 // scratch: array's length + 1.
4292 if (!FLAG_inline_new) {
4293 __ bind(&call_builtin);
4294 __ TailCallExternalReference(
4295 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4299 __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
4300 // Growing elements that are SMI-only requires special handling in case the
4301 // new element is non-Smi. For now, delegate to the builtin.
4302 if (IsFastSmiElementsKind(elements_kind())) {
4303 __ JumpIfNotSmi(a2, &call_builtin);
4306 // We could be lucky and the elements array could be at the top of new-space.
4307 // In this case we can just grow it in place by moving the allocation
4308 // pointer up.
4309 ExternalReference new_space_allocation_top =
4310 ExternalReference::new_space_allocation_top_address(isolate);
4311 ExternalReference new_space_allocation_limit =
4312 ExternalReference::new_space_allocation_limit_address(isolate);
4314 const int kAllocationDelta = 4;
4315 ASSERT(kAllocationDelta >= argc);
4316 // Load top and check if it is the end of elements.
4317 __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
4318 __ Addu(end_elements, elements, end_elements);
4319 __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
4320 __ li(t0, Operand(new_space_allocation_top));
4321 __ lw(a3, MemOperand(t0));
4322 __ Branch(&call_builtin, ne, a3, Operand(end_elements));
4324 __ li(t3, Operand(new_space_allocation_limit));
4325 __ lw(t3, MemOperand(t3));
4326 __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
4327 __ Branch(&call_builtin, hi, a3, Operand(t3));
4329 // We fit and could grow elements.
4330 // Update new_space_allocation_top.
4331 __ sw(a3, MemOperand(t0));
4332 // Push the argument.
4333 __ sw(a2, MemOperand(end_elements));
4334 // Fill the rest with holes.
4335 __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
4336 for (int i = 1; i < kAllocationDelta; i++) {
4337 __ sw(a3, MemOperand(end_elements, i * kPointerSize));
4340 // Update elements' and array's sizes.
4341 __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
4342 __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
4343 __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
4344 __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
4346 // Elements are in new space, so write barrier is not required.
4347 __ mov(v0, scratch);
4348 __ DropAndRet(argc + 1);
4350 __ bind(&call_builtin);
4351 __ TailCallExternalReference(
4352 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
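// Illustrative sketch (hypothetical names, not the stub itself) of the
// grow-in-place fast path above: the elements store can only be extended
// directly when its end coincides with the new-space allocation top, and
// bumping the top by kAllocationDelta words must still stay within the
// allocation limit; otherwise the stub defers to the builtin.
struct IllustrativeNewSpace {
  char* top;    // Stand-in for new_space_allocation_top.
  char* limit;  // Stand-in for new_space_allocation_limit.
};

static bool IllustrativeTryGrowInPlace(IllustrativeNewSpace* space,
                                       char* elements_end,
                                       int delta_bytes) {
  if (space->top != elements_end) return false;  // Not the last allocated object.
  if (space->top + delta_bytes > space->limit) return false;  // Not enough room.
  space->top += delta_bytes;  // Claim the extra words for the elements store.
  return true;
}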
4356 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
4357 // ----------- S t a t e -------------
4360 // -- ra : return address
4361 // -----------------------------------
4362 Isolate* isolate = masm->isolate();
4364 // Load a2 with the allocation site. We stick an undefined dummy value here
4365 // and replace it with the real allocation site later when we instantiate this
4366 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
4367 __ li(a2, handle(isolate->heap()->undefined_value()));
4369 // Make sure that we actually patched the allocation site.
4370 if (FLAG_debug_code) {
4371 __ And(at, a2, Operand(kSmiTagMask));
4372 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
4373 __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
4374 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
4375 __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
4378 // Tail call into the stub that handles binary operations with allocation
4380 BinaryOpWithAllocationSiteStub stub(state_);
4381 __ TailCallStub(&stub);
4385 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
4386 ASSERT(state_ == CompareIC::SMI);
4389 __ JumpIfNotSmi(a2, &miss);
4391 if (GetCondition() == eq) {
4392 // For equality we do not care about the sign of the result.
4393 __ Ret(USE_DELAY_SLOT);
4394 __ Subu(v0, a0, a1);
4396 // Untag before subtracting to avoid handling overflow.
4399 __ Ret(USE_DELAY_SLOT);
4400 __ Subu(v0, a1, a0);
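// Note on the untag-before-subtract above: a smi is its 31-bit payload shifted
// left by one, so subtracting two tagged smis directly could wrap around in 32
// bits for operands near the smi limits. After untagging (arithmetic shift
// right by one) the difference always fits, and its sign alone provides the
// ordered comparison result returned in v0.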
4408 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
4409 ASSERT(state_ == CompareIC::NUMBER);
4412 Label unordered, maybe_undefined1, maybe_undefined2;
4415 if (left_ == CompareIC::SMI) {
4416 __ JumpIfNotSmi(a1, &miss);
4418 if (right_ == CompareIC::SMI) {
4419 __ JumpIfNotSmi(a0, &miss);
4422 // Inlining the double comparison and falling back to the general compare
4423 // stub if NaN is involved.
4424 // Load left and right operand.
4425 Label done, left, left_smi, right_smi;
4426 __ JumpIfSmi(a0, &right_smi);
4427 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
4429 __ Subu(a2, a0, Operand(kHeapObjectTag));
4430 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
4432 __ bind(&right_smi);
4433 __ SmiUntag(a2, a0); // Can't clobber a0 yet.
4434 FPURegister single_scratch = f6;
4435 __ mtc1(a2, single_scratch);
4436 __ cvt_d_w(f2, single_scratch);
4439 __ JumpIfSmi(a1, &left_smi);
4440 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
4442 __ Subu(a2, a1, Operand(kHeapObjectTag));
4443 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
4446 __ SmiUntag(a2, a1); // Can't clobber a1 yet.
4447 single_scratch = f8;
4448 __ mtc1(a2, single_scratch);
4449 __ cvt_d_w(f0, single_scratch);
4453 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
4454 Label fpu_eq, fpu_lt;
4455 // Test if equal, and also handle the unordered/NaN case.
4456 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
4458 // Test if less (unordered case is already handled).
4459 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
4461 // Otherwise it's greater, so just fall thru, and return.
4462 ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
4463 __ Ret(USE_DELAY_SLOT);
4464 __ li(v0, Operand(GREATER));
4467 __ Ret(USE_DELAY_SLOT);
4468 __ li(v0, Operand(EQUAL));
4471 __ Ret(USE_DELAY_SLOT);
4472 __ li(v0, Operand(LESS));
4474 __ bind(&unordered);
4475 __ bind(&generic_stub);
4476 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
4477 CompareIC::GENERIC);
4478 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
4480 __ bind(&maybe_undefined1);
4481 if (Token::IsOrderedRelationalCompareOp(op_)) {
4482 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4483 __ Branch(&miss, ne, a0, Operand(at));
4484 __ JumpIfSmi(a1, &unordered);
4485 __ GetObjectType(a1, a2, a2);
4486 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
4490 __ bind(&maybe_undefined2);
4491 if (Token::IsOrderedRelationalCompareOp(op_)) {
4492 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4493 __ Branch(&unordered, eq, a1, Operand(at));
4501 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
4502 ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
4505 // Registers containing left and right operands respectively.
4507 Register right = a0;
4511 // Check that both operands are heap objects.
4512 __ JumpIfEitherSmi(left, right, &miss);
4514 // Check that both operands are internalized strings.
4515 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4516 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4517 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4518 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4519 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4520 __ Or(tmp1, tmp1, Operand(tmp2));
4521 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4522 __ Branch(&miss, ne, at, Operand(zero_reg));
4524 // Make sure a0 is non-zero. At this point input operands are
4525 // guaranteed to be non-zero.
4526 ASSERT(right.is(a0));
4527 STATIC_ASSERT(EQUAL == 0);
4528 STATIC_ASSERT(kSmiTag == 0);
4530 // Internalized strings are compared by identity.
4531 __ Ret(ne, left, Operand(right));
4532 ASSERT(is_int16(EQUAL));
4533 __ Ret(USE_DELAY_SLOT);
4534 __ li(v0, Operand(Smi::FromInt(EQUAL)));
4541 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
4542 ASSERT(state_ == CompareIC::UNIQUE_NAME);
4543 ASSERT(GetCondition() == eq);
4546 // Registers containing left and right operands respectively.
4548 Register right = a0;
4552 // Check that both operands are heap objects.
4553 __ JumpIfEitherSmi(left, right, &miss);
4555 // Check that both operands are unique names. This leaves the instance
4556 // types loaded in tmp1 and tmp2.
4557 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4558 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4559 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4560 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4562 __ JumpIfNotUniqueName(tmp1, &miss);
4563 __ JumpIfNotUniqueName(tmp2, &miss);
4568 // Unique names are compared by identity.
4570 __ Branch(&done, ne, left, Operand(right));
4571 // Make sure a0 is non-zero. At this point input operands are
4572 // guaranteed to be non-zero.
4573 ASSERT(right.is(a0));
4574 STATIC_ASSERT(EQUAL == 0);
4575 STATIC_ASSERT(kSmiTag == 0);
4576 __ li(v0, Operand(Smi::FromInt(EQUAL)));
4585 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
4586 ASSERT(state_ == CompareIC::STRING);
4589 bool equality = Token::IsEqualityOp(op_);
4591 // Registers containing left and right operands respectively.
4593 Register right = a0;
4600 // Check that both operands are heap objects.
4601 __ JumpIfEitherSmi(left, right, &miss);
4603 // Check that both operands are strings. This leaves the instance
4604 // types loaded in tmp1 and tmp2.
4605 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4606 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4607 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4608 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4609 STATIC_ASSERT(kNotStringTag != 0);
4610 __ Or(tmp3, tmp1, tmp2);
4611 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
4612 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
4614 // Fast check for identical strings.
4615 Label left_ne_right;
4616 STATIC_ASSERT(EQUAL == 0);
4617 STATIC_ASSERT(kSmiTag == 0);
4618 __ Branch(&left_ne_right, ne, left, Operand(right));
4619 __ Ret(USE_DELAY_SLOT);
4620 __ mov(v0, zero_reg); // In the delay slot.
4621 __ bind(&left_ne_right);
4623 // Handle not identical strings.
4625 // Check that both strings are internalized strings. If they are, we're done
4626 // because we already know they are not identical. We know they are both
4627 // strings.
4629 ASSERT(GetCondition() == eq);
4630 STATIC_ASSERT(kInternalizedTag == 0);
4631 __ Or(tmp3, tmp1, Operand(tmp2));
4632 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
4634 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
4635 // Make sure a0 is non-zero. At this point input operands are
4636 // guaranteed to be non-zero.
4637 ASSERT(right.is(a0));
4638 __ Ret(USE_DELAY_SLOT);
4639 __ mov(v0, a0); // In the delay slot.
4640 __ bind(&is_symbol);
4643 // Check that both strings are sequential ASCII.
4645 __ JumpIfBothInstanceTypesAreNotSequentialAscii(
4646 tmp1, tmp2, tmp3, tmp4, &runtime);
4648 // Compare flat ASCII strings. Returns when done.
4650 StringCompareStub::GenerateFlatAsciiStringEquals(
4651 masm, left, right, tmp1, tmp2, tmp3);
4653 StringCompareStub::GenerateCompareFlatAsciiStrings(
4654 masm, left, right, tmp1, tmp2, tmp3, tmp4);
4657 // Handle more complex cases in runtime.
4659 __ Push(left, right);
4661 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
4663 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
4671 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
4672 ASSERT(state_ == CompareIC::OBJECT);
4674 __ And(a2, a1, Operand(a0));
4675 __ JumpIfSmi(a2, &miss);
4677 __ GetObjectType(a0, a2, a2);
4678 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
4679 __ GetObjectType(a1, a2, a2);
4680 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
4682 ASSERT(GetCondition() == eq);
4683 __ Ret(USE_DELAY_SLOT);
4684 __ subu(v0, a0, a1);
4691 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
4694 __ JumpIfSmi(a2, &miss);
4695 __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
4696 __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
4697 __ Branch(&miss, ne, a2, Operand(known_map_));
4698 __ Branch(&miss, ne, a3, Operand(known_map_));
4700 __ Ret(USE_DELAY_SLOT);
4701 __ subu(v0, a0, a1);
4708 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
4710 // Call the runtime system in a fresh internal frame.
4711 ExternalReference miss =
4712 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
4713 FrameScope scope(masm, StackFrame::INTERNAL);
4715 __ Push(ra, a1, a0);
4716 __ li(t0, Operand(Smi::FromInt(op_)));
4717 __ addiu(sp, sp, -kPointerSize);
4718 __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
4719 __ sw(t0, MemOperand(sp)); // In the delay slot.
4720 // Compute the entry point of the rewritten stub.
4721 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
4722 // Restore registers.
4729 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4730 // Make place for arguments to fit C calling convention. Most of the callers
4731 // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
4732 // so they handle stack restoring and we don't have to do that here.
4733 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
4734 // kCArgsSlotsSize stack space after the call.
4735 __ Subu(sp, sp, Operand(kCArgsSlotsSize));
4736 // Place the return address on the stack, making the call
4737 // GC safe. The RegExp backend also relies on this.
4738 __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
4739 __ Call(t9); // Call the C++ function.
4740 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
4742 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
4743 // In case of an error the return address may point to a memory area
4744 // filled with kZapValue by the GC.
4745 // Dereference the address and check for this.
4746 __ lw(t0, MemOperand(t9));
4747 __ Assert(ne, kReceivedInvalidReturnAddress, t0,
4748 Operand(reinterpret_cast<uint32_t>(kZapValue)));
4754 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4756 intptr_t loc =
4757 reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
4758 __ Move(t9, target);
4759 __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
4764 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4768 Register properties,
4770 Register scratch0) {
4771 ASSERT(name->IsUniqueName());
4772 // If names of slots in range from 1 to kProbes - 1 for the hash value are
4773 // not equal to the name and kProbes-th slot is not used (its name is the
4774 // undefined value), it guarantees the hash table doesn't contain the
4775 // property. It's true even if some slots represent deleted properties
4776 // (their names are the hole value).
4777 for (int i = 0; i < kInlinedProbes; i++) {
4778 // scratch0 points to properties hash.
4779 // Compute the masked index: (hash + i + i * i) & mask.
4780 Register index = scratch0;
4781 // Capacity is smi 2^n.
4782 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
4783 __ Subu(index, index, Operand(1));
4784 __ And(index, index, Operand(
4785 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
4787 // Scale the index by multiplying by the entry size.
4788 ASSERT(NameDictionary::kEntrySize == 3);
4789 __ sll(at, index, 1);
4790 __ Addu(index, index, at);
4792 Register entity_name = scratch0;
4793 // Having undefined at this place means the name is not contained.
4794 ASSERT_EQ(kSmiTagSize, 1);
4795 Register tmp = properties;
4796 __ sll(scratch0, index, 1);
4797 __ Addu(tmp, properties, scratch0);
4798 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4800 ASSERT(!tmp.is(entity_name));
4801 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
4802 __ Branch(done, eq, entity_name, Operand(tmp));
4804 // Load the hole ready for use below:
4805 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
4807 // Stop if found the property.
4808 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
4811 __ Branch(&good, eq, entity_name, Operand(tmp));
4813 // Check if the entry name is not a unique name.
4814 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4816 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4817 __ JumpIfNotUniqueName(entity_name, miss);
4820 // Restore the properties.
4822 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4825 const int spill_mask =
4826 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
4827 a2.bit() | a1.bit() | a0.bit() | v0.bit());
4829 __ MultiPush(spill_mask);
4830 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4831 __ li(a1, Operand(Handle<Name>(name)));
4832 NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
4835 __ MultiPop(spill_mask);
4837 __ Branch(done, eq, at, Operand(zero_reg));
4838 __ Branch(miss, ne, at, Operand(zero_reg));
4842 // Probe the name dictionary in the |elements| register. Jump to the
4843 // |done| label if a property with the given name is found. Jump to
4844 // the |miss| label otherwise.
4845 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
4846 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4852 Register scratch2) {
4853 ASSERT(!elements.is(scratch1));
4854 ASSERT(!elements.is(scratch2));
4855 ASSERT(!name.is(scratch1));
4856 ASSERT(!name.is(scratch2));
4858 __ AssertName(name);
4860 // Compute the capacity mask.
4861 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
4862 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
4863 __ Subu(scratch1, scratch1, Operand(1));
4865 // Generate an unrolled loop that performs a few probes before
4866 // giving up. Measurements done on Gmail indicate that 2 probes
4867 // cover ~93% of loads from dictionaries.
4868 for (int i = 0; i < kInlinedProbes; i++) {
4869 // Compute the masked index: (hash + i + i * i) & mask.
4870 __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4872 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4873 // the hash in a separate instruction. The value hash + i + i * i is right
4874 // shifted in the following and instruction.
4875 ASSERT(NameDictionary::GetProbeOffset(i) <
4876 1 << (32 - Name::kHashFieldOffset));
4877 __ Addu(scratch2, scratch2, Operand(
4878 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4880 __ srl(scratch2, scratch2, Name::kHashShift);
4881 __ And(scratch2, scratch1, scratch2);
4883 // Scale the index by multiplying by the element size.
4884 ASSERT(NameDictionary::kEntrySize == 3);
4885 // scratch2 = scratch2 * 3.
4887 __ sll(at, scratch2, 1);
4888 __ Addu(scratch2, scratch2, at);
4890 // Check if the key is identical to the name.
4891 __ sll(at, scratch2, 2);
4892 __ Addu(scratch2, elements, at);
4893 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
4894 __ Branch(done, eq, name, Operand(at));
4897 const int spill_mask =
4898 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
4899 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
4900 ~(scratch1.bit() | scratch2.bit());
4902 __ MultiPush(spill_mask);
4904 ASSERT(!elements.is(a1));
4906 __ Move(a0, elements);
4908 __ Move(a0, elements);
4911 NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
4913 __ mov(scratch2, a2);
4915 __ MultiPop(spill_mask);
4917 __ Branch(done, ne, at, Operand(zero_reg));
4918 __ Branch(miss, eq, at, Operand(zero_reg));
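// Minimal sketch of the probe-index computation shared by both lookup helpers
// (illustrative only; the entry comparison itself stays in the stub).
// probe_offset stands in for NameDictionary::GetProbeOffset(i); the comments
// above describe the masked index as (hash + i + i * i) & mask, and
// kEntrySize == 3 is asserted above, so the index is scaled by three words.
static int IllustrativeProbeSlot(unsigned hash,
                                 unsigned probe_offset,
                                 unsigned capacity_mask) {
  unsigned index = (hash + probe_offset) & capacity_mask;  // Masked index.
  return static_cast<int>(index * 3);  // Scale by NameDictionary::kEntrySize.
}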
4922 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4923 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4924 // we cannot call anything that could cause a GC from this stub.
4926 // result: NameDictionary to probe
4928 // dictionary: NameDictionary to probe.
4929 // index: will hold an index of entry if lookup is successful.
4930 // might alias with result_.
4932 // result_ is zero if lookup failed, non zero otherwise.
4934 Register result = v0;
4935 Register dictionary = a0;
4937 Register index = a2;
4940 Register undefined = t1;
4941 Register entry_key = t2;
4943 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4945 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
4946 __ sra(mask, mask, kSmiTagSize);
4947 __ Subu(mask, mask, Operand(1));
4949 __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4951 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4953 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4954 // Compute the masked index: (hash + i + i * i) & mask.
4955 // Capacity is smi 2^n.
4957 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4958 // the hash in a separate instruction. The value hash + i + i * i is right
4959 // shifted in the following and instruction.
4960 ASSERT(NameDictionary::GetProbeOffset(i) <
4961 1 << (32 - Name::kHashFieldOffset));
4962 __ Addu(index, hash, Operand(
4963 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4965 __ mov(index, hash);
4967 __ srl(index, index, Name::kHashShift);
4968 __ And(index, mask, index);
4970 // Scale the index by multiplying by the entry size.
4971 ASSERT(NameDictionary::kEntrySize == 3);
4974 __ sll(index, index, 1);
4975 __ Addu(index, index, at);
4978 ASSERT_EQ(kSmiTagSize, 1);
4979 __ sll(index, index, 2);
4980 __ Addu(index, index, dictionary);
4981 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
4983 // Having undefined at this place means the name is not contained.
4984 __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
4986 // Stop if found the property.
4987 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
4989 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
4990 // Check if the entry name is not a unique name.
4991 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4993 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4994 __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
4998 __ bind(&maybe_in_dictionary);
4999 // If we are doing negative lookup then probing failure should be
5000 // treated as a lookup success. For positive lookup probing failure
5001 // should be treated as lookup failure.
5002 if (mode_ == POSITIVE_LOOKUP) {
5003 __ Ret(USE_DELAY_SLOT);
5004 __ mov(result, zero_reg);
5007 __ bind(&in_dictionary);
5008 __ Ret(USE_DELAY_SLOT);
5011 __ bind(&not_in_dictionary);
5012 __ Ret(USE_DELAY_SLOT);
5013 __ mov(result, zero_reg);
5017 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
5019 StoreBufferOverflowStub stub1(kDontSaveFPRegs);
5020 stub1.GetCode(isolate);
5021 // Hydrogen code stubs need stub2 at snapshot time.
5022 StoreBufferOverflowStub stub2(kSaveFPRegs);
5023 stub2.GetCode(isolate);
5027 bool CodeStub::CanUseFPRegisters() {
5028 return true; // FPU is a base requirement for V8.
5032 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
5033 // the value has just been written into the object; now this stub makes sure
5034 // we keep the GC informed. The word in the object where the value has been
5035 // written is in the address register.
5036 void RecordWriteStub::Generate(MacroAssembler* masm) {
5037 Label skip_to_incremental_noncompacting;
5038 Label skip_to_incremental_compacting;
5040 // The first two branch+nop instructions are generated with labels so as to
5041 // get the offset fixed up correctly by the bind(Label*) call. We patch it
5042 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
5043 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
5044 // incremental heap marking.
5045 // See RecordWriteStub::Patch for details.
5046 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
5048 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
5051 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
5052 __ RememberedSetHelper(object_,
5056 MacroAssembler::kReturnAtEnd);
5060 __ bind(&skip_to_incremental_noncompacting);
5061 GenerateIncremental(masm, INCREMENTAL);
5063 __ bind(&skip_to_incremental_compacting);
5064 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
5066 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
5067 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
5069 PatchBranchIntoNop(masm, 0);
5070 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
5074 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
5077 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
5078 Label dont_need_remembered_set;
5080 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
5081 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
5083 &dont_need_remembered_set);
5085 __ CheckPageFlag(regs_.object(),
5087 1 << MemoryChunk::SCAN_ON_SCAVENGE,
5089 &dont_need_remembered_set);
5091 // First notify the incremental marker if necessary, then update the
5092 // remembered set.
5093 CheckNeedsToInformIncrementalMarker(
5094 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
5095 InformIncrementalMarker(masm, mode);
5096 regs_.Restore(masm);
5097 __ RememberedSetHelper(object_,
5101 MacroAssembler::kReturnAtEnd);
5103 __ bind(&dont_need_remembered_set);
5106 CheckNeedsToInformIncrementalMarker(
5107 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
5108 InformIncrementalMarker(masm, mode);
5109 regs_.Restore(masm);
5114 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
5115 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
5116 int argument_count = 3;
5117 __ PrepareCallCFunction(argument_count, regs_.scratch0());
5118 Register address =
5119 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
5120 ASSERT(!address.is(regs_.object()));
5121 ASSERT(!address.is(a0));
5122 __ Move(address, regs_.address());
5123 __ Move(a0, regs_.object());
5124 __ Move(a1, address);
5125 __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
5127 AllowExternalCallThatCantCauseGC scope(masm);
5128 if (mode == INCREMENTAL_COMPACTION) {
5130 ExternalReference::incremental_evacuation_record_write_function(
5134 ASSERT(mode == INCREMENTAL);
5136 ExternalReference::incremental_marking_record_write_function(
5140 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
5144 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
5145 MacroAssembler* masm,
5146 OnNoNeedToInformIncrementalMarker on_no_need,
5147 Mode mode) {
5148 Label on_black;
5149 Label need_incremental;
5150 Label need_incremental_pop_scratch;
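// The sequence below finds the object's page by masking off the low address
// bits, decrements that page's write barrier counter, and only drops into the
// incremental-marker path once the counter goes negative.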
5152 __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
5153 __ lw(regs_.scratch1(),
5154 MemOperand(regs_.scratch0(),
5155 MemoryChunk::kWriteBarrierCounterOffset));
5156 __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
5157 __ sw(regs_.scratch1(),
5158 MemOperand(regs_.scratch0(),
5159 MemoryChunk::kWriteBarrierCounterOffset));
5160 __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
5162 // Let's look at the color of the object: If it is not black we don't have
5163 // to inform the incremental marker.
5164 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
5166 regs_.Restore(masm);
5167 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
5168 __ RememberedSetHelper(object_,
5169                        address_,
5170                        value_,
5171                        save_fp_regs_mode_,
5172                        MacroAssembler::kReturnAtEnd);
5173 } else {
5174 __ Ret();
5175 }
5177 __ bind(&on_black);
5179 // Get the value from the slot.
5180 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
5182 if (mode == INCREMENTAL_COMPACTION) {
5183 Label ensure_not_white;
5185 __ CheckPageFlag(regs_.scratch0(),  // Contains value.
5186                  regs_.scratch1(),  // Scratch.
5187                  MemoryChunk::kEvacuationCandidateMask,
5188                  eq,
5189                  &ensure_not_white);
5191 __ CheckPageFlag(regs_.object(),
5192                  regs_.scratch1(),  // Scratch.
5193                  MemoryChunk::kSkipEvacuationSlotsRecordingMask,
5194                  eq,
5195                  &need_incremental);
5197 __ bind(&ensure_not_white);
5198 }
5200 // We need extra registers for this, so we push the object and the address
5201 // register temporarily.
5202 __ Push(regs_.object(), regs_.address());
5203 __ EnsureNotWhite(regs_.scratch0(), // The value.
5204 regs_.scratch1(), // Scratch.
5205 regs_.object(), // Scratch.
5206 regs_.address(), // Scratch.
5207 &need_incremental_pop_scratch);
5208 __ Pop(regs_.object(), regs_.address());
5210 regs_.Restore(masm);
5211 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
5212 __ RememberedSetHelper(object_,
5213                        address_,
5214                        value_,
5215                        save_fp_regs_mode_,
5216                        MacroAssembler::kReturnAtEnd);
5217 } else {
5218 __ Ret();
5219 }
5221 __ bind(&need_incremental_pop_scratch);
5222 __ Pop(regs_.object(), regs_.address());
5224 __ bind(&need_incremental);
5226 // Fall through when we need to inform the incremental marker.
5230 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
5231 // ----------- S t a t e -------------
5232 // -- a0 : element value to store
5233 // -- a3 : element index as smi
5234 // -- sp[0] : array literal index in function as smi
5235 // -- sp[4] : array literal
5236 // clobbers a1, a2, t0
5237 // -----------------------------------
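// In outline: object values stored into FAST_*_ELEMENTS arrays need the write
// barrier, smis stored into FAST_*_SMI_ELEMENTS arrays do not, doubles are
// routed through StoreNumberToDoubleElements, and everything else (e.g. a
// store requiring an elements transition) falls back to the runtime via
// slow_elements.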
5240 Label double_elements;
5241 Label smi_element;
5242 Label slow_elements;
5243 Label fast_elements;
5245 // Get array literal index, array literal and its map.
5246 __ lw(t0, MemOperand(sp, 0 * kPointerSize));
5247 __ lw(a1, MemOperand(sp, 1 * kPointerSize));
5248 __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
5250 __ CheckFastElements(a2, t1, &double_elements);
5251 // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
5252 __ JumpIfSmi(a0, &smi_element);
5253 __ CheckFastSmiElements(a2, t1, &fast_elements);
5255 // A store into the array literal requires an elements transition. Call into the runtime.
5257 __ bind(&slow_elements);
5259 __ Push(a1, a3, a0);
5260 __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5261 __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
5262 __ Push(t1, t0);
5263 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
5265 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
5266 __ bind(&fast_elements);
5267 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
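// The index in a3 is a smi, i.e. already shifted left by kSmiTagSize (one bit
// on this 32-bit port), so shifting by kPointerSizeLog2 - kSmiTagSize below
// turns it into a byte offset into the elements backing store.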
5268 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
5269 __ Addu(t2, t1, t2);
5270 __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5271 __ sw(a0, MemOperand(t2, 0));
5272 // Update the write barrier for the array store.
5273 __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
5274 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
5275 __ Ret(USE_DELAY_SLOT);
5276 __ mov(v0, a0);
5278 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
5279 // and value is Smi.
5280 __ bind(&smi_element);
5281 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
5282 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
5283 __ Addu(t2, t1, t2);
5284 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
5285 __ Ret(USE_DELAY_SLOT);
5286 __ mov(v0, a0);
5288 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
5289 __ bind(&double_elements);
5290 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
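// StoreNumberToDoubleElements converts the smi or heap number in a0 into a
// double and stores it into the double-elements backing store; a value that
// is not a number is expected to bail out to slow_elements above.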
5291 __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
5292 __ Ret(USE_DELAY_SLOT);
5293 __ mov(v0, a0);
5297 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
5298 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
5299 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
5300 int parameter_count_offset =
5301 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
5302 __ lw(a1, MemOperand(fp, parameter_count_offset));
5303 if (function_mode_ == JS_FUNCTION_STUB_MODE) {
5304 __ Addu(a1, a1, Operand(1));
5305 }
5306 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
5307 __ sll(a1, a1, kPointerSizeLog2);
5308 __ Ret(USE_DELAY_SLOT);
5309 __ Addu(sp, sp, a1);
5313 void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
5314 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
5315 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
5316 __ mov(a1, v0);
5317 int parameter_count_offset =
5318 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
5319 __ lw(a0, MemOperand(fp, parameter_count_offset));
5320 // The parameter count above includes the receiver for the arguments passed to
5321 // the deoptimization handler. Subtract the receiver for the parameter count
5322 // for the call.
5323 __ Subu(a0, a0, Operand(1));
5324 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
5325 ParameterCount argument_count(a0);
5326 __ InvokeFunction(a1, argument_count, JUMP_FUNCTION, NullCallWrapper());
5330 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
5331 if (masm->isolate()->function_entry_hook() != NULL) {
5332 ProfileEntryHookStub stub;
5340 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
5341 // The entry hook is a "push ra" instruction, followed by a call.
5342 // Note: on MIPS a "push" is 2 instructions.
5343 const int32_t kReturnAddressDistanceFromFunctionStart =
5344 Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
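// As a rough illustration: kInstrSize is 4 on MIPS, and the two-instruction
// push sequence (adjust sp, then store ra) adds 8 bytes, so the return
// address in ra points kCallTargetAddressOffset + 8 bytes past the function
// start; that is the distance subtracted from ra below to recover the
// function's entry address.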
5346 // This should contain all kJSCallerSaved registers.
5347 const RegList kSavedRegs =
5348 kJSCallerSaved | // Caller saved registers.
5349 s5.bit(); // Saved stack pointer.
5351 // We also save ra, so the count here is one higher than the mask indicates.
5352 const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
5354 // Save all caller-save registers as this may be called from anywhere.
5355 __ MultiPush(kSavedRegs | ra.bit());
5357 // Compute the function's address for the first argument.
5358 __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
5360 // The caller's return address is above the saved temporaries.
5361 // Grab that for the second argument to the hook.
5362 __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
5364 // Align the stack if necessary.
5365 int frame_alignment = masm->ActivationFrameAlignment();
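// ActivationFrameAlignment() is a power of two, so sp can be aligned by
// masking off its low bits; the pre-alignment sp is kept in s5 (the "saved
// stack pointer" slot in kSavedRegs above) so it can be restored afterwards.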
5366 if (frame_alignment > kPointerSize) {
5367 __ mov(s5, sp);
5368 ASSERT(IsPowerOf2(frame_alignment));
5369 __ And(sp, sp, Operand(-frame_alignment));
5370 }
5372 #if defined(V8_HOST_ARCH_MIPS)
5373 int32_t entry_hook =
5374 reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
5375 __ li(at, Operand(entry_hook));
5376 #else
5377 // Under the simulator we need to indirect the entry hook through a
5378 // trampoline function at a known address.
5379 // It additionally takes an isolate as a third parameter.
5380 __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
5382 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
5383 __ li(at, Operand(ExternalReference(&dispatcher,
5384                   ExternalReference::BUILTIN_CALL,
5385                   masm->isolate())));
5386 #endif
5387 // Call the entry hook.
5388 __ Call(at);
5389 // Restore the stack pointer if needed.
5390 if (frame_alignment > kPointerSize) {
5391 __ mov(sp, s5);
5392 }
5394 // Also pop ra to get Ret(0).
5395 __ MultiPop(kSavedRegs | ra.bit());
5396 __ Ret();
5397 }
5400 template<class T>
5401 static void CreateArrayDispatch(MacroAssembler* masm,
5402 AllocationSiteOverrideMode mode) {
5403 if (mode == DISABLE_ALLOCATION_SITES) {
5404 T stub(GetInitialFastElementsKind(), mode);
5405 __ TailCallStub(&stub);
5406 } else if (mode == DONT_OVERRIDE) {
5407 int last_index = GetSequenceIndexFromFastElementsKind(
5408 TERMINAL_FAST_ELEMENTS_KIND);
5409 for (int i = 0; i <= last_index; ++i) {
5410 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5411 T stub(kind);
5412 __ TailCallStub(&stub, eq, a3, Operand(kind));
5415 // If we reached this point there is a problem.
5416 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5423 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
5424 AllocationSiteOverrideMode mode) {
5425 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
5426 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
5427 // a0 - number of arguments
5428 // a1 - constructor?
5429 // sp[0] - last argument
5430 Label normal_sequence;
5431 if (mode == DONT_OVERRIDE) {
5432 ASSERT(FAST_SMI_ELEMENTS == 0);
5433 ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
5434 ASSERT(FAST_ELEMENTS == 2);
5435 ASSERT(FAST_HOLEY_ELEMENTS == 3);
5436 ASSERT(FAST_DOUBLE_ELEMENTS == 4);
5437 ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
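// These assertions pin down the fast ElementsKind numbering: packed kinds are
// even and the matching holey kind is the next (odd) value. The low-bit test
// below and the "+1" packed-to-holey transition later in this function both
// rely on that layout.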
5439 // is the low bit set? If so, we are holey and that is good.
5440 __ And(at, a3, Operand(1));
5441 __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
5444 // look at the first argument
5445 __ lw(t1, MemOperand(sp, 0));
5446 __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
5448 if (mode == DISABLE_ALLOCATION_SITES) {
5449 ElementsKind initial = GetInitialFastElementsKind();
5450 ElementsKind holey_initial = GetHoleyElementsKind(initial);
5452 ArraySingleArgumentConstructorStub stub_holey(holey_initial,
5453 DISABLE_ALLOCATION_SITES);
5454 __ TailCallStub(&stub_holey);
5456 __ bind(&normal_sequence);
5457 ArraySingleArgumentConstructorStub stub(initial,
5458 DISABLE_ALLOCATION_SITES);
5459 __ TailCallStub(&stub);
5460 } else if (mode == DONT_OVERRIDE) {
5461 // We are going to create a holey array, but our kind is non-holey.
5462 // Fix kind and retry (only if we have an allocation site in the cell).
5463 __ Addu(a3, a3, Operand(1));
5465 if (FLAG_debug_code) {
5466 __ lw(t1, FieldMemOperand(a2, 0));
5467 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
5468 __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
5471 // Save the resulting elements kind in type info. We can't just store a3
5472 // in the AllocationSite::transition_info field because elements kind is
5473 // restricted to a portion of the field...upper bits need to be left alone.
5474 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5475 __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5476 __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
5477 __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
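// A small worked example, assuming the layout guaranteed by the STATIC_ASSERT
// above: transitioning FAST_SMI_ELEMENTS (0) to FAST_HOLEY_SMI_ELEMENTS (1)
// only flips the low bit of the kind, so adding
// Smi::FromInt(kFastElementsKindPackedToHoley) to the whole transition_info
// smi updates the kind bits without a carry into the upper bits.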
5480 __ bind(&normal_sequence);
5481 int last_index = GetSequenceIndexFromFastElementsKind(
5482 TERMINAL_FAST_ELEMENTS_KIND);
5483 for (int i = 0; i <= last_index; ++i) {
5484 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5485 ArraySingleArgumentConstructorStub stub(kind);
5486 __ TailCallStub(&stub, eq, a3, Operand(kind));
5489 // If we reached this point there is a problem.
5490 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5498 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
5499 int to_index = GetSequenceIndexFromFastElementsKind(
5500 TERMINAL_FAST_ELEMENTS_KIND);
5501 for (int i = 0; i <= to_index; ++i) {
5502 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5503 T stub(kind);
5504 stub.GetCode(isolate);
5505 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
5506 T stub1(kind, DISABLE_ALLOCATION_SITES);
5507 stub1.GetCode(isolate);
5513 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
5514 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5515     isolate);
5516 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5517     isolate);
5518 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5519     isolate);
5523 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
5524     Isolate* isolate) {
5525 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
5526 for (int i = 0; i < 2; i++) {
5527 // For internal arrays we only need a few things.
5528 InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
5529 stubh1.GetCode(isolate);
5530 InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
5531 stubh2.GetCode(isolate);
5532 InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
5533 stubh3.GetCode(isolate);
5538 void ArrayConstructorStub::GenerateDispatchToArrayStub(
5539 MacroAssembler* masm,
5540 AllocationSiteOverrideMode mode) {
5541 if (argument_count_ == ANY) {
5542 Label not_zero_case, not_one_case;
5544 __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
5545 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5547 __ bind(&not_zero_case);
5548 __ Branch(&not_one_case, gt, a0, Operand(1));
5549 CreateArrayDispatchOneArgument(masm, mode);
5551 __ bind(&not_one_case);
5552 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5553 } else if (argument_count_ == NONE) {
5554 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5555 } else if (argument_count_ == ONE) {
5556 CreateArrayDispatchOneArgument(masm, mode);
5557 } else if (argument_count_ == MORE_THAN_ONE) {
5558 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5565 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5566 // ----------- S t a t e -------------
5567 // -- a0 : argc (only if argument_count_ == ANY)
5568 // -- a1 : constructor
5569 // -- a2 : type info cell
5570 // -- sp[0] : return address
5571 // -- sp[4] : last argument
5572 // -----------------------------------
5573 if (FLAG_debug_code) {
5574 // The array construct code is only set for the global and natives
5575 // builtin Array functions which always have maps.
5577 // Initial map for the builtin Array function should be a map.
5578 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5579 // Will both indicate a NULL and a Smi.
5580 __ SmiTst(a3, at);
5581 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
5582 at, Operand(zero_reg));
5583 __ GetObjectType(a3, a3, t0);
5584 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
5585 t0, Operand(MAP_TYPE));
5587 // We should either have undefined in a2 or a valid cell.
5588 Label okay_here;
5589 Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
5590 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5591 __ Branch(&okay_here, eq, a2, Operand(at));
5592 __ lw(a3, FieldMemOperand(a2, 0));
5593 __ Assert(eq, kExpectedPropertyCellInRegisterA2,
5594 a3, Operand(cell_map));
5595 __ bind(&okay_here);
5596 }
5598 Label no_info;
5599 // Get the elements kind and case on that.
5600 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5601 __ Branch(&no_info, eq, a2, Operand(at));
5602 __ lw(a2, FieldMemOperand(a2, Cell::kValueOffset));
5604 // If the type cell is undefined, or contains anything other than an
5605 // AllocationSite, call an array constructor that doesn't use AllocationSites.
5606 __ lw(t0, FieldMemOperand(a2, 0));
5607 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
5608 __ Branch(&no_info, ne, t0, Operand(at));
5610 __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5612 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5613 __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
5614 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5616 __ bind(&no_info);
5617 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5621 void InternalArrayConstructorStub::GenerateCase(
5622 MacroAssembler* masm, ElementsKind kind) {
5624 InternalArrayNoArgumentConstructorStub stub0(kind);
5625 __ TailCallStub(&stub0, lo, a0, Operand(1));
5627 InternalArrayNArgumentsConstructorStub stubN(kind);
5628 __ TailCallStub(&stubN, hi, a0, Operand(1));
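// Dispatch on the argument count in a0: fewer than one argument tail-calls
// the no-argument stub, more than one tail-calls the N-arguments stub, and
// exactly one falls through to the single-argument handling below.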
5630 if (IsFastPackedElementsKind(kind)) {
5631 // We might need to create a holey array;
5632 // look at the first argument.
5633 __ lw(at, MemOperand(sp, 0));
5635 InternalArraySingleArgumentConstructorStub
5636 stub1_holey(GetHoleyElementsKind(kind));
5637 __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
5640 InternalArraySingleArgumentConstructorStub stub1(kind);
5641 __ TailCallStub(&stub1);
5645 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5646 // ----------- S t a t e -------------
5647 // -- a0 : argc
5648 // -- a1 : constructor
5649 // -- sp[0] : return address
5650 // -- sp[4] : last argument
5651 // -----------------------------------
5653 if (FLAG_debug_code) {
5654 // The array construct code is only set for the global and natives
5655 // builtin Array functions which always have maps.
5657 // Initial map for the builtin Array function should be a map.
5658 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5659 // Will both indicate a NULL and a Smi.
5660 __ SmiTst(a3, at);
5661 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
5662 at, Operand(zero_reg));
5663 __ GetObjectType(a3, a3, t0);
5664 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
5665 t0, Operand(MAP_TYPE));
5668 // Figure out the right elements kind.
5669 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5671 // Load the map's "bit field 2" into a3. We only need the first byte,
5672 // but the following bit field extraction takes care of that anyway.
5673 __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
5674 // Retrieve elements_kind from bit field 2.
5675 __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
5677 if (FLAG_debug_code) {
5678 Label done;
5679 __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
5680 __ Assert(
5681     eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
5682     a3, Operand(FAST_HOLEY_ELEMENTS));
5683 __ bind(&done);
5684 }
5686 Label fast_elements_case;
5687 __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
5688 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
5690 __ bind(&fast_elements_case);
5691 GenerateCase(masm, FAST_ELEMENTS);
5697 } } // namespace v8::internal
5699 #endif // V8_TARGET_ARCH_MIPS