1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #if V8_TARGET_ARCH_MIPS
9 #include "bootstrapper.h"
10 #include "code-stubs.h"
12 #include "regexp-macro-assembler.h"
13 #include "stub-cache.h"
19 void FastNewClosureStub::InitializeInterfaceDescriptor(
20 CodeStubInterfaceDescriptor* descriptor) {
21 static Register registers[] = { a2 };
22 descriptor->register_param_count_ = 1;
23 descriptor->register_params_ = registers;
24 descriptor->deoptimization_handler_ =
25 Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
29 void FastNewContextStub::InitializeInterfaceDescriptor(
30 CodeStubInterfaceDescriptor* descriptor) {
31 static Register registers[] = { a1 };
32 descriptor->register_param_count_ = 1;
33 descriptor->register_params_ = registers;
34 descriptor->deoptimization_handler_ = NULL;
38 void ToNumberStub::InitializeInterfaceDescriptor(
39 CodeStubInterfaceDescriptor* descriptor) {
40 static Register registers[] = { a0 };
41 descriptor->register_param_count_ = 1;
42 descriptor->register_params_ = registers;
43 descriptor->deoptimization_handler_ = NULL;
47 void NumberToStringStub::InitializeInterfaceDescriptor(
48 CodeStubInterfaceDescriptor* descriptor) {
49 static Register registers[] = { a0 };
50 descriptor->register_param_count_ = 1;
51 descriptor->register_params_ = registers;
52 descriptor->deoptimization_handler_ =
53 Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
57 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
58 CodeStubInterfaceDescriptor* descriptor) {
59 static Register registers[] = { a3, a2, a1 };
60 descriptor->register_param_count_ = 3;
61 descriptor->register_params_ = registers;
62 descriptor->deoptimization_handler_ =
63 Runtime::FunctionForId(
64 Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
68 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
69 CodeStubInterfaceDescriptor* descriptor) {
70 static Register registers[] = { a3, a2, a1, a0 };
71 descriptor->register_param_count_ = 4;
72 descriptor->register_params_ = registers;
73 descriptor->deoptimization_handler_ =
74 Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
78 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
79 CodeStubInterfaceDescriptor* descriptor) {
80 static Register registers[] = { a2, a3 };
81 descriptor->register_param_count_ = 2;
82 descriptor->register_params_ = registers;
83 descriptor->deoptimization_handler_ = NULL;
87 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
88 CodeStubInterfaceDescriptor* descriptor) {
89 static Register registers[] = { a1, a0 };
90 descriptor->register_param_count_ = 2;
91 descriptor->register_params_ = registers;
92 descriptor->deoptimization_handler_ =
93 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
97 void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
98 CodeStubInterfaceDescriptor* descriptor) {
99 static Register registers[] = {a1, a0 };
100 descriptor->register_param_count_ = 2;
101 descriptor->register_params_ = registers;
102 descriptor->deoptimization_handler_ =
103 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
107 void RegExpConstructResultStub::InitializeInterfaceDescriptor(
108 CodeStubInterfaceDescriptor* descriptor) {
109 static Register registers[] = { a2, a1, a0 };
110 descriptor->register_param_count_ = 3;
111 descriptor->register_params_ = registers;
112 descriptor->deoptimization_handler_ =
113 Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
117 void LoadFieldStub::InitializeInterfaceDescriptor(
118 CodeStubInterfaceDescriptor* descriptor) {
119 static Register registers[] = { a0 };
120 descriptor->register_param_count_ = 1;
121 descriptor->register_params_ = registers;
122 descriptor->deoptimization_handler_ = NULL;
126 void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
127 CodeStubInterfaceDescriptor* descriptor) {
128 static Register registers[] = { a1 };
129 descriptor->register_param_count_ = 1;
130 descriptor->register_params_ = registers;
131 descriptor->deoptimization_handler_ = NULL;
135 void StringLengthStub::InitializeInterfaceDescriptor(
136 CodeStubInterfaceDescriptor* descriptor) {
137 static Register registers[] = { a0, a2 };
138 descriptor->register_param_count_ = 2;
139 descriptor->register_params_ = registers;
140 descriptor->deoptimization_handler_ = NULL;
144 void KeyedStringLengthStub::InitializeInterfaceDescriptor(
145 CodeStubInterfaceDescriptor* descriptor) {
146 static Register registers[] = { a1, a0 };
147 descriptor->register_param_count_ = 2;
148 descriptor->register_params_ = registers;
149 descriptor->deoptimization_handler_ = NULL;
153 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
154 CodeStubInterfaceDescriptor* descriptor) {
155 static Register registers[] = { a2, a1, a0 };
156 descriptor->register_param_count_ = 3;
157 descriptor->register_params_ = registers;
158 descriptor->deoptimization_handler_ =
159 FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
163 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
164 CodeStubInterfaceDescriptor* descriptor) {
165 static Register registers[] = { a0, a1 };
166 descriptor->register_param_count_ = 2;
167 descriptor->register_params_ = registers;
169 Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
170 descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
174 void CompareNilICStub::InitializeInterfaceDescriptor(
175 CodeStubInterfaceDescriptor* descriptor) {
176 static Register registers[] = { a0 };
177 descriptor->register_param_count_ = 1;
178 descriptor->register_params_ = registers;
179 descriptor->deoptimization_handler_ =
180 FUNCTION_ADDR(CompareNilIC_Miss);
181 descriptor->SetMissHandler(
182 ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
186 static void InitializeArrayConstructorDescriptor(
187 CodeStubInterfaceDescriptor* descriptor,
188 int constant_stack_parameter_count) {
190 // a0 -- number of arguments
192 // a2 -- allocation site with elements kind
193 static Register registers_variable_args[] = { a1, a2, a0 };
194 static Register registers_no_args[] = { a1, a2 };
196 if (constant_stack_parameter_count == 0) {
197 descriptor->register_param_count_ = 2;
198 descriptor->register_params_ = registers_no_args;
200 // stack param count needs (constructor pointer, and single argument)
201 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
202 descriptor->stack_parameter_count_ = a0;
203 descriptor->register_param_count_ = 3;
204 descriptor->register_params_ = registers_variable_args;
207 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
208 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
209 descriptor->deoptimization_handler_ =
210 Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
214 static void InitializeInternalArrayConstructorDescriptor(
215 CodeStubInterfaceDescriptor* descriptor,
216 int constant_stack_parameter_count) {
218 // a0 -- number of arguments
219 // a1 -- constructor function
220 static Register registers_variable_args[] = { a1, a0 };
221 static Register registers_no_args[] = { a1 };
223 if (constant_stack_parameter_count == 0) {
224 descriptor->register_param_count_ = 1;
225 descriptor->register_params_ = registers_no_args;
227 // stack param count needs (constructor pointer, and single argument)
228 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
229 descriptor->stack_parameter_count_ = a0;
230 descriptor->register_param_count_ = 2;
231 descriptor->register_params_ = registers_variable_args;
234 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
235 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
236 descriptor->deoptimization_handler_ =
237 Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
241 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
242 CodeStubInterfaceDescriptor* descriptor) {
243 InitializeArrayConstructorDescriptor(descriptor, 0);
247 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
248 CodeStubInterfaceDescriptor* descriptor) {
249 InitializeArrayConstructorDescriptor(descriptor, 1);
253 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
254 CodeStubInterfaceDescriptor* descriptor) {
255 InitializeArrayConstructorDescriptor(descriptor, -1);
259 void ToBooleanStub::InitializeInterfaceDescriptor(
260 CodeStubInterfaceDescriptor* descriptor) {
261 static Register registers[] = { a0 };
262 descriptor->register_param_count_ = 1;
263 descriptor->register_params_ = registers;
264 descriptor->deoptimization_handler_ =
265 FUNCTION_ADDR(ToBooleanIC_Miss);
266 descriptor->SetMissHandler(
267 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
271 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
272 CodeStubInterfaceDescriptor* descriptor) {
273 InitializeInternalArrayConstructorDescriptor(descriptor, 0);
277 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
278 CodeStubInterfaceDescriptor* descriptor) {
279 InitializeInternalArrayConstructorDescriptor(descriptor, 1);
283 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
284 CodeStubInterfaceDescriptor* descriptor) {
285 InitializeInternalArrayConstructorDescriptor(descriptor, -1);
289 void StoreGlobalStub::InitializeInterfaceDescriptor(
290 CodeStubInterfaceDescriptor* descriptor) {
291 static Register registers[] = { a1, a2, a0 };
292 descriptor->register_param_count_ = 3;
293 descriptor->register_params_ = registers;
294 descriptor->deoptimization_handler_ =
295 FUNCTION_ADDR(StoreIC_MissFromStubFailure);
299 void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
300 CodeStubInterfaceDescriptor* descriptor) {
301 static Register registers[] = { a0, a3, a1, a2 };
302 descriptor->register_param_count_ = 4;
303 descriptor->register_params_ = registers;
304 descriptor->deoptimization_handler_ =
305 FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
309 void BinaryOpICStub::InitializeInterfaceDescriptor(
310 CodeStubInterfaceDescriptor* descriptor) {
311 static Register registers[] = { a1, a0 };
312 descriptor->register_param_count_ = 2;
313 descriptor->register_params_ = registers;
314 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
315 descriptor->SetMissHandler(
316 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
320 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
321 CodeStubInterfaceDescriptor* descriptor) {
322 static Register registers[] = { a2, a1, a0 };
323 descriptor->register_param_count_ = 3;
324 descriptor->register_params_ = registers;
325 descriptor->deoptimization_handler_ =
326 FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
330 void StringAddStub::InitializeInterfaceDescriptor(
331 CodeStubInterfaceDescriptor* descriptor) {
332 static Register registers[] = { a1, a0 };
333 descriptor->register_param_count_ = 2;
334 descriptor->register_params_ = registers;
335 descriptor->deoptimization_handler_ =
336 Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
340 void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
342 CallInterfaceDescriptor* descriptor =
343 isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
344 static Register registers[] = { a1, // JSFunction
346 a0, // actual number of arguments
347 a2, // expected number of arguments
349 static Representation representations[] = {
350 Representation::Tagged(), // JSFunction
351 Representation::Tagged(), // context
352 Representation::Integer32(), // actual number of arguments
353 Representation::Integer32(), // expected number of arguments
355 descriptor->register_param_count_ = 4;
356 descriptor->register_params_ = registers;
357 descriptor->param_representations_ = representations;
360 CallInterfaceDescriptor* descriptor =
361 isolate->call_descriptor(Isolate::KeyedCall);
362 static Register registers[] = { cp, // context
365 static Representation representations[] = {
366 Representation::Tagged(), // context
367 Representation::Tagged(), // key
369 descriptor->register_param_count_ = 2;
370 descriptor->register_params_ = registers;
371 descriptor->param_representations_ = representations;
374 CallInterfaceDescriptor* descriptor =
375 isolate->call_descriptor(Isolate::NamedCall);
376 static Register registers[] = { cp, // context
379 static Representation representations[] = {
380 Representation::Tagged(), // context
381 Representation::Tagged(), // name
383 descriptor->register_param_count_ = 2;
384 descriptor->register_params_ = registers;
385 descriptor->param_representations_ = representations;
388 CallInterfaceDescriptor* descriptor =
389 isolate->call_descriptor(Isolate::CallHandler);
390 static Register registers[] = { cp, // context
393 static Representation representations[] = {
394 Representation::Tagged(), // context
395 Representation::Tagged(), // receiver
397 descriptor->register_param_count_ = 2;
398 descriptor->register_params_ = registers;
399 descriptor->param_representations_ = representations;
402 CallInterfaceDescriptor* descriptor =
403 isolate->call_descriptor(Isolate::ApiFunctionCall);
404 static Register registers[] = { a0, // callee
407 a1, // api_function_address
410 static Representation representations[] = {
411 Representation::Tagged(), // callee
412 Representation::Tagged(), // call_data
413 Representation::Tagged(), // holder
414 Representation::External(), // api_function_address
415 Representation::Tagged(), // context
417 descriptor->register_param_count_ = 5;
418 descriptor->register_params_ = registers;
419 descriptor->param_representations_ = representations;
424 #define __ ACCESS_MASM(masm)
427 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
430 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
436 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
441 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
442 // Update the static counter each time a new code stub is generated.
443 isolate()->counters()->code_stubs()->Increment();
445 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
446 int param_count = descriptor->register_param_count_;
448 // Call the runtime system in a fresh internal frame.
449 FrameScope scope(masm, StackFrame::INTERNAL);
450 ASSERT(descriptor->register_param_count_ == 0 ||
451 a0.is(descriptor->register_params_[param_count - 1]));
452 // Push arguments, adjust sp.
453 __ Subu(sp, sp, Operand(param_count * kPointerSize));
454 for (int i = 0; i < param_count; ++i) {
455 // Store argument to stack.
456 __ sw(descriptor->register_params_[i],
457 MemOperand(sp, (param_count-1-i) * kPointerSize));
459 ExternalReference miss = descriptor->miss_handler();
460 __ CallExternalReference(miss, descriptor->register_param_count_);
467 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
468 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
469 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
470 // scratch register. Destroys the source register. No GC occurs during this
471 // stub so you don't have to set up the frame.
472 class ConvertToDoubleStub : public PlatformCodeStub {
474 ConvertToDoubleStub(Isolate* isolate,
475 Register result_reg_1,
476 Register result_reg_2,
478 Register scratch_reg)
479 : PlatformCodeStub(isolate),
480 result1_(result_reg_1),
481 result2_(result_reg_2),
483 zeros_(scratch_reg) { }
491 // Minor key encoding in 16 bits.
492 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
493 class OpBits: public BitField<Token::Value, 2, 14> {};
495 Major MajorKey() { return ConvertToDouble; }
497 // Encode the parameters in a unique 16 bit value.
498 return result1_.code() +
499 (result2_.code() << 4) +
500 (source_.code() << 8) +
501 (zeros_.code() << 12);
504 void Generate(MacroAssembler* masm);
508 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
509 Register exponent, mantissa;
510 if (kArchEndian == kLittle) {
518 // Convert from Smi to integer.
519 __ sra(source_, source_, kSmiTagSize);
520 // Move sign bit from source to destination. This works because the sign bit
521 // in the exponent word of the double has the same position and polarity as
522 // the 2's complement sign bit in a Smi.
523 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
524 __ And(exponent, source_, Operand(HeapNumber::kSignMask));
525 // Subtract from 0 if source was negative.
526 __ subu(at, zero_reg, source_);
527 __ Movn(source_, at, exponent);
529 // We have -1, 0 or 1, which we treat specially. Register source_ contains
530 // absolute value: it is either equal to 1 (special case of -1 and 1),
531 // greater than 1 (not a special case) or less than 1 (special case of 0).
532 __ Branch(¬_special, gt, source_, Operand(1));
534 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
535 const uint32_t exponent_word_for_1 =
536 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
537 // Safe to use 'at' as dest reg here.
538 __ Or(at, exponent, Operand(exponent_word_for_1));
539 __ Movn(exponent, at, source_); // Write exp when source not 0.
540 // 1, 0 and -1 all have 0 for the second word.
541 __ Ret(USE_DELAY_SLOT);
542 __ mov(mantissa, zero_reg);
544 __ bind(¬_special);
545 // Count leading zeros.
546 // Gets the wrong answer for 0, but we already checked for that case above.
547 __ Clz(zeros_, source_);
548 // Compute exponent and or it into the exponent register.
549 // We use mantissa as a scratch register here.
550 __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
551 __ subu(mantissa, mantissa, zeros_);
552 __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
553 __ Or(exponent, exponent, mantissa);
555 // Shift up the source chopping the top bit off.
556 __ Addu(zeros_, zeros_, Operand(1));
557 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
558 __ sllv(source_, source_, zeros_);
559 // Compute lower part of fraction (last 12 bits).
560 __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
561 // And the top (top 20 bits).
562 __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
564 __ Ret(USE_DELAY_SLOT);
565 __ or_(exponent, exponent, source_);
569 void DoubleToIStub::Generate(MacroAssembler* masm) {
570 Label out_of_range, only_low, negate, done;
571 Register input_reg = source();
572 Register result_reg = destination();
574 int double_offset = offset();
575 // Account for saved regs if input is sp.
576 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
579 GetRegisterThatIsNotOneOf(input_reg, result_reg);
581 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
583 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
584 DoubleRegister double_scratch = kLithiumScratchDouble;
586 __ Push(scratch, scratch2, scratch3);
588 if (!skip_fastpath()) {
589 // Load double input.
590 __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
592 // Clear cumulative exception flags and save the FCSR.
593 __ cfc1(scratch2, FCSR);
594 __ ctc1(zero_reg, FCSR);
596 // Try a conversion to a signed integer.
597 __ Trunc_w_d(double_scratch, double_scratch);
598 // Move the converted value into the result register.
599 __ mfc1(scratch3, double_scratch);
601 // Retrieve and restore the FCSR.
602 __ cfc1(scratch, FCSR);
603 __ ctc1(scratch2, FCSR);
605 // Check for overflow and NaNs.
608 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
609 | kFCSRInvalidOpFlagMask);
610 // If we had no exceptions then set result_reg and we are done.
612 __ Branch(&error, ne, scratch, Operand(zero_reg));
613 __ Move(result_reg, scratch3);
618 // Load the double value and perform a manual truncation.
619 Register input_high = scratch2;
620 Register input_low = scratch3;
623 MemOperand(input_reg, double_offset + Register::kMantissaOffset));
625 MemOperand(input_reg, double_offset + Register::kExponentOffset));
627 Label normal_exponent, restore_sign;
628 // Extract the biased exponent in result.
631 HeapNumber::kExponentShift,
632 HeapNumber::kExponentBits);
634 // Check for Infinity and NaNs, which should return 0.
635 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
636 __ Movz(result_reg, zero_reg, scratch);
637 __ Branch(&done, eq, scratch, Operand(zero_reg));
639 // Express exponent as delta to (number of mantissa bits + 31).
642 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
644 // If the delta is strictly positive, all bits would be shifted away,
645 // which means that we can return 0.
646 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
647 __ mov(result_reg, zero_reg);
650 __ bind(&normal_exponent);
651 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
653 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
656 Register sign = result_reg;
658 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
660 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
661 // to check for this specific case.
662 Label high_shift_needed, high_shift_done;
663 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
664 __ mov(input_high, zero_reg);
665 __ Branch(&high_shift_done);
666 __ bind(&high_shift_needed);
668 // Set the implicit 1 before the mantissa part in input_high.
671 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
672 // Shift the mantissa bits to the correct position.
673 // We don't need to clear non-mantissa bits as they will be shifted away.
674 // If they weren't, it would mean that the answer is in the 32bit range.
675 __ sllv(input_high, input_high, scratch);
677 __ bind(&high_shift_done);
679 // Replace the shifted bits with bits from the lower mantissa word.
680 Label pos_shift, shift_done;
682 __ subu(scratch, at, scratch);
683 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
686 __ Subu(scratch, zero_reg, scratch);
687 __ sllv(input_low, input_low, scratch);
688 __ Branch(&shift_done);
691 __ srlv(input_low, input_low, scratch);
693 __ bind(&shift_done);
694 __ Or(input_high, input_high, Operand(input_low));
695 // Restore sign if necessary.
696 __ mov(scratch, sign);
699 __ Subu(result_reg, zero_reg, input_high);
700 __ Movz(result_reg, input_high, scratch);
704 __ Pop(scratch, scratch2, scratch3);
709 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
711 WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
712 WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
718 // See comment for class, this does NOT work for int32's that are in Smi range.
719 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
720 Label max_negative_int;
721 // the_int_ has the answer which is a signed int32 but not a Smi.
722 // We test for the special value that has a different exponent.
723 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
724 // Test sign, and save for later conditionals.
725 __ And(sign_, the_int_, Operand(0x80000000u));
726 __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
728 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
729 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
730 uint32_t non_smi_exponent =
731 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
732 __ li(scratch_, Operand(non_smi_exponent));
733 // Set the sign bit in scratch_ if the value was negative.
734 __ or_(scratch_, scratch_, sign_);
735 // Subtract from 0 if the value was negative.
736 __ subu(at, zero_reg, the_int_);
737 __ Movn(the_int_, at, sign_);
738 // We should be masking the implict first digit of the mantissa away here,
739 // but it just ends up combining harmlessly with the last digit of the
740 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
741 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
742 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
743 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
744 __ srl(at, the_int_, shift_distance);
745 __ or_(scratch_, scratch_, at);
746 __ sw(scratch_, FieldMemOperand(the_heap_number_,
747 HeapNumber::kExponentOffset));
748 __ sll(scratch_, the_int_, 32 - shift_distance);
749 __ Ret(USE_DELAY_SLOT);
750 __ sw(scratch_, FieldMemOperand(the_heap_number_,
751 HeapNumber::kMantissaOffset));
753 __ bind(&max_negative_int);
754 // The max negative int32 is stored as a positive number in the mantissa of
755 // a double because it uses a sign bit instead of using two's complement.
756 // The actual mantissa bits stored are all 0 because the implicit most
757 // significant 1 bit is not stored.
758 non_smi_exponent += 1 << HeapNumber::kExponentShift;
759 __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
761 FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
762 __ mov(scratch_, zero_reg);
763 __ Ret(USE_DELAY_SLOT);
765 FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
769 // Handle the case where the lhs and rhs are the same object.
770 // Equality is almost reflexive (everything but NaN), so this is a test
771 // for "identity and not NaN".
772 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
776 Label heap_number, return_equal;
777 Register exp_mask_reg = t5;
779 __ Branch(¬_identical, ne, a0, Operand(a1));
781 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
783 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
784 // so we do the second best thing - test it ourselves.
785 // They are both equal and they are not both Smis so both of them are not
786 // Smis. If it's not a heap number, then return equal.
787 if (cc == less || cc == greater) {
788 __ GetObjectType(a0, t4, t4);
789 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
791 __ GetObjectType(a0, t4, t4);
792 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
793 // Comparing JS objects with <=, >= is complicated.
795 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
796 // Normally here we fall through to return_equal, but undefined is
797 // special: (undefined == undefined) == true, but
798 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
799 if (cc == less_equal || cc == greater_equal) {
800 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
801 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
802 __ Branch(&return_equal, ne, a0, Operand(t2));
803 ASSERT(is_int16(GREATER) && is_int16(LESS));
804 __ Ret(USE_DELAY_SLOT);
806 // undefined <= undefined should fail.
807 __ li(v0, Operand(GREATER));
809 // undefined >= undefined should fail.
810 __ li(v0, Operand(LESS));
816 __ bind(&return_equal);
817 ASSERT(is_int16(GREATER) && is_int16(LESS));
818 __ Ret(USE_DELAY_SLOT);
820 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
821 } else if (cc == greater) {
822 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
824 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
827 // For less and greater we don't have to check for NaN since the result of
828 // x < x is false regardless. For the others here is some code to check
830 if (cc != lt && cc != gt) {
831 __ bind(&heap_number);
832 // It is a heap number, so return non-equal if it's NaN and equal if it's
835 // The representation of NaN values has all exponent bits (52..62) set,
836 // and not all mantissa bits (0..51) clear.
837 // Read top bits of double representation (second word of value).
838 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
839 // Test that exponent bits are all set.
840 __ And(t3, t2, Operand(exp_mask_reg));
841 // If all bits not set (ne cond), then not a NaN, objects are equal.
842 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
844 // Shift out flag and all exponent bits, retaining only mantissa.
845 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
846 // Or with all low-bits of mantissa.
847 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
848 __ Or(v0, t3, Operand(t2));
849 // For equal we already have the right value in v0: Return zero (equal)
850 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
851 // not (it's a NaN). For <= and >= we need to load v0 with the failing
852 // value if it's a NaN.
854 // All-zero means Infinity means equal.
855 __ Ret(eq, v0, Operand(zero_reg));
856 ASSERT(is_int16(GREATER) && is_int16(LESS));
857 __ Ret(USE_DELAY_SLOT);
859 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
861 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
865 // No fall through here.
867 __ bind(¬_identical);
871 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
874 Label* both_loaded_as_doubles,
877 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
878 (lhs.is(a1) && rhs.is(a0)));
881 __ JumpIfSmi(lhs, &lhs_is_smi);
883 // Check whether the non-smi is a heap number.
884 __ GetObjectType(lhs, t4, t4);
886 // If lhs was not a number and rhs was a Smi then strict equality cannot
887 // succeed. Return non-equal (lhs is already not zero).
888 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
891 // Smi compared non-strictly with a non-Smi non-heap-number. Call
893 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
896 // Rhs is a smi, lhs is a number.
897 // Convert smi rhs to double.
898 __ sra(at, rhs, kSmiTagSize);
900 __ cvt_d_w(f14, f14);
901 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
903 // We now have both loaded as doubles.
904 __ jmp(both_loaded_as_doubles);
906 __ bind(&lhs_is_smi);
907 // Lhs is a Smi. Check whether the non-smi is a heap number.
908 __ GetObjectType(rhs, t4, t4);
910 // If lhs was not a number and rhs was a Smi then strict equality cannot
911 // succeed. Return non-equal.
912 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
913 __ li(v0, Operand(1));
915 // Smi compared non-strictly with a non-Smi non-heap-number. Call
917 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
920 // Lhs is a smi, rhs is a number.
921 // Convert smi lhs to double.
922 __ sra(at, lhs, kSmiTagSize);
924 __ cvt_d_w(f12, f12);
925 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
926 // Fall through to both_loaded_as_doubles.
930 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
933 // If either operand is a JS object or an oddball value, then they are
934 // not equal since their pointers are different.
935 // There is no test for undetectability in strict equality.
936 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
937 Label first_non_object;
938 // Get the type of the first operand into a2 and compare it with
939 // FIRST_SPEC_OBJECT_TYPE.
940 __ GetObjectType(lhs, a2, a2);
941 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
944 Label return_not_equal;
945 __ bind(&return_not_equal);
946 __ Ret(USE_DELAY_SLOT);
947 __ li(v0, Operand(1));
949 __ bind(&first_non_object);
950 // Check for oddballs: true, false, null, undefined.
951 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
953 __ GetObjectType(rhs, a3, a3);
954 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
956 // Check for oddballs: true, false, null, undefined.
957 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
959 // Now that we have the types we might as well check for
960 // internalized-internalized.
961 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
962 __ Or(a2, a2, Operand(a3));
963 __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
964 __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
968 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
971 Label* both_loaded_as_doubles,
972 Label* not_heap_numbers,
974 __ GetObjectType(lhs, a3, a2);
975 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
976 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
977 // If first was a heap number & second wasn't, go to slow case.
978 __ Branch(slow, ne, a3, Operand(a2));
980 // Both are heap numbers. Load them up then jump to the code we have
982 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
983 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
985 __ jmp(both_loaded_as_doubles);
989 // Fast negative check for internalized-to-internalized equality.
990 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
993 Label* possible_strings,
994 Label* not_both_strings) {
995 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
996 (lhs.is(a1) && rhs.is(a0)));
998 // a2 is object type of rhs.
1000 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
1001 __ And(at, a2, Operand(kIsNotStringMask));
1002 __ Branch(&object_test, ne, at, Operand(zero_reg));
1003 __ And(at, a2, Operand(kIsNotInternalizedMask));
1004 __ Branch(possible_strings, ne, at, Operand(zero_reg));
1005 __ GetObjectType(rhs, a3, a3);
1006 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1007 __ And(at, a3, Operand(kIsNotInternalizedMask));
1008 __ Branch(possible_strings, ne, at, Operand(zero_reg));
1010 // Both are internalized strings. We already checked they weren't the same
1011 // pointer so they are not equal.
1012 __ Ret(USE_DELAY_SLOT);
1013 __ li(v0, Operand(1)); // Non-zero indicates not equal.
1015 __ bind(&object_test);
1016 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1017 __ GetObjectType(rhs, a2, a3);
1018 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1020 // If both objects are undetectable, they are equal. Otherwise, they
1021 // are not equal, since they are different objects and an object is not
1022 // equal to undefined.
1023 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1024 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1025 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1026 __ and_(a0, a2, a3);
1027 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1028 __ Ret(USE_DELAY_SLOT);
1029 __ xori(v0, a0, 1 << Map::kIsUndetectable);
1033 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
1036 CompareIC::State expected,
1039 if (expected == CompareIC::SMI) {
1040 __ JumpIfNotSmi(input, fail);
1041 } else if (expected == CompareIC::NUMBER) {
1042 __ JumpIfSmi(input, &ok);
1043 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
1046 // We could be strict about internalized/string here, but as long as
1047 // hydrogen doesn't care, the stub doesn't have to care either.
1052 // On entry a1 and a2 are the values to be compared.
1053 // On exit a0 is 0, positive or negative to indicate the result of
1055 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
1058 Condition cc = GetCondition();
1061 ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
1062 ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
1064 Label slow; // Call builtin.
1065 Label not_smis, both_loaded_as_doubles;
1067 Label not_two_smis, smi_done;
1069 __ JumpIfNotSmi(a2, ¬_two_smis);
1072 __ Ret(USE_DELAY_SLOT);
1073 __ subu(v0, a1, a0);
1074 __ bind(¬_two_smis);
1076 // NOTICE! This code is only reached after a smi-fast-case check, so
1077 // it is certain that at least one operand isn't a smi.
1079 // Handle the case where the objects are identical. Either returns the answer
1080 // or goes to slow. Only falls through if the objects were not identical.
1081 EmitIdenticalObjectComparison(masm, &slow, cc);
1083 // If either is a Smi (we know that not both are), then they can only
1084 // be strictly equal if the other is a HeapNumber.
1085 STATIC_ASSERT(kSmiTag == 0);
1086 ASSERT_EQ(0, Smi::FromInt(0));
1087 __ And(t2, lhs, Operand(rhs));
1088 __ JumpIfNotSmi(t2, ¬_smis, t0);
1089 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1090 // 1) Return the answer.
1092 // 3) Fall through to both_loaded_as_doubles.
1093 // 4) Jump to rhs_not_nan.
1094 // In cases 3 and 4 we have found out we were dealing with a number-number
1095 // comparison and the numbers have been loaded into f12 and f14 as doubles,
1096 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1097 EmitSmiNonsmiComparison(masm, lhs, rhs,
1098 &both_loaded_as_doubles, &slow, strict());
1100 __ bind(&both_loaded_as_doubles);
1101 // f12, f14 are the double representations of the left hand side
1102 // and the right hand side if we have FPU. Otherwise a2, a3 represent
1103 // left hand side and a0, a1 represent right hand side.
1105 __ li(t0, Operand(LESS));
1106 __ li(t1, Operand(GREATER));
1107 __ li(t2, Operand(EQUAL));
1109 // Check if either rhs or lhs is NaN.
1110 __ BranchF(NULL, &nan, eq, f12, f14);
1112 // Check if LESS condition is satisfied. If true, move conditionally
1114 __ c(OLT, D, f12, f14);
1116 // Use previous check to store conditionally to v0 oposite condition
1117 // (GREATER). If rhs is equal to lhs, this will be corrected in next
1120 // Check if EQUAL condition is satisfied. If true, move conditionally
1122 __ c(EQ, D, f12, f14);
1128 // NaN comparisons always fail.
1129 // Load whatever we need in v0 to make the comparison fail.
1130 ASSERT(is_int16(GREATER) && is_int16(LESS));
1131 __ Ret(USE_DELAY_SLOT);
1132 if (cc == lt || cc == le) {
1133 __ li(v0, Operand(GREATER));
1135 __ li(v0, Operand(LESS));
1140 // At this point we know we are dealing with two different objects,
1141 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1143 // This returns non-equal for some object types, or falls through if it
1145 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
1148 Label check_for_internalized_strings;
1149 Label flat_string_check;
1150 // Check for heap-number-heap-number comparison. Can jump to slow case,
1151 // or load both doubles and jump to the code that handles
1152 // that case. If the inputs are not doubles then jumps to
1153 // check_for_internalized_strings.
1154 // In this case a2 will contain the type of lhs_.
1155 EmitCheckForTwoHeapNumbers(masm,
1158 &both_loaded_as_doubles,
1159 &check_for_internalized_strings,
1160 &flat_string_check);
1162 __ bind(&check_for_internalized_strings);
1163 if (cc == eq && !strict()) {
1164 // Returns an answer for two internalized strings or two
1165 // detectable objects.
1166 // Otherwise jumps to string case or not both strings case.
1167 // Assumes that a2 is the type of lhs_ on entry.
1168 EmitCheckForInternalizedStringsOrObjects(
1169 masm, lhs, rhs, &flat_string_check, &slow);
1172 // Check for both being sequential ASCII strings, and inline if that is the
1174 __ bind(&flat_string_check);
1176 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
1178 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
1181 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1188 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1196 // Never falls through to here.
1199 // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1202 // Figure out which native to call and setup the arguments.
1203 Builtins::JavaScript native;
1205 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1207 native = Builtins::COMPARE;
1208 int ncr; // NaN compare result.
1209 if (cc == lt || cc == le) {
1212 ASSERT(cc == gt || cc == ge); // Remaining cases.
1215 __ li(a0, Operand(Smi::FromInt(ncr)));
1219 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1220 // tagged as a small integer.
1221 __ InvokeBuiltin(native, JUMP_FUNCTION);
1228 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
1231 if (save_doubles_ == kSaveFPRegs) {
1232 __ PushSafepointRegistersAndDoubles();
1234 __ PushSafepointRegisters();
1240 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
1243 __ StoreToSafepointRegisterSlot(t9, t9);
1244 if (save_doubles_ == kSaveFPRegs) {
1245 __ PopSafepointRegistersAndDoubles();
1247 __ PopSafepointRegisters();
1253 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1254 // We don't allow a GC during a store buffer overflow so there is no need to
1255 // store the registers in any particular way, but we do have to store and
1257 __ MultiPush(kJSCallerSaved | ra.bit());
1258 if (save_doubles_ == kSaveFPRegs) {
1259 __ MultiPushFPU(kCallerSavedFPU);
1261 const int argument_count = 1;
1262 const int fp_argument_count = 0;
1263 const Register scratch = a1;
1265 AllowExternalCallThatCantCauseGC scope(masm);
1266 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1267 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
1269 ExternalReference::store_buffer_overflow_function(isolate()),
1271 if (save_doubles_ == kSaveFPRegs) {
1272 __ MultiPopFPU(kCallerSavedFPU);
1275 __ MultiPop(kJSCallerSaved | ra.bit());
1280 void MathPowStub::Generate(MacroAssembler* masm) {
1281 const Register base = a1;
1282 const Register exponent = a2;
1283 const Register heapnumbermap = t1;
1284 const Register heapnumber = v0;
1285 const DoubleRegister double_base = f2;
1286 const DoubleRegister double_exponent = f4;
1287 const DoubleRegister double_result = f0;
1288 const DoubleRegister double_scratch = f6;
1289 const FPURegister single_scratch = f8;
1290 const Register scratch = t5;
1291 const Register scratch2 = t3;
1293 Label call_runtime, done, int_exponent;
1294 if (exponent_type_ == ON_STACK) {
1295 Label base_is_smi, unpack_exponent;
1296 // The exponent and base are supplied as arguments on the stack.
1297 // This can only happen if the stub is called from non-optimized code.
1298 // Load input parameters from stack to double registers.
1299 __ lw(base, MemOperand(sp, 1 * kPointerSize));
1300 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
1302 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
1304 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
1305 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
1306 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1308 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
1309 __ jmp(&unpack_exponent);
1311 __ bind(&base_is_smi);
1312 __ mtc1(scratch, single_scratch);
1313 __ cvt_d_w(double_base, single_scratch);
1314 __ bind(&unpack_exponent);
1316 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1318 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
1319 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1320 __ ldc1(double_exponent,
1321 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1322 } else if (exponent_type_ == TAGGED) {
1323 // Base is already in double_base.
1324 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1326 __ ldc1(double_exponent,
1327 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1330 if (exponent_type_ != INTEGER) {
1331 Label int_exponent_convert;
1332 // Detect integer exponents stored as double.
1333 __ EmitFPUTruncate(kRoundToMinusInf,
1339 kCheckForInexactConversion);
1340 // scratch2 == 0 means there was no conversion error.
1341 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
1343 if (exponent_type_ == ON_STACK) {
1344 // Detect square root case. Crankshaft detects constant +/-0.5 at
1345 // compile time and uses DoMathPowHalf instead. We then skip this check
1346 // for non-constant cases of +/-0.5 as these hardly occur.
1347 Label not_plus_half;
1350 __ Move(double_scratch, 0.5);
1351 __ BranchF(USE_DELAY_SLOT,
1357 // double_scratch can be overwritten in the delay slot.
1358 // Calculates square root of base. Check for the special case of
1359 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
1360 __ Move(double_scratch, -V8_INFINITY);
1361 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1362 __ neg_d(double_result, double_scratch);
1364 // Add +0 to convert -0 to +0.
1365 __ add_d(double_scratch, double_base, kDoubleRegZero);
1366 __ sqrt_d(double_result, double_scratch);
1369 __ bind(¬_plus_half);
1370 __ Move(double_scratch, -0.5);
1371 __ BranchF(USE_DELAY_SLOT,
1377 // double_scratch can be overwritten in the delay slot.
1378 // Calculates square root of base. Check for the special case of
1379 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
1380 __ Move(double_scratch, -V8_INFINITY);
1381 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1382 __ Move(double_result, kDoubleRegZero);
1384 // Add +0 to convert -0 to +0.
1385 __ add_d(double_scratch, double_base, kDoubleRegZero);
1386 __ Move(double_result, 1);
1387 __ sqrt_d(double_scratch, double_scratch);
1388 __ div_d(double_result, double_result, double_scratch);
1394 AllowExternalCallThatCantCauseGC scope(masm);
1395 __ PrepareCallCFunction(0, 2, scratch2);
1396 __ MovToFloatParameters(double_base, double_exponent);
1398 ExternalReference::power_double_double_function(isolate()),
1402 __ MovFromFloatResult(double_result);
1405 __ bind(&int_exponent_convert);
1408 // Calculate power with integer exponent.
1409 __ bind(&int_exponent);
1411 // Get two copies of exponent in the registers scratch and exponent.
1412 if (exponent_type_ == INTEGER) {
1413 __ mov(scratch, exponent);
1415 // Exponent has previously been stored into scratch as untagged integer.
1416 __ mov(exponent, scratch);
1419 __ mov_d(double_scratch, double_base); // Back up base.
1420 __ Move(double_result, 1.0);
1422 // Get absolute value of exponent.
1423 Label positive_exponent;
1424 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
1425 __ Subu(scratch, zero_reg, scratch);
1426 __ bind(&positive_exponent);
1428 Label while_true, no_carry, loop_end;
1429 __ bind(&while_true);
1431 __ And(scratch2, scratch, 1);
1433 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
1434 __ mul_d(double_result, double_result, double_scratch);
1437 __ sra(scratch, scratch, 1);
1439 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
1440 __ mul_d(double_scratch, double_scratch, double_scratch);
1442 __ Branch(&while_true);
1446 __ Branch(&done, ge, exponent, Operand(zero_reg));
1447 __ Move(double_scratch, 1.0);
1448 __ div_d(double_result, double_scratch, double_result);
1449 // Test whether result is zero. Bail out to check for subnormal result.
1450 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1451 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
1453 // double_exponent may not contain the exponent value if the input was a
1454 // smi. We set it with exponent value before bailing out.
1455 __ mtc1(exponent, single_scratch);
1456 __ cvt_d_w(double_exponent, single_scratch);
1458 // Returning or bailing out.
1459 Counters* counters = isolate()->counters();
1460 if (exponent_type_ == ON_STACK) {
1461 // The arguments are still on the stack.
1462 __ bind(&call_runtime);
1463 __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
1465 // The stub is called from non-optimized code, which expects the result
1466 // as heap number in exponent.
1468 __ AllocateHeapNumber(
1469 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1470 __ sdc1(double_result,
1471 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1472 ASSERT(heapnumber.is(v0));
1473 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1478 AllowExternalCallThatCantCauseGC scope(masm);
1479 __ PrepareCallCFunction(0, 2, scratch);
1480 __ MovToFloatParameters(double_base, double_exponent);
1482 ExternalReference::power_double_double_function(isolate()),
1486 __ MovFromFloatResult(double_result);
1489 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1495 bool CEntryStub::NeedsImmovableCode() {
1500 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1501 CEntryStub::GenerateAheadOfTime(isolate);
1502 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
1503 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1504 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1505 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1506 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1507 BinaryOpICStub::GenerateAheadOfTime(isolate);
1508 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
1509 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
1510 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1514 void StoreRegistersStateStub::GenerateAheadOfTime(
1516 StoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
1518 // Hydrogen code stubs need stub2 at snapshot time.
1519 StoreRegistersStateStub stub2(isolate, kSaveFPRegs);
1524 void RestoreRegistersStateStub::GenerateAheadOfTime(
1526 RestoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
1528 // Hydrogen code stubs need stub2 at snapshot time.
1529 RestoreRegistersStateStub stub2(isolate, kSaveFPRegs);
1534 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1535 SaveFPRegsMode mode = kSaveFPRegs;
1536 CEntryStub save_doubles(isolate, 1, mode);
1537 StoreBufferOverflowStub stub(isolate, mode);
1538 // These stubs might already be in the snapshot, detect that and don't
1539 // regenerate, which would lead to code stub initialization state being messed
1541 Code* save_doubles_code;
1542 if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
1543 save_doubles_code = *save_doubles.GetCode();
1545 Code* store_buffer_overflow_code;
1546 if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
1547 store_buffer_overflow_code = *stub.GetCode();
1549 isolate->set_fp_stubs_generated(true);
1553 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1554 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1559 void CEntryStub::Generate(MacroAssembler* masm) {
1560 // Called from JavaScript; parameters are on stack as if calling JS function
1561 // s0: number of arguments including receiver
1562 // s1: size of arguments excluding receiver
1563 // s2: pointer to builtin function
1564 // fp: frame pointer (restored after C call)
1565 // sp: stack pointer (restored as callee's sp after C call)
1566 // cp: current context (C callee-saved)
1568 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1570 // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
1571 // The reason for this is that these arguments would need to be saved anyway
1572 // so it's faster to set them up directly.
1573 // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
1575 // Compute the argv pointer in a callee-saved register.
1576 __ Addu(s1, sp, s1);
1578 // Enter the exit frame that transitions from JavaScript to C++.
1579 FrameScope scope(masm, StackFrame::MANUAL);
1580 __ EnterExitFrame(save_doubles_);
1582 // s0: number of arguments including receiver (C callee-saved)
1583 // s1: pointer to first argument (C callee-saved)
1584 // s2: pointer to builtin function (C callee-saved)
1586 // Prepare arguments for C routine.
1589 // a1 = argv (set in the delay slot after find_ra below).
1591 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1592 // also need to reserve the 4 argument slots on the stack.
1594 __ AssertStackIsAligned();
1596 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1598 // To let the GC traverse the return address of the exit frames, we need to
1599 // know where the return address is. The CEntryStub is unmovable, so
1600 // we can store the address on the stack to be able to find it again and
1601 // we never have to restore it, because it will not change.
1602 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1603 // This branch-and-link sequence is needed to find the current PC on mips,
1604 // saved to the ra register.
1605 // Use masm-> here instead of the double-underscore macro since extra
1606 // coverage code can interfere with the proper calculation of ra.
1608 masm->bal(&find_ra); // bal exposes branch delay slot.
1610 masm->bind(&find_ra);
1612 // Adjust the value in ra to point to the correct return location, 2nd
1613 // instruction past the real call into C code (the jalr(t9)), and push it.
1614 // This is the return address of the exit frame.
1615 const int kNumInstructionsToJump = 5;
1616 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
1617 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
1618 // Stack space reservation moved to the branch delay slot below.
1619 // Stack is still aligned.
1621 // Call the C routine.
1622 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
1624 // Set up sp in the delay slot.
1625 masm->addiu(sp, sp, -kCArgsSlotsSize);
1626 // Make sure the stored 'ra' points to this position.
1627 ASSERT_EQ(kNumInstructionsToJump,
1628 masm->InstructionsGeneratedSince(&find_ra));
1632 // Runtime functions should not return 'the hole'. Allowing it to escape may
1633 // lead to crashes in the IC code later.
1634 if (FLAG_debug_code) {
1636 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1637 __ Branch(&okay, ne, v0, Operand(t0));
1638 __ stop("The hole escaped");
1642 // Check result for exception sentinel.
1643 Label exception_returned;
1644 __ LoadRoot(t0, Heap::kExceptionRootIndex);
1645 __ Branch(&exception_returned, eq, t0, Operand(v0));
1647 ExternalReference pending_exception_address(
1648 Isolate::kPendingExceptionAddress, isolate());
1650 // Check that there is no pending exception, otherwise we
1651 // should have returned the exception sentinel.
1652 if (FLAG_debug_code) {
1654 __ li(a2, Operand(pending_exception_address));
1655 __ lw(a2, MemOperand(a2));
1656 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1657 // Cannot use check here as it attempts to generate call into runtime.
1658 __ Branch(&okay, eq, t0, Operand(a2));
1659 __ stop("Unexpected pending exception");
1663 // Exit C frame and return.
1665 // sp: stack pointer
1666 // fp: frame pointer
1667 // s0: still holds argc (callee-saved).
1668 __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
1670 // Handling of exception.
1671 __ bind(&exception_returned);
1673 // Retrieve the pending exception.
1674 __ li(a2, Operand(pending_exception_address));
1675 __ lw(v0, MemOperand(a2));
1677 // Clear the pending exception.
1678 __ li(a3, Operand(isolate()->factory()->the_hole_value()));
1679 __ sw(a3, MemOperand(a2));
1681 // Special handling of termination exceptions which are uncatchable
1682 // by javascript code.
1683 Label throw_termination_exception;
1684 __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
1685 __ Branch(&throw_termination_exception, eq, v0, Operand(t0));
1687 // Handle normal exception.
1690 __ bind(&throw_termination_exception);
1691 __ ThrowUncatchable(v0);
1695 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
1696 Label invoke, handler_entry, exit;
1697 Isolate* isolate = masm->isolate();
1700 // a0: entry address
1709 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1711 // Save callee saved registers on the stack.
1712 __ MultiPush(kCalleeSaved | ra.bit());
1714 // Save callee-saved FPU registers.
1715 __ MultiPushFPU(kCalleeSavedFPU);
1716 // Set up the reserved register for 0.0.
1717 __ Move(kDoubleRegZero, 0.0);
1720 // Load argv in s0 register.
1721 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1722 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
1724 __ InitializeRootRegister();
1725 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
1727 // We build an EntryFrame.
1728 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1729 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
1730 __ li(t2, Operand(Smi::FromInt(marker)));
1731 __ li(t1, Operand(Smi::FromInt(marker)));
1732 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1734 __ lw(t0, MemOperand(t0));
1735 __ Push(t3, t2, t1, t0);
1736 // Set up frame pointer for the frame to be pushed.
1737 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
1740 // a0: entry_address
1742 // a2: receiver_pointer
1748 // function slot | entry frame
1750 // bad fp (0xff...f) |
1751 // callee saved registers + ra
1755 // If this is the outermost JS call, set js_entry_sp value.
1756 Label non_outermost_js;
1757 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1758 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1759 __ lw(t2, MemOperand(t1));
1760 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
1761 __ sw(fp, MemOperand(t1));
1762 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1765 __ nop(); // Branch delay slot nop.
1766 __ bind(&non_outermost_js);
1767 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1771 // Jump to a faked try block that does the invoke, with a faked catch
1772 // block that sets the pending exception.
1774 __ bind(&handler_entry);
1775 handler_offset_ = handler_entry.pos();
1776 // Caught exception: Store result (exception) in the pending exception
1777 // field in the JSEnv and return a failure sentinel. Coming in here the
1778 // fp will be invalid because the PushTryHandler below sets it to 0 to
1779 // signal the existence of the JSEntry frame.
1780 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1782 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
1783 __ LoadRoot(v0, Heap::kExceptionRootIndex);
1784 __ b(&exit); // b exposes branch delay slot.
1785 __ nop(); // Branch delay slot nop.
1787 // Invoke: Link this frame into the handler chain. There's only one
1788 // handler block in this code object, so its index is 0.
1790 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1791 // If an exception not caught by another handler occurs, this handler
1792 // returns control to the code after the bal(&invoke) above, which
1793 // restores all kCalleeSaved registers (including cp and fp) to their
1794 // saved values before returning a failure to C.
1796 // Clear any pending exceptions.
1797 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
1798 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1800 __ sw(t1, MemOperand(t0));
1802 // Invoke the function by calling through JS entry trampoline builtin.
1803 // Notice that we cannot store a reference to the trampoline code directly in
1804 // this stub, because runtime stubs are not traversed when doing GC.
1807 // a0: entry_address
1809 // a2: receiver_pointer
1816 // callee saved registers + ra
1821 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1823 __ li(t0, Operand(construct_entry));
1825 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1826 __ li(t0, Operand(entry));
1828 __ lw(t9, MemOperand(t0)); // Deref address.
1830 // Call JSEntryTrampoline.
1831 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1834 // Unlink this frame from the handler chain.
1837 __ bind(&exit); // v0 holds result
1838 // Check if the current stack frame is marked as the outermost JS frame.
1839 Label non_outermost_js_2;
1841 __ Branch(&non_outermost_js_2,
1844 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1845 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1846 __ sw(zero_reg, MemOperand(t1));
1847 __ bind(&non_outermost_js_2);
1849 // Restore the top frame descriptors from the stack.
1851 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1853 __ sw(t1, MemOperand(t0));
1855 // Reset the stack to the callee saved registers.
1856 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
1858 // Restore callee-saved fpu registers.
1859 __ MultiPopFPU(kCalleeSavedFPU);
1861 // Restore callee saved registers from the stack.
1862 __ MultiPop(kCalleeSaved | ra.bit());
1868 // Uses registers a0 to t0.
1869 // Expected input (depending on whether args are in registers or on the stack):
1870 // * object: a0 or at sp + 1 * kPointerSize.
1871 // * function: a1 or at sp.
1873 // An inlined call site may have been generated before calling this stub.
1874 // In this case the offset to the inline site to patch is passed on the stack,
1875 // in the safepoint slot for register t0.
1876 void InstanceofStub::Generate(MacroAssembler* masm) {
1877 // Call site inlining and patching implies arguments in registers.
1878 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1879 // ReturnTrueFalse is only implemented for inlined call sites.
1880 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
1882 // Fixed register usage throughout the stub:
1883 const Register object = a0; // Object (lhs).
1884 Register map = a3; // Map of the object.
1885 const Register function = a1; // Function (rhs).
1886 const Register prototype = t0; // Prototype of the function.
1887 const Register inline_site = t5;
1888 const Register scratch = a2;
1890 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
1892 Label slow, loop, is_instance, is_not_instance, not_js_object;
1894 if (!HasArgsInRegisters()) {
1895 __ lw(object, MemOperand(sp, 1 * kPointerSize));
1896 __ lw(function, MemOperand(sp, 0));
1899 // Check that the left hand is a JS object and load map.
1900 __ JumpIfSmi(object, ¬_js_object);
1901 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
1903 // If there is a call site cache don't look in the global cache, but do the
1904 // real lookup and update the call site cache.
1905 if (!HasCallSiteInlineCheck()) {
1907 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
1908 __ Branch(&miss, ne, function, Operand(at));
1909 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
1910 __ Branch(&miss, ne, map, Operand(at));
1911 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1912 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1917 // Get the prototype of the function.
1918 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1920 // Check that the function prototype is a JS object.
1921 __ JumpIfSmi(prototype, &slow);
1922 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1924 // Update the global instanceof or call site inlined cache with the current
1925 // map and function. The cached answer will be set when it is known below.
1926 if (!HasCallSiteInlineCheck()) {
1927 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1928 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1930 ASSERT(HasArgsInRegisters());
1931 // Patch the (relocated) inlined map check.
1933 // The offset was stored in t0 safepoint slot.
1934 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1935 __ LoadFromSafepointRegisterSlot(scratch, t0);
1936 __ Subu(inline_site, ra, scratch);
1937 // Get the map location in scratch and patch it.
1938 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
1939 __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
1942 // Register mapping: a3 is object map and t0 is function prototype.
1943 // Get prototype of object into a2.
1944 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1946 // We don't need map any more. Use it as a scratch register.
1947 Register scratch2 = map;
1950 // Loop through the prototype chain looking for the function prototype.
1951 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1953 __ Branch(&is_instance, eq, scratch, Operand(prototype));
1954 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
1955 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1956 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1959 __ bind(&is_instance);
1960 ASSERT(Smi::FromInt(0) == 0);
1961 if (!HasCallSiteInlineCheck()) {
1962 __ mov(v0, zero_reg);
1963 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1965 // Patch the call site to return true.
1966 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1967 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1968 // Get the boolean result location in scratch and patch it.
1969 __ PatchRelocatedValue(inline_site, scratch, v0);
1971 if (!ReturnTrueFalseObject()) {
1972 ASSERT_EQ(Smi::FromInt(0), 0);
1973 __ mov(v0, zero_reg);
1976 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1978 __ bind(&is_not_instance);
1979 if (!HasCallSiteInlineCheck()) {
1980 __ li(v0, Operand(Smi::FromInt(1)));
1981 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1983 // Patch the call site to return false.
1984 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
1985 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1986 // Get the boolean result location in scratch and patch it.
1987 __ PatchRelocatedValue(inline_site, scratch, v0);
1989 if (!ReturnTrueFalseObject()) {
1990 __ li(v0, Operand(Smi::FromInt(1)));
1994 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1996 Label object_not_null, object_not_null_or_smi;
1997 __ bind(¬_js_object);
1998 // Before null, smi and string value checks, check that the rhs is a function
1999 // as for a non-function rhs an exception needs to be thrown.
2000 __ JumpIfSmi(function, &slow);
2001 __ GetObjectType(function, scratch2, scratch);
2002 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
2004 // Null is not instance of anything.
2005 __ Branch(&object_not_null,
2008 Operand(isolate()->factory()->null_value()));
2009 __ li(v0, Operand(Smi::FromInt(1)));
2010 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2012 __ bind(&object_not_null);
2013 // Smi values are not instances of anything.
2014 __ JumpIfNotSmi(object, &object_not_null_or_smi);
2015 __ li(v0, Operand(Smi::FromInt(1)));
2016 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2018 __ bind(&object_not_null_or_smi);
2019 // String values are not instances of anything.
2020 __ IsObjectJSStringType(object, scratch, &slow);
2021 __ li(v0, Operand(Smi::FromInt(1)));
2022 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2024 // Slow-case. Tail call builtin.
2026 if (!ReturnTrueFalseObject()) {
2027 if (HasArgsInRegisters()) {
2030 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
2033 FrameScope scope(masm, StackFrame::INTERNAL);
2035 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2038 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
2039 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
2040 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
2041 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2046 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
2049 if (kind() == Code::KEYED_LOAD_IC) {
2050 // ----------- S t a t e -------------
2051 // -- ra : return address
2054 // -----------------------------------
2055 __ Branch(&miss, ne, a0,
2056 Operand(isolate()->factory()->prototype_string()));
2059 ASSERT(kind() == Code::LOAD_IC);
2060 // ----------- S t a t e -------------
2062 // -- ra : return address
2064 // -- sp[0] : receiver
2065 // -----------------------------------
2069 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
2071 StubCompiler::TailCallBuiltin(
2072 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2076 Register InstanceofStub::left() { return a0; }
2079 Register InstanceofStub::right() { return a1; }
2082 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2083 // The displacement is the offset of the last parameter (if any)
2084 // relative to the frame pointer.
2085 const int kDisplacement =
2086 StandardFrameConstants::kCallerSPOffset - kPointerSize;
2088 // Check that the key is a smiGenerateReadElement.
2090 __ JumpIfNotSmi(a1, &slow);
2092 // Check if the calling frame is an arguments adaptor frame.
2094 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2095 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
2099 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2101 // Check index (a1) against formal parameters count limit passed in
2102 // through register a0. Use unsigned comparison to get negative
2104 __ Branch(&slow, hs, a1, Operand(a0));
2106 // Read the argument from the stack and return it.
2107 __ subu(a3, a0, a1);
2108 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
2109 __ Addu(a3, fp, Operand(t3));
2110 __ Ret(USE_DELAY_SLOT);
2111 __ lw(v0, MemOperand(a3, kDisplacement));
2113 // Arguments adaptor case: Check index (a1) against actual arguments
2114 // limit found in the arguments adaptor frame. Use unsigned
2115 // comparison to get negative check for free.
2117 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2118 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
2120 // Read the argument from the adaptor frame and return it.
2121 __ subu(a3, a0, a1);
2122 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
2123 __ Addu(a3, a2, Operand(t3));
2124 __ Ret(USE_DELAY_SLOT);
2125 __ lw(v0, MemOperand(a3, kDisplacement));
2127 // Slow-case: Handle non-smi or out-of-bounds access to arguments
2128 // by calling the runtime system.
2131 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2135 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
2136 // sp[0] : number of parameters
2137 // sp[4] : receiver displacement
2139 // Check if the calling frame is an arguments adaptor frame.
2141 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2142 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
2146 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2148 // Patch the arguments.length and the parameters pointer in the current frame.
2149 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2150 __ sw(a2, MemOperand(sp, 0 * kPointerSize));
2152 __ Addu(a3, a3, Operand(t3));
2153 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
2154 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2157 __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
2161 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
2163 // sp[0] : number of parameters (tagged)
2164 // sp[4] : address of receiver argument
2166 // Registers used over whole function:
2167 // t2 : allocated object (tagged)
2168 // t5 : mapped parameter count (tagged)
2170 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2171 // a1 = parameter count (tagged)
2173 // Check if the calling frame is an arguments adaptor frame.
2175 Label adaptor_frame, try_allocate;
2176 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2177 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
2178 __ Branch(&adaptor_frame,
2181 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2183 // No adaptor, parameter count = argument count.
2185 __ b(&try_allocate);
2186 __ nop(); // Branch delay slot nop.
2188 // We have an adaptor frame. Patch the parameters pointer.
2189 __ bind(&adaptor_frame);
2190 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2192 __ Addu(a3, a3, Operand(t6));
2193 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2194 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2196 // a1 = parameter count (tagged)
2197 // a2 = argument count (tagged)
2198 // Compute the mapped parameter count = min(a1, a2) in a1.
2200 __ Branch(&skip_min, lt, a1, Operand(a2));
2204 __ bind(&try_allocate);
2206 // Compute the sizes of backing store, parameter map, and arguments object.
2207 // 1. Parameter map, has 2 extra words containing context and backing store.
2208 const int kParameterMapHeaderSize =
2209 FixedArray::kHeaderSize + 2 * kPointerSize;
2210 // If there are no mapped parameters, we do not need the parameter_map.
2211 Label param_map_size;
2212 ASSERT_EQ(0, Smi::FromInt(0));
2213 __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg));
2214 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
2216 __ addiu(t5, t5, kParameterMapHeaderSize);
2217 __ bind(¶m_map_size);
2219 // 2. Backing store.
2221 __ Addu(t5, t5, Operand(t6));
2222 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
2224 // 3. Arguments object.
2225 __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
2227 // Do the allocation of all three objects in one go.
2228 __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
2230 // v0 = address of new object(s) (tagged)
2231 // a2 = argument count (tagged)
2232 // Get the arguments boilerplate from the current native context into t0.
2233 const int kNormalOffset =
2234 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX);
2235 const int kAliasedOffset =
2236 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
2238 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2239 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
2240 Label skip2_ne, skip2_eq;
2241 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
2242 __ lw(t0, MemOperand(t0, kNormalOffset));
2245 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
2246 __ lw(t0, MemOperand(t0, kAliasedOffset));
2249 // v0 = address of new object (tagged)
2250 // a1 = mapped parameter count (tagged)
2251 // a2 = argument count (tagged)
2252 // t0 = address of boilerplate object (tagged)
2253 // Copy the JS object part.
2254 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2255 __ lw(a3, FieldMemOperand(t0, i));
2256 __ sw(a3, FieldMemOperand(v0, i));
2259 // Set up the callee in-object property.
2260 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2261 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
2262 const int kCalleeOffset = JSObject::kHeaderSize +
2263 Heap::kArgumentsCalleeIndex * kPointerSize;
2264 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
2266 // Use the length (smi tagged) and set that as an in-object property too.
2267 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2268 const int kLengthOffset = JSObject::kHeaderSize +
2269 Heap::kArgumentsLengthIndex * kPointerSize;
2270 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
2272 // Set up the elements pointer in the allocated arguments object.
2273 // If we allocated a parameter map, t0 will point there, otherwise
2274 // it will point to the backing store.
2275 __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
2276 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
2278 // v0 = address of new object (tagged)
2279 // a1 = mapped parameter count (tagged)
2280 // a2 = argument count (tagged)
2281 // t0 = address of parameter map or backing store (tagged)
2282 // Initialize parameter map. If there are no mapped arguments, we're done.
2283 Label skip_parameter_map;
2285 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
2286 // Move backing store address to a3, because it is
2287 // expected there when filling in the unmapped arguments.
2291 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
2293 __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
2294 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
2295 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
2296 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
2297 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
2299 __ Addu(t2, t0, Operand(t6));
2300 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
2301 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
2303 // Copy the parameter slots and the holes in the arguments.
2304 // We need to fill in mapped_parameter_count slots. They index the context,
2305 // where parameters are stored in reverse order, at
2306 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2307 // The mapped parameter thus need to get indices
2308 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
2309 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2310 // We loop from right to left.
2311 Label parameters_loop, parameters_test;
2313 __ lw(t5, MemOperand(sp, 0 * kPointerSize));
2314 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
2315 __ Subu(t5, t5, Operand(a1));
2316 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
2318 __ Addu(a3, t0, Operand(t6));
2319 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
2321 // t2 = loop variable (tagged)
2322 // a1 = mapping index (tagged)
2323 // a3 = address of backing store (tagged)
2324 // t0 = address of parameter map (tagged)
2325 // t1 = temporary scratch (a.o., for address calculation)
2326 // t3 = the hole value
2327 __ jmp(¶meters_test);
2329 __ bind(¶meters_loop);
2330 __ Subu(t2, t2, Operand(Smi::FromInt(1)));
2332 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
2333 __ Addu(t6, t0, t1);
2334 __ sw(t5, MemOperand(t6));
2335 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
2336 __ Addu(t6, a3, t1);
2337 __ sw(t3, MemOperand(t6));
2338 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
2339 __ bind(¶meters_test);
2340 __ Branch(¶meters_loop, ne, t2, Operand(Smi::FromInt(0)));
2342 __ bind(&skip_parameter_map);
2343 // a2 = argument count (tagged)
2344 // a3 = address of backing store (tagged)
2346 // Copy arguments header and remaining slots (if there are any).
2347 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
2348 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
2349 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
2351 Label arguments_loop, arguments_test;
2353 __ lw(t0, MemOperand(sp, 1 * kPointerSize));
2355 __ Subu(t0, t0, Operand(t6));
2356 __ jmp(&arguments_test);
2358 __ bind(&arguments_loop);
2359 __ Subu(t0, t0, Operand(kPointerSize));
2360 __ lw(t2, MemOperand(t0, 0));
2362 __ Addu(t1, a3, Operand(t6));
2363 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
2364 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
2366 __ bind(&arguments_test);
2367 __ Branch(&arguments_loop, lt, t5, Operand(a2));
2369 // Return and remove the on-stack parameters.
2372 // Do the runtime call to allocate the arguments object.
2373 // a2 = argument count (tagged)
2375 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
2376 __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
2380 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2381 // sp[0] : number of parameters
2382 // sp[4] : receiver displacement
2384 // Check if the calling frame is an arguments adaptor frame.
2385 Label adaptor_frame, try_allocate, runtime;
2386 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2387 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
2388 __ Branch(&adaptor_frame,
2391 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2393 // Get the length from the frame.
2394 __ lw(a1, MemOperand(sp, 0));
2395 __ Branch(&try_allocate);
2397 // Patch the arguments.length and the parameters pointer.
2398 __ bind(&adaptor_frame);
2399 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2400 __ sw(a1, MemOperand(sp, 0));
2401 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
2402 __ Addu(a3, a2, Operand(at));
2404 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2405 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2407 // Try the new space allocation. Start out with computing the size
2408 // of the arguments object and the elements array in words.
2409 Label add_arguments_object;
2410 __ bind(&try_allocate);
2411 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
2412 __ srl(a1, a1, kSmiTagSize);
2414 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
2415 __ bind(&add_arguments_object);
2416 __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
2418 // Do the allocation of both objects in one go.
2419 __ Allocate(a1, v0, a2, a3, &runtime,
2420 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2422 // Get the arguments boilerplate from the current native context.
2423 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2424 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
2425 __ lw(t0, MemOperand(t0, Context::SlotOffset(
2426 Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)));
2428 // Copy the JS object part.
2429 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
2431 // Get the length (smi tagged) and set that as an in-object property too.
2432 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2433 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2434 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
2435 Heap::kArgumentsLengthIndex * kPointerSize));
2438 __ Branch(&done, eq, a1, Operand(zero_reg));
2440 // Get the parameters pointer from the stack.
2441 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
2443 // Set up the elements pointer in the allocated arguments object and
2444 // initialize the header in the elements fixed array.
2445 __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
2446 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
2447 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
2448 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
2449 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
2450 // Untag the length for the loop.
2451 __ srl(a1, a1, kSmiTagSize);
2453 // Copy the fixed array slots.
2455 // Set up t0 to point to the first array slot.
2456 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2458 // Pre-decrement a2 with kPointerSize on each iteration.
2459 // Pre-decrement in order to skip receiver.
2460 __ Addu(a2, a2, Operand(-kPointerSize));
2461 __ lw(a3, MemOperand(a2));
2462 // Post-increment t0 with kPointerSize on each iteration.
2463 __ sw(a3, MemOperand(t0));
2464 __ Addu(t0, t0, Operand(kPointerSize));
2465 __ Subu(a1, a1, Operand(1));
2466 __ Branch(&loop, ne, a1, Operand(zero_reg));
2468 // Return and remove the on-stack parameters.
2472 // Do the runtime call to allocate the arguments object.
2474 __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
2478 void RegExpExecStub::Generate(MacroAssembler* masm) {
2479 // Just jump directly to runtime if native RegExp is not selected at compile
2480 // time or if regexp entry in generated code is turned off runtime switch or
2482 #ifdef V8_INTERPRETED_REGEXP
2483 __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
2484 #else // V8_INTERPRETED_REGEXP
2486 // Stack frame on entry.
2487 // sp[0]: last_match_info (expected JSArray)
2488 // sp[4]: previous index
2489 // sp[8]: subject string
2490 // sp[12]: JSRegExp object
2492 const int kLastMatchInfoOffset = 0 * kPointerSize;
2493 const int kPreviousIndexOffset = 1 * kPointerSize;
2494 const int kSubjectOffset = 2 * kPointerSize;
2495 const int kJSRegExpOffset = 3 * kPointerSize;
2498 // Allocation of registers for this function. These are in callee save
2499 // registers and will be preserved by the call to the native RegExp code, as
2500 // this code is called using the normal C calling convention. When calling
2501 // directly from generated code the native RegExp code will not do a GC and
2502 // therefore the content of these registers are safe to use after the call.
2503 // MIPS - using s0..s2, since we are not using CEntry Stub.
2504 Register subject = s0;
2505 Register regexp_data = s1;
2506 Register last_match_info_elements = s2;
2508 // Ensure that a RegExp stack is allocated.
2509 ExternalReference address_of_regexp_stack_memory_address =
2510 ExternalReference::address_of_regexp_stack_memory_address(
2512 ExternalReference address_of_regexp_stack_memory_size =
2513 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2514 __ li(a0, Operand(address_of_regexp_stack_memory_size));
2515 __ lw(a0, MemOperand(a0, 0));
2516 __ Branch(&runtime, eq, a0, Operand(zero_reg));
2518 // Check that the first argument is a JSRegExp object.
2519 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
2520 STATIC_ASSERT(kSmiTag == 0);
2521 __ JumpIfSmi(a0, &runtime);
2522 __ GetObjectType(a0, a1, a1);
2523 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2525 // Check that the RegExp has been compiled (data contains a fixed array).
2526 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2527 if (FLAG_debug_code) {
2528 __ SmiTst(regexp_data, t0);
2530 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2533 __ GetObjectType(regexp_data, a0, a0);
2535 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2537 Operand(FIXED_ARRAY_TYPE));
2540 // regexp_data: RegExp data (FixedArray)
2541 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2542 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2543 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2545 // regexp_data: RegExp data (FixedArray)
2546 // Check that the number of captures fit in the static offsets vector buffer.
2548 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2549 // Check (number_of_captures + 1) * 2 <= offsets vector size
2550 // Or number_of_captures * 2 <= offsets vector size - 2
2551 // Multiplying by 2 comes for free since a2 is smi-tagged.
2552 STATIC_ASSERT(kSmiTag == 0);
2553 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2554 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2556 &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2558 // Reset offset for possibly sliced string.
2559 __ mov(t0, zero_reg);
2560 __ lw(subject, MemOperand(sp, kSubjectOffset));
2561 __ JumpIfSmi(subject, &runtime);
2562 __ mov(a3, subject); // Make a copy of the original subject string.
2563 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2564 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2565 // subject: subject string
2566 // a3: subject string
2567 // a0: subject string instance type
2568 // regexp_data: RegExp data (FixedArray)
2569 // Handle subject string according to its encoding and representation:
2570 // (1) Sequential string? If yes, go to (5).
2571 // (2) Anything but sequential or cons? If yes, go to (6).
2572 // (3) Cons string. If the string is flat, replace subject with first string.
2573 // Otherwise bailout.
2574 // (4) Is subject external? If yes, go to (7).
2575 // (5) Sequential string. Load regexp code according to encoding.
2579 // Deferred code at the end of the stub:
2580 // (6) Not a long external string? If yes, go to (8).
2581 // (7) External string. Make it, offset-wise, look like a sequential string.
2583 // (8) Short external string or not a string? If yes, bail out to runtime.
2584 // (9) Sliced string. Replace subject with parent. Go to (4).
2586 Label seq_string /* 5 */, external_string /* 7 */,
2587 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2588 not_long_external /* 8 */;
2590 // (1) Sequential string? If yes, go to (5).
2593 Operand(kIsNotStringMask |
2594 kStringRepresentationMask |
2595 kShortExternalStringMask));
2596 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2597 __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
2599 // (2) Anything but sequential or cons? If yes, go to (6).
2600 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2601 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2602 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2603 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2605 __ Branch(¬_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2607 // (3) Cons string. Check that it's flat.
2608 // Replace subject with first string and reload instance type.
2609 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2610 __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2611 __ Branch(&runtime, ne, a0, Operand(a1));
2612 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2614 // (4) Is subject external? If yes, go to (7).
2615 __ bind(&check_underlying);
2616 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2617 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2618 STATIC_ASSERT(kSeqStringTag == 0);
2619 __ And(at, a0, Operand(kStringRepresentationMask));
2620 // The underlying external string is never a short external string.
2621 STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
2622 STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2623 __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
2625 // (5) Sequential string. Load regexp code according to encoding.
2626 __ bind(&seq_string);
2627 // subject: sequential subject string (or look-alike, external string)
2628 // a3: original subject string
2629 // Load previous index and check range before a3 is overwritten. We have to
2630 // use a3 instead of subject here because subject might have been only made
2631 // to look like a sequential string when it actually is an external string.
2632 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
2633 __ JumpIfNotSmi(a1, &runtime);
2634 __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
2635 __ Branch(&runtime, ls, a3, Operand(a1));
2636 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
2638 STATIC_ASSERT(kStringEncodingMask == 4);
2639 STATIC_ASSERT(kOneByteStringTag == 4);
2640 STATIC_ASSERT(kTwoByteStringTag == 0);
2641 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
2642 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
2643 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
2644 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2645 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
2647 // (E) Carry on. String handling is done.
2648 // t9: irregexp code
2649 // Check that the irregexp code has been generated for the actual string
2650 // encoding. If it has, the field contains a code object otherwise it contains
2651 // a smi (code flushing support).
2652 __ JumpIfSmi(t9, &runtime);
2654 // a1: previous index
2655 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
2657 // subject: Subject string
2658 // regexp_data: RegExp data (FixedArray)
2659 // All checks done. Now push arguments for native regexp code.
2660 __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
2663 // Isolates: note we add an additional parameter here (isolate pointer).
2664 const int kRegExpExecuteArguments = 9;
2665 const int kParameterRegisters = 4;
2666 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2668 // Stack pointer now points to cell where return address is to be written.
2669 // Arguments are before that on the stack or in registers, meaning we
2670 // treat the return address as argument 5. Thus every argument after that
2671 // needs to be shifted back by 1. Since DirectCEntryStub will handle
2672 // allocating space for the c argument slots, we don't need to calculate
2673 // that into the argument positions on the stack. This is how the stack will
2674 // look (sp meaning the value of sp at this moment):
2675 // [sp + 5] - Argument 9
2676 // [sp + 4] - Argument 8
2677 // [sp + 3] - Argument 7
2678 // [sp + 2] - Argument 6
2679 // [sp + 1] - Argument 5
2680 // [sp + 0] - saved ra
2682 // Argument 9: Pass current isolate address.
2683 // CFunctionArgumentOperand handles MIPS stack argument slots.
2684 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2685 __ sw(a0, MemOperand(sp, 5 * kPointerSize));
2687 // Argument 8: Indicate that this is a direct call from JavaScript.
2688 __ li(a0, Operand(1));
2689 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
2691 // Argument 7: Start (high end) of backtracking stack memory area.
2692 __ li(a0, Operand(address_of_regexp_stack_memory_address));
2693 __ lw(a0, MemOperand(a0, 0));
2694 __ li(a2, Operand(address_of_regexp_stack_memory_size));
2695 __ lw(a2, MemOperand(a2, 0));
2696 __ addu(a0, a0, a2);
2697 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
2699 // Argument 6: Set the number of capture registers to zero to force global
2700 // regexps to behave as non-global. This does not affect non-global regexps.
2701 __ mov(a0, zero_reg);
2702 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
2704 // Argument 5: static offsets vector buffer.
2706 ExternalReference::address_of_static_offsets_vector(isolate())));
2707 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
2709 // For arguments 4 and 3 get string length, calculate start of string data
2710 // and calculate the shift of the index (0 for ASCII and 1 for two byte).
2711 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2712 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
2713 // Load the length from the original subject string from the previous stack
2714 // frame. Therefore we have to use fp, which points exactly to two pointer
2715 // sizes below the previous sp. (Because creating a new stack frame pushes
2716 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2717 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2718 // If slice offset is not 0, load the length from the original sliced string.
2719 // Argument 4, a3: End of string data
2720 // Argument 3, a2: Start of string data
2721 // Prepare start and end index of the input.
2722 __ sllv(t1, t0, a3);
2723 __ addu(t0, t2, t1);
2724 __ sllv(t1, a1, a3);
2725 __ addu(a2, t0, t1);
2727 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
2728 __ sra(t2, t2, kSmiTagSize);
2729 __ sllv(t1, t2, a3);
2730 __ addu(a3, t0, t1);
2731 // Argument 2 (a1): Previous index.
2734 // Argument 1 (a0): Subject string.
2735 __ mov(a0, subject);
2737 // Locate the code entry and call it.
2738 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
2739 DirectCEntryStub stub(isolate());
2740 stub.GenerateCall(masm, t9);
2742 __ LeaveExitFrame(false, no_reg, true);
2745 // subject: subject string (callee saved)
2746 // regexp_data: RegExp data (callee saved)
2747 // last_match_info_elements: Last match info elements (callee saved)
2748 // Check the result.
2750 __ Branch(&success, eq, v0, Operand(1));
2751 // We expect exactly one result since we force the called regexp to behave
2754 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
2755 // If not exception it can only be retry. Handle that in the runtime system.
2756 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2757 // Result must now be exception. If there is no pending exception already a
2758 // stack overflow (on the backtrack stack) was detected in RegExp code but
2759 // haven't created the exception yet. Handle that in the runtime system.
2760 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2761 __ li(a1, Operand(isolate()->factory()->the_hole_value()));
2762 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2764 __ lw(v0, MemOperand(a2, 0));
2765 __ Branch(&runtime, eq, v0, Operand(a1));
2767 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
2769 // Check if the exception is a termination. If so, throw as uncatchable.
2770 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
2771 Label termination_exception;
2772 __ Branch(&termination_exception, eq, v0, Operand(a0));
2776 __ bind(&termination_exception);
2777 __ ThrowUncatchable(v0);
2780 // For failure and exception return null.
2781 __ li(v0, Operand(isolate()->factory()->null_value()));
2784 // Process the result from the native regexp code.
2787 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2788 // Calculate number of capture registers (number_of_captures + 1) * 2.
2789 // Multiplying by 2 comes for free since r1 is smi-tagged.
2790 STATIC_ASSERT(kSmiTag == 0);
2791 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2792 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
2794 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
2795 __ JumpIfSmi(a0, &runtime);
2796 __ GetObjectType(a0, a2, a2);
2797 __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
2798 // Check that the JSArray is in fast case.
2799 __ lw(last_match_info_elements,
2800 FieldMemOperand(a0, JSArray::kElementsOffset));
2801 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2802 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2803 __ Branch(&runtime, ne, a0, Operand(at));
2804 // Check that the last match info has space for the capture registers and the
2805 // additional information.
2807 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2808 __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
2809 __ sra(at, a0, kSmiTagSize);
2810 __ Branch(&runtime, gt, a2, Operand(at));
2812 // a1: number of capture registers
2813 // subject: subject string
2814 // Store the capture count.
2815 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
2816 __ sw(a2, FieldMemOperand(last_match_info_elements,
2817 RegExpImpl::kLastCaptureCountOffset));
2818 // Store last subject and last input.
2820 FieldMemOperand(last_match_info_elements,
2821 RegExpImpl::kLastSubjectOffset));
2822 __ mov(a2, subject);
2823 __ RecordWriteField(last_match_info_elements,
2824 RegExpImpl::kLastSubjectOffset,
2829 __ mov(subject, a2);
2831 FieldMemOperand(last_match_info_elements,
2832 RegExpImpl::kLastInputOffset));
2833 __ RecordWriteField(last_match_info_elements,
2834 RegExpImpl::kLastInputOffset,
2840 // Get the static offsets vector filled by the native regexp code.
2841 ExternalReference address_of_static_offsets_vector =
2842 ExternalReference::address_of_static_offsets_vector(isolate());
2843 __ li(a2, Operand(address_of_static_offsets_vector));
2845 // a1: number of capture registers
2846 // a2: offsets vector
2847 Label next_capture, done;
2848 // Capture register counter starts from number of capture registers and
2849 // counts down until wrapping after zero.
2851 last_match_info_elements,
2852 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2853 __ bind(&next_capture);
2854 __ Subu(a1, a1, Operand(1));
2855 __ Branch(&done, lt, a1, Operand(zero_reg));
2856 // Read the value from the static offsets vector buffer.
2857 __ lw(a3, MemOperand(a2, 0));
2858 __ addiu(a2, a2, kPointerSize);
2859 // Store the smi value in the last match info.
2860 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
2861 __ sw(a3, MemOperand(a0, 0));
2862 __ Branch(&next_capture, USE_DELAY_SLOT);
2863 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
2867 // Return last match info.
2868 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
2871 // Do the runtime call to execute the regexp.
2873 __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
2875 // Deferred code for string handling.
2876 // (6) Not a long external string? If yes, go to (8).
2877 __ bind(¬_seq_nor_cons);
2879 __ Branch(¬_long_external, gt, a1, Operand(kExternalStringTag));
2881 // (7) External string. Make it, offset-wise, look like a sequential string.
2882 __ bind(&external_string);
2883 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2884 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2885 if (FLAG_debug_code) {
2886 // Assert that we do not have a cons or slice (indirect strings) here.
2887 // Sequential strings have already been ruled out.
2888 __ And(at, a0, Operand(kIsIndirectStringMask));
2890 kExternalStringExpectedButNotFound,
2895 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2896 // Move the pointer so that offset-wise, it looks like a sequential string.
2897 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2900 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
2901 __ jmp(&seq_string); // Go to (5).
2903 // (8) Short external string or not a string? If yes, bail out to runtime.
2904 __ bind(¬_long_external);
2905 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
2906 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
2907 __ Branch(&runtime, ne, at, Operand(zero_reg));
2909 // (9) Sliced string. Replace subject with parent. Go to (4).
2910 // Load offset into t0 and replace subject string with parent.
2911 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2912 __ sra(t0, t0, kSmiTagSize);
2913 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2914 __ jmp(&check_underlying); // Go to (4).
2915 #endif // V8_INTERPRETED_REGEXP
2919 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2920 // Cache the called function in a feedback vector slot. Cache states
2921 // are uninitialized, monomorphic (indicated by a JSFunction), and
2923 // a0 : number of arguments to the construct function
2924 // a1 : the function to call
2925 // a2 : Feedback vector
2926 // a3 : slot in feedback vector (Smi)
2927 Label initialize, done, miss, megamorphic, not_array_function;
2929 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
2930 masm->isolate()->heap()->megamorphic_symbol());
2931 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
2932 masm->isolate()->heap()->uninitialized_symbol());
2934 // Load the cache state into t0.
2935 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2936 __ Addu(t0, a2, Operand(t0));
2937 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
2939 // A monomorphic cache hit or an already megamorphic state: invoke the
2940 // function without changing the state.
2941 __ Branch(&done, eq, t0, Operand(a1));
2943 if (!FLAG_pretenuring_call_new) {
2944 // If we came here, we need to see if we are the array function.
2945 // If we didn't have a matching function, and we didn't find the megamorph
2946 // sentinel, then we have in the slot either some other function or an
2947 // AllocationSite. Do a map check on the object in a3.
2948 __ lw(t1, FieldMemOperand(t0, 0));
2949 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2950 __ Branch(&miss, ne, t1, Operand(at));
2952 // Make sure the function is the Array() function
2953 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2954 __ Branch(&megamorphic, ne, a1, Operand(t0));
2960 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
2962 __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
2963 __ Branch(&initialize, eq, t0, Operand(at));
2964 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2965 // write-barrier is needed.
2966 __ bind(&megamorphic);
2967 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2968 __ Addu(t0, a2, Operand(t0));
2969 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
2970 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
2973 // An uninitialized cache is patched with the function.
2974 __ bind(&initialize);
2975 if (!FLAG_pretenuring_call_new) {
2976 // Make sure the function is the Array() function.
2977 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2978 __ Branch(¬_array_function, ne, a1, Operand(t0));
2980 // The target function is the Array constructor,
2981 // Create an AllocationSite if we don't already have it, store it in the
2984 FrameScope scope(masm, StackFrame::INTERNAL);
2985 const RegList kSavedRegs =
2991 // Arguments register must be smi-tagged to call out.
2993 __ MultiPush(kSavedRegs);
2995 CreateAllocationSiteStub create_stub(masm->isolate());
2996 __ CallStub(&create_stub);
2998 __ MultiPop(kSavedRegs);
3003 __ bind(¬_array_function);
3006 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
3007 __ Addu(t0, a2, Operand(t0));
3008 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3009 __ sw(a1, MemOperand(t0, 0));
3011 __ Push(t0, a2, a1);
3012 __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
3013 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
3020 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
3021 __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3022 __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
3024 // Do not transform the receiver for strict mode functions.
3025 int32_t strict_mode_function_mask =
3026 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3027 // Do not transform the receiver for native (Compilerhints already in a3).
3028 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
3029 __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
3030 __ Branch(cont, ne, at, Operand(zero_reg));
3034 static void EmitSlowCase(MacroAssembler* masm,
3036 Label* non_function) {
3037 // Check for function proxy.
3038 __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
3039 __ push(a1); // put proxy as additional argument
3040 __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
3041 __ mov(a2, zero_reg);
3042 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
3044 Handle<Code> adaptor =
3045 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3046 __ Jump(adaptor, RelocInfo::CODE_TARGET);
3049 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3050 // of the original receiver from the call site).
3051 __ bind(non_function);
3052 __ sw(a1, MemOperand(sp, argc * kPointerSize));
3053 __ li(a0, Operand(argc)); // Set up the number of arguments.
3054 __ mov(a2, zero_reg);
3055 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
3056 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3057 RelocInfo::CODE_TARGET);
3061 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
3062 // Wrap the receiver and patch it back onto the stack.
3063 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
3065 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
3068 __ Branch(USE_DELAY_SLOT, cont);
3069 __ sw(v0, MemOperand(sp, argc * kPointerSize));
3073 void CallFunctionStub::Generate(MacroAssembler* masm) {
3074 // a1 : the function to call
3075 Label slow, non_function, wrap, cont;
3077 if (NeedsChecks()) {
3078 // Check that the function is really a JavaScript function.
3079 // a1: pushed function (to be verified)
3080 __ JumpIfSmi(a1, &non_function);
3082 // Goto slow case if we do not have a function.
3083 __ GetObjectType(a1, t0, t0);
3084 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
3087 // Fast-case: Invoke the function now.
3088 // a1: pushed function
3090 ParameterCount actual(argc);
3092 if (CallAsMethod()) {
3093 if (NeedsChecks()) {
3094 EmitContinueIfStrictOrNative(masm, &cont);
3097 // Compute the receiver in sloppy mode.
3098 __ lw(a3, MemOperand(sp, argc * kPointerSize));
3100 if (NeedsChecks()) {
3101 __ JumpIfSmi(a3, &wrap);
3102 __ GetObjectType(a3, t0, t0);
3103 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
3111 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
3113 if (NeedsChecks()) {
3114 // Slow-case: Non-function called.
3116 EmitSlowCase(masm, argc, &non_function);
3119 if (CallAsMethod()) {
3121 // Wrap the receiver and patch it back onto the stack.
3122 EmitWrapCase(masm, argc, &cont);
3127 void CallConstructStub::Generate(MacroAssembler* masm) {
3128 // a0 : number of arguments
3129 // a1 : the function to call
3130 // a2 : feedback vector
3131 // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
3132 Label slow, non_function_call;
3134 // Check that the function is not a smi.
3135 __ JumpIfSmi(a1, &non_function_call);
3136 // Check that the function is a JSFunction.
3137 __ GetObjectType(a1, t0, t0);
3138 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
3140 if (RecordCallTarget()) {
3141 GenerateRecordCallTarget(masm);
3143 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
3144 __ Addu(t1, a2, at);
3145 if (FLAG_pretenuring_call_new) {
3146 // Put the AllocationSite from the feedback vector into a2.
3147 // By adding kPointerSize we encode that we know the AllocationSite
3148 // entry is at the feedback vector slot given by a3 + 1.
3149 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
3151 Label feedback_register_initialized;
3152 // Put the AllocationSite from the feedback vector into a2, or undefined.
3153 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
3154 __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
3155 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3156 __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
3157 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3158 __ bind(&feedback_register_initialized);
3161 __ AssertUndefinedOrAllocationSite(a2, t1);
3164 // Jump to the function-specific construct stub.
3165 Register jmp_reg = t0;
3166 __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3167 __ lw(jmp_reg, FieldMemOperand(jmp_reg,
3168 SharedFunctionInfo::kConstructStubOffset));
3169 __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
3172 // a0: number of arguments
3173 // a1: called object
3177 __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
3178 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3181 __ bind(&non_function_call);
3182 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3184 // Set expected number of arguments to zero (not changing r0).
3185 __ li(a2, Operand(0, RelocInfo::NONE32));
3186 __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3187 RelocInfo::CODE_TARGET);
3191 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
3192 __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3193 __ lw(vector, FieldMemOperand(vector,
3194 JSFunction::kSharedFunctionInfoOffset));
3195 __ lw(vector, FieldMemOperand(vector,
3196 SharedFunctionInfo::kFeedbackVectorOffset));
3200 void CallICStub::Generate(MacroAssembler* masm) {
3202 // r3 - slot id (Smi)
3203 Label extra_checks_or_miss, slow_start;
3204 Label slow, non_function, wrap, cont;
3205 Label have_js_function;
3206 int argc = state_.arg_count();
3207 ParameterCount actual(argc);
3209 EmitLoadTypeFeedbackVector(masm, a2);
3211 // The checks. First, does r1 match the recorded monomorphic target?
3212 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
3213 __ Addu(t0, a2, Operand(t0));
3214 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
3215 __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0));
3217 __ bind(&have_js_function);
3218 if (state_.CallAsMethod()) {
3219 EmitContinueIfStrictOrNative(masm, &cont);
3220 // Compute the receiver in sloppy mode.
3221 __ lw(a3, MemOperand(sp, argc * kPointerSize));
3223 __ JumpIfSmi(a3, &wrap);
3224 __ GetObjectType(a3, t0, t0);
3225 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
3230 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
3233 EmitSlowCase(masm, argc, &non_function);
3235 if (state_.CallAsMethod()) {
3237 EmitWrapCase(masm, argc, &cont);
3240 __ bind(&extra_checks_or_miss);
3243 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
3244 __ Branch(&slow_start, eq, t0, Operand(at));
3245 __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
3246 __ Branch(&miss, eq, t0, Operand(at));
3248 if (!FLAG_trace_ic) {
3249 // We are going megamorphic, and we don't want to visit the runtime.
3250 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
3251 __ Addu(t0, a2, Operand(t0));
3252 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
3253 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
3254 __ Branch(&slow_start);
3257 // We are here because tracing is on or we are going monomorphic.
3262 __ bind(&slow_start);
3263 // Check that the function is really a JavaScript function.
3264 // r1: pushed function (to be verified)
3265 __ JumpIfSmi(a1, &non_function);
3267 // Goto slow case if we do not have a function.
3268 __ GetObjectType(a1, t0, t0);
3269 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
3270 __ Branch(&have_js_function);
3274 void CallICStub::GenerateMiss(MacroAssembler* masm) {
3275 // Get the receiver of the function from the stack; 1 ~ return address.
3276 __ lw(t0, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
3279 FrameScope scope(masm, StackFrame::INTERNAL);
3281 // Push the receiver and the function and feedback info.
3282 __ Push(t0, a1, a2, a3);
3285 ExternalReference miss = ExternalReference(IC_Utility(IC::kCallIC_Miss),
3287 __ CallExternalReference(miss, 4);
3289 // Move result to a1 and exit the internal frame.
3295 // StringCharCodeAtGenerator.
3296 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3299 Label got_char_code;
3300 Label sliced_string;
3302 ASSERT(!t0.is(index_));
3303 ASSERT(!t0.is(result_));
3304 ASSERT(!t0.is(object_));
3306 // If the receiver is a smi trigger the non-string case.
3307 __ JumpIfSmi(object_, receiver_not_string_);
3309 // Fetch the instance type of the receiver into result register.
3310 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3311 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3312 // If the receiver is not a string trigger the non-string case.
3313 __ And(t0, result_, Operand(kIsNotStringMask));
3314 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
3316 // If the index is non-smi trigger the non-smi case.
3317 __ JumpIfNotSmi(index_, &index_not_smi_);
3319 __ bind(&got_smi_index_);
3321 // Check for index out of range.
3322 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
3323 __ Branch(index_out_of_range_, ls, t0, Operand(index_));
3325 __ sra(index_, index_, kSmiTagSize);
3327 StringCharLoadGenerator::Generate(masm,
3333 __ sll(result_, result_, kSmiTagSize);
3338 void StringCharCodeAtGenerator::GenerateSlow(
3339 MacroAssembler* masm,
3340 const RuntimeCallHelper& call_helper) {
3341 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3343 // Index is not a smi.
3344 __ bind(&index_not_smi_);
3345 // If index is a heap number, try converting it to an integer.
3348 Heap::kHeapNumberMapRootIndex,
3351 call_helper.BeforeCall(masm);
3352 // Consumed by runtime conversion function:
3353 __ Push(object_, index_);
3354 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3355 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3357 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3358 // NumberToSmi discards numbers that are not exact integers.
3359 __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
3362 // Save the conversion result before the pop instructions below
3363 // have a chance to overwrite it.
3365 __ Move(index_, v0);
3367 // Reload the instance type.
3368 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3369 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3370 call_helper.AfterCall(masm);
3371 // If index is still not a smi, it must be out of range.
3372 __ JumpIfNotSmi(index_, index_out_of_range_);
3373 // Otherwise, return to the fast path.
3374 __ Branch(&got_smi_index_);
3376 // Call runtime. We get here when the receiver is a string and the
3377 // index is a number, but the code of getting the actual character
3378 // is too complex (e.g., when the string needs to be flattened).
3379 __ bind(&call_runtime_);
3380 call_helper.BeforeCall(masm);
3381 __ sll(index_, index_, kSmiTagSize);
3382 __ Push(object_, index_);
3383 __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
3385 __ Move(result_, v0);
3387 call_helper.AfterCall(masm);
3390 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3394 // -------------------------------------------------------------------------
3395 // StringCharFromCodeGenerator
3397 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3398 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3400 ASSERT(!t0.is(result_));
3401 ASSERT(!t0.is(code_));
3403 STATIC_ASSERT(kSmiTag == 0);
3404 STATIC_ASSERT(kSmiShiftSize == 0);
3405 ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
3408 Operand(kSmiTagMask |
3409 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3410 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
3412 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3413 // At this point code register contains smi tagged ASCII char code.
3414 STATIC_ASSERT(kSmiTag == 0);
3415 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
3416 __ Addu(result_, result_, t0);
3417 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3418 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3419 __ Branch(&slow_case_, eq, result_, Operand(t0));
3424 void StringCharFromCodeGenerator::GenerateSlow(
3425 MacroAssembler* masm,
3426 const RuntimeCallHelper& call_helper) {
3427 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3429 __ bind(&slow_case_);
3430 call_helper.BeforeCall(masm);
3432 __ CallRuntime(Runtime::kCharFromCode, 1);
3433 __ Move(result_, v0);
3435 call_helper.AfterCall(masm);
3438 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3442 enum CopyCharactersFlags {
3444 DEST_ALWAYS_ALIGNED = 2
3448 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
3458 bool ascii = (flags & COPY_ASCII) != 0;
3459 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
3461 if (dest_always_aligned && FLAG_debug_code) {
3462 // Check that destination is actually word aligned if the flag says
3464 __ And(scratch4, dest, Operand(kPointerAlignmentMask));
3466 kDestinationOfCopyNotAligned,
3471 const int kReadAlignment = 4;
3472 const int kReadAlignmentMask = kReadAlignment - 1;
3473 // Ensure that reading an entire aligned word containing the last character
3474 // of a string will not read outside the allocated area (because we pad up
3475 // to kObjectAlignment).
3476 STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
3477 // Assumes word reads and writes are little endian.
3478 // Nothing to do for zero characters.
3482 __ addu(count, count, count);
3484 __ Branch(&done, eq, count, Operand(zero_reg));
3487 // Must copy at least eight bytes, otherwise just do it one byte at a time.
3488 __ Subu(scratch1, count, Operand(8));
3489 __ Addu(count, dest, Operand(count));
3490 Register limit = count; // Read until src equals this.
3491 __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
3493 if (!dest_always_aligned) {
3494 // Align dest by byte copying. Copies between zero and three bytes.
3495 __ And(scratch4, dest, Operand(kReadAlignmentMask));
3497 __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
3499 __ bind(&aligned_loop);
3500 __ lbu(scratch1, MemOperand(src));
3501 __ addiu(src, src, 1);
3502 __ sb(scratch1, MemOperand(dest));
3503 __ addiu(dest, dest, 1);
3504 __ addiu(scratch4, scratch4, 1);
3505 __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
3506 __ bind(&dest_aligned);
3511 __ And(scratch4, src, Operand(kReadAlignmentMask));
3512 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
3514 // Loop for src/dst that are not aligned the same way.
3515 // This loop uses lwl and lwr instructions. These instructions
3516 // depend on the endianness, and the implementation assumes little-endian.
3520 if (kArchEndian == kBig) {
3521 __ lwl(scratch1, MemOperand(src));
3522 __ Addu(src, src, Operand(kReadAlignment));
3523 __ lwr(scratch1, MemOperand(src, -1));
3525 __ lwr(scratch1, MemOperand(src));
3526 __ Addu(src, src, Operand(kReadAlignment));
3527 __ lwl(scratch1, MemOperand(src, -1));
3529 __ sw(scratch1, MemOperand(dest));
3530 __ Addu(dest, dest, Operand(kReadAlignment));
3531 __ Subu(scratch2, limit, dest);
3532 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
3535 __ Branch(&byte_loop);
3538 // Copy words from src to dest, until less than four bytes left.
3539 // Both src and dest are word aligned.
3540 __ bind(&simple_loop);
3544 __ lw(scratch1, MemOperand(src));
3545 __ Addu(src, src, Operand(kReadAlignment));
3546 __ sw(scratch1, MemOperand(dest));
3547 __ Addu(dest, dest, Operand(kReadAlignment));
3548 __ Subu(scratch2, limit, dest);
3549 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
3552 // Copy bytes from src to dest until dest hits limit.
3553 __ bind(&byte_loop);
3554 // Test if dest has already reached the limit.
3555 __ Branch(&done, ge, dest, Operand(limit));
3556 __ lbu(scratch1, MemOperand(src));
3557 __ addiu(src, src, 1);
3558 __ sb(scratch1, MemOperand(dest));
3559 __ addiu(dest, dest, 1);
3560 __ Branch(&byte_loop);
3566 void StringHelper::GenerateHashInit(MacroAssembler* masm,
3568 Register character) {
3569 // hash = seed + character + ((seed + character) << 10);
3570 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
3571 // Untag smi seed and add the character.
3573 __ addu(hash, hash, character);
3574 __ sll(at, hash, 10);
3575 __ addu(hash, hash, at);
3576 // hash ^= hash >> 6;
3577 __ srl(at, hash, 6);
3578 __ xor_(hash, hash, at);
3582 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3584 Register character) {
3585 // hash += character;
3586 __ addu(hash, hash, character);
3587 // hash += hash << 10;
3588 __ sll(at, hash, 10);
3589 __ addu(hash, hash, at);
3590 // hash ^= hash >> 6;
3591 __ srl(at, hash, 6);
3592 __ xor_(hash, hash, at);
3596 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
3598 // hash += hash << 3;
3599 __ sll(at, hash, 3);
3600 __ addu(hash, hash, at);
3601 // hash ^= hash >> 11;
3602 __ srl(at, hash, 11);
3603 __ xor_(hash, hash, at);
3604 // hash += hash << 15;
3605 __ sll(at, hash, 15);
3606 __ addu(hash, hash, at);
3608 __ li(at, Operand(String::kHashBitMask));
3609 __ and_(hash, hash, at);
3611 // if (hash == 0) hash = 27;
3612 __ ori(at, zero_reg, StringHasher::kZeroHash);
3613 __ Movz(hash, at, hash);
3617 void SubStringStub::Generate(MacroAssembler* masm) {
3619 // Stack frame on entry.
3620 // ra: return address
3625 // This stub is called from the native-call %_SubString(...), so
3626 // nothing can be assumed about the arguments. It is tested that:
3627 // "string" is a sequential string,
3628 // both "from" and "to" are smis, and
3629 // 0 <= from <= to <= string.length.
3630 // If any of these assumptions fail, we call the runtime system.
3632 const int kToOffset = 0 * kPointerSize;
3633 const int kFromOffset = 1 * kPointerSize;
3634 const int kStringOffset = 2 * kPointerSize;
3636 __ lw(a2, MemOperand(sp, kToOffset));
3637 __ lw(a3, MemOperand(sp, kFromOffset));
3638 STATIC_ASSERT(kFromOffset == kToOffset + 4);
3639 STATIC_ASSERT(kSmiTag == 0);
3640 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3642 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
3643 // safe in this case.
3644 __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
3645 __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
3646 // Both a2 and a3 are untagged integers.
3648 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
3650 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
3651 __ Subu(a2, a2, a3);
3653 // Make sure first argument is a string.
3654 __ lw(v0, MemOperand(sp, kStringOffset));
3655 __ JumpIfSmi(v0, &runtime);
3656 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
3657 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3658 __ And(t0, a1, Operand(kIsNotStringMask));
3660 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3663 __ Branch(&single_char, eq, a2, Operand(1));
3665 // Short-cut for the case of trivial substring.
3667 // v0: original string
3668 // a2: result string length
3669 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
3671 // Return original string.
3672 __ Branch(&return_v0, eq, a2, Operand(t0));
3673 // Longer than original string's length or negative: unsafe arguments.
3674 __ Branch(&runtime, hi, a2, Operand(t0));
3675 // Shorter than original string's length: an actual substring.
3677 // Deal with different string types: update the index if necessary
3678 // and put the underlying string into t1.
3679 // v0: original string
3680 // a1: instance type
3682 // a3: from index (untagged)
3683 Label underlying_unpacked, sliced_string, seq_or_external_string;
3684 // If the string is not indirect, it can only be sequential or external.
3685 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3686 STATIC_ASSERT(kIsIndirectStringMask != 0);
3687 __ And(t0, a1, Operand(kIsIndirectStringMask));
3688 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
3689 // t0 is used as a scratch register and can be overwritten in either case.
3690 __ And(t0, a1, Operand(kSlicedNotConsMask));
3691 __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
3692 // Cons string. Check whether it is flat, then fetch first part.
3693 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
3694 __ LoadRoot(t0, Heap::kempty_stringRootIndex);
3695 __ Branch(&runtime, ne, t1, Operand(t0));
3696 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
3697 // Update instance type.
3698 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3699 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3700 __ jmp(&underlying_unpacked);
3702 __ bind(&sliced_string);
3703 // Sliced string. Fetch parent and correct start index by offset.
3704 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3705 __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3706 __ sra(t0, t0, 1); // Add offset to index.
3707 __ Addu(a3, a3, t0);
3708 // Update instance type.
3709 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3710 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3711 __ jmp(&underlying_unpacked);
3713 __ bind(&seq_or_external_string);
3714 // Sequential or external string. Just move string to the expected register.
3717 __ bind(&underlying_unpacked);
3719 if (FLAG_string_slices) {
3721 // t1: underlying subject string
3722 // a1: instance type of underlying subject string
3724 // a3: adjusted start index (untagged)
3725 // Short slice. Copy instead of slicing.
3726 __ Branch(©_routine, lt, a2, Operand(SlicedString::kMinLength));
3727 // Allocate new sliced string. At this point we do not reload the instance
3728 // type including the string encoding because we simply rely on the info
3729 // provided by the original string. It does not matter if the original
3730 // string's encoding is wrong because we always have to recheck encoding of
3731 // the newly created string's parent anyways due to externalized strings.
3732 Label two_byte_slice, set_slice_header;
3733 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3734 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3735 __ And(t0, a1, Operand(kStringEncodingMask));
3736 __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
3737 __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
3738 __ jmp(&set_slice_header);
3739 __ bind(&two_byte_slice);
3740 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
3741 __ bind(&set_slice_header);
3743 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3744 __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3747 __ bind(©_routine);
3750 // t1: underlying subject string
3751 // a1: instance type of underlying subject string
3753 // a3: adjusted start index (untagged)
3754 Label two_byte_sequential, sequential_string, allocate_result;
3755 STATIC_ASSERT(kExternalStringTag != 0);
3756 STATIC_ASSERT(kSeqStringTag == 0);
3757 __ And(t0, a1, Operand(kExternalStringTag));
3758 __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
3760 // Handle external string.
3761 // Rule out short external strings.
3762 STATIC_CHECK(kShortExternalStringTag != 0);
3763 __ And(t0, a1, Operand(kShortExternalStringTag));
3764 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3765 __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
3766 // t1 already points to the first character of underlying string.
3767 __ jmp(&allocate_result);
3769 __ bind(&sequential_string);
3770 // Locate first character of underlying subject string.
3771 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3772 __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3774 __ bind(&allocate_result);
3775 // Sequential acii string. Allocate the result.
3776 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3777 __ And(t0, a1, Operand(kStringEncodingMask));
3778 __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
3780 // Allocate and copy the resulting ASCII string.
3781 __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
3783 // Locate first character of substring to copy.
3784 __ Addu(t1, t1, a3);
3786 // Locate first character of result.
3787 __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3789 // v0: result string
3790 // a1: first character of result string
3791 // a2: result string length
3792 // t1: first character of substring to copy
3793 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3794 StringHelper::GenerateCopyCharactersLong(
3795 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
3798 // Allocate and copy the resulting two-byte string.
3799 __ bind(&two_byte_sequential);
3800 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
3802 // Locate first character of substring to copy.
3803 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3805 __ Addu(t1, t1, t0);
3806 // Locate first character of result.
3807 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3809 // v0: result string.
3810 // a1: first character of result.
3811 // a2: result length.
3812 // t1: first character of substring to copy.
3813 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3814 StringHelper::GenerateCopyCharactersLong(
3815 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
3817 __ bind(&return_v0);
3818 Counters* counters = isolate()->counters();
3819 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
3822 // Just jump to runtime to create the sub string.
3824 __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
3826 __ bind(&single_char);
3827 // v0: original string
3828 // a1: instance type
3830 // a3: from index (untagged)
3832 StringCharAtGenerator generator(
3833 v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3834 generator.GenerateFast(masm);
3836 generator.SkipSlow(masm, &runtime);
3840 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
3845 Register scratch3) {
3846 Register length = scratch1;
3849 Label strings_not_equal, check_zero_length;
3850 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
3851 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3852 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
3853 __ bind(&strings_not_equal);
3854 ASSERT(is_int16(NOT_EQUAL));
3855 __ Ret(USE_DELAY_SLOT);
3856 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
3858 // Check if the length is zero.
3859 Label compare_chars;
3860 __ bind(&check_zero_length);
3861 STATIC_ASSERT(kSmiTag == 0);
3862 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
3863 ASSERT(is_int16(EQUAL));
3864 __ Ret(USE_DELAY_SLOT);
3865 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3867 // Compare characters.
3868 __ bind(&compare_chars);
3870 GenerateAsciiCharsCompareLoop(masm,
3871 left, right, length, scratch2, scratch3, v0,
3872 &strings_not_equal);
3874 // Characters are equal.
3875 __ Ret(USE_DELAY_SLOT);
3876 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3880 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
3886 Register scratch4) {
3887 Label result_not_equal, compare_lengths;
3888 // Find minimum length and length difference.
3889 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
3890 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3891 __ Subu(scratch3, scratch1, Operand(scratch2));
3892 Register length_delta = scratch3;
3893 __ slt(scratch4, scratch2, scratch1);
3894 __ Movn(scratch1, scratch2, scratch4);
3895 Register min_length = scratch1;
3896 STATIC_ASSERT(kSmiTag == 0);
3897 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
3900 GenerateAsciiCharsCompareLoop(masm,
3901 left, right, min_length, scratch2, scratch4, v0,
3904 // Compare lengths - strings up to min-length are equal.
3905 __ bind(&compare_lengths);
3906 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3907 // Use length_delta as result if it's zero.
3908 __ mov(scratch2, length_delta);
3909 __ mov(scratch4, zero_reg);
3910 __ mov(v0, zero_reg);
3912 __ bind(&result_not_equal);
3913 // Conditionally update the result based either on length_delta or
3914 // the last comparion performed in the loop above.
3916 __ Branch(&ret, eq, scratch2, Operand(scratch4));
3917 __ li(v0, Operand(Smi::FromInt(GREATER)));
3918 __ Branch(&ret, gt, scratch2, Operand(scratch4));
3919 __ li(v0, Operand(Smi::FromInt(LESS)));
3925 void StringCompareStub::GenerateAsciiCharsCompareLoop(
3926 MacroAssembler* masm,
3933 Label* chars_not_equal) {
3934 // Change index to run from -length to -1 by adding length to string
3935 // start. This means that loop ends when index reaches zero, which
3936 // doesn't need an additional compare.
3937 __ SmiUntag(length);
3938 __ Addu(scratch1, length,
3939 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3940 __ Addu(left, left, Operand(scratch1));
3941 __ Addu(right, right, Operand(scratch1));
3942 __ Subu(length, zero_reg, length);
3943 Register index = length; // index = -length;
3949 __ Addu(scratch3, left, index);
3950 __ lbu(scratch1, MemOperand(scratch3));
3951 __ Addu(scratch3, right, index);
3952 __ lbu(scratch2, MemOperand(scratch3));
3953 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
3954 __ Addu(index, index, 1);
3955 __ Branch(&loop, ne, index, Operand(zero_reg));
3959 void StringCompareStub::Generate(MacroAssembler* masm) {
3962 Counters* counters = isolate()->counters();
3964 // Stack frame on entry.
3965 // sp[0]: right string
3966 // sp[4]: left string
3967 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
3968 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
3971 __ Branch(¬_same, ne, a0, Operand(a1));
3972 STATIC_ASSERT(EQUAL == 0);
3973 STATIC_ASSERT(kSmiTag == 0);
3974 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3975 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
3980 // Check that both objects are sequential ASCII strings.
3981 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
3983 // Compare flat ASCII strings natively. Remove arguments from stack first.
3984 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
3985 __ Addu(sp, sp, Operand(2 * kPointerSize));
3986 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
3989 __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
3993 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3994 // ----------- S t a t e -------------
3997 // -- ra : return address
3998 // -----------------------------------
4000 // Load a2 with the allocation site. We stick an undefined dummy value here
4001 // and replace it with the real allocation site later when we instantiate this
4002 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
4003 __ li(a2, handle(isolate()->heap()->undefined_value()));
4005 // Make sure that we actually patched the allocation site.
4006 if (FLAG_debug_code) {
4007 __ And(at, a2, Operand(kSmiTagMask));
4008 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
4009 __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
4010 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
4011 __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
4014 // Tail call into the stub that handles binary operations with allocation
4016 BinaryOpWithAllocationSiteStub stub(isolate(), state_);
4017 __ TailCallStub(&stub);
4021 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
4022 ASSERT(state_ == CompareIC::SMI);
4025 __ JumpIfNotSmi(a2, &miss);
4027 if (GetCondition() == eq) {
4028 // For equality we do not care about the sign of the result.
4029 __ Ret(USE_DELAY_SLOT);
4030 __ Subu(v0, a0, a1);
4032 // Untag before subtracting to avoid handling overflow.
4035 __ Ret(USE_DELAY_SLOT);
4036 __ Subu(v0, a1, a0);
4044 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
4045 ASSERT(state_ == CompareIC::NUMBER);
4048 Label unordered, maybe_undefined1, maybe_undefined2;
4051 if (left_ == CompareIC::SMI) {
4052 __ JumpIfNotSmi(a1, &miss);
4054 if (right_ == CompareIC::SMI) {
4055 __ JumpIfNotSmi(a0, &miss);
4058 // Inlining the double comparison and falling back to the general compare
4059 // stub if NaN is involved.
4060 // Load left and right operand.
4061 Label done, left, left_smi, right_smi;
4062 __ JumpIfSmi(a0, &right_smi);
4063 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
4065 __ Subu(a2, a0, Operand(kHeapObjectTag));
4066 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
4068 __ bind(&right_smi);
4069 __ SmiUntag(a2, a0); // Can't clobber a0 yet.
4070 FPURegister single_scratch = f6;
4071 __ mtc1(a2, single_scratch);
4072 __ cvt_d_w(f2, single_scratch);
4075 __ JumpIfSmi(a1, &left_smi);
4076 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
4078 __ Subu(a2, a1, Operand(kHeapObjectTag));
4079 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
4082 __ SmiUntag(a2, a1); // Can't clobber a1 yet.
4083 single_scratch = f8;
4084 __ mtc1(a2, single_scratch);
4085 __ cvt_d_w(f0, single_scratch);
4089 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
4090 Label fpu_eq, fpu_lt;
4091 // Test if equal, and also handle the unordered/NaN case.
4092 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
4094 // Test if less (unordered case is already handled).
4095 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
4097 // Otherwise it's greater, so just fall thru, and return.
4098 ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
4099 __ Ret(USE_DELAY_SLOT);
4100 __ li(v0, Operand(GREATER));
4103 __ Ret(USE_DELAY_SLOT);
4104 __ li(v0, Operand(EQUAL));
4107 __ Ret(USE_DELAY_SLOT);
4108 __ li(v0, Operand(LESS));
4110 __ bind(&unordered);
4111 __ bind(&generic_stub);
4112 ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
4113 CompareIC::GENERIC);
4114 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4116 __ bind(&maybe_undefined1);
4117 if (Token::IsOrderedRelationalCompareOp(op_)) {
4118 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4119 __ Branch(&miss, ne, a0, Operand(at));
4120 __ JumpIfSmi(a1, &unordered);
4121 __ GetObjectType(a1, a2, a2);
4122 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
4126 __ bind(&maybe_undefined2);
4127 if (Token::IsOrderedRelationalCompareOp(op_)) {
4128 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4129 __ Branch(&unordered, eq, a1, Operand(at));
4137 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
4138 ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
4141 // Registers containing left and right operands respectively.
4143 Register right = a0;
4147 // Check that both operands are heap objects.
4148 __ JumpIfEitherSmi(left, right, &miss);
4150 // Check that both operands are internalized strings.
4151 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4152 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4153 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4154 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4155 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4156 __ Or(tmp1, tmp1, Operand(tmp2));
4157 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4158 __ Branch(&miss, ne, at, Operand(zero_reg));
4160 // Make sure a0 is non-zero. At this point input operands are
4161 // guaranteed to be non-zero.
4162 ASSERT(right.is(a0));
4163 STATIC_ASSERT(EQUAL == 0);
4164 STATIC_ASSERT(kSmiTag == 0);
4166 // Internalized strings are compared by identity.
4167 __ Ret(ne, left, Operand(right));
4168 ASSERT(is_int16(EQUAL));
4169 __ Ret(USE_DELAY_SLOT);
4170 __ li(v0, Operand(Smi::FromInt(EQUAL)));
4177 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
4178 ASSERT(state_ == CompareIC::UNIQUE_NAME);
4179 ASSERT(GetCondition() == eq);
4182 // Registers containing left and right operands respectively.
4184 Register right = a0;
4188 // Check that both operands are heap objects.
4189 __ JumpIfEitherSmi(left, right, &miss);
4191 // Check that both operands are unique names. This leaves the instance
4192 // types loaded in tmp1 and tmp2.
4193 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4194 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4195 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4196 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4198 __ JumpIfNotUniqueName(tmp1, &miss);
4199 __ JumpIfNotUniqueName(tmp2, &miss);
4204 // Unique names are compared by identity.
4206 __ Branch(&done, ne, left, Operand(right));
4207 // Make sure a0 is non-zero. At this point input operands are
4208 // guaranteed to be non-zero.
4209 ASSERT(right.is(a0));
4210 STATIC_ASSERT(EQUAL == 0);
4211 STATIC_ASSERT(kSmiTag == 0);
4212 __ li(v0, Operand(Smi::FromInt(EQUAL)));
4221 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
4222 ASSERT(state_ == CompareIC::STRING);
4225 bool equality = Token::IsEqualityOp(op_);
4227 // Registers containing left and right operands respectively.
4229 Register right = a0;
4236 // Check that both operands are heap objects.
4237 __ JumpIfEitherSmi(left, right, &miss);
4239 // Check that both operands are strings. This leaves the instance
4240 // types loaded in tmp1 and tmp2.
4241 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4242 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4243 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4244 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4245 STATIC_ASSERT(kNotStringTag != 0);
4246 __ Or(tmp3, tmp1, tmp2);
4247 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
4248 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
4250 // Fast check for identical strings.
4251 Label left_ne_right;
4252 STATIC_ASSERT(EQUAL == 0);
4253 STATIC_ASSERT(kSmiTag == 0);
4254 __ Branch(&left_ne_right, ne, left, Operand(right));
4255 __ Ret(USE_DELAY_SLOT);
4256 __ mov(v0, zero_reg); // In the delay slot.
4257 __ bind(&left_ne_right);
4259 // Handle not identical strings.
4261 // Check that both strings are internalized strings. If they are, we're done
4262 // because we already know they are not identical. We know they are both
4265 ASSERT(GetCondition() == eq);
4266 STATIC_ASSERT(kInternalizedTag == 0);
4267 __ Or(tmp3, tmp1, Operand(tmp2));
4268 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
4270 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
4271 // Make sure a0 is non-zero. At this point input operands are
4272 // guaranteed to be non-zero.
4273 ASSERT(right.is(a0));
4274 __ Ret(USE_DELAY_SLOT);
4275 __ mov(v0, a0); // In the delay slot.
4276 __ bind(&is_symbol);
4279 // Check that both strings are sequential ASCII.
4281 __ JumpIfBothInstanceTypesAreNotSequentialAscii(
4282 tmp1, tmp2, tmp3, tmp4, &runtime);
4284 // Compare flat ASCII strings. Returns when done.
4286 StringCompareStub::GenerateFlatAsciiStringEquals(
4287 masm, left, right, tmp1, tmp2, tmp3);
4289 StringCompareStub::GenerateCompareFlatAsciiStrings(
4290 masm, left, right, tmp1, tmp2, tmp3, tmp4);
4293 // Handle more complex cases in runtime.
4295 __ Push(left, right);
4297 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
4299 __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
4307 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
4308 ASSERT(state_ == CompareIC::OBJECT);
4310 __ And(a2, a1, Operand(a0));
4311 __ JumpIfSmi(a2, &miss);
4313 __ GetObjectType(a0, a2, a2);
4314 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
4315 __ GetObjectType(a1, a2, a2);
4316 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
4318 ASSERT(GetCondition() == eq);
4319 __ Ret(USE_DELAY_SLOT);
4320 __ subu(v0, a0, a1);
4327 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
4330 __ JumpIfSmi(a2, &miss);
4331 __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
4332 __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
4333 __ Branch(&miss, ne, a2, Operand(known_map_));
4334 __ Branch(&miss, ne, a3, Operand(known_map_));
4336 __ Ret(USE_DELAY_SLOT);
4337 __ subu(v0, a0, a1);
4344 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
4346 // Call the runtime system in a fresh internal frame.
4347 ExternalReference miss =
4348 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
4349 FrameScope scope(masm, StackFrame::INTERNAL);
4351 __ Push(ra, a1, a0);
4352 __ li(t0, Operand(Smi::FromInt(op_)));
4353 __ addiu(sp, sp, -kPointerSize);
4354 __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
4355 __ sw(t0, MemOperand(sp)); // In the delay slot.
4356 // Compute the entry point of the rewritten stub.
4357 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
4358 // Restore registers.
4365 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4366 // Make place for arguments to fit C calling convention. Most of the callers
4367 // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
4368 // so they handle stack restoring and we don't have to do that here.
4369 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
4370 // kCArgsSlotsSize stack space after the call.
4371 __ Subu(sp, sp, Operand(kCArgsSlotsSize));
4372 // Place the return address on the stack, making the call
4373 // GC safe. The RegExp backend also relies on this.
4374 __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
4375 __ Call(t9); // Call the C++ function.
4376 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
4378 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
4379 // In case of an error the return address may point to a memory area
4380 // filled with kZapValue by the GC.
4381 // Dereference the address and check for this.
4382 __ lw(t0, MemOperand(t9));
4383 __ Assert(ne, kReceivedInvalidReturnAddress, t0,
4384 Operand(reinterpret_cast<uint32_t>(kZapValue)));
4390 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4393 reinterpret_cast<intptr_t>(GetCode().location());
4394 __ Move(t9, target);
4395 __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
4400 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4404 Register properties,
4406 Register scratch0) {
4407 ASSERT(name->IsUniqueName());
4408 // If names of slots in range from 1 to kProbes - 1 for the hash value are
4409 // not equal to the name and kProbes-th slot is not used (its name is the
4410 // undefined value), it guarantees the hash table doesn't contain the
4411 // property. It's true even if some slots represent deleted properties
4412 // (their names are the hole value).
4413 for (int i = 0; i < kInlinedProbes; i++) {
4414 // scratch0 points to properties hash.
4415 // Compute the masked index: (hash + i + i * i) & mask.
4416 Register index = scratch0;
4417 // Capacity is smi 2^n.
4418 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
4419 __ Subu(index, index, Operand(1));
4420 __ And(index, index, Operand(
4421 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
4423 // Scale the index by multiplying by the entry size.
4424 ASSERT(NameDictionary::kEntrySize == 3);
4425 __ sll(at, index, 1);
4426 __ Addu(index, index, at);
4428 Register entity_name = scratch0;
4429 // Having undefined at this place means the name is not contained.
4430 ASSERT_EQ(kSmiTagSize, 1);
4431 Register tmp = properties;
4432 __ sll(scratch0, index, 1);
4433 __ Addu(tmp, properties, scratch0);
4434 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4436 ASSERT(!tmp.is(entity_name));
4437 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
4438 __ Branch(done, eq, entity_name, Operand(tmp));
4440 // Load the hole ready for use below:
4441 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
4443 // Stop if found the property.
4444 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
4447 __ Branch(&good, eq, entity_name, Operand(tmp));
4449 // Check if the entry name is not a unique name.
4450 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4452 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4453 __ JumpIfNotUniqueName(entity_name, miss);
4456 // Restore the properties.
4458 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4461 const int spill_mask =
4462 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
4463 a2.bit() | a1.bit() | a0.bit() | v0.bit());
4465 __ MultiPush(spill_mask);
4466 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4467 __ li(a1, Operand(Handle<Name>(name)));
4468 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
4471 __ MultiPop(spill_mask);
4473 __ Branch(done, eq, at, Operand(zero_reg));
4474 __ Branch(miss, ne, at, Operand(zero_reg));
4478 // Probe the name dictionary in the |elements| register. Jump to the
4479 // |done| label if a property with the given name is found. Jump to
4480 // the |miss| label otherwise.
4481 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
4482 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4488 Register scratch2) {
4489 ASSERT(!elements.is(scratch1));
4490 ASSERT(!elements.is(scratch2));
4491 ASSERT(!name.is(scratch1));
4492 ASSERT(!name.is(scratch2));
4494 __ AssertName(name);
4496 // Compute the capacity mask.
4497 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
4498 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
4499 __ Subu(scratch1, scratch1, Operand(1));
4501 // Generate an unrolled loop that performs a few probes before
4502 // giving up. Measurements done on Gmail indicate that 2 probes
4503 // cover ~93% of loads from dictionaries.
4504 for (int i = 0; i < kInlinedProbes; i++) {
4505 // Compute the masked index: (hash + i + i * i) & mask.
4506 __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4508 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4509 // the hash in a separate instruction. The value hash + i + i * i is right
4510 // shifted in the following and instruction.
4511 ASSERT(NameDictionary::GetProbeOffset(i) <
4512 1 << (32 - Name::kHashFieldOffset));
4513 __ Addu(scratch2, scratch2, Operand(
4514 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4516 __ srl(scratch2, scratch2, Name::kHashShift);
4517 __ And(scratch2, scratch1, scratch2);
4519 // Scale the index by multiplying by the element size.
4520 ASSERT(NameDictionary::kEntrySize == 3);
4521 // scratch2 = scratch2 * 3.
4523 __ sll(at, scratch2, 1);
4524 __ Addu(scratch2, scratch2, at);
4526 // Check if the key is identical to the name.
4527 __ sll(at, scratch2, 2);
4528 __ Addu(scratch2, elements, at);
4529 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
4530 __ Branch(done, eq, name, Operand(at));
4533 const int spill_mask =
4534 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
4535 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
4536 ~(scratch1.bit() | scratch2.bit());
4538 __ MultiPush(spill_mask);
4540 ASSERT(!elements.is(a1));
4542 __ Move(a0, elements);
4544 __ Move(a0, elements);
4547 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4549 __ mov(scratch2, a2);
4551 __ MultiPop(spill_mask);
4553 __ Branch(done, ne, at, Operand(zero_reg));
4554 __ Branch(miss, eq, at, Operand(zero_reg));
4558 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4559 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4560 // we cannot call anything that could cause a GC from this stub.
4562 // result: NameDictionary to probe
4564 // dictionary: NameDictionary to probe.
4565 // index: will hold an index of entry if lookup is successful.
4566 // might alias with result_.
4568 // result_ is zero if lookup failed, non zero otherwise.
4570 Register result = v0;
4571 Register dictionary = a0;
4573 Register index = a2;
4576 Register undefined = t1;
4577 Register entry_key = t2;
4579 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4581 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
4582 __ sra(mask, mask, kSmiTagSize);
4583 __ Subu(mask, mask, Operand(1));
4585 __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4587 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4589 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4590 // Compute the masked index: (hash + i + i * i) & mask.
4591 // Capacity is smi 2^n.
4593 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4594 // the hash in a separate instruction. The value hash + i + i * i is right
4595 // shifted in the following and instruction.
4596 ASSERT(NameDictionary::GetProbeOffset(i) <
4597 1 << (32 - Name::kHashFieldOffset));
4598 __ Addu(index, hash, Operand(
4599 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4601 __ mov(index, hash);
4603 __ srl(index, index, Name::kHashShift);
4604 __ And(index, mask, index);
4606 // Scale the index by multiplying by the entry size.
4607 ASSERT(NameDictionary::kEntrySize == 3);
4610 __ sll(index, index, 1);
4611 __ Addu(index, index, at);
4614 ASSERT_EQ(kSmiTagSize, 1);
4615 __ sll(index, index, 2);
4616 __ Addu(index, index, dictionary);
4617 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
4619 // Having undefined at this place means the name is not contained.
4620 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined));
4622 // Stop if found the property.
4623 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
4625 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
4626 // Check if the entry name is not a unique name.
4627 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4629 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4630 __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
4634 __ bind(&maybe_in_dictionary);
4635 // If we are doing negative lookup then probing failure should be
4636 // treated as a lookup success. For positive lookup probing failure
4637 // should be treated as lookup failure.
4638 if (mode_ == POSITIVE_LOOKUP) {
4639 __ Ret(USE_DELAY_SLOT);
4640 __ mov(result, zero_reg);
4643 __ bind(&in_dictionary);
4644 __ Ret(USE_DELAY_SLOT);
4647 __ bind(¬_in_dictionary);
4648 __ Ret(USE_DELAY_SLOT);
4649 __ mov(result, zero_reg);
4653 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4655 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4657 // Hydrogen code stubs need stub2 at snapshot time.
4658 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4663 bool CodeStub::CanUseFPRegisters() {
4664 return true; // FPU is a base requirement for V8.
4668 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
4669 // the value has just been written into the object, now this stub makes sure
4670 // we keep the GC informed. The word in the object where the value has been
4671 // written is in the address register.
4672 void RecordWriteStub::Generate(MacroAssembler* masm) {
4673 Label skip_to_incremental_noncompacting;
4674 Label skip_to_incremental_compacting;
4676 // The first two branch+nop instructions are generated with labels so as to
4677 // get the offset fixed up correctly by the bind(Label*) call. We patch it
4678 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
4679 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
4680 // incremental heap marking.
4681 // See RecordWriteStub::Patch for details.
4682 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
4684 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
4687 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4688 __ RememberedSetHelper(object_,
4692 MacroAssembler::kReturnAtEnd);
4696 __ bind(&skip_to_incremental_noncompacting);
4697 GenerateIncremental(masm, INCREMENTAL);
4699 __ bind(&skip_to_incremental_compacting);
4700 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4702 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4703 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4705 PatchBranchIntoNop(masm, 0);
4706 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
4710 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4713 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4714 Label dont_need_remembered_set;
4716 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4717 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4719 &dont_need_remembered_set);
4721 __ CheckPageFlag(regs_.object(),
4723 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4725 &dont_need_remembered_set);
4727 // First notify the incremental marker if necessary, then update the
4729 CheckNeedsToInformIncrementalMarker(
4730 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4731 InformIncrementalMarker(masm);
4732 regs_.Restore(masm);
4733 __ RememberedSetHelper(object_,
4737 MacroAssembler::kReturnAtEnd);
4739 __ bind(&dont_need_remembered_set);
4742 CheckNeedsToInformIncrementalMarker(
4743 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4744 InformIncrementalMarker(masm);
4745 regs_.Restore(masm);
4750 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4751 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
4752 int argument_count = 3;
4753 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4755 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4756 ASSERT(!address.is(regs_.object()));
4757 ASSERT(!address.is(a0));
4758 __ Move(address, regs_.address());
4759 __ Move(a0, regs_.object());
4760 __ Move(a1, address);
4761 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4763 AllowExternalCallThatCantCauseGC scope(masm);
4765 ExternalReference::incremental_marking_record_write_function(isolate()),
4767 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
4771 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4772 MacroAssembler* masm,
4773 OnNoNeedToInformIncrementalMarker on_no_need,
4776 Label need_incremental;
4777 Label need_incremental_pop_scratch;
4779 __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4780 __ lw(regs_.scratch1(),
4781 MemOperand(regs_.scratch0(),
4782 MemoryChunk::kWriteBarrierCounterOffset));
4783 __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
4784 __ sw(regs_.scratch1(),
4785 MemOperand(regs_.scratch0(),
4786 MemoryChunk::kWriteBarrierCounterOffset));
4787 __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
4789 // Let's look at the color of the object: If it is not black we don't have
4790 // to inform the incremental marker.
4791 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4793 regs_.Restore(masm);
4794 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4795 __ RememberedSetHelper(object_,
4799 MacroAssembler::kReturnAtEnd);
4806 // Get the value from the slot.
4807 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4809 if (mode == INCREMENTAL_COMPACTION) {
4810 Label ensure_not_white;
4812 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4813 regs_.scratch1(), // Scratch.
4814 MemoryChunk::kEvacuationCandidateMask,
4818 __ CheckPageFlag(regs_.object(),
4819 regs_.scratch1(), // Scratch.
4820 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4824 __ bind(&ensure_not_white);
4827 // We need extra registers for this, so we push the object and the address
4828 // register temporarily.
4829 __ Push(regs_.object(), regs_.address());
4830 __ EnsureNotWhite(regs_.scratch0(), // The value.
4831 regs_.scratch1(), // Scratch.
4832 regs_.object(), // Scratch.
4833 regs_.address(), // Scratch.
4834 &need_incremental_pop_scratch);
4835 __ Pop(regs_.object(), regs_.address());
4837 regs_.Restore(masm);
4838 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4839 __ RememberedSetHelper(object_,
4843 MacroAssembler::kReturnAtEnd);
4848 __ bind(&need_incremental_pop_scratch);
4849 __ Pop(regs_.object(), regs_.address());
4851 __ bind(&need_incremental);
4853 // Fall through when we need to inform the incremental marker.
4857 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4858 // ----------- S t a t e -------------
4859 // -- a0 : element value to store
4860 // -- a3 : element index as smi
4861 // -- sp[0] : array literal index in function as smi
4862 // -- sp[4] : array literal
4863 // clobbers a1, a2, t0
4864 // -----------------------------------
4867 Label double_elements;
4869 Label slow_elements;
4870 Label fast_elements;
4872 // Get array literal index, array literal and its map.
4873 __ lw(t0, MemOperand(sp, 0 * kPointerSize));
4874 __ lw(a1, MemOperand(sp, 1 * kPointerSize));
4875 __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
4877 __ CheckFastElements(a2, t1, &double_elements);
4878 // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
4879 __ JumpIfSmi(a0, &smi_element);
4880 __ CheckFastSmiElements(a2, t1, &fast_elements);
4882 // Store into the array literal requires a elements transition. Call into
4884 __ bind(&slow_elements);
4886 __ Push(a1, a3, a0);
4887 __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4888 __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
4890 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4892 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4893 __ bind(&fast_elements);
4894 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4895 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4896 __ Addu(t2, t1, t2);
4897 __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4898 __ sw(a0, MemOperand(t2, 0));
4899 // Update the write barrier for the array store.
4900 __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
4901 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4902 __ Ret(USE_DELAY_SLOT);
4905 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4906 // and value is Smi.
4907 __ bind(&smi_element);
4908 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4909 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4910 __ Addu(t2, t1, t2);
4911 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
4912 __ Ret(USE_DELAY_SLOT);
4915 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
4916 __ bind(&double_elements);
4917 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4918 __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
4919 __ Ret(USE_DELAY_SLOT);
4924 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4925 CEntryStub ces(isolate(), 1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
4926 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4927 int parameter_count_offset =
4928 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4929 __ lw(a1, MemOperand(fp, parameter_count_offset));
4930 if (function_mode_ == JS_FUNCTION_STUB_MODE) {
4931 __ Addu(a1, a1, Operand(1));
4933 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4934 __ sll(a1, a1, kPointerSizeLog2);
4935 __ Ret(USE_DELAY_SLOT);
4936 __ Addu(sp, sp, a1);
4940 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4941 if (masm->isolate()->function_entry_hook() != NULL) {
4942 ProfileEntryHookStub stub(masm->isolate());
4950 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4951 // The entry hook is a "push ra" instruction, followed by a call.
4952 // Note: on MIPS "push" is 2 instruction
4953 const int32_t kReturnAddressDistanceFromFunctionStart =
4954 Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
4956 // This should contain all kJSCallerSaved registers.
4957 const RegList kSavedRegs =
4958 kJSCallerSaved | // Caller saved registers.
4959 s5.bit(); // Saved stack pointer.
4961 // We also save ra, so the count here is one higher than the mask indicates.
4962 const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
4964 // Save all caller-save registers as this may be called from anywhere.
4965 __ MultiPush(kSavedRegs | ra.bit());
4967 // Compute the function's address for the first argument.
4968 __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
4970 // The caller's return address is above the saved temporaries.
4971 // Grab that for the second argument to the hook.
4972 __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
4974 // Align the stack if necessary.
4975 int frame_alignment = masm->ActivationFrameAlignment();
4976 if (frame_alignment > kPointerSize) {
4978 ASSERT(IsPowerOf2(frame_alignment));
4979 __ And(sp, sp, Operand(-frame_alignment));
4981 __ Subu(sp, sp, kCArgsSlotsSize);
4982 #if defined(V8_HOST_ARCH_MIPS)
4983 int32_t entry_hook =
4984 reinterpret_cast<int32_t>(isolate()->function_entry_hook());
4985 __ li(t9, Operand(entry_hook));
4987 // Under the simulator we need to indirect the entry hook through a
4988 // trampoline function at a known address.
4989 // It additionally takes an isolate as a third parameter.
4990 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4992 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4993 __ li(t9, Operand(ExternalReference(&dispatcher,
4994 ExternalReference::BUILTIN_CALL,
4997 // Call C function through t9 to conform ABI for PIC.
5000 // Restore the stack pointer if needed.
5001 if (frame_alignment > kPointerSize) {
5004 __ Addu(sp, sp, kCArgsSlotsSize);
5007 // Also pop ra to get Ret(0).
5008 __ MultiPop(kSavedRegs | ra.bit());
5014 static void CreateArrayDispatch(MacroAssembler* masm,
5015 AllocationSiteOverrideMode mode) {
5016 if (mode == DISABLE_ALLOCATION_SITES) {
5017 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
5018 __ TailCallStub(&stub);
5019 } else if (mode == DONT_OVERRIDE) {
5020 int last_index = GetSequenceIndexFromFastElementsKind(
5021 TERMINAL_FAST_ELEMENTS_KIND);
5022 for (int i = 0; i <= last_index; ++i) {
5023 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5024 T stub(masm->isolate(), kind);
5025 __ TailCallStub(&stub, eq, a3, Operand(kind));
5028 // If we reached this point there is a problem.
5029 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5036 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
5037 AllocationSiteOverrideMode mode) {
5038 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
5039 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
5040 // a0 - number of arguments
5041 // a1 - constructor?
5042 // sp[0] - last argument
5043 Label normal_sequence;
5044 if (mode == DONT_OVERRIDE) {
5045 ASSERT(FAST_SMI_ELEMENTS == 0);
5046 ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
5047 ASSERT(FAST_ELEMENTS == 2);
5048 ASSERT(FAST_HOLEY_ELEMENTS == 3);
5049 ASSERT(FAST_DOUBLE_ELEMENTS == 4);
5050 ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
5052 // is the low bit set? If so, we are holey and that is good.
5053 __ And(at, a3, Operand(1));
5054 __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
5057 // look at the first argument
5058 __ lw(t1, MemOperand(sp, 0));
5059 __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
5061 if (mode == DISABLE_ALLOCATION_SITES) {
5062 ElementsKind initial = GetInitialFastElementsKind();
5063 ElementsKind holey_initial = GetHoleyElementsKind(initial);
5065 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
5067 DISABLE_ALLOCATION_SITES);
5068 __ TailCallStub(&stub_holey);
5070 __ bind(&normal_sequence);
5071 ArraySingleArgumentConstructorStub stub(masm->isolate(),
5073 DISABLE_ALLOCATION_SITES);
5074 __ TailCallStub(&stub);
5075 } else if (mode == DONT_OVERRIDE) {
5076 // We are going to create a holey array, but our kind is non-holey.
5077 // Fix kind and retry (only if we have an allocation site in the slot).
5078 __ Addu(a3, a3, Operand(1));
5080 if (FLAG_debug_code) {
5081 __ lw(t1, FieldMemOperand(a2, 0));
5082 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
5083 __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
5086 // Save the resulting elements kind in type info. We can't just store a3
5087 // in the AllocationSite::transition_info field because elements kind is
5088 // restricted to a portion of the field...upper bits need to be left alone.
5089 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5090 __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5091 __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
5092 __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5095 __ bind(&normal_sequence);
5096 int last_index = GetSequenceIndexFromFastElementsKind(
5097 TERMINAL_FAST_ELEMENTS_KIND);
5098 for (int i = 0; i <= last_index; ++i) {
5099 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5100 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
5101 __ TailCallStub(&stub, eq, a3, Operand(kind));
5104 // If we reached this point there is a problem.
5105 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5113 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
5114 int to_index = GetSequenceIndexFromFastElementsKind(
5115 TERMINAL_FAST_ELEMENTS_KIND);
5116 for (int i = 0; i <= to_index; ++i) {
5117 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5118 T stub(isolate, kind);
5120 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
5121 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
5128 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
5129 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5131 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5133 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5138 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
5140 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
5141 for (int i = 0; i < 2; i++) {
5142 // For internal arrays we only need a few things.
5143 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
5145 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
5147 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
5153 void ArrayConstructorStub::GenerateDispatchToArrayStub(
5154 MacroAssembler* masm,
5155 AllocationSiteOverrideMode mode) {
5156 if (argument_count_ == ANY) {
5157 Label not_zero_case, not_one_case;
5159 __ Branch(¬_zero_case, ne, at, Operand(zero_reg));
5160 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5162 __ bind(¬_zero_case);
5163 __ Branch(¬_one_case, gt, a0, Operand(1));
5164 CreateArrayDispatchOneArgument(masm, mode);
5166 __ bind(¬_one_case);
5167 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5168 } else if (argument_count_ == NONE) {
5169 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5170 } else if (argument_count_ == ONE) {
5171 CreateArrayDispatchOneArgument(masm, mode);
5172 } else if (argument_count_ == MORE_THAN_ONE) {
5173 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5180 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5181 // ----------- S t a t e -------------
5182 // -- a0 : argc (only if argument_count_ == ANY)
5183 // -- a1 : constructor
5184 // -- a2 : AllocationSite or undefined
5185 // -- sp[0] : return address
5186 // -- sp[4] : last argument
5187 // -----------------------------------
5189 if (FLAG_debug_code) {
5190 // The array construct code is only set for the global and natives
5191 // builtin Array functions which always have maps.
5193 // Initial map for the builtin Array function should be a map.
5194 __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5195 // Will both indicate a NULL and a Smi.
5197 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
5198 at, Operand(zero_reg));
5199 __ GetObjectType(t0, t0, t1);
5200 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
5201 t1, Operand(MAP_TYPE));
5203 // We should either have undefined in a2 or a valid AllocationSite
5204 __ AssertUndefinedOrAllocationSite(a2, t0);
5208 // Get the elements kind and case on that.
5209 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5210 __ Branch(&no_info, eq, a2, Operand(at));
5212 __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5214 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5215 __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
5216 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5219 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5223 void InternalArrayConstructorStub::GenerateCase(
5224 MacroAssembler* masm, ElementsKind kind) {
5226 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
5227 __ TailCallStub(&stub0, lo, a0, Operand(1));
5229 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
5230 __ TailCallStub(&stubN, hi, a0, Operand(1));
5232 if (IsFastPackedElementsKind(kind)) {
5233 // We might need to create a holey array
5234 // look at the first argument.
5235 __ lw(at, MemOperand(sp, 0));
5237 InternalArraySingleArgumentConstructorStub
5238 stub1_holey(isolate(), GetHoleyElementsKind(kind));
5239 __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
5242 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
5243 __ TailCallStub(&stub1);
5247 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5248 // ----------- S t a t e -------------
5250 // -- a1 : constructor
5251 // -- sp[0] : return address
5252 // -- sp[4] : last argument
5253 // -----------------------------------
5255 if (FLAG_debug_code) {
5256 // The array construct code is only set for the global and natives
5257 // builtin Array functions which always have maps.
5259 // Initial map for the builtin Array function should be a map.
5260 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5261 // Will both indicate a NULL and a Smi.
5263 __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
5264 at, Operand(zero_reg));
5265 __ GetObjectType(a3, a3, t0);
5266 __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
5267 t0, Operand(MAP_TYPE));
5270 // Figure out the right elements kind.
5271 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5273 // Load the map's "bit field 2" into a3. We only need the first byte,
5274 // but the following bit field extraction takes care of that anyway.
5275 __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
5276 // Retrieve elements_kind from bit field 2.
5277 __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
5279 if (FLAG_debug_code) {
5281 __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
5283 eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
5284 a3, Operand(FAST_HOLEY_ELEMENTS));
5288 Label fast_elements_case;
5289 __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
5290 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
5292 __ bind(&fast_elements_case);
5293 GenerateCase(masm, FAST_ELEMENTS);
5297 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5298 // ----------- S t a t e -------------
5300 // -- t0 : call_data
5302 // -- a1 : api_function_address
5305 // -- sp[0] : last argument
5307 // -- sp[(argc - 1)* 4] : first argument
5308 // -- sp[argc * 4] : receiver
5309 // -----------------------------------
5311 Register callee = a0;
5312 Register call_data = t0;
5313 Register holder = a2;
5314 Register api_function_address = a1;
5315 Register context = cp;
5317 int argc = ArgumentBits::decode(bit_field_);
5318 bool is_store = IsStoreBits::decode(bit_field_);
5319 bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
5321 typedef FunctionCallbackArguments FCA;
5323 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5324 STATIC_ASSERT(FCA::kCalleeIndex == 5);
5325 STATIC_ASSERT(FCA::kDataIndex == 4);
5326 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5327 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5328 STATIC_ASSERT(FCA::kIsolateIndex == 1);
5329 STATIC_ASSERT(FCA::kHolderIndex == 0);
5330 STATIC_ASSERT(FCA::kArgsLength == 7);
5332 // Save context, callee and call data.
5333 __ Push(context, callee, call_data);
5334 // Load context from callee.
5335 __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5337 Register scratch = call_data;
5338 if (!call_data_undefined) {
5339 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5341 // Push return value and default return value.
5342 __ Push(scratch, scratch);
5344 Operand(ExternalReference::isolate_address(isolate())));
5345 // Push isolate and holder.
5346 __ Push(scratch, holder);
5348 // Prepare arguments.
5349 __ mov(scratch, sp);
5351 // Allocate the v8::Arguments structure in the arguments' space since
5352 // it's not controlled by GC.
5353 const int kApiStackSpace = 4;
5355 FrameScope frame_scope(masm, StackFrame::MANUAL);
5356 __ EnterExitFrame(false, kApiStackSpace);
5358 ASSERT(!api_function_address.is(a0) && !scratch.is(a0));
5359 // a0 = FunctionCallbackInfo&
5360 // Arguments is after the return address.
5361 __ Addu(a0, sp, Operand(1 * kPointerSize));
5362 // FunctionCallbackInfo::implicit_args_
5363 __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
5364 // FunctionCallbackInfo::values_
5365 __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
5366 __ sw(at, MemOperand(a0, 1 * kPointerSize));
5367 // FunctionCallbackInfo::length_ = argc
5368 __ li(at, Operand(argc));
5369 __ sw(at, MemOperand(a0, 2 * kPointerSize));
5370 // FunctionCallbackInfo::is_construct_call = 0
5371 __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
5373 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
5374 ExternalReference thunk_ref =
5375 ExternalReference::invoke_function_callback(isolate());
5377 AllowExternalCallThatCantCauseGC scope(masm);
5378 MemOperand context_restore_operand(
5379 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5380 // Stores return the first js argument.
5381 int return_value_offset = 0;
5383 return_value_offset = 2 + FCA::kArgsLength;
5385 return_value_offset = 2 + FCA::kReturnValueOffset;
5387 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5389 __ CallApiFunctionAndReturn(api_function_address,
5392 return_value_operand,
5393 &context_restore_operand);
5397 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5398 // ----------- S t a t e -------------
5400 // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
5402 // -- a2 : api_function_address
5403 // -----------------------------------
5405 Register api_function_address = a2;
5407 __ mov(a0, sp); // a0 = Handle<Name>
5408 __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
5410 const int kApiStackSpace = 1;
5411 FrameScope frame_scope(masm, StackFrame::MANUAL);
5412 __ EnterExitFrame(false, kApiStackSpace);
5414 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5415 // a1 (internal::Object** args_) as the data.
5416 __ sw(a1, MemOperand(sp, 1 * kPointerSize));
5417 __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
5419 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5421 ExternalReference thunk_ref =
5422 ExternalReference::invoke_accessor_getter_callback(isolate());
5423 __ CallApiFunctionAndReturn(api_function_address,
5426 MemOperand(fp, 6 * kPointerSize),
5433 } } // namespace v8::internal
5435 #endif // V8_TARGET_ARCH_MIPS