1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #if V8_TARGET_ARCH_MIPS
9 #include "src/bootstrapper.h"
10 #include "src/code-stubs.h"
11 #include "src/codegen.h"
12 #include "src/regexp-macro-assembler.h"
13 #include "src/stub-cache.h"
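// Note on the InitializeInterfaceDescriptor functions below: register_params_
// lists the registers in which a stub expects its parameters, and
// deoptimization_handler_ names the runtime or IC entry used when the stub
// has to bail out (NULL when there is no such entry).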
19 void FastNewClosureStub::InitializeInterfaceDescriptor(
20 CodeStubInterfaceDescriptor* descriptor) {
21 static Register registers[] = { a2 };
22 descriptor->register_param_count_ = 1;
23 descriptor->register_params_ = registers;
24 descriptor->deoptimization_handler_ =
25 Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
29 void FastNewContextStub::InitializeInterfaceDescriptor(
30 CodeStubInterfaceDescriptor* descriptor) {
31 static Register registers[] = { a1 };
32 descriptor->register_param_count_ = 1;
33 descriptor->register_params_ = registers;
34 descriptor->deoptimization_handler_ = NULL;
38 void ToNumberStub::InitializeInterfaceDescriptor(
39 CodeStubInterfaceDescriptor* descriptor) {
40 static Register registers[] = { a0 };
41 descriptor->register_param_count_ = 1;
42 descriptor->register_params_ = registers;
43 descriptor->deoptimization_handler_ = NULL;
47 void NumberToStringStub::InitializeInterfaceDescriptor(
48 CodeStubInterfaceDescriptor* descriptor) {
49 static Register registers[] = { a0 };
50 descriptor->register_param_count_ = 1;
51 descriptor->register_params_ = registers;
52 descriptor->deoptimization_handler_ =
53 Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
57 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
58 CodeStubInterfaceDescriptor* descriptor) {
59 static Register registers[] = { a3, a2, a1 };
60 descriptor->register_param_count_ = 3;
61 descriptor->register_params_ = registers;
62 static Representation representations[] = {
63 Representation::Tagged(),
64 Representation::Smi(),
65 Representation::Tagged() };
66 descriptor->register_param_representations_ = representations;
67 descriptor->deoptimization_handler_ =
68 Runtime::FunctionForId(
69 Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
73 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
74 CodeStubInterfaceDescriptor* descriptor) {
75 static Register registers[] = { a3, a2, a1, a0 };
76 descriptor->register_param_count_ = 4;
77 descriptor->register_params_ = registers;
78 descriptor->deoptimization_handler_ =
79 Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
83 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
84 CodeStubInterfaceDescriptor* descriptor) {
85 static Register registers[] = { a2, a3 };
86 descriptor->register_param_count_ = 2;
87 descriptor->register_params_ = registers;
88 descriptor->deoptimization_handler_ = NULL;
92 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
93 CodeStubInterfaceDescriptor* descriptor) {
94 static Register registers[] = { a1, a0 };
95 descriptor->register_param_count_ = 2;
96 descriptor->register_params_ = registers;
97 descriptor->deoptimization_handler_ =
98 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
102 void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
103 CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a1, a0 };
105 descriptor->register_param_count_ = 2;
106 descriptor->register_params_ = registers;
107 descriptor->deoptimization_handler_ =
108 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
112 void RegExpConstructResultStub::InitializeInterfaceDescriptor(
113 CodeStubInterfaceDescriptor* descriptor) {
114 static Register registers[] = { a2, a1, a0 };
115 descriptor->register_param_count_ = 3;
116 descriptor->register_params_ = registers;
117 descriptor->deoptimization_handler_ =
118 Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
122 void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
123 CodeStubInterfaceDescriptor* descriptor) {
124 static Register registers[] = { a1, a0 };
125 descriptor->register_param_count_ = 2;
126 descriptor->register_params_ = registers;
127 descriptor->deoptimization_handler_ =
128 Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
132 void LoadFieldStub::InitializeInterfaceDescriptor(
133 CodeStubInterfaceDescriptor* descriptor) {
134 static Register registers[] = { a0 };
135 descriptor->register_param_count_ = 1;
136 descriptor->register_params_ = registers;
137 descriptor->deoptimization_handler_ = NULL;
141 void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
142 CodeStubInterfaceDescriptor* descriptor) {
143 static Register registers[] = { a1 };
144 descriptor->register_param_count_ = 1;
145 descriptor->register_params_ = registers;
146 descriptor->deoptimization_handler_ = NULL;
150 void StringLengthStub::InitializeInterfaceDescriptor(
151 CodeStubInterfaceDescriptor* descriptor) {
152 static Register registers[] = { a0, a2 };
153 descriptor->register_param_count_ = 2;
154 descriptor->register_params_ = registers;
155 descriptor->deoptimization_handler_ = NULL;
159 void KeyedStringLengthStub::InitializeInterfaceDescriptor(
160 CodeStubInterfaceDescriptor* descriptor) {
161 static Register registers[] = { a1, a0 };
162 descriptor->register_param_count_ = 2;
163 descriptor->register_params_ = registers;
164 descriptor->deoptimization_handler_ = NULL;
168 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
169 CodeStubInterfaceDescriptor* descriptor) {
170 static Register registers[] = { a2, a1, a0 };
171 descriptor->register_param_count_ = 3;
172 descriptor->register_params_ = registers;
173 descriptor->deoptimization_handler_ =
174 FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
178 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
179 CodeStubInterfaceDescriptor* descriptor) {
180 static Register registers[] = { a0, a1 };
181 descriptor->register_param_count_ = 2;
182 descriptor->register_params_ = registers;
  Address entry =
      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
189 void CompareNilICStub::InitializeInterfaceDescriptor(
190 CodeStubInterfaceDescriptor* descriptor) {
191 static Register registers[] = { a0 };
192 descriptor->register_param_count_ = 1;
193 descriptor->register_params_ = registers;
194 descriptor->deoptimization_handler_ =
195 FUNCTION_ADDR(CompareNilIC_Miss);
196 descriptor->SetMissHandler(
197 ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
201 static void InitializeArrayConstructorDescriptor(
202 CodeStubInterfaceDescriptor* descriptor,
203 int constant_stack_parameter_count) {
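  // Note: constant_stack_parameter_count == 0 selects the no-argument register
  // set; any other value (1, or -1 for a variable count) additionally passes
  // the argument count in a0 and the arguments themselves on the stack.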
205 // a0 -- number of arguments
207 // a2 -- allocation site with elements kind
208 static Register registers_variable_args[] = { a1, a2, a0 };
209 static Register registers_no_args[] = { a1, a2 };
211 if (constant_stack_parameter_count == 0) {
212 descriptor->register_param_count_ = 2;
213 descriptor->register_params_ = registers_no_args;
215 // stack param count needs (constructor pointer, and single argument)
216 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
217 descriptor->stack_parameter_count_ = a0;
218 descriptor->register_param_count_ = 3;
219 descriptor->register_params_ = registers_variable_args;
220 static Representation representations[] = {
221 Representation::Tagged(),
222 Representation::Tagged(),
223 Representation::Integer32() };
224 descriptor->register_param_representations_ = representations;
227 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
228 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
229 descriptor->deoptimization_handler_ =
230 Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
234 static void InitializeInternalArrayConstructorDescriptor(
235 CodeStubInterfaceDescriptor* descriptor,
236 int constant_stack_parameter_count) {
238 // a0 -- number of arguments
239 // a1 -- constructor function
240 static Register registers_variable_args[] = { a1, a0 };
241 static Register registers_no_args[] = { a1 };
243 if (constant_stack_parameter_count == 0) {
244 descriptor->register_param_count_ = 1;
245 descriptor->register_params_ = registers_no_args;
247 // stack param count needs (constructor pointer, and single argument)
248 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
249 descriptor->stack_parameter_count_ = a0;
250 descriptor->register_param_count_ = 2;
251 descriptor->register_params_ = registers_variable_args;
252 static Representation representations[] = {
253 Representation::Tagged(),
254 Representation::Integer32() };
255 descriptor->register_param_representations_ = representations;
258 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
259 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
260 descriptor->deoptimization_handler_ =
261 Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
265 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
266 CodeStubInterfaceDescriptor* descriptor) {
267 InitializeArrayConstructorDescriptor(descriptor, 0);
271 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
272 CodeStubInterfaceDescriptor* descriptor) {
273 InitializeArrayConstructorDescriptor(descriptor, 1);
277 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
278 CodeStubInterfaceDescriptor* descriptor) {
279 InitializeArrayConstructorDescriptor(descriptor, -1);
283 void ToBooleanStub::InitializeInterfaceDescriptor(
284 CodeStubInterfaceDescriptor* descriptor) {
285 static Register registers[] = { a0 };
286 descriptor->register_param_count_ = 1;
287 descriptor->register_params_ = registers;
288 descriptor->deoptimization_handler_ =
289 FUNCTION_ADDR(ToBooleanIC_Miss);
290 descriptor->SetMissHandler(
291 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
295 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
296 CodeStubInterfaceDescriptor* descriptor) {
297 InitializeInternalArrayConstructorDescriptor(descriptor, 0);
301 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
302 CodeStubInterfaceDescriptor* descriptor) {
303 InitializeInternalArrayConstructorDescriptor(descriptor, 1);
307 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
308 CodeStubInterfaceDescriptor* descriptor) {
309 InitializeInternalArrayConstructorDescriptor(descriptor, -1);
313 void StoreGlobalStub::InitializeInterfaceDescriptor(
314 CodeStubInterfaceDescriptor* descriptor) {
315 static Register registers[] = { a1, a2, a0 };
316 descriptor->register_param_count_ = 3;
317 descriptor->register_params_ = registers;
318 descriptor->deoptimization_handler_ =
319 FUNCTION_ADDR(StoreIC_MissFromStubFailure);
323 void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
324 CodeStubInterfaceDescriptor* descriptor) {
325 static Register registers[] = { a0, a3, a1, a2 };
326 descriptor->register_param_count_ = 4;
327 descriptor->register_params_ = registers;
328 descriptor->deoptimization_handler_ =
329 FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
333 void BinaryOpICStub::InitializeInterfaceDescriptor(
334 CodeStubInterfaceDescriptor* descriptor) {
335 static Register registers[] = { a1, a0 };
336 descriptor->register_param_count_ = 2;
337 descriptor->register_params_ = registers;
338 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
339 descriptor->SetMissHandler(
340 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
344 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
345 CodeStubInterfaceDescriptor* descriptor) {
346 static Register registers[] = { a2, a1, a0 };
347 descriptor->register_param_count_ = 3;
348 descriptor->register_params_ = registers;
349 descriptor->deoptimization_handler_ =
350 FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
354 void StringAddStub::InitializeInterfaceDescriptor(
355 CodeStubInterfaceDescriptor* descriptor) {
356 static Register registers[] = { a1, a0 };
357 descriptor->register_param_count_ = 2;
358 descriptor->register_params_ = registers;
359 descriptor->deoptimization_handler_ =
360 Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
364 void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
366 CallInterfaceDescriptor* descriptor =
367 isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
    static Register registers[] = { a1,  // JSFunction
                                    cp,  // context
                                    a0,  // actual number of arguments
                                    a2,  // expected number of arguments
    };
373 static Representation representations[] = {
374 Representation::Tagged(), // JSFunction
375 Representation::Tagged(), // context
376 Representation::Integer32(), // actual number of arguments
377 Representation::Integer32(), // expected number of arguments
379 descriptor->register_param_count_ = 4;
380 descriptor->register_params_ = registers;
381 descriptor->param_representations_ = representations;
384 CallInterfaceDescriptor* descriptor =
385 isolate->call_descriptor(Isolate::KeyedCall);
386 static Register registers[] = { cp, // context
389 static Representation representations[] = {
390 Representation::Tagged(), // context
391 Representation::Tagged(), // key
393 descriptor->register_param_count_ = 2;
394 descriptor->register_params_ = registers;
395 descriptor->param_representations_ = representations;
398 CallInterfaceDescriptor* descriptor =
399 isolate->call_descriptor(Isolate::NamedCall);
400 static Register registers[] = { cp, // context
403 static Representation representations[] = {
404 Representation::Tagged(), // context
405 Representation::Tagged(), // name
407 descriptor->register_param_count_ = 2;
408 descriptor->register_params_ = registers;
409 descriptor->param_representations_ = representations;
412 CallInterfaceDescriptor* descriptor =
413 isolate->call_descriptor(Isolate::CallHandler);
414 static Register registers[] = { cp, // context
417 static Representation representations[] = {
418 Representation::Tagged(), // context
419 Representation::Tagged(), // receiver
421 descriptor->register_param_count_ = 2;
422 descriptor->register_params_ = registers;
423 descriptor->param_representations_ = representations;
426 CallInterfaceDescriptor* descriptor =
427 isolate->call_descriptor(Isolate::ApiFunctionCall);
428 static Register registers[] = { a0, // callee
431 a1, // api_function_address
434 static Representation representations[] = {
435 Representation::Tagged(), // callee
436 Representation::Tagged(), // call_data
437 Representation::Tagged(), // holder
438 Representation::External(), // api_function_address
439 Representation::Tagged(), // context
441 descriptor->register_param_count_ = 5;
442 descriptor->register_params_ = registers;
443 descriptor->param_representations_ = representations;
448 #define __ ACCESS_MASM(masm)
451 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
454 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
460 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
465 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
466 // Update the static counter each time a new code stub is generated.
467 isolate()->counters()->code_stubs()->Increment();
469 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
470 int param_count = descriptor->register_param_count_;
472 // Call the runtime system in a fresh internal frame.
473 FrameScope scope(masm, StackFrame::INTERNAL);
474 ASSERT(descriptor->register_param_count_ == 0 ||
475 a0.is(descriptor->register_params_[param_count - 1]));
476 // Push arguments, adjust sp.
477 __ Subu(sp, sp, Operand(param_count * kPointerSize));
478 for (int i = 0; i < param_count; ++i) {
479 // Store argument to stack.
480 __ sw(descriptor->register_params_[i],
481 MemOperand(sp, (param_count-1-i) * kPointerSize));
483 ExternalReference miss = descriptor->miss_handler();
484 __ CallExternalReference(miss, descriptor->register_param_count_);
491 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
492 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
493 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
494 // scratch register. Destroys the source register. No GC occurs during this
495 // stub so you don't have to set up the frame.
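// For example, the Smi 1 converts to the double 1.0: exponent word 0x3FF00000
// (sign 0, biased exponent 1023) and mantissa word 0x00000000.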
496 class ConvertToDoubleStub : public PlatformCodeStub {
498 ConvertToDoubleStub(Isolate* isolate,
499 Register result_reg_1,
500 Register result_reg_2,
502 Register scratch_reg)
503 : PlatformCodeStub(isolate),
504 result1_(result_reg_1),
505 result2_(result_reg_2),
507 zeros_(scratch_reg) { }
515 // Minor key encoding in 16 bits.
516 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
517 class OpBits: public BitField<Token::Value, 2, 14> {};
519 Major MajorKey() { return ConvertToDouble; }
521 // Encode the parameters in a unique 16 bit value.
522 return result1_.code() +
523 (result2_.code() << 4) +
524 (source_.code() << 8) +
525 (zeros_.code() << 12);
528 void Generate(MacroAssembler* masm);
532 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
533 Register exponent, mantissa;
534 if (kArchEndian == kLittle) {
542 // Convert from Smi to integer.
543 __ sra(source_, source_, kSmiTagSize);
544 // Move sign bit from source to destination. This works because the sign bit
545 // in the exponent word of the double has the same position and polarity as
546 // the 2's complement sign bit in a Smi.
547 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
548 __ And(exponent, source_, Operand(HeapNumber::kSignMask));
549 // Subtract from 0 if source was negative.
550 __ subu(at, zero_reg, source_);
551 __ Movn(source_, at, exponent);
553 // We have -1, 0 or 1, which we treat specially. Register source_ contains
554 // absolute value: it is either equal to 1 (special case of -1 and 1),
555 // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ Branch(&not_special, gt, source_, Operand(1));
558 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
559 const uint32_t exponent_word_for_1 =
560 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
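  // With kExponentBias == 1023 and kExponentShift == 20 this is 0x3FF00000,
  // i.e. the high word of the double 1.0.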
561 // Safe to use 'at' as dest reg here.
562 __ Or(at, exponent, Operand(exponent_word_for_1));
563 __ Movn(exponent, at, source_); // Write exp when source not 0.
564 // 1, 0 and -1 all have 0 for the second word.
565 __ Ret(USE_DELAY_SLOT);
566 __ mov(mantissa, zero_reg);
  __ bind(&not_special);
569 // Count leading zeros.
570 // Gets the wrong answer for 0, but we already checked for that case above.
571 __ Clz(zeros_, source_);
572 // Compute exponent and or it into the exponent register.
573 // We use mantissa as a scratch register here.
574 __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
575 __ subu(mantissa, mantissa, zeros_);
576 __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
577 __ Or(exponent, exponent, mantissa);
579 // Shift up the source chopping the top bit off.
580 __ Addu(zeros_, zeros_, Operand(1));
581 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
582 __ sllv(source_, source_, zeros_);
583 // Compute lower part of fraction (last 12 bits).
584 __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
585 // And the top (top 20 bits).
586 __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
588 __ Ret(USE_DELAY_SLOT);
589 __ or_(exponent, exponent, source_);
593 void DoubleToIStub::Generate(MacroAssembler* masm) {
594 Label out_of_range, only_low, negate, done;
595 Register input_reg = source();
596 Register result_reg = destination();
598 int double_offset = offset();
599 // Account for saved regs if input is sp.
600 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
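  // (The three registers pushed below -- scratch, scratch2 and scratch3 --
  // sit between sp and the input value in that case.)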
  Register scratch =
      GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch2 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch3 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
608 DoubleRegister double_scratch = kLithiumScratchDouble;
610 __ Push(scratch, scratch2, scratch3);
612 if (!skip_fastpath()) {
613 // Load double input.
614 __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
616 // Clear cumulative exception flags and save the FCSR.
617 __ cfc1(scratch2, FCSR);
618 __ ctc1(zero_reg, FCSR);
620 // Try a conversion to a signed integer.
621 __ Trunc_w_d(double_scratch, double_scratch);
622 // Move the converted value into the result register.
623 __ mfc1(scratch3, double_scratch);
625 // Retrieve and restore the FCSR.
626 __ cfc1(scratch, FCSR);
627 __ ctc1(scratch2, FCSR);
629 // Check for overflow and NaNs.
    __ And(scratch, scratch,
           kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
           | kFCSRInvalidOpFlagMask);
    // If we had no exceptions then set result_reg and we are done.
    Label error;
    __ Branch(&error, ne, scratch, Operand(zero_reg));
    __ Move(result_reg, scratch3);
    __ Branch(&done);
    __ bind(&error);
642 // Load the double value and perform a manual truncation.
643 Register input_high = scratch2;
644 Register input_low = scratch3;
  __ lw(input_low,
        MemOperand(input_reg, double_offset + Register::kMantissaOffset));
  __ lw(input_high,
        MemOperand(input_reg, double_offset + Register::kExponentOffset));
651 Label normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  __ Ext(result_reg,
         input_high,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);
658 // Check for Infinity and NaNs, which should return 0.
659 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
660 __ Movz(result_reg, zero_reg, scratch);
661 __ Branch(&done, eq, scratch, Operand(zero_reg));
  // Express exponent as delta to (number of mantissa bits + 31).
  __ Subu(result_reg,
          result_reg,
          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
668 // If the delta is strictly positive, all bits would be shifted away,
669 // which means that we can return 0.
670 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
671 __ mov(result_reg, zero_reg);
674 __ bind(&normal_exponent);
675 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
677 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
680 Register sign = result_reg;
682 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
684 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
685 // to check for this specific case.
686 Label high_shift_needed, high_shift_done;
687 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
688 __ mov(input_high, zero_reg);
689 __ Branch(&high_shift_done);
690 __ bind(&high_shift_needed);
  // Set the implicit 1 before the mantissa part in input_high.
  __ Or(input_high,
        input_high,
        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
696 // Shift the mantissa bits to the correct position.
697 // We don't need to clear non-mantissa bits as they will be shifted away.
698 // If they weren't, it would mean that the answer is in the 32bit range.
699 __ sllv(input_high, input_high, scratch);
701 __ bind(&high_shift_done);
703 // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
710 __ Subu(scratch, zero_reg, scratch);
711 __ sllv(input_low, input_low, scratch);
712 __ Branch(&shift_done);
  __ bind(&pos_shift);
  __ srlv(input_low, input_low, scratch);
717 __ bind(&shift_done);
718 __ Or(input_high, input_high, Operand(input_low));
719 // Restore sign if necessary.
720 __ mov(scratch, sign);
723 __ Subu(result_reg, zero_reg, input_high);
724 __ Movz(result_reg, input_high, scratch);
728 __ Pop(scratch, scratch2, scratch3);
733 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
735 WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
736 WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
742 // See comment for class, this does NOT work for int32's that are in Smi range.
743 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
744 Label max_negative_int;
745 // the_int_ has the answer which is a signed int32 but not a Smi.
746 // We test for the special value that has a different exponent.
747 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
748 // Test sign, and save for later conditionals.
749 __ And(sign_, the_int_, Operand(0x80000000u));
750 __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
752 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
753 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
754 uint32_t non_smi_exponent =
755 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
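  // With kExponentBias == 1023 this is (1023 + 30) << 20 == 0x41D00000, the
  // exponent word for values in [2^30, 2^31).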
756 __ li(scratch_, Operand(non_smi_exponent));
757 // Set the sign bit in scratch_ if the value was negative.
758 __ or_(scratch_, scratch_, sign_);
759 // Subtract from 0 if the value was negative.
760 __ subu(at, zero_reg, the_int_);
761 __ Movn(the_int_, at, sign_);
  // We should be masking the implicit first digit of the mantissa away here,
763 // but it just ends up combining harmlessly with the last digit of the
764 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
765 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
766 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
767 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
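  // kNonMantissaBitsInTopWord is 12 (1 sign bit + 11 exponent bits), so the
  // shift distance is 10.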
768 __ srl(at, the_int_, shift_distance);
769 __ or_(scratch_, scratch_, at);
770 __ sw(scratch_, FieldMemOperand(the_heap_number_,
771 HeapNumber::kExponentOffset));
772 __ sll(scratch_, the_int_, 32 - shift_distance);
773 __ Ret(USE_DELAY_SLOT);
774 __ sw(scratch_, FieldMemOperand(the_heap_number_,
775 HeapNumber::kMantissaOffset));
777 __ bind(&max_negative_int);
778 // The max negative int32 is stored as a positive number in the mantissa of
779 // a double because it uses a sign bit instead of using two's complement.
780 // The actual mantissa bits stored are all 0 because the implicit most
781 // significant 1 bit is not stored.
782 non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
786 __ mov(scratch_, zero_reg);
  __ Ret(USE_DELAY_SLOT);
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
793 // Handle the case where the lhs and rhs are the same object.
794 // Equality is almost reflexive (everything but NaN), so this is a test
795 // for "identity and not NaN".
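// (NaN is the one value for which x == x is false, so identical heap number
// operands still need the NaN check below.)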
796 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
800 Label heap_number, return_equal;
801 Register exp_mask_reg = t5;
  __ Branch(&not_identical, ne, a0, Operand(a1));
805 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
807 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
808 // so we do the second best thing - test it ourselves.
809 // They are both equal and they are not both Smis so both of them are not
810 // Smis. If it's not a heap number, then return equal.
811 if (cc == less || cc == greater) {
812 __ GetObjectType(a0, t4, t4);
813 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
815 __ GetObjectType(a0, t4, t4);
816 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
817 // Comparing JS objects with <=, >= is complicated.
819 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
820 // Normally here we fall through to return_equal, but undefined is
821 // special: (undefined == undefined) == true, but
822 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
823 if (cc == less_equal || cc == greater_equal) {
824 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
825 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
826 __ Branch(&return_equal, ne, a0, Operand(t2));
827 ASSERT(is_int16(GREATER) && is_int16(LESS));
828 __ Ret(USE_DELAY_SLOT);
830 // undefined <= undefined should fail.
831 __ li(v0, Operand(GREATER));
833 // undefined >= undefined should fail.
834 __ li(v0, Operand(LESS));
840 __ bind(&return_equal);
841 ASSERT(is_int16(GREATER) && is_int16(LESS));
842 __ Ret(USE_DELAY_SLOT);
844 __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
845 } else if (cc == greater) {
846 __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
848 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
851 // For less and greater we don't have to check for NaN since the result of
852 // x < x is false regardless. For the others here is some code to check
854 if (cc != lt && cc != gt) {
855 __ bind(&heap_number);
856 // It is a heap number, so return non-equal if it's NaN and equal if it's
859 // The representation of NaN values has all exponent bits (52..62) set,
860 // and not all mantissa bits (0..51) clear.
861 // Read top bits of double representation (second word of value).
862 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
863 // Test that exponent bits are all set.
864 __ And(t3, t2, Operand(exp_mask_reg));
865 // If all bits not set (ne cond), then not a NaN, objects are equal.
866 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
868 // Shift out flag and all exponent bits, retaining only mantissa.
869 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
870 // Or with all low-bits of mantissa.
871 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
872 __ Or(v0, t3, Operand(t2));
873 // For equal we already have the right value in v0: Return zero (equal)
874 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
875 // not (it's a NaN). For <= and >= we need to load v0 with the failing
876 // value if it's a NaN.
878 // All-zero means Infinity means equal.
879 __ Ret(eq, v0, Operand(zero_reg));
880 ASSERT(is_int16(GREATER) && is_int16(LESS));
881 __ Ret(USE_DELAY_SLOT);
883 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
885 __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
889 // No fall through here.
  __ bind(&not_identical);
895 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
898 Label* both_loaded_as_doubles,
901 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
902 (lhs.is(a1) && rhs.is(a0)));
905 __ JumpIfSmi(lhs, &lhs_is_smi);
907 // Check whether the non-smi is a heap number.
908 __ GetObjectType(lhs, t4, t4);
910 // If lhs was not a number and rhs was a Smi then strict equality cannot
911 // succeed. Return non-equal (lhs is already not zero).
912 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
915 // Smi compared non-strictly with a non-Smi non-heap-number. Call
917 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
920 // Rhs is a smi, lhs is a number.
921 // Convert smi rhs to double.
922 __ sra(at, rhs, kSmiTagSize);
924 __ cvt_d_w(f14, f14);
925 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
927 // We now have both loaded as doubles.
928 __ jmp(both_loaded_as_doubles);
930 __ bind(&lhs_is_smi);
931 // Lhs is a Smi. Check whether the non-smi is a heap number.
932 __ GetObjectType(rhs, t4, t4);
934 // If lhs was not a number and rhs was a Smi then strict equality cannot
935 // succeed. Return non-equal.
936 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
937 __ li(v0, Operand(1));
939 // Smi compared non-strictly with a non-Smi non-heap-number. Call
941 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
944 // Lhs is a smi, rhs is a number.
945 // Convert smi lhs to double.
946 __ sra(at, lhs, kSmiTagSize);
948 __ cvt_d_w(f12, f12);
949 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
950 // Fall through to both_loaded_as_doubles.
954 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
957 // If either operand is a JS object or an oddball value, then they are
958 // not equal since their pointers are different.
959 // There is no test for undetectability in strict equality.
960 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
961 Label first_non_object;
962 // Get the type of the first operand into a2 and compare it with
963 // FIRST_SPEC_OBJECT_TYPE.
964 __ GetObjectType(lhs, a2, a2);
965 __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
968 Label return_not_equal;
969 __ bind(&return_not_equal);
970 __ Ret(USE_DELAY_SLOT);
971 __ li(v0, Operand(1));
973 __ bind(&first_non_object);
974 // Check for oddballs: true, false, null, undefined.
975 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
977 __ GetObjectType(rhs, a3, a3);
978 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
980 // Check for oddballs: true, false, null, undefined.
981 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
983 // Now that we have the types we might as well check for
984 // internalized-internalized.
985 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
986 __ Or(a2, a2, Operand(a3));
987 __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
988 __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
992 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
995 Label* both_loaded_as_doubles,
996 Label* not_heap_numbers,
998 __ GetObjectType(lhs, a3, a2);
999 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
1000 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
1001 // If first was a heap number & second wasn't, go to slow case.
1002 __ Branch(slow, ne, a3, Operand(a2));
1004 // Both are heap numbers. Load them up then jump to the code we have
1006 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1007 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1009 __ jmp(both_loaded_as_doubles);
1013 // Fast negative check for internalized-to-internalized equality.
1014 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
1017 Label* possible_strings,
1018 Label* not_both_strings) {
1019 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1020 (lhs.is(a1) && rhs.is(a0)));
1022 // a2 is object type of rhs.
1024 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
1025 __ And(at, a2, Operand(kIsNotStringMask));
1026 __ Branch(&object_test, ne, at, Operand(zero_reg));
1027 __ And(at, a2, Operand(kIsNotInternalizedMask));
1028 __ Branch(possible_strings, ne, at, Operand(zero_reg));
1029 __ GetObjectType(rhs, a3, a3);
1030 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1031 __ And(at, a3, Operand(kIsNotInternalizedMask));
1032 __ Branch(possible_strings, ne, at, Operand(zero_reg));
1034 // Both are internalized strings. We already checked they weren't the same
1035 // pointer so they are not equal.
1036 __ Ret(USE_DELAY_SLOT);
1037 __ li(v0, Operand(1)); // Non-zero indicates not equal.
1039 __ bind(&object_test);
1040 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1041 __ GetObjectType(rhs, a2, a3);
1042 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1044 // If both objects are undetectable, they are equal. Otherwise, they
1045 // are not equal, since they are different objects and an object is not
1046 // equal to undefined.
1047 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1048 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1049 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1050 __ and_(a0, a2, a3);
1051 __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1052 __ Ret(USE_DELAY_SLOT);
1053 __ xori(v0, a0, 1 << Map::kIsUndetectable);
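  // If both maps had the undetectable bit set, the AND above leaves it set and
  // the xori clears it, yielding 0 (equal); otherwise v0 is non-zero.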
1057 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
1060 CompareIC::State expected,
1063 if (expected == CompareIC::SMI) {
1064 __ JumpIfNotSmi(input, fail);
1065 } else if (expected == CompareIC::NUMBER) {
1066 __ JumpIfSmi(input, &ok);
1067 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
1070 // We could be strict about internalized/string here, but as long as
1071 // hydrogen doesn't care, the stub doesn't have to care either.
1076 // On entry a1 and a2 are the values to be compared.
1077 // On exit a0 is 0, positive or negative to indicate the result of
1079 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
1082 Condition cc = GetCondition();
1085 ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
1086 ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
1088 Label slow; // Call builtin.
1089 Label not_smis, both_loaded_as_doubles;
1091 Label not_two_smis, smi_done;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &not_two_smis);
  __ sra(a1, a1, 1);
  __ sra(a0, a0, 1);
  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, a1, a0);
  __ bind(&not_two_smis);
1100 // NOTICE! This code is only reached after a smi-fast-case check, so
1101 // it is certain that at least one operand isn't a smi.
1103 // Handle the case where the objects are identical. Either returns the answer
1104 // or goes to slow. Only falls through if the objects were not identical.
1105 EmitIdenticalObjectComparison(masm, &slow, cc);
1107 // If either is a Smi (we know that not both are), then they can only
1108 // be strictly equal if the other is a HeapNumber.
1109 STATIC_ASSERT(kSmiTag == 0);
1110 ASSERT_EQ(0, Smi::FromInt(0));
1111 __ And(t2, lhs, Operand(rhs));
1112 __ JumpIfNotSmi(t2, ¬_smis, t0);
1113 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
1117 // 4) Jump to rhs_not_nan.
1118 // In cases 3 and 4 we have found out we were dealing with a number-number
1119 // comparison and the numbers have been loaded into f12 and f14 as doubles,
1120 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1121 EmitSmiNonsmiComparison(masm, lhs, rhs,
1122 &both_loaded_as_doubles, &slow, strict());
1124 __ bind(&both_loaded_as_doubles);
1125 // f12, f14 are the double representations of the left hand side
1126 // and the right hand side if we have FPU. Otherwise a2, a3 represent
1127 // left hand side and a0, a1 represent right hand side.
  Label nan;
  __ li(t0, Operand(LESS));
1130 __ li(t1, Operand(GREATER));
1131 __ li(t2, Operand(EQUAL));
1133 // Check if either rhs or lhs is NaN.
1134 __ BranchF(NULL, &nan, eq, f12, f14);
  // Check if LESS condition is satisfied. If true, move conditionally
  // to v0.
  __ c(OLT, D, f12, f14);
  __ Movt(v0, t0);
  // Use previous check to store conditionally to v0 the opposite condition
  // (GREATER). If rhs is equal to lhs, this will be corrected in next
  // check.
  __ Movf(v0, t1);
  // Check if EQUAL condition is satisfied. If true, move conditionally
  // to v0.
  __ c(EQ, D, f12, f14);
  __ Movt(v0, t2);
  __ bind(&nan);
  // NaN comparisons always fail.
1153 // Load whatever we need in v0 to make the comparison fail.
1154 ASSERT(is_int16(GREATER) && is_int16(LESS));
1155 __ Ret(USE_DELAY_SLOT);
1156 if (cc == lt || cc == le) {
1157 __ li(v0, Operand(GREATER));
1159 __ li(v0, Operand(LESS));
1164 // At this point we know we are dealing with two different objects,
1165 // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1167 // This returns non-equal for some object types, or falls through if it
1169 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
1172 Label check_for_internalized_strings;
1173 Label flat_string_check;
1174 // Check for heap-number-heap-number comparison. Can jump to slow case,
1175 // or load both doubles and jump to the code that handles
1176 // that case. If the inputs are not doubles then jumps to
1177 // check_for_internalized_strings.
1178 // In this case a2 will contain the type of lhs_.
1179 EmitCheckForTwoHeapNumbers(masm,
1182 &both_loaded_as_doubles,
1183 &check_for_internalized_strings,
1184 &flat_string_check);
1186 __ bind(&check_for_internalized_strings);
1187 if (cc == eq && !strict()) {
1188 // Returns an answer for two internalized strings or two
1189 // detectable objects.
1190 // Otherwise jumps to string case or not both strings case.
1191 // Assumes that a2 is the type of lhs_ on entry.
1192 EmitCheckForInternalizedStringsOrObjects(
1193 masm, lhs, rhs, &flat_string_check, &slow);
1196 // Check for both being sequential ASCII strings, and inline if that is the
1198 __ bind(&flat_string_check);
1200 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
1202 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
1205 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1212 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1220 // Never falls through to here.
1223 // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1226 // Figure out which native to call and setup the arguments.
1227 Builtins::JavaScript native;
1229 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1231 native = Builtins::COMPARE;
1232 int ncr; // NaN compare result.
1233 if (cc == lt || cc == le) {
1236 ASSERT(cc == gt || cc == ge); // Remaining cases.
1239 __ li(a0, Operand(Smi::FromInt(ncr)));
1243 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1244 // tagged as a small integer.
1245 __ InvokeBuiltin(native, JUMP_FUNCTION);
1252 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
1255 if (save_doubles_ == kSaveFPRegs) {
1256 __ PushSafepointRegistersAndDoubles();
1258 __ PushSafepointRegisters();
1264 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
1267 __ StoreToSafepointRegisterSlot(t9, t9);
1268 if (save_doubles_ == kSaveFPRegs) {
1269 __ PopSafepointRegistersAndDoubles();
1271 __ PopSafepointRegisters();
1277 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1278 // We don't allow a GC during a store buffer overflow so there is no need to
1279 // store the registers in any particular way, but we do have to store and
1281 __ MultiPush(kJSCallerSaved | ra.bit());
1282 if (save_doubles_ == kSaveFPRegs) {
1283 __ MultiPushFPU(kCallerSavedFPU);
1285 const int argument_count = 1;
1286 const int fp_argument_count = 0;
1287 const Register scratch = a1;
1289 AllowExternalCallThatCantCauseGC scope(masm);
1290 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1291 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
1293 ExternalReference::store_buffer_overflow_function(isolate()),
1295 if (save_doubles_ == kSaveFPRegs) {
1296 __ MultiPopFPU(kCallerSavedFPU);
1299 __ MultiPop(kJSCallerSaved | ra.bit());
1304 void MathPowStub::Generate(MacroAssembler* masm) {
1305 const Register base = a1;
1306 const Register exponent = a2;
1307 const Register heapnumbermap = t1;
1308 const Register heapnumber = v0;
1309 const DoubleRegister double_base = f2;
1310 const DoubleRegister double_exponent = f4;
1311 const DoubleRegister double_result = f0;
1312 const DoubleRegister double_scratch = f6;
1313 const FPURegister single_scratch = f8;
1314 const Register scratch = t5;
1315 const Register scratch2 = t3;
1317 Label call_runtime, done, int_exponent;
1318 if (exponent_type_ == ON_STACK) {
1319 Label base_is_smi, unpack_exponent;
1320 // The exponent and base are supplied as arguments on the stack.
1321 // This can only happen if the stub is called from non-optimized code.
1322 // Load input parameters from stack to double registers.
1323 __ lw(base, MemOperand(sp, 1 * kPointerSize));
1324 __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
1326 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
1328 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
1329 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
1330 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1332 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
1333 __ jmp(&unpack_exponent);
1335 __ bind(&base_is_smi);
1336 __ mtc1(scratch, single_scratch);
1337 __ cvt_d_w(double_base, single_scratch);
1338 __ bind(&unpack_exponent);
1340 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1342 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
1343 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1344 __ ldc1(double_exponent,
1345 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1346 } else if (exponent_type_ == TAGGED) {
1347 // Base is already in double_base.
1348 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1350 __ ldc1(double_exponent,
1351 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1354 if (exponent_type_ != INTEGER) {
1355 Label int_exponent_convert;
1356 // Detect integer exponents stored as double.
1357 __ EmitFPUTruncate(kRoundToMinusInf,
1363 kCheckForInexactConversion);
1364 // scratch2 == 0 means there was no conversion error.
1365 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
1367 if (exponent_type_ == ON_STACK) {
1368 // Detect square root case. Crankshaft detects constant +/-0.5 at
1369 // compile time and uses DoMathPowHalf instead. We then skip this check
1370 // for non-constant cases of +/-0.5 as these hardly occur.
1371 Label not_plus_half;
1374 __ Move(double_scratch, 0.5);
1375 __ BranchF(USE_DELAY_SLOT,
1381 // double_scratch can be overwritten in the delay slot.
1382 // Calculates square root of base. Check for the special case of
1383 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
1384 __ Move(double_scratch, -V8_INFINITY);
1385 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1386 __ neg_d(double_result, double_scratch);
1388 // Add +0 to convert -0 to +0.
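      // (Math.pow(-0, 0.5) must be +0, but sqrt(-0) is -0, hence the add.)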
1389 __ add_d(double_scratch, double_base, kDoubleRegZero);
1390 __ sqrt_d(double_result, double_scratch);
      __ bind(&not_plus_half);
1394 __ Move(double_scratch, -0.5);
1395 __ BranchF(USE_DELAY_SLOT,
1401 // double_scratch can be overwritten in the delay slot.
1402 // Calculates square root of base. Check for the special case of
1403 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
1404 __ Move(double_scratch, -V8_INFINITY);
1405 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1406 __ Move(double_result, kDoubleRegZero);
1408 // Add +0 to convert -0 to +0.
1409 __ add_d(double_scratch, double_base, kDoubleRegZero);
1410 __ Move(double_result, 1);
1411 __ sqrt_d(double_scratch, double_scratch);
1412 __ div_d(double_result, double_result, double_scratch);
1418 AllowExternalCallThatCantCauseGC scope(masm);
1419 __ PrepareCallCFunction(0, 2, scratch2);
1420 __ MovToFloatParameters(double_base, double_exponent);
1422 ExternalReference::power_double_double_function(isolate()),
1426 __ MovFromFloatResult(double_result);
1429 __ bind(&int_exponent_convert);
1432 // Calculate power with integer exponent.
1433 __ bind(&int_exponent);
1435 // Get two copies of exponent in the registers scratch and exponent.
1436 if (exponent_type_ == INTEGER) {
1437 __ mov(scratch, exponent);
1439 // Exponent has previously been stored into scratch as untagged integer.
1440 __ mov(exponent, scratch);
1443 __ mov_d(double_scratch, double_base); // Back up base.
1444 __ Move(double_result, 1.0);
1446 // Get absolute value of exponent.
1447 Label positive_exponent;
1448 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
1449 __ Subu(scratch, zero_reg, scratch);
1450 __ bind(&positive_exponent);
1452 Label while_true, no_carry, loop_end;
1453 __ bind(&while_true);
1455 __ And(scratch2, scratch, 1);
1457 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
  __ mul_d(double_result, double_result, double_scratch);
  __ bind(&no_carry);
1461 __ sra(scratch, scratch, 1);
1463 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
1464 __ mul_d(double_scratch, double_scratch, double_scratch);
1466 __ Branch(&while_true);
  __ bind(&loop_end);

  __ Branch(&done, ge, exponent, Operand(zero_reg));
1471 __ Move(double_scratch, 1.0);
1472 __ div_d(double_result, double_scratch, double_result);
1473 // Test whether result is zero. Bail out to check for subnormal result.
1474 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
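  // (For instance, x^|y| can overflow to infinity, making 1/(x^|y|) equal to 0
  // even though the true x^-y is a tiny non-zero subnormal.)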
1475 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
1477 // double_exponent may not contain the exponent value if the input was a
1478 // smi. We set it with exponent value before bailing out.
1479 __ mtc1(exponent, single_scratch);
1480 __ cvt_d_w(double_exponent, single_scratch);
1482 // Returning or bailing out.
1483 Counters* counters = isolate()->counters();
1484 if (exponent_type_ == ON_STACK) {
1485 // The arguments are still on the stack.
1486 __ bind(&call_runtime);
1487 __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
1489 // The stub is called from non-optimized code, which expects the result
1490 // as heap number in exponent.
1492 __ AllocateHeapNumber(
1493 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1494 __ sdc1(double_result,
1495 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1496 ASSERT(heapnumber.is(v0));
1497 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1502 AllowExternalCallThatCantCauseGC scope(masm);
1503 __ PrepareCallCFunction(0, 2, scratch);
1504 __ MovToFloatParameters(double_base, double_exponent);
1506 ExternalReference::power_double_double_function(isolate()),
1510 __ MovFromFloatResult(double_result);
1513 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1519 bool CEntryStub::NeedsImmovableCode() {
1524 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1525 CEntryStub::GenerateAheadOfTime(isolate);
1526 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
1527 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1528 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1529 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1530 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1531 BinaryOpICStub::GenerateAheadOfTime(isolate);
1532 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
1533 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
1534 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1538 void StoreRegistersStateStub::GenerateAheadOfTime(
1540 StoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
1542 // Hydrogen code stubs need stub2 at snapshot time.
1543 StoreRegistersStateStub stub2(isolate, kSaveFPRegs);
1548 void RestoreRegistersStateStub::GenerateAheadOfTime(
1550 RestoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
1552 // Hydrogen code stubs need stub2 at snapshot time.
1553 RestoreRegistersStateStub stub2(isolate, kSaveFPRegs);
1558 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1559 SaveFPRegsMode mode = kSaveFPRegs;
1560 CEntryStub save_doubles(isolate, 1, mode);
1561 StoreBufferOverflowStub stub(isolate, mode);
1562 // These stubs might already be in the snapshot, detect that and don't
1563 // regenerate, which would lead to code stub initialization state being messed
1565 Code* save_doubles_code;
1566 if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
1567 save_doubles_code = *save_doubles.GetCode();
1569 Code* store_buffer_overflow_code;
1570 if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
1571 store_buffer_overflow_code = *stub.GetCode();
1573 isolate->set_fp_stubs_generated(true);
1577 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1578 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1583 void CEntryStub::Generate(MacroAssembler* masm) {
1584 // Called from JavaScript; parameters are on stack as if calling JS function
1585 // s0: number of arguments including receiver
1586 // s1: size of arguments excluding receiver
1587 // s2: pointer to builtin function
1588 // fp: frame pointer (restored after C call)
1589 // sp: stack pointer (restored as callee's sp after C call)
1590 // cp: current context (C callee-saved)
1592 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1594 // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
1595 // The reason for this is that these arguments would need to be saved anyway
1596 // so it's faster to set them up directly.
1597 // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
1599 // Compute the argv pointer in a callee-saved register.
1600 __ Addu(s1, sp, s1);
1602 // Enter the exit frame that transitions from JavaScript to C++.
1603 FrameScope scope(masm, StackFrame::MANUAL);
1604 __ EnterExitFrame(save_doubles_);
1606 // s0: number of arguments including receiver (C callee-saved)
1607 // s1: pointer to first argument (C callee-saved)
1608 // s2: pointer to builtin function (C callee-saved)
1610 // Prepare arguments for C routine.
1613 // a1 = argv (set in the delay slot after find_ra below).
1615 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1616 // also need to reserve the 4 argument slots on the stack.
1618 __ AssertStackIsAligned();
1620 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
1622 // To let the GC traverse the return address of the exit frames, we need to
1623 // know where the return address is. The CEntryStub is unmovable, so
1624 // we can store the address on the stack to be able to find it again and
1625 // we never have to restore it, because it will not change.
1626 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1627 // This branch-and-link sequence is needed to find the current PC on mips,
1628 // saved to the ra register.
1629 // Use masm-> here instead of the double-underscore macro since extra
1630 // coverage code can interfere with the proper calculation of ra.
    Label find_ra;
    masm->bal(&find_ra);  // bal exposes branch delay slot.
    masm->nop();  // Branch delay slot nop.
    masm->bind(&find_ra);
1636 // Adjust the value in ra to point to the correct return location, 2nd
1637 // instruction past the real call into C code (the jalr(t9)), and push it.
1638 // This is the return address of the exit frame.
1639 const int kNumInstructionsToJump = 5;
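    // Those five instructions are the Addu below, the sw, the mov of t9, the
    // jalr and the addiu in its delay slot, so ra ends up pointing just past
    // the call's delay slot.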
1640 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
1641 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
1642 // Stack space reservation moved to the branch delay slot below.
1643 // Stack is still aligned.
1645 // Call the C routine.
    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
    masm->jalr(t9);
    // Set up sp in the delay slot.
    masm->addiu(sp, sp, -kCArgsSlotsSize);
1650 // Make sure the stored 'ra' points to this position.
1651 ASSERT_EQ(kNumInstructionsToJump,
1652 masm->InstructionsGeneratedSince(&find_ra));
1656 // Runtime functions should not return 'the hole'. Allowing it to escape may
1657 // lead to crashes in the IC code later.
1658 if (FLAG_debug_code) {
1660 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1661 __ Branch(&okay, ne, v0, Operand(t0));
1662 __ stop("The hole escaped");
1666 // Check result for exception sentinel.
1667 Label exception_returned;
1668 __ LoadRoot(t0, Heap::kExceptionRootIndex);
1669 __ Branch(&exception_returned, eq, t0, Operand(v0));
1671 ExternalReference pending_exception_address(
1672 Isolate::kPendingExceptionAddress, isolate());
1674 // Check that there is no pending exception, otherwise we
1675 // should have returned the exception sentinel.
1676 if (FLAG_debug_code) {
1678 __ li(a2, Operand(pending_exception_address));
1679 __ lw(a2, MemOperand(a2));
1680 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
1681 // Cannot use check here as it attempts to generate call into runtime.
1682 __ Branch(&okay, eq, t0, Operand(a2));
1683 __ stop("Unexpected pending exception");
1687 // Exit C frame and return.
1689 // sp: stack pointer
1690 // fp: frame pointer
1691 // s0: still holds argc (callee-saved).
1692 __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
1694 // Handling of exception.
1695 __ bind(&exception_returned);
1697 // Retrieve the pending exception.
1698 __ li(a2, Operand(pending_exception_address));
1699 __ lw(v0, MemOperand(a2));
1701 // Clear the pending exception.
1702 __ li(a3, Operand(isolate()->factory()->the_hole_value()));
1703 __ sw(a3, MemOperand(a2));
1705 // Special handling of termination exceptions which are uncatchable
1706 // by javascript code.
1707 Label throw_termination_exception;
1708 __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
1709 __ Branch(&throw_termination_exception, eq, v0, Operand(t0));
1711 // Handle normal exception.
1714 __ bind(&throw_termination_exception);
1715 __ ThrowUncatchable(v0);
1719 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
1720 Label invoke, handler_entry, exit;
1721 Isolate* isolate = masm->isolate();
1724 // a0: entry address
1733 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1735 // Save callee saved registers on the stack.
1736 __ MultiPush(kCalleeSaved | ra.bit());
1738 // Save callee-saved FPU registers.
1739 __ MultiPushFPU(kCalleeSavedFPU);
1740 // Set up the reserved register for 0.0.
1741 __ Move(kDoubleRegZero, 0.0);
1744 // Load argv in s0 register.
1745 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1746 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
1748 __ InitializeRootRegister();
1749 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
1751 // We build an EntryFrame.
1752 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1753 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
1754 __ li(t2, Operand(Smi::FromInt(marker)));
1755 __ li(t1, Operand(Smi::FromInt(marker)));
1756 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1757 isolate)));
1758 __ lw(t0, MemOperand(t0));
1759 __ Push(t3, t2, t1, t0);
1760 // Set up frame pointer for the frame to be pushed.
1761 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
1764 // a0: entry_address
1766 // a2: receiver_pointer
1772 // function slot | entry frame
1774 // bad fp (0xff...f) |
1775 // callee saved registers + ra
1779 // If this is the outermost JS call, set js_entry_sp value.
1780 Label non_outermost_js;
1781 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1782 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1783 __ lw(t2, MemOperand(t1));
1784 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
1785 __ sw(fp, MemOperand(t1));
1786 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1787 Label cont;
1788 __ b(&cont);
1789 __ nop(); // Branch delay slot nop.
1790 __ bind(&non_outermost_js);
1791 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1792 __ bind(&cont);
1793 __ push(t0);
1795 // Jump to a faked try block that does the invoke, with a faked catch
1796 // block that sets the pending exception.
1797 __ jmp(&invoke);
1798 __ bind(&handler_entry);
1799 handler_offset_ = handler_entry.pos();
1800 // Caught exception: Store result (exception) in the pending exception
1801 // field in the JSEnv and return a failure sentinel. Coming in here the
1802 // fp will be invalid because the PushTryHandler below sets it to 0 to
1803 // signal the existence of the JSEntry frame.
1804 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1805 isolate)));
1806 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
1807 __ LoadRoot(v0, Heap::kExceptionRootIndex);
1808 __ b(&exit); // b exposes branch delay slot.
1809 __ nop(); // Branch delay slot nop.
1811 // Invoke: Link this frame into the handler chain. There's only one
1812 // handler block in this code object, so its index is 0.
1813 __ bind(&invoke);
1814 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1815 // If an exception not caught by another handler occurs, this handler
1816 // returns control to the code after the bal(&invoke) above, which
1817 // restores all kCalleeSaved registers (including cp and fp) to their
1818 // saved values before returning a failure to C.
1820 // Clear any pending exceptions.
1821 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
1822 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1823 isolate)));
1824 __ sw(t1, MemOperand(t0));
1826 // Invoke the function by calling through JS entry trampoline builtin.
1827 // Notice that we cannot store a reference to the trampoline code directly in
1828 // this stub, because runtime stubs are not traversed when doing GC.
1831 // a0: entry_address
1833 // a2: receiver_pointer
1840 // callee saved registers + ra
1844 if (is_construct) {
1845 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1846 isolate);
1847 __ li(t0, Operand(construct_entry));
1848 } else {
1849 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1850 __ li(t0, Operand(entry));
1851 }
1852 __ lw(t9, MemOperand(t0)); // Deref address.
1854 // Call JSEntryTrampoline.
1855 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1856 __ Call(t9);
1858 // Unlink this frame from the handler chain.
1859 __ PopTryHandler();
1861 __ bind(&exit); // v0 holds result
1862 // Check if the current stack frame is marked as the outermost JS frame.
1863 Label non_outermost_js_2;
1864 __ pop(t1);
1865 __ Branch(&non_outermost_js_2,
1866 ne,
1867 t1,
1868 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1869 __ li(t1, Operand(ExternalReference(js_entry_sp)));
1870 __ sw(zero_reg, MemOperand(t1));
1871 __ bind(&non_outermost_js_2);
1873 // Restore the top frame descriptors from the stack.
1874 __ pop(t1);
1875 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1876 isolate)));
1877 __ sw(t1, MemOperand(t0));
1879 // Reset the stack to the callee saved registers.
1880 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
1882 // Restore callee-saved fpu registers.
1883 __ MultiPopFPU(kCalleeSavedFPU);
1885 // Restore callee saved registers from the stack.
1886 __ MultiPop(kCalleeSaved | ra.bit());
1887 // Return.
1888 __ Jump(ra);
1892 // Uses registers a0 to t0.
1893 // Expected input (depending on whether args are in registers or on the stack):
1894 // * object: a0 or at sp + 1 * kPointerSize.
1895 // * function: a1 or at sp.
1897 // An inlined call site may have been generated before calling this stub.
1898 // In this case the offset to the inline site to patch is passed on the stack,
1899 // in the safepoint slot for register t0.
1900 void InstanceofStub::Generate(MacroAssembler* masm) {
1901 // Call site inlining and patching implies arguments in registers.
1902 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1903 // ReturnTrueFalse is only implemented for inlined call sites.
1904 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
1906 // Fixed register usage throughout the stub:
1907 const Register object = a0; // Object (lhs).
1908 Register map = a3; // Map of the object.
1909 const Register function = a1; // Function (rhs).
1910 const Register prototype = t0; // Prototype of the function.
1911 const Register inline_site = t5;
1912 const Register scratch = a2;
1914 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
1916 Label slow, loop, is_instance, is_not_instance, not_js_object;
1918 if (!HasArgsInRegisters()) {
1919 __ lw(object, MemOperand(sp, 1 * kPointerSize));
1920 __ lw(function, MemOperand(sp, 0));
1921 }
1923 // Check that the left hand is a JS object and load map.
1924 __ JumpIfSmi(object, ¬_js_object);
1925 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
1927 // If there is a call site cache don't look in the global cache, but do the
1928 // real lookup and update the call site cache.
1929 if (!HasCallSiteInlineCheck()) {
1930 Label miss;
1931 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
1932 __ Branch(&miss, ne, function, Operand(at));
1933 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
1934 __ Branch(&miss, ne, map, Operand(at));
1935 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1936 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
1937 __ bind(&miss);
1938 }
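// A sketch of the one-entry instanceof cache consulted above: three root
// slots (function, map, answer) memoize the last result, so a repeated
// 'obj instanceof F' with an unchanged map returns without walking the
// prototype chain:
//   if (cache_function == function && cache_map == map)
//     return cache_answer; // Smi 0 encodes true, Smi 1 encodes false.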
1941 // Get the prototype of the function.
1942 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1944 // Check that the function prototype is a JS object.
1945 __ JumpIfSmi(prototype, &slow);
1946 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1948 // Update the global instanceof or call site inlined cache with the current
1949 // map and function. The cached answer will be set when it is known below.
1950 if (!HasCallSiteInlineCheck()) {
1951 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1952 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1953 } else {
1954 ASSERT(HasArgsInRegisters());
1955 // Patch the (relocated) inlined map check.
1957 // The offset was stored in t0 safepoint slot.
1958 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1959 __ LoadFromSafepointRegisterSlot(scratch, t0);
1960 __ Subu(inline_site, ra, scratch);
1961 // Get the map location in scratch and patch it.
1962 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
1963 __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
1964 }
1966 // Register mapping: a3 is object map and t0 is function prototype.
1967 // Get prototype of object into a2.
1968 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1970 // We don't need map any more. Use it as a scratch register.
1971 Register scratch2 = map;
1974 // Loop through the prototype chain looking for the function prototype.
1975 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1976 __ bind(&loop);
1977 __ Branch(&is_instance, eq, scratch, Operand(prototype));
1978 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
1979 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1980 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1981 __ Branch(&loop);
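// The loop above is the standard prototype walk; in pseudo-code:
//   p = object.map.prototype;
//   for (;;) {
//     if (p == function.prototype) return true;   // is_instance
//     if (p == null) return false;                // is_not_instance
//     p = p.map.prototype;
//   }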
1983 __ bind(&is_instance);
1984 ASSERT(Smi::FromInt(0) == 0);
1985 if (!HasCallSiteInlineCheck()) {
1986 __ mov(v0, zero_reg);
1987 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
1988 } else {
1989 // Patch the call site to return true.
1990 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
1991 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1992 // Get the boolean result location in scratch and patch it.
1993 __ PatchRelocatedValue(inline_site, scratch, v0);
1995 if (!ReturnTrueFalseObject()) {
1996 ASSERT_EQ(Smi::FromInt(0), 0);
1997 __ mov(v0, zero_reg);
1998 }
1999 }
2000 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2002 __ bind(&is_not_instance);
2003 if (!HasCallSiteInlineCheck()) {
2004 __ li(v0, Operand(Smi::FromInt(1)));
2005 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
2006 } else {
2007 // Patch the call site to return false.
2008 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
2009 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
2010 // Get the boolean result location in scratch and patch it.
2011 __ PatchRelocatedValue(inline_site, scratch, v0);
2013 if (!ReturnTrueFalseObject()) {
2014 __ li(v0, Operand(Smi::FromInt(1)));
2015 }
2016 }
2018 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2020 Label object_not_null, object_not_null_or_smi;
2021 __ bind(¬_js_object);
2022 // Before the null, smi and string value checks, check that the rhs is a
2023 // function; for a non-function rhs an exception needs to be thrown.
2024 __ JumpIfSmi(function, &slow);
2025 __ GetObjectType(function, scratch2, scratch);
2026 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
2028 // Null is not instance of anything.
2029 __ Branch(&object_not_null,
2030 ne,
2031 object,
2032 Operand(isolate()->factory()->null_value()));
2033 __ li(v0, Operand(Smi::FromInt(1)));
2034 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2036 __ bind(&object_not_null);
2037 // Smi values are not instances of anything.
2038 __ JumpIfNotSmi(object, &object_not_null_or_smi);
2039 __ li(v0, Operand(Smi::FromInt(1)));
2040 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2042 __ bind(&object_not_null_or_smi);
2043 // String values are not instances of anything.
2044 __ IsObjectJSStringType(object, scratch, &slow);
2045 __ li(v0, Operand(Smi::FromInt(1)));
2046 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2048 // Slow-case. Tail call builtin.
2049 __ bind(&slow);
2050 if (!ReturnTrueFalseObject()) {
2051 if (HasArgsInRegisters()) {
2052 __ Push(a0, a1);
2053 }
2054 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
2055 } else {
2056 {
2057 FrameScope scope(masm, StackFrame::INTERNAL);
2058 __ Push(a0, a1);
2059 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2060 }
2061 __ mov(a0, v0);
2062 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
2063 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
2064 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
2065 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2066 }
2070 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
2071 Label miss;
2072 Register receiver;
2073 if (kind() == Code::KEYED_LOAD_IC) {
2074 // ----------- S t a t e -------------
2075 // -- ra : return address
2076 // -- a0 : key
2077 // -- a1 : receiver
2078 // -----------------------------------
2079 __ Branch(&miss, ne, a0,
2080 Operand(isolate()->factory()->prototype_string()));
2081 receiver = a1;
2082 } else {
2083 ASSERT(kind() == Code::LOAD_IC);
2084 // ----------- S t a t e -------------
2085 // -- a2 : name
2086 // -- ra : return address
2087 // -- a0 : receiver
2088 // -- sp[0] : receiver
2089 // -----------------------------------
2090 receiver = a0;
2091 }
2093 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
2094 __ bind(&miss);
2095 StubCompiler::TailCallBuiltin(
2096 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2100 Register InstanceofStub::left() { return a0; }
2103 Register InstanceofStub::right() { return a1; }
2106 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2107 // The displacement is the offset of the last parameter (if any)
2108 // relative to the frame pointer.
2109 const int kDisplacement =
2110 StandardFrameConstants::kCallerSPOffset - kPointerSize;
2112 // Check that the key is a smi.
2113 Label slow;
2114 __ JumpIfNotSmi(a1, &slow);
2116 // Check if the calling frame is an arguments adaptor frame.
2117 Label adaptor;
2118 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2119 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
2120 __ Branch(&adaptor,
2121 eq,
2122 a3,
2123 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2125 // Check index (a1) against formal parameters count limit passed in
2126 // through register a0. Use unsigned comparison to get negative
2127 // check for free.
2128 __ Branch(&slow, hs, a1, Operand(a0));
2130 // Read the argument from the stack and return it.
2131 __ subu(a3, a0, a1);
2132 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
2133 __ Addu(a3, fp, Operand(t3));
2134 __ Ret(USE_DELAY_SLOT);
2135 __ lw(v0, MemOperand(a3, kDisplacement));
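// A worked example of the address computed above (assuming 32-bit smis
// with kSmiTagSize == 1 and kPointerSizeLog2 == 2): for argc == 4 (a0
// holds the smi 8) and key == 1 (a1 holds the smi 2),
//   a3 = 8 - 2 = 6            // (argc - key), still smi-tagged
//   t3 = 6 << (2 - 1) = 12    // untag and scale to bytes in one shift
// so v0 is loaded from fp + 12 + kDisplacement, three words into the
// caller's parameter area.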
2137 // Arguments adaptor case: Check index (a1) against actual arguments
2138 // limit found in the arguments adaptor frame. Use unsigned
2139 // comparison to get negative check for free.
2140 __ bind(&adaptor);
2141 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2142 __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
2144 // Read the argument from the adaptor frame and return it.
2145 __ subu(a3, a0, a1);
2146 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
2147 __ Addu(a3, a2, Operand(t3));
2148 __ Ret(USE_DELAY_SLOT);
2149 __ lw(v0, MemOperand(a3, kDisplacement));
2151 // Slow-case: Handle non-smi or out-of-bounds access to arguments
2152 // by calling the runtime system.
2153 __ bind(&slow);
2154 __ push(a1);
2155 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2159 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
2160 // sp[0] : number of parameters
2161 // sp[4] : receiver displacement
2163 // Check if the calling frame is an arguments adaptor frame.
2164 Label runtime;
2165 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2166 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
2167 __ Branch(&runtime,
2168 ne,
2169 a2,
2170 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2172 // Patch the arguments.length and the parameters pointer in the current frame.
2173 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2174 __ sw(a2, MemOperand(sp, 0 * kPointerSize));
2175 __ sll(t3, a2, 1);
2176 __ Addu(a3, a3, Operand(t3));
2177 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
2178 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2180 __ bind(&runtime);
2181 __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
2185 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
2187 // sp[0] : number of parameters (tagged)
2188 // sp[4] : address of receiver argument
2190 // Registers used over whole function:
2191 // t2 : allocated object (tagged)
2192 // t5 : mapped parameter count (tagged)
2194 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2195 // a1 = parameter count (tagged)
2197 // Check if the calling frame is an arguments adaptor frame.
2198 Label runtime;
2199 Label adaptor_frame, try_allocate;
2200 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2201 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
2202 __ Branch(&adaptor_frame,
2203 eq,
2204 a2,
2205 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2207 // No adaptor, parameter count = argument count.
2208 __ mov(a2, a1);
2209 __ b(&try_allocate);
2210 __ nop(); // Branch delay slot nop.
2212 // We have an adaptor frame. Patch the parameters pointer.
2213 __ bind(&adaptor_frame);
2214 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2215 __ sll(t6, a2, 1);
2216 __ Addu(a3, a3, Operand(t6));
2217 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2218 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2220 // a1 = parameter count (tagged)
2221 // a2 = argument count (tagged)
2222 // Compute the mapped parameter count = min(a1, a2) in a1.
2223 Label skip_min;
2224 __ Branch(&skip_min, lt, a1, Operand(a2));
2225 __ mov(a1, a2);
2226 __ bind(&skip_min);
2228 __ bind(&try_allocate);
2230 // Compute the sizes of backing store, parameter map, and arguments object.
2231 // 1. Parameter map, has 2 extra words containing context and backing store.
2232 const int kParameterMapHeaderSize =
2233 FixedArray::kHeaderSize + 2 * kPointerSize;
2234 // If there are no mapped parameters, we do not need the parameter_map.
2235 Label param_map_size;
2236 ASSERT_EQ(0, Smi::FromInt(0));
2237 __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg));
2238 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
2239 __ sll(t5, a1, 1);
2240 __ addiu(t5, t5, kParameterMapHeaderSize);
2241 __ bind(¶m_map_size);
2243 // 2. Backing store.
2244 __ sll(t6, a2, 1);
2245 __ Addu(t5, t5, Operand(t6));
2246 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
2248 // 3. Arguments object.
2249 __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
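// A worked size example for the single allocation below (32-bit, so one
// word is 4 bytes; the counts are smi-tagged, which is why the sll-by-1
// above already turns a count into a byte size): with 2 mapped parameters
// and 3 arguments, t5 covers
//   parameter map:  kParameterMapHeaderSize + 2 words
//   backing store:  FixedArray::kHeaderSize + 3 words
//   plus Heap::kSloppyArgumentsObjectSize for the arguments object itself,
// and all three objects come out of one Allocate call.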
2251 // Do the allocation of all three objects in one go.
2252 __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
2254 // v0 = address of new object(s) (tagged)
2255 // a2 = argument count (tagged)
2256 // Get the arguments boilerplate from the current native context into t0.
2257 const int kNormalOffset =
2258 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX);
2259 const int kAliasedOffset =
2260 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
2262 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2263 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
2264 Label skip2_ne, skip2_eq;
2265 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
2266 __ lw(t0, MemOperand(t0, kNormalOffset));
2267 __ bind(&skip2_ne);
2269 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
2270 __ lw(t0, MemOperand(t0, kAliasedOffset));
2271 __ bind(&skip2_eq);
2273 // v0 = address of new object (tagged)
2274 // a1 = mapped parameter count (tagged)
2275 // a2 = argument count (tagged)
2276 // t0 = address of boilerplate object (tagged)
2277 // Copy the JS object part.
2278 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2279 __ lw(a3, FieldMemOperand(t0, i));
2280 __ sw(a3, FieldMemOperand(v0, i));
2281 }
2283 // Set up the callee in-object property.
2284 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2285 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
2286 const int kCalleeOffset = JSObject::kHeaderSize +
2287 Heap::kArgumentsCalleeIndex * kPointerSize;
2288 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
2290 // Use the length (smi tagged) and set that as an in-object property too.
2291 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2292 const int kLengthOffset = JSObject::kHeaderSize +
2293 Heap::kArgumentsLengthIndex * kPointerSize;
2294 __ sw(a2, FieldMemOperand(v0, kLengthOffset));
2296 // Set up the elements pointer in the allocated arguments object.
2297 // If we allocated a parameter map, t0 will point there, otherwise
2298 // it will point to the backing store.
2299 __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
2300 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
2302 // v0 = address of new object (tagged)
2303 // a1 = mapped parameter count (tagged)
2304 // a2 = argument count (tagged)
2305 // t0 = address of parameter map or backing store (tagged)
2306 // Initialize parameter map. If there are no mapped arguments, we're done.
2307 Label skip_parameter_map;
2308 Label skip3;
2309 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
2310 // Move backing store address to a3, because it is
2311 // expected there when filling in the unmapped arguments.
2312 __ mov(a3, t0);
2313 __ bind(&skip3);
2315 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
2317 __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
2318 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
2319 __ Addu(t2, a1, Operand(Smi::FromInt(2)));
2320 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
2321 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
2323 __ Addu(t2, t0, Operand(t6));
2324 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
2325 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
2327 // Copy the parameter slots and the holes in the arguments.
2328 // We need to fill in mapped_parameter_count slots. They index the context,
2329 // where parameters are stored in reverse order, at
2330 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2331 // The mapped parameter thus need to get indices
2332 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
2333 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2334 // We loop from right to left.
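// A worked example (assuming Context::MIN_CONTEXT_SLOTS == 4 for the
// sketch): with parameter_count == 3 and mapped_parameter_count == 2, the
// two mapped slots receive the context indices 6 and 5, i.e.
// MIN_CONTEXT_SLOTS + 3 - 1 down to MIN_CONTEXT_SLOTS + 3 - 2, stored as
// smis in the parameter map, while the matching backing-store slots are
// filled with the hole.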
2335 Label parameters_loop, parameters_test;
2336 __ mov(t2, a1);
2337 __ lw(t5, MemOperand(sp, 0 * kPointerSize));
2338 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
2339 __ Subu(t5, t5, Operand(a1));
2340 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
2341 __ sll(t6, a1, 1);
2342 __ Addu(a3, t0, Operand(t6));
2343 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
2345 // t2 = loop variable (tagged)
2346 // a1 = mapping index (tagged)
2347 // a3 = address of backing store (tagged)
2348 // t0 = address of parameter map (tagged)
2349 // t1 = temporary scratch (a.o., for address calculation)
2350 // t3 = the hole value
2351 __ jmp(¶meters_test);
2353 __ bind(¶meters_loop);
2354 __ Subu(t2, t2, Operand(Smi::FromInt(1)));
2355 __ sll(t1, t2, 1);
2356 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
2357 __ Addu(t6, t0, t1);
2358 __ sw(t5, MemOperand(t6));
2359 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
2360 __ Addu(t6, a3, t1);
2361 __ sw(t3, MemOperand(t6));
2362 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
2363 __ bind(¶meters_test);
2364 __ Branch(¶meters_loop, ne, t2, Operand(Smi::FromInt(0)));
2366 __ bind(&skip_parameter_map);
2367 // a2 = argument count (tagged)
2368 // a3 = address of backing store (tagged)
2370 // Copy arguments header and remaining slots (if there are any).
2371 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
2372 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
2373 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
2375 Label arguments_loop, arguments_test;
2376 __ mov(t5, a1);
2377 __ lw(t0, MemOperand(sp, 1 * kPointerSize));
2378 __ sll(t6, t5, 1);
2379 __ Subu(t0, t0, Operand(t6));
2380 __ jmp(&arguments_test);
2382 __ bind(&arguments_loop);
2383 __ Subu(t0, t0, Operand(kPointerSize));
2384 __ lw(t2, MemOperand(t0, 0));
2385 __ sll(t6, t5, 1);
2386 __ Addu(t1, a3, Operand(t6));
2387 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
2388 __ Addu(t5, t5, Operand(Smi::FromInt(1)));
2390 __ bind(&arguments_test);
2391 __ Branch(&arguments_loop, lt, t5, Operand(a2));
2393 // Return and remove the on-stack parameters.
2394 __ DropAndRet(3);
2396 // Do the runtime call to allocate the arguments object.
2397 // a2 = argument count (tagged)
2398 __ bind(&runtime);
2399 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
2400 __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
2404 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2405 // sp[0] : number of parameters
2406 // sp[4] : receiver displacement
2408 // Check if the calling frame is an arguments adaptor frame.
2409 Label adaptor_frame, try_allocate, runtime;
2410 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2411 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
2412 __ Branch(&adaptor_frame,
2413 eq,
2414 a3,
2415 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2417 // Get the length from the frame.
2418 __ lw(a1, MemOperand(sp, 0));
2419 __ Branch(&try_allocate);
2421 // Patch the arguments.length and the parameters pointer.
2422 __ bind(&adaptor_frame);
2423 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2424 __ sw(a1, MemOperand(sp, 0));
2425 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
2426 __ Addu(a3, a2, Operand(at));
2428 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2429 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2431 // Try the new space allocation. Start out with computing the size
2432 // of the arguments object and the elements array in words.
2433 Label add_arguments_object;
2434 __ bind(&try_allocate);
2435 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
2436 __ srl(a1, a1, kSmiTagSize);
2438 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
2439 __ bind(&add_arguments_object);
2440 __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
2442 // Do the allocation of both objects in one go.
2443 __ Allocate(a1, v0, a2, a3, &runtime,
2444 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2446 // Get the arguments boilerplate from the current native context.
2447 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2448 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
2449 __ lw(t0, MemOperand(t0, Context::SlotOffset(
2450 Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)));
2452 // Copy the JS object part.
2453 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
2455 // Get the length (smi tagged) and set that as an in-object property too.
2456 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2457 __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2458 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
2459 Heap::kArgumentsLengthIndex * kPointerSize));
2461 Label done;
2462 __ Branch(&done, eq, a1, Operand(zero_reg));
2464 // Get the parameters pointer from the stack.
2465 __ lw(a2, MemOperand(sp, 1 * kPointerSize));
2467 // Set up the elements pointer in the allocated arguments object and
2468 // initialize the header in the elements fixed array.
2469 __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
2470 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
2471 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
2472 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
2473 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
2474 // Untag the length for the loop.
2475 __ srl(a1, a1, kSmiTagSize);
2477 // Copy the fixed array slots.
2478 Label loop;
2479 // Set up t0 to point to the first array slot.
2480 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2481 __ bind(&loop);
2482 // Pre-decrement a2 with kPointerSize on each iteration.
2483 // Pre-decrement in order to skip receiver.
2484 __ Addu(a2, a2, Operand(-kPointerSize));
2485 __ lw(a3, MemOperand(a2));
2486 // Post-increment t0 with kPointerSize on each iteration.
2487 __ sw(a3, MemOperand(t0));
2488 __ Addu(t0, t0, Operand(kPointerSize));
2489 __ Subu(a1, a1, Operand(1));
2490 __ Branch(&loop, ne, a1, Operand(zero_reg));
2492 // Return and remove the on-stack parameters.
2493 __ bind(&done);
2494 __ DropAndRet(3);
2496 // Do the runtime call to allocate the arguments object.
2497 __ bind(&runtime);
2498 __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
2502 void RegExpExecStub::Generate(MacroAssembler* masm) {
2503 // Just jump directly to runtime if native RegExp is not selected at compile
2504 // time, if the regexp entry in generated code is turned off by a runtime
2505 // switch, or at compilation.
2506 #ifdef V8_INTERPRETED_REGEXP
2507 __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
2508 #else // V8_INTERPRETED_REGEXP
2510 // Stack frame on entry.
2511 // sp[0]: last_match_info (expected JSArray)
2512 // sp[4]: previous index
2513 // sp[8]: subject string
2514 // sp[12]: JSRegExp object
2516 const int kLastMatchInfoOffset = 0 * kPointerSize;
2517 const int kPreviousIndexOffset = 1 * kPointerSize;
2518 const int kSubjectOffset = 2 * kPointerSize;
2519 const int kJSRegExpOffset = 3 * kPointerSize;
2521 Label runtime;
2522 // Allocation of registers for this function. These are in callee save
2523 // registers and will be preserved by the call to the native RegExp code, as
2524 // this code is called using the normal C calling convention. When calling
2525 // directly from generated code the native RegExp code will not do a GC and
2526 // therefore the content of these registers are safe to use after the call.
2527 // MIPS - using s0..s2, since we are not using CEntry Stub.
2528 Register subject = s0;
2529 Register regexp_data = s1;
2530 Register last_match_info_elements = s2;
2532 // Ensure that a RegExp stack is allocated.
2533 ExternalReference address_of_regexp_stack_memory_address =
2534 ExternalReference::address_of_regexp_stack_memory_address(
2536 ExternalReference address_of_regexp_stack_memory_size =
2537 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2538 __ li(a0, Operand(address_of_regexp_stack_memory_size));
2539 __ lw(a0, MemOperand(a0, 0));
2540 __ Branch(&runtime, eq, a0, Operand(zero_reg));
2542 // Check that the first argument is a JSRegExp object.
2543 __ lw(a0, MemOperand(sp, kJSRegExpOffset));
2544 STATIC_ASSERT(kSmiTag == 0);
2545 __ JumpIfSmi(a0, &runtime);
2546 __ GetObjectType(a0, a1, a1);
2547 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2549 // Check that the RegExp has been compiled (data contains a fixed array).
2550 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2551 if (FLAG_debug_code) {
2552 __ SmiTst(regexp_data, t0);
2553 __ Check(ne,
2554 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2555 t0,
2556 Operand(zero_reg));
2557 __ GetObjectType(regexp_data, a0, a0);
2558 __ Check(eq,
2559 kUnexpectedTypeForRegExpDataFixedArrayExpected,
2560 a0,
2561 Operand(FIXED_ARRAY_TYPE));
2562 }
2564 // regexp_data: RegExp data (FixedArray)
2565 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2566 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2567 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2569 // regexp_data: RegExp data (FixedArray)
2570 // Check that the number of captures fit in the static offsets vector buffer.
2571 __ lw(a2,
2572 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2573 // Check (number_of_captures + 1) * 2 <= offsets vector size
2574 // Or number_of_captures * 2 <= offsets vector size - 2
2575 // Multiplying by 2 comes for free since a2 is smi-tagged.
2576 STATIC_ASSERT(kSmiTag == 0);
2577 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2578 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2579 __ Branch(
2580 &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
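// A worked instance of the check above (assuming the static offsets vector
// holds 50 entries): a2 contains number_of_captures as a smi, i.e. already
// doubled, so requiring a2 <= 50 - 2 admits at most 24 captures, and
// (24 + 1) * 2 == 50 slots is exactly what the vector can hold.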
2582 // Reset offset for possibly sliced string.
2583 __ mov(t0, zero_reg);
2584 __ lw(subject, MemOperand(sp, kSubjectOffset));
2585 __ JumpIfSmi(subject, &runtime);
2586 __ mov(a3, subject); // Make a copy of the original subject string.
2587 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2588 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2589 // subject: subject string
2590 // a3: subject string
2591 // a0: subject string instance type
2592 // regexp_data: RegExp data (FixedArray)
2593 // Handle subject string according to its encoding and representation:
2594 // (1) Sequential string? If yes, go to (5).
2595 // (2) Anything but sequential or cons? If yes, go to (6).
2596 // (3) Cons string. If the string is flat, replace subject with first string.
2597 // Otherwise bailout.
2598 // (4) Is subject external? If yes, go to (7).
2599 // (5) Sequential string. Load regexp code according to encoding.
2603 // Deferred code at the end of the stub:
2604 // (6) Not a long external string? If yes, go to (8).
2605 // (7) External string. Make it, offset-wise, look like a sequential string.
2607 // (8) Short external string or not a string? If yes, bail out to runtime.
2608 // (9) Sliced string. Replace subject with parent. Go to (4).
2610 Label seq_string /* 5 */, external_string /* 7 */,
2611 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2612 not_long_external /* 8 */;
2614 // (1) Sequential string? If yes, go to (5).
2615 __ And(a1,
2616 a0,
2617 Operand(kIsNotStringMask |
2618 kStringRepresentationMask |
2619 kShortExternalStringMask));
2620 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2621 __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
2623 // (2) Anything but sequential or cons? If yes, go to (6).
2624 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2625 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2626 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2627 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2629 __ Branch(¬_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2631 // (3) Cons string. Check that it's flat.
2632 // Replace subject with first string and reload instance type.
2633 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2634 __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2635 __ Branch(&runtime, ne, a0, Operand(a1));
2636 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2638 // (4) Is subject external? If yes, go to (7).
2639 __ bind(&check_underlying);
2640 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2641 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2642 STATIC_ASSERT(kSeqStringTag == 0);
2643 __ And(at, a0, Operand(kStringRepresentationMask));
2644 // The underlying external string is never a short external string.
2645 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2646 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2647 __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
2649 // (5) Sequential string. Load regexp code according to encoding.
2650 __ bind(&seq_string);
2651 // subject: sequential subject string (or look-alike, external string)
2652 // a3: original subject string
2653 // Load previous index and check range before a3 is overwritten. We have to
2654 // use a3 instead of subject here because subject might have been only made
2655 // to look like a sequential string when it actually is an external string.
2656 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
2657 __ JumpIfNotSmi(a1, &runtime);
2658 __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
2659 __ Branch(&runtime, ls, a3, Operand(a1));
2660 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
2662 STATIC_ASSERT(kStringEncodingMask == 4);
2663 STATIC_ASSERT(kOneByteStringTag == 4);
2664 STATIC_ASSERT(kTwoByteStringTag == 0);
2665 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
2666 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
2667 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
2668 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2669 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
2671 // (E) Carry on. String handling is done.
2672 // t9: irregexp code
2673 // Check that the irregexp code has been generated for the actual string
2674 // encoding. If it has, the field contains a code object otherwise it contains
2675 // a smi (code flushing support).
2676 __ JumpIfSmi(t9, &runtime);
2678 // a1: previous index
2679 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
2681 // subject: Subject string
2682 // regexp_data: RegExp data (FixedArray)
2683 // All checks done. Now push arguments for native regexp code.
2684 __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
2685 1, a0, a2);
2687 // Isolates: note we add an additional parameter here (isolate pointer).
2688 const int kRegExpExecuteArguments = 9;
2689 const int kParameterRegisters = 4;
2690 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2692 // Stack pointer now points to cell where return address is to be written.
2693 // Arguments are before that on the stack or in registers, meaning we
2694 // treat the return address as argument 5. Thus every argument after that
2695 // needs to be shifted back by 1. Since DirectCEntryStub will handle
2696 // allocating space for the c argument slots, we don't need to calculate
2697 // that into the argument positions on the stack. This is how the stack will
2698 // look (sp meaning the value of sp at this moment):
2699 // [sp + 5] - Argument 9
2700 // [sp + 4] - Argument 8
2701 // [sp + 3] - Argument 7
2702 // [sp + 2] - Argument 6
2703 // [sp + 1] - Argument 5
2704 // [sp + 0] - saved ra
2706 // Argument 9: Pass current isolate address.
2707 // CFunctionArgumentOperand handles MIPS stack argument slots.
2708 __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
2709 __ sw(a0, MemOperand(sp, 5 * kPointerSize));
2711 // Argument 8: Indicate that this is a direct call from JavaScript.
2712 __ li(a0, Operand(1));
2713 __ sw(a0, MemOperand(sp, 4 * kPointerSize));
2715 // Argument 7: Start (high end) of backtracking stack memory area.
2716 __ li(a0, Operand(address_of_regexp_stack_memory_address));
2717 __ lw(a0, MemOperand(a0, 0));
2718 __ li(a2, Operand(address_of_regexp_stack_memory_size));
2719 __ lw(a2, MemOperand(a2, 0));
2720 __ addu(a0, a0, a2);
2721 __ sw(a0, MemOperand(sp, 3 * kPointerSize));
2723 // Argument 6: Set the number of capture registers to zero to force global
2724 // regexps to behave as non-global. This does not affect non-global regexps.
2725 __ mov(a0, zero_reg);
2726 __ sw(a0, MemOperand(sp, 2 * kPointerSize));
2728 // Argument 5: static offsets vector buffer.
2729 __ li(a0, Operand(
2730 ExternalReference::address_of_static_offsets_vector(isolate())));
2731 __ sw(a0, MemOperand(sp, 1 * kPointerSize));
2733 // For arguments 4 and 3 get string length, calculate start of string data
2734 // and calculate the shift of the index (0 for ASCII and 1 for two byte).
2735 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2736 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
2737 // Load the length from the original subject string from the previous stack
2738 // frame. Therefore we have to use fp, which points exactly to two pointer
2739 // sizes below the previous sp. (Because creating a new stack frame pushes
2740 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2741 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2742 // If slice offset is not 0, load the length from the original sliced string.
2743 // Argument 4, a3: End of string data
2744 // Argument 3, a2: Start of string data
2745 // Prepare start and end index of the input.
2746 __ sllv(t1, t0, a3);
2747 __ addu(t0, t2, t1);
2748 __ sllv(t1, a1, a3);
2749 __ addu(a2, t0, t1);
2751 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
2752 __ sra(t2, t2, kSmiTagSize);
2753 __ sllv(t1, t2, a3);
2754 __ addu(a3, t0, t1);
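// In effect (a sketch; t2 holds the untagged start of the character data,
// t0 the slice offset, a1 the previous index, and a3 the encoding shift,
// which is 0 for one-byte and 1 for two-byte strings after the Xor above):
//   start = data + (slice_offset << shift) + (previous_index << shift)
//   end   = data + (slice_offset << shift) + (length << shift)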
2755 // Argument 2 (a1): Previous index.
2758 // Argument 1 (a0): Subject string.
2759 __ mov(a0, subject);
2761 // Locate the code entry and call it.
2762 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
2763 DirectCEntryStub stub(isolate());
2764 stub.GenerateCall(masm, t9);
2766 __ LeaveExitFrame(false, no_reg, true);
2769 // subject: subject string (callee saved)
2770 // regexp_data: RegExp data (callee saved)
2771 // last_match_info_elements: Last match info elements (callee saved)
2772 // Check the result.
2773 Label success;
2774 __ Branch(&success, eq, v0, Operand(1));
2775 // We expect exactly one result since we force the called regexp to behave
2776 // as non-global.
2777 Label failure;
2778 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
2779 // If not an exception, it can only be retry. Handle that in the runtime system.
2780 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2781 // Result must now be exception. If there is no pending exception already a
2782 // stack overflow (on the backtrack stack) was detected in RegExp code, but
2783 // the exception has not been created yet. Handle that in the runtime system.
2784 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2785 __ li(a1, Operand(isolate()->factory()->the_hole_value()));
2786 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2788 __ lw(v0, MemOperand(a2, 0));
2789 __ Branch(&runtime, eq, v0, Operand(a1));
2791 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
2793 // Check if the exception is a termination. If so, throw as uncatchable.
2794 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
2795 Label termination_exception;
2796 __ Branch(&termination_exception, eq, v0, Operand(a0));
2798 __ Throw(v0);
2800 __ bind(&termination_exception);
2801 __ ThrowUncatchable(v0);
2803 __ bind(&failure);
2804 // For failure and exception return null.
2805 __ li(v0, Operand(isolate()->factory()->null_value()));
2806 __ DropAndRet(4);
2808 // Process the result from the native regexp code.
2809 __ bind(&success);
2810 __ lw(a1,
2811 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2812 // Calculate number of capture registers (number_of_captures + 1) * 2.
2813 // Multiplying by 2 comes for free since r1 is smi-tagged.
2814 STATIC_ASSERT(kSmiTag == 0);
2815 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2816 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
2818 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
2819 __ JumpIfSmi(a0, &runtime);
2820 __ GetObjectType(a0, a2, a2);
2821 __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
2822 // Check that the JSArray is in fast case.
2823 __ lw(last_match_info_elements,
2824 FieldMemOperand(a0, JSArray::kElementsOffset));
2825 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2826 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2827 __ Branch(&runtime, ne, a0, Operand(at));
2828 // Check that the last match info has space for the capture registers and the
2829 // additional information.
2830 __ lw(a0,
2831 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2832 __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
2833 __ sra(at, a0, kSmiTagSize);
2834 __ Branch(&runtime, gt, a2, Operand(at));
2836 // a1: number of capture registers
2837 // subject: subject string
2838 // Store the capture count.
2839 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
2840 __ sw(a2, FieldMemOperand(last_match_info_elements,
2841 RegExpImpl::kLastCaptureCountOffset));
2842 // Store last subject and last input.
2843 __ sw(subject,
2844 FieldMemOperand(last_match_info_elements,
2845 RegExpImpl::kLastSubjectOffset));
2846 __ mov(a2, subject);
2847 __ RecordWriteField(last_match_info_elements,
2848 RegExpImpl::kLastSubjectOffset,
2849 subject,
2850 t3,
2851 kRAHasNotBeenSaved,
2852 kDontSaveFPRegs);
2853 __ mov(subject, a2);
2854 __ sw(subject,
2855 FieldMemOperand(last_match_info_elements,
2856 RegExpImpl::kLastInputOffset));
2857 __ RecordWriteField(last_match_info_elements,
2858 RegExpImpl::kLastInputOffset,
2859 subject,
2860 t3,
2861 kRAHasNotBeenSaved,
2862 kDontSaveFPRegs);
2864 // Get the static offsets vector filled by the native regexp code.
2865 ExternalReference address_of_static_offsets_vector =
2866 ExternalReference::address_of_static_offsets_vector(isolate());
2867 __ li(a2, Operand(address_of_static_offsets_vector));
2869 // a1: number of capture registers
2870 // a2: offsets vector
2871 Label next_capture, done;
2872 // Capture register counter starts from number of capture registers and
2873 // counts down until wrapping after zero.
2874 __ Addu(a0,
2875 last_match_info_elements,
2876 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2877 __ bind(&next_capture);
2878 __ Subu(a1, a1, Operand(1));
2879 __ Branch(&done, lt, a1, Operand(zero_reg));
2880 // Read the value from the static offsets vector buffer.
2881 __ lw(a3, MemOperand(a2, 0));
2882 __ addiu(a2, a2, kPointerSize);
2883 // Store the smi value in the last match info.
2884 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
2885 __ sw(a3, MemOperand(a0, 0));
2886 __ Branch(&next_capture, USE_DELAY_SLOT);
2887 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
2889 __ bind(&done);
2891 // Return last match info.
2892 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
2893 __ DropAndRet(4);
2895 // Do the runtime call to execute the regexp.
2896 __ bind(&runtime);
2897 __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
2899 // Deferred code for string handling.
2900 // (6) Not a long external string? If yes, go to (8).
2901 __ bind(¬_seq_nor_cons);
2903 __ Branch(¬_long_external, gt, a1, Operand(kExternalStringTag));
2905 // (7) External string. Make it, offset-wise, look like a sequential string.
2906 __ bind(&external_string);
2907 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2908 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2909 if (FLAG_debug_code) {
2910 // Assert that we do not have a cons or slice (indirect strings) here.
2911 // Sequential strings have already been ruled out.
2912 __ And(at, a0, Operand(kIsIndirectStringMask));
2913 __ Assert(eq,
2914 kExternalStringExpectedButNotFound,
2915 at,
2916 Operand(zero_reg));
2917 }
2918 __ lw(subject,
2919 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2920 // Move the pointer so that offset-wise, it looks like a sequential string.
2921 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2922 __ Subu(subject,
2923 subject,
2924 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
2925 __ jmp(&seq_string); // Go to (5).
2927 // (8) Short external string or not a string? If yes, bail out to runtime.
2928 __ bind(¬_long_external);
2929 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
2930 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
2931 __ Branch(&runtime, ne, at, Operand(zero_reg));
2933 // (9) Sliced string. Replace subject with parent. Go to (4).
2934 // Load offset into t0 and replace subject string with parent.
2935 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2936 __ sra(t0, t0, kSmiTagSize);
2937 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2938 __ jmp(&check_underlying); // Go to (4).
2939 #endif // V8_INTERPRETED_REGEXP
2943 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2944 // Cache the called function in a feedback vector slot. Cache states
2945 // are uninitialized, monomorphic (indicated by a JSFunction), and
2946 // megamorphic.
2947 // a0 : number of arguments to the construct function
2948 // a1 : the function to call
2949 // a2 : Feedback vector
2950 // a3 : slot in feedback vector (Smi)
2951 Label initialize, done, miss, megamorphic, not_array_function;
2953 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
2954 masm->isolate()->heap()->megamorphic_symbol());
2955 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
2956 masm->isolate()->heap()->uninitialized_symbol());
2958 // Load the cache state into t0.
2959 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2960 __ Addu(t0, a2, Operand(t0));
2961 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
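// The slot address above follows the usual smi-scaled indexing pattern
// (a sketch, 32-bit): a3 is the slot index as a smi, i.e. index << 1, so
//   t0 = a2 + (a3 << (kPointerSizeLog2 - kSmiTagSize))
// points at element 'index' of the vector, and FieldMemOperand folds in
// FixedArray::kHeaderSize minus the heap-object tag for the actual load.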
2963 // A monomorphic cache hit or an already megamorphic state: invoke the
2964 // function without changing the state.
2965 __ Branch(&done, eq, t0, Operand(a1));
2967 if (!FLAG_pretenuring_call_new) {
2968 // If we came here, we need to see if we are the array function.
2969 // If we didn't have a matching function, and we didn't find the megamorph
2970 // sentinel, then we have in the slot either some other function or an
2971 // AllocationSite. Do a map check on the object in a3.
2972 __ lw(t1, FieldMemOperand(t0, 0));
2973 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2974 __ Branch(&miss, ne, t1, Operand(at));
2976 // Make sure the function is the Array() function
2977 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
2978 __ Branch(&megamorphic, ne, a1, Operand(t0));
2979 __ jmp(&done);
2980 }
2982 __ bind(&miss);
2984 // A monomorphic miss (i.e., here the cache is not uninitialized) goes
2985 // megamorphic.
2986 __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
2987 __ Branch(&initialize, eq, t0, Operand(at));
2988 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2989 // write-barrier is needed.
2990 __ bind(&megamorphic);
2991 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
2992 __ Addu(t0, a2, Operand(t0));
2993 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
2994 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
2995 __ jmp(&done);
2997 // An uninitialized cache is patched with the function.
2998 __ bind(&initialize);
2999 if (!FLAG_pretenuring_call_new) {
3000 // Make sure the function is the Array() function.
3001 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
3002 __ Branch(¬_array_function, ne, a1, Operand(t0));
3004 // The target function is the Array constructor.
3005 // Create an AllocationSite if we don't already have it, store it in the
3006 // slot.
3007 {
3008 FrameScope scope(masm, StackFrame::INTERNAL);
3009 const RegList kSavedRegs =
3010 1 << 4 | // a0
3011 1 << 5 | // a1
3012 1 << 6 | // a2
3013 1 << 7; // a3
3015 // Arguments register must be smi-tagged to call out.
3016 __ SmiTag(a0);
3017 __ MultiPush(kSavedRegs);
3019 CreateAllocationSiteStub create_stub(masm->isolate());
3020 __ CallStub(&create_stub);
3022 __ MultiPop(kSavedRegs);
3023 __ SmiUntag(a0);
3024 }
3025 __ b(&done);
3027 __ bind(¬_array_function);
3030 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
3031 __ Addu(t0, a2, Operand(t0));
3032 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3033 __ sw(a1, MemOperand(t0, 0));
3035 __ Push(t0, a2, a1);
3036 __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
3037 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
3038 __ Pop(t0, a2, a1);
3040 __ bind(&done);
3044 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
3045 __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3046 __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
3048 // Do not transform the receiver for strict mode functions.
3049 int32_t strict_mode_function_mask =
3050 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3051 // Do not transform the receiver for native (Compilerhints already in a3).
3052 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
3053 __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
3054 __ Branch(cont, ne, at, Operand(zero_reg));
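// A sketch of the test above: the compiler hints live in a smi field, so
// every bit index is shifted by kSmiTagSize. With illustrative bit numbers
// kStrictModeFunction == 2 and kNative == 4 (assumed values, for the
// example only), the combined mask would be (1 << 3) | (1 << 5), letting a
// single And plus one branch skip receiver wrapping for either case.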
3058 static void EmitSlowCase(MacroAssembler* masm,
3059 int argc,
3060 Label* non_function) {
3061 // Check for function proxy.
3062 __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
3063 __ push(a1); // put proxy as additional argument
3064 __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
3065 __ mov(a2, zero_reg);
3066 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
3068 Handle<Code> adaptor =
3069 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3070 __ Jump(adaptor, RelocInfo::CODE_TARGET);
3073 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3074 // of the original receiver from the call site).
3075 __ bind(non_function);
3076 __ sw(a1, MemOperand(sp, argc * kPointerSize));
3077 __ li(a0, Operand(argc)); // Set up the number of arguments.
3078 __ mov(a2, zero_reg);
3079 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
3080 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3081 RelocInfo::CODE_TARGET);
3085 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
3086 // Wrap the receiver and patch it back onto the stack.
3087 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
3088 __ Push(a1, a3);
3089 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
3090 __ pop(a1);
3091 }
3092 __ Branch(USE_DELAY_SLOT, cont);
3093 __ sw(v0, MemOperand(sp, argc * kPointerSize));
3097 static void CallFunctionNoFeedback(MacroAssembler* masm,
3098 int argc, bool needs_checks,
3099 bool call_as_method) {
3100 // a1 : the function to call
3101 Label slow, non_function, wrap, cont;
3103 if (needs_checks) {
3104 // Check that the function is really a JavaScript function.
3105 // a1: pushed function (to be verified)
3106 __ JumpIfSmi(a1, &non_function);
3108 // Goto slow case if we do not have a function.
3109 __ GetObjectType(a1, t0, t0);
3110 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
3111 }
3113 // Fast-case: Invoke the function now.
3114 // a1: pushed function
3115 ParameterCount actual(argc);
3117 if (call_as_method) {
3118 if (needs_checks) {
3119 EmitContinueIfStrictOrNative(masm, &cont);
3120 }
3122 // Compute the receiver in sloppy mode.
3123 __ lw(a3, MemOperand(sp, argc * kPointerSize));
3125 if (needs_checks) {
3126 __ JumpIfSmi(a3, &wrap);
3127 __ GetObjectType(a3, t0, t0);
3128 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
3129 } else {
3130 __ jmp(&wrap);
3131 }
3133 __ bind(&cont);
3134 }
3136 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
3138 if (needs_checks) {
3139 // Slow-case: Non-function called.
3140 __ bind(&slow);
3141 EmitSlowCase(masm, argc, &non_function);
3142 }
3144 if (call_as_method) {
3145 __ bind(&wrap);
3146 // Wrap the receiver and patch it back onto the stack.
3147 EmitWrapCase(masm, argc, &cont);
3148 }
3152 void CallFunctionStub::Generate(MacroAssembler* masm) {
3153 CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
3157 void CallConstructStub::Generate(MacroAssembler* masm) {
3158 // a0 : number of arguments
3159 // a1 : the function to call
3160 // a2 : feedback vector
3161 // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
3162 Label slow, non_function_call;
3164 // Check that the function is not a smi.
3165 __ JumpIfSmi(a1, &non_function_call);
3166 // Check that the function is a JSFunction.
3167 __ GetObjectType(a1, t0, t0);
3168 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
3170 if (RecordCallTarget()) {
3171 GenerateRecordCallTarget(masm);
3173 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
3174 __ Addu(t1, a2, at);
3175 if (FLAG_pretenuring_call_new) {
3176 // Put the AllocationSite from the feedback vector into a2.
3177 // By adding kPointerSize we encode that we know the AllocationSite
3178 // entry is at the feedback vector slot given by a3 + 1.
3179 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
3181 Label feedback_register_initialized;
3182 // Put the AllocationSite from the feedback vector into a2, or undefined.
3183 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
3184 __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
3185 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3186 __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
3187 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3188 __ bind(&feedback_register_initialized);
3191 __ AssertUndefinedOrAllocationSite(a2, t1);
3194 // Jump to the function-specific construct stub.
3195 Register jmp_reg = t0;
3196 __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3197 __ lw(jmp_reg, FieldMemOperand(jmp_reg,
3198 SharedFunctionInfo::kConstructStubOffset));
3199 __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
3200 __ Jump(at);
3202 // a0: number of arguments
3203 // a1: called object
3204 // t0: object type
3205 Label do_call;
3206 __ bind(&slow);
3207 __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
3208 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3209 __ jmp(&do_call);
3211 __ bind(&non_function_call);
3212 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3213 __ bind(&do_call);
3214 // Set expected number of arguments to zero (not changing a0).
3215 __ li(a2, Operand(0, RelocInfo::NONE32));
3216 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3217 RelocInfo::CODE_TARGET);
3221 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
3222 __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3223 __ lw(vector, FieldMemOperand(vector,
3224 JSFunction::kSharedFunctionInfoOffset));
3225 __ lw(vector, FieldMemOperand(vector,
3226 SharedFunctionInfo::kFeedbackVectorOffset));
3230 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
3231 // a1 - function
3232 // a3 - slot id
3233 Label miss;
3235 EmitLoadTypeFeedbackVector(masm, a2);
3237 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
3238 __ Branch(&miss, ne, a1, Operand(at));
3240 __ li(a0, Operand(arg_count()));
3241 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
3242 __ Addu(at, a2, Operand(at));
3243 __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize));
3245 // Verify that t0 contains an AllocationSite
3246 __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
3247 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3248 __ Branch(&miss, ne, t1, Operand(at));
3250 __ mov(a2, t0);
3251 ArrayConstructorStub stub(masm->isolate(), arg_count());
3252 __ TailCallStub(&stub);
3254 __ bind(&miss);
3255 GenerateMiss(masm, IC::kCallIC_Customization_Miss);
3257 // The slow case: we need this no matter what to complete a call after a miss.
3258 CallFunctionNoFeedback(masm,
3259 arg_count(),
3260 true,
3261 CallAsMethod());
3263 // Unreachable.
3264 __ stop("Unexpected code address");
3268 void CallICStub::Generate(MacroAssembler* masm) {
3269 // a1 - function
3270 // a3 - slot id (Smi)
3271 Label extra_checks_or_miss, slow_start;
3272 Label slow, non_function, wrap, cont;
3273 Label have_js_function;
3274 int argc = state_.arg_count();
3275 ParameterCount actual(argc);
3277 EmitLoadTypeFeedbackVector(masm, a2);
3279 // The checks. First, does a1 match the recorded monomorphic target?
3280 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
3281 __ Addu(t0, a2, Operand(t0));
3282 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
3283 __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0));
3285 __ bind(&have_js_function);
3286 if (state_.CallAsMethod()) {
3287 EmitContinueIfStrictOrNative(masm, &cont);
3288 // Compute the receiver in sloppy mode.
3289 __ lw(a3, MemOperand(sp, argc * kPointerSize));
3291 __ JumpIfSmi(a3, &wrap);
3292 __ GetObjectType(a3, t0, t0);
3293 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
3298 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
3301 EmitSlowCase(masm, argc, &non_function);
3303 if (state_.CallAsMethod()) {
3305 EmitWrapCase(masm, argc, &cont);
3308 __ bind(&extra_checks_or_miss);
3309 Label miss;
3311 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
3312 __ Branch(&slow_start, eq, t0, Operand(at));
3313 __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
3314 __ Branch(&miss, eq, t0, Operand(at));
3316 if (!FLAG_trace_ic) {
3317 // We are going megamorphic. If the feedback is a JSFunction, it is fine
3318 // to handle it here. More complex cases are dealt with in the runtime.
3319 __ AssertNotSmi(t0);
3320 __ GetObjectType(t0, t1, t1);
3321 __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
3322 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
3323 __ Addu(t0, a2, Operand(t0));
3324 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
3325 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
3326 __ Branch(&slow_start);
3327 }
3329 // We are here because tracing is on or we are going monomorphic.
3330 __ bind(&miss);
3331 GenerateMiss(masm, IC::kCallIC_Miss);
3334 __ bind(&slow_start);
3335 // Check that the function is really a JavaScript function.
3336 // a1: pushed function (to be verified)
3337 __ JumpIfSmi(a1, &non_function);
3339 // Goto slow case if we do not have a function.
3340 __ GetObjectType(a1, t0, t0);
3341 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
3342 __ Branch(&have_js_function);
3346 void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
3347 // Get the receiver of the function from the stack; 1 ~ return address.
3348 __ lw(t0, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
3351 FrameScope scope(masm, StackFrame::INTERNAL);
3353 // Push the receiver and the function and feedback info.
3354 __ Push(t0, a1, a2, a3);
3357 ExternalReference miss = ExternalReference(IC_Utility(id),
3359 __ CallExternalReference(miss, 4);
3361 // Move result to a1 and exit the internal frame.
3367 // StringCharCodeAtGenerator.
3368 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3371 Label got_char_code;
3372 Label sliced_string;
3374 ASSERT(!t0.is(index_));
3375 ASSERT(!t0.is(result_));
3376 ASSERT(!t0.is(object_));
3378 // If the receiver is a smi trigger the non-string case.
3379 __ JumpIfSmi(object_, receiver_not_string_);
3381 // Fetch the instance type of the receiver into result register.
3382 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3383 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3384 // If the receiver is not a string trigger the non-string case.
3385 __ And(t0, result_, Operand(kIsNotStringMask));
3386 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
3388 // If the index is non-smi trigger the non-smi case.
3389 __ JumpIfNotSmi(index_, &index_not_smi_);
3391 __ bind(&got_smi_index_);
3393 // Check for index out of range.
3394 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
3395 __ Branch(index_out_of_range_, ls, t0, Operand(index_));
3397 __ sra(index_, index_, kSmiTagSize);
3399 StringCharLoadGenerator::Generate(masm,
3405 __ sll(result_, result_, kSmiTagSize);
3410 void StringCharCodeAtGenerator::GenerateSlow(
3411 MacroAssembler* masm,
3412 const RuntimeCallHelper& call_helper) {
3413 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3415 // Index is not a smi.
3416 __ bind(&index_not_smi_);
3417 // If index is a heap number, try converting it to an integer.
3420 Heap::kHeapNumberMapRootIndex,
3423 call_helper.BeforeCall(masm);
3424 // Consumed by runtime conversion function:
3425 __ Push(object_, index_);
3426 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3427 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3429 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3430 // NumberToSmi discards numbers that are not exact integers.
3431 __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
3434 // Save the conversion result before the pop instructions below
3435 // have a chance to overwrite it.
3437 __ Move(index_, v0);
3439 // Reload the instance type.
3440 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3441 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3442 call_helper.AfterCall(masm);
3443 // If index is still not a smi, it must be out of range.
3444 __ JumpIfNotSmi(index_, index_out_of_range_);
3445 // Otherwise, return to the fast path.
3446 __ Branch(&got_smi_index_);
3448 // Call runtime. We get here when the receiver is a string and the
3449   // index is a number, but the code for getting the actual character
3450 // is too complex (e.g., when the string needs to be flattened).
3451 __ bind(&call_runtime_);
3452 call_helper.BeforeCall(masm);
3453 __ sll(index_, index_, kSmiTagSize);
3454 __ Push(object_, index_);
3455 __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
3457 __ Move(result_, v0);
3459 call_helper.AfterCall(masm);
3462 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3466 // -------------------------------------------------------------------------
3467 // StringCharFromCodeGenerator
3469 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3470 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3472 ASSERT(!t0.is(result_));
3473 ASSERT(!t0.is(code_));
3475 STATIC_ASSERT(kSmiTag == 0);
3476 STATIC_ASSERT(kSmiShiftSize == 0);
3477 ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
3480 Operand(kSmiTagMask |
3481 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3482 __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
3484 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3485   // At this point the code register contains a smi-tagged ASCII char code.
3486 STATIC_ASSERT(kSmiTag == 0);
3487 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
3488 __ Addu(result_, result_, t0);
3489 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3490 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3491 __ Branch(&slow_case_, eq, result_, Operand(t0));
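  // The masked test above folds two checks into a single branch; a sketch of
  // the fast path it implements (values shown untagged for clarity):
  //
  //   if (!IsSmi(code) || SmiValue(code) > String::kMaxOneByteCharCode)
  //     goto slow_case;
  //   result = single_character_string_cache[SmiValue(code)];
  //   if (result == undefined) goto slow_case;  // Cache miss.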
3496 void StringCharFromCodeGenerator::GenerateSlow(
3497 MacroAssembler* masm,
3498 const RuntimeCallHelper& call_helper) {
3499 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3501 __ bind(&slow_case_);
3502 call_helper.BeforeCall(masm);
3504 __ CallRuntime(Runtime::kCharFromCode, 1);
3505 __ Move(result_, v0);
3507 call_helper.AfterCall(masm);
3510 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3514 enum CopyCharactersFlags {
3516 DEST_ALWAYS_ALIGNED = 2
3520 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3525 String::Encoding encoding) {
3526 if (FLAG_debug_code) {
3527 // Check that destination is word aligned.
3528 __ And(scratch, dest, Operand(kPointerAlignmentMask));
3530 kDestinationOfCopyNotAligned,
3535 // Assumes word reads and writes are little endian.
3536 // Nothing to do for zero characters.
3539 if (encoding == String::TWO_BYTE_ENCODING) {
3540 __ Addu(count, count, count);
3543 Register limit = count; // Read until dest equals this.
3544 __ Addu(limit, dest, Operand(count));
3546 Label loop_entry, loop;
3547 // Copy bytes from src to dest until dest hits limit.
3548 __ Branch(&loop_entry);
3550 __ lbu(scratch, MemOperand(src));
3551 __ Addu(src, src, Operand(1));
3552 __ sb(scratch, MemOperand(dest));
3553 __ Addu(dest, dest, Operand(1));
3554 __ bind(&loop_entry);
3555 __ Branch(&loop, lt, dest, Operand(limit));
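  // A minimal C sketch of the loop above (count has already been scaled to a
  // byte count for two-byte strings):
  //
  //   uint8_t* limit = dest + count;
  //   while (dest < limit) *dest++ = *src++;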
3561 void StringHelper::GenerateHashInit(MacroAssembler* masm,
3563 Register character) {
3564 // hash = seed + character + ((seed + character) << 10);
3565 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
3566 // Untag smi seed and add the character.
3568 __ addu(hash, hash, character);
3569 __ sll(at, hash, 10);
3570 __ addu(hash, hash, at);
3571 // hash ^= hash >> 6;
3572 __ srl(at, hash, 6);
3573 __ xor_(hash, hash, at);
3577 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3579 Register character) {
3580 // hash += character;
3581 __ addu(hash, hash, character);
3582 // hash += hash << 10;
3583 __ sll(at, hash, 10);
3584 __ addu(hash, hash, at);
3585 // hash ^= hash >> 6;
3586 __ srl(at, hash, 6);
3587 __ xor_(hash, hash, at);
3591 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
3593 // hash += hash << 3;
3594 __ sll(at, hash, 3);
3595 __ addu(hash, hash, at);
3596 // hash ^= hash >> 11;
3597 __ srl(at, hash, 11);
3598 __ xor_(hash, hash, at);
3599 // hash += hash << 15;
3600 __ sll(at, hash, 15);
3601 __ addu(hash, hash, at);
3603 __ li(at, Operand(String::kHashBitMask));
3604 __ and_(hash, hash, at);
3606 // if (hash == 0) hash = 27;
3607 __ ori(at, zero_reg, StringHasher::kZeroHash);
3608 __ Movz(hash, at, hash);
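// Taken together, GenerateHashInit, GenerateHashAddCharacter and
// GenerateHashGetHash compute the following hash; a C sketch assembled from
// the comments above (register shuffling and smi handling omitted):
//
//   uint32_t hash = seed;              // Untagged hash seed.
//   for (each character c) {
//     hash += c;
//     hash += hash << 10;
//     hash ^= hash >> 6;
//   }
//   hash += hash << 3;
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   hash &= String::kHashBitMask;
//   if (hash == 0) hash = StringHasher::kZeroHash;  // 27, see above.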
3612 void SubStringStub::Generate(MacroAssembler* masm) {
3614 // Stack frame on entry.
3615 // ra: return address
3620 // This stub is called from the native-call %_SubString(...), so
3621 // nothing can be assumed about the arguments. It is tested that:
3622 // "string" is a sequential string,
3623 // both "from" and "to" are smis, and
3624 // 0 <= from <= to <= string.length.
3625 // If any of these assumptions fail, we call the runtime system.
3627 const int kToOffset = 0 * kPointerSize;
3628 const int kFromOffset = 1 * kPointerSize;
3629 const int kStringOffset = 2 * kPointerSize;
3631 __ lw(a2, MemOperand(sp, kToOffset));
3632 __ lw(a3, MemOperand(sp, kFromOffset));
3633 STATIC_ASSERT(kFromOffset == kToOffset + 4);
3634 STATIC_ASSERT(kSmiTag == 0);
3635 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3637   // Utilize delay slots. SmiUntag doesn't emit a jump; everything else is
3638 // safe in this case.
3639 __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
3640 __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
3641 // Both a2 and a3 are untagged integers.
3643 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
3645 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
3646 __ Subu(a2, a2, a3);
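  // A sketch of the argument checking done so far (cf. the comment at the
  // top of this stub):
  //
  //   if (!IsSmi(to) || !IsSmi(from)) goto runtime;
  //   to = SmiUntag(to); from = SmiUntag(from);
  //   if (from < 0 || from > to) goto runtime;
  //   length = to - from;               // Now held in a2.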
3648 // Make sure first argument is a string.
3649 __ lw(v0, MemOperand(sp, kStringOffset));
3650 __ JumpIfSmi(v0, &runtime);
3651 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
3652 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3653 __ And(t0, a1, Operand(kIsNotStringMask));
3655 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3658 __ Branch(&single_char, eq, a2, Operand(1));
3660   // Short-cut for the case of a trivial substring.
3662 // v0: original string
3663 // a2: result string length
3664 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
3666 // Return original string.
3667 __ Branch(&return_v0, eq, a2, Operand(t0));
3668 // Longer than original string's length or negative: unsafe arguments.
3669 __ Branch(&runtime, hi, a2, Operand(t0));
3670 // Shorter than original string's length: an actual substring.
3672 // Deal with different string types: update the index if necessary
3673 // and put the underlying string into t1.
3674 // v0: original string
3675 // a1: instance type
3677 // a3: from index (untagged)
3678 Label underlying_unpacked, sliced_string, seq_or_external_string;
3679 // If the string is not indirect, it can only be sequential or external.
3680 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3681 STATIC_ASSERT(kIsIndirectStringMask != 0);
3682 __ And(t0, a1, Operand(kIsIndirectStringMask));
3683 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
3684 // t0 is used as a scratch register and can be overwritten in either case.
3685 __ And(t0, a1, Operand(kSlicedNotConsMask));
3686 __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
3687 // Cons string. Check whether it is flat, then fetch first part.
3688 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
3689 __ LoadRoot(t0, Heap::kempty_stringRootIndex);
3690 __ Branch(&runtime, ne, t1, Operand(t0));
3691 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
3692 // Update instance type.
3693 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3694 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3695 __ jmp(&underlying_unpacked);
3697 __ bind(&sliced_string);
3698 // Sliced string. Fetch parent and correct start index by offset.
3699 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3700 __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3701 __ sra(t0, t0, 1); // Add offset to index.
3702 __ Addu(a3, a3, t0);
3703 // Update instance type.
3704 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3705 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3706 __ jmp(&underlying_unpacked);
3708 __ bind(&seq_or_external_string);
3709 // Sequential or external string. Just move string to the expected register.
3712 __ bind(&underlying_unpacked);
3714 if (FLAG_string_slices) {
3716 // t1: underlying subject string
3717 // a1: instance type of underlying subject string
3719 // a3: adjusted start index (untagged)
3720 // Short slice. Copy instead of slicing.
3721 __ Branch(©_routine, lt, a2, Operand(SlicedString::kMinLength));
3722 // Allocate new sliced string. At this point we do not reload the instance
3723 // type including the string encoding because we simply rely on the info
3724 // provided by the original string. It does not matter if the original
3725 // string's encoding is wrong because we always have to recheck encoding of
3726     // the newly created string's parent anyway due to externalized strings.
3727 Label two_byte_slice, set_slice_header;
3728 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3729 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3730 __ And(t0, a1, Operand(kStringEncodingMask));
3731 __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
3732 __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
3733 __ jmp(&set_slice_header);
3734 __ bind(&two_byte_slice);
3735 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
3736 __ bind(&set_slice_header);
3738 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3739 __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
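    // Sketch of the slicing fast path just emitted: for results of at least
    // SlicedString::kMinLength characters, no character data is copied at
    // all; roughly:
    //
    //   slice = AllocateSlicedString(encoding_of(original), length);  // v0
    //   slice->parent = underlying_string;    // t1
    //   slice->offset = adjusted_from_index;  // a3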
3742 __ bind(©_routine);
3745 // t1: underlying subject string
3746 // a1: instance type of underlying subject string
3748 // a3: adjusted start index (untagged)
3749 Label two_byte_sequential, sequential_string, allocate_result;
3750 STATIC_ASSERT(kExternalStringTag != 0);
3751 STATIC_ASSERT(kSeqStringTag == 0);
3752 __ And(t0, a1, Operand(kExternalStringTag));
3753 __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
3755 // Handle external string.
3756 // Rule out short external strings.
3757 STATIC_ASSERT(kShortExternalStringTag != 0);
3758 __ And(t0, a1, Operand(kShortExternalStringTag));
3759 __ Branch(&runtime, ne, t0, Operand(zero_reg));
3760 __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
3761 // t1 already points to the first character of underlying string.
3762 __ jmp(&allocate_result);
3764 __ bind(&sequential_string);
3765 // Locate first character of underlying subject string.
3766 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3767 __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3769 __ bind(&allocate_result);
3770   // Sequential ASCII string. Allocate the result.
3771 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3772 __ And(t0, a1, Operand(kStringEncodingMask));
3773 __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
3775 // Allocate and copy the resulting ASCII string.
3776 __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
3778 // Locate first character of substring to copy.
3779 __ Addu(t1, t1, a3);
3781 // Locate first character of result.
3782 __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3784 // v0: result string
3785 // a1: first character of result string
3786 // a2: result string length
3787 // t1: first character of substring to copy
3788 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3789 StringHelper::GenerateCopyCharacters(
3790 masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
3793 // Allocate and copy the resulting two-byte string.
3794 __ bind(&two_byte_sequential);
3795 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
3797 // Locate first character of substring to copy.
3798 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3800 __ Addu(t1, t1, t0);
3801 // Locate first character of result.
3802 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3804 // v0: result string.
3805 // a1: first character of result.
3806 // a2: result length.
3807 // t1: first character of substring to copy.
3808 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3809 StringHelper::GenerateCopyCharacters(
3810 masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
3812 __ bind(&return_v0);
3813 Counters* counters = isolate()->counters();
3814 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
3817   // Just jump to runtime to create the substring.
3819 __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
3821 __ bind(&single_char);
3822 // v0: original string
3823 // a1: instance type
3825 // a3: from index (untagged)
3827 StringCharAtGenerator generator(
3828 v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3829 generator.GenerateFast(masm);
3831 generator.SkipSlow(masm, &runtime);
3835 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
3840 Register scratch3) {
3841 Register length = scratch1;
3844 Label strings_not_equal, check_zero_length;
3845 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
3846 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3847 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
3848 __ bind(&strings_not_equal);
3849 ASSERT(is_int16(NOT_EQUAL));
3850 __ Ret(USE_DELAY_SLOT);
3851 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
3853 // Check if the length is zero.
3854 Label compare_chars;
3855 __ bind(&check_zero_length);
3856 STATIC_ASSERT(kSmiTag == 0);
3857 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
3858 ASSERT(is_int16(EQUAL));
3859 __ Ret(USE_DELAY_SLOT);
3860 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3862 // Compare characters.
3863 __ bind(&compare_chars);
3865 GenerateAsciiCharsCompareLoop(masm,
3866 left, right, length, scratch2, scratch3, v0,
3867 &strings_not_equal);
3869 // Characters are equal.
3870 __ Ret(USE_DELAY_SLOT);
3871 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3875 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
3881 Register scratch4) {
3882 Label result_not_equal, compare_lengths;
3883 // Find minimum length and length difference.
3884 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
3885 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3886 __ Subu(scratch3, scratch1, Operand(scratch2));
3887 Register length_delta = scratch3;
3888 __ slt(scratch4, scratch2, scratch1);
3889 __ Movn(scratch1, scratch2, scratch4);
3890 Register min_length = scratch1;
3891 STATIC_ASSERT(kSmiTag == 0);
3892 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
3895 GenerateAsciiCharsCompareLoop(masm,
3896 left, right, min_length, scratch2, scratch4, v0,
3899 // Compare lengths - strings up to min-length are equal.
3900 __ bind(&compare_lengths);
3901 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3902 // Use length_delta as result if it's zero.
3903 __ mov(scratch2, length_delta);
3904 __ mov(scratch4, zero_reg);
3905 __ mov(v0, zero_reg);
3907 __ bind(&result_not_equal);
3908 // Conditionally update the result based either on length_delta or
3909   // the last comparison performed in the loop above.
3911 __ Branch(&ret, eq, scratch2, Operand(scratch4));
3912 __ li(v0, Operand(Smi::FromInt(GREATER)));
3913 __ Branch(&ret, gt, scratch2, Operand(scratch4));
3914 __ li(v0, Operand(Smi::FromInt(LESS)));
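  // In effect the code above computes (a sketch of the result convention):
  //
  //   delta = first non-matching char pair over min_length, else
  //           left.length - right.length;
  //   v0 = delta == 0 ? EQUAL : delta > 0 ? GREATER : LESS;  // As smis.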
3920 void StringCompareStub::GenerateAsciiCharsCompareLoop(
3921 MacroAssembler* masm,
3928 Label* chars_not_equal) {
3929 // Change index to run from -length to -1 by adding length to string
3930 // start. This means that loop ends when index reaches zero, which
3931 // doesn't need an additional compare.
3932 __ SmiUntag(length);
3933 __ Addu(scratch1, length,
3934 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3935 __ Addu(left, left, Operand(scratch1));
3936 __ Addu(right, right, Operand(scratch1));
3937 __ Subu(length, zero_reg, length);
3938 Register index = length; // index = -length;
3944 __ Addu(scratch3, left, index);
3945 __ lbu(scratch1, MemOperand(scratch3));
3946 __ Addu(scratch3, right, index);
3947 __ lbu(scratch2, MemOperand(scratch3));
3948 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
3949 __ Addu(index, index, 1);
3950 __ Branch(&loop, ne, index, Operand(zero_reg));
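  // Equivalent C sketch of the loop above: left/right now point one past
  // their last character, so index runs from -length to 0 and doubles as the
  // loop condition:
  //
  //   for (int index = -length; index != 0; index++)
  //     if (left[index] != right[index]) goto chars_not_equal;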
3954 void StringCompareStub::Generate(MacroAssembler* masm) {
3957 Counters* counters = isolate()->counters();
3959 // Stack frame on entry.
3960 // sp[0]: right string
3961 // sp[4]: left string
3962 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
3963 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
3966 __ Branch(¬_same, ne, a0, Operand(a1));
3967 STATIC_ASSERT(EQUAL == 0);
3968 STATIC_ASSERT(kSmiTag == 0);
3969 __ li(v0, Operand(Smi::FromInt(EQUAL)));
3970 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
3975 // Check that both objects are sequential ASCII strings.
3976 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
3978 // Compare flat ASCII strings natively. Remove arguments from stack first.
3979 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
3980 __ Addu(sp, sp, Operand(2 * kPointerSize));
3981 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
3984 __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
3988 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3989 // ----------- S t a t e -------------
3992 // -- ra : return address
3993 // -----------------------------------
3995 // Load a2 with the allocation site. We stick an undefined dummy value here
3996 // and replace it with the real allocation site later when we instantiate this
3997 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3998 __ li(a2, handle(isolate()->heap()->undefined_value()));
4000 // Make sure that we actually patched the allocation site.
4001 if (FLAG_debug_code) {
4002 __ And(at, a2, Operand(kSmiTagMask));
4003 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
4004 __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
4005 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
4006 __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
4009 // Tail call into the stub that handles binary operations with allocation
4011 BinaryOpWithAllocationSiteStub stub(isolate(), state_);
4012 __ TailCallStub(&stub);
4016 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
4017 ASSERT(state_ == CompareIC::SMI);
4020 __ JumpIfNotSmi(a2, &miss);
4022 if (GetCondition() == eq) {
4023 // For equality we do not care about the sign of the result.
4024 __ Ret(USE_DELAY_SLOT);
4025 __ Subu(v0, a0, a1);
4027 // Untag before subtracting to avoid handling overflow.
4030 __ Ret(USE_DELAY_SLOT);
4031 __ Subu(v0, a1, a0);
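  // Sketch of the result convention above: the caller only inspects the sign
  // (or zero-ness) of v0, so a plain subtraction serves as the result:
  //
  //   eq compares:      v0 = rhs - lhs;  // Only ==0 matters; tags cancel.
  //   ordered compares: v0 = SmiUntag(lhs) - SmiUntag(rhs);
  //
  // where the operands are untagged first so the subtraction cannot overflow.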
4039 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
4040 ASSERT(state_ == CompareIC::NUMBER);
4043 Label unordered, maybe_undefined1, maybe_undefined2;
4046 if (left_ == CompareIC::SMI) {
4047 __ JumpIfNotSmi(a1, &miss);
4049 if (right_ == CompareIC::SMI) {
4050 __ JumpIfNotSmi(a0, &miss);
4053 // Inlining the double comparison and falling back to the general compare
4054 // stub if NaN is involved.
4055 // Load left and right operand.
4056 Label done, left, left_smi, right_smi;
4057 __ JumpIfSmi(a0, &right_smi);
4058 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
4060 __ Subu(a2, a0, Operand(kHeapObjectTag));
4061 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
4063 __ bind(&right_smi);
4064 __ SmiUntag(a2, a0); // Can't clobber a0 yet.
4065 FPURegister single_scratch = f6;
4066 __ mtc1(a2, single_scratch);
4067 __ cvt_d_w(f2, single_scratch);
4070 __ JumpIfSmi(a1, &left_smi);
4071 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
4073 __ Subu(a2, a1, Operand(kHeapObjectTag));
4074 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
4077 __ SmiUntag(a2, a1); // Can't clobber a1 yet.
4078 single_scratch = f8;
4079 __ mtc1(a2, single_scratch);
4080 __ cvt_d_w(f0, single_scratch);
4084 // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
4085 Label fpu_eq, fpu_lt;
4086 // Test if equal, and also handle the unordered/NaN case.
4087 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
4089 // Test if less (unordered case is already handled).
4090 __ BranchF(&fpu_lt, NULL, lt, f0, f2);
4092 // Otherwise it's greater, so just fall thru, and return.
4093 ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
4094 __ Ret(USE_DELAY_SLOT);
4095 __ li(v0, Operand(GREATER));
4098 __ Ret(USE_DELAY_SLOT);
4099 __ li(v0, Operand(EQUAL));
4102 __ Ret(USE_DELAY_SLOT);
4103 __ li(v0, Operand(LESS));
4105 __ bind(&unordered);
4106 __ bind(&generic_stub);
4107 ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
4108 CompareIC::GENERIC);
4109 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4111 __ bind(&maybe_undefined1);
4112 if (Token::IsOrderedRelationalCompareOp(op_)) {
4113 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4114 __ Branch(&miss, ne, a0, Operand(at));
4115 __ JumpIfSmi(a1, &unordered);
4116 __ GetObjectType(a1, a2, a2);
4117 __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
4121 __ bind(&maybe_undefined2);
4122 if (Token::IsOrderedRelationalCompareOp(op_)) {
4123 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4124 __ Branch(&unordered, eq, a1, Operand(at));
4132 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
4133 ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
4136 // Registers containing left and right operands respectively.
4138 Register right = a0;
4142 // Check that both operands are heap objects.
4143 __ JumpIfEitherSmi(left, right, &miss);
4145 // Check that both operands are internalized strings.
4146 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4147 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4148 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4149 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4150 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4151 __ Or(tmp1, tmp1, Operand(tmp2));
4152 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4153 __ Branch(&miss, ne, at, Operand(zero_reg));
4155 // Make sure a0 is non-zero. At this point input operands are
4156 // guaranteed to be non-zero.
4157 ASSERT(right.is(a0));
4158 STATIC_ASSERT(EQUAL == 0);
4159 STATIC_ASSERT(kSmiTag == 0);
4161 // Internalized strings are compared by identity.
4162 __ Ret(ne, left, Operand(right));
4163 ASSERT(is_int16(EQUAL));
4164 __ Ret(USE_DELAY_SLOT);
4165 __ li(v0, Operand(Smi::FromInt(EQUAL)));
4172 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
4173 ASSERT(state_ == CompareIC::UNIQUE_NAME);
4174 ASSERT(GetCondition() == eq);
4177 // Registers containing left and right operands respectively.
4179 Register right = a0;
4183 // Check that both operands are heap objects.
4184 __ JumpIfEitherSmi(left, right, &miss);
4186 // Check that both operands are unique names. This leaves the instance
4187 // types loaded in tmp1 and tmp2.
4188 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4189 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4190 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4191 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4193 __ JumpIfNotUniqueName(tmp1, &miss);
4194 __ JumpIfNotUniqueName(tmp2, &miss);
4199 // Unique names are compared by identity.
4201 __ Branch(&done, ne, left, Operand(right));
4202 // Make sure a0 is non-zero. At this point input operands are
4203 // guaranteed to be non-zero.
4204 ASSERT(right.is(a0));
4205 STATIC_ASSERT(EQUAL == 0);
4206 STATIC_ASSERT(kSmiTag == 0);
4207 __ li(v0, Operand(Smi::FromInt(EQUAL)));
4216 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
4217 ASSERT(state_ == CompareIC::STRING);
4220 bool equality = Token::IsEqualityOp(op_);
4222 // Registers containing left and right operands respectively.
4224 Register right = a0;
4231 // Check that both operands are heap objects.
4232 __ JumpIfEitherSmi(left, right, &miss);
4234 // Check that both operands are strings. This leaves the instance
4235 // types loaded in tmp1 and tmp2.
4236 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4237 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4238 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4239 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4240 STATIC_ASSERT(kNotStringTag != 0);
4241 __ Or(tmp3, tmp1, tmp2);
4242 __ And(tmp5, tmp3, Operand(kIsNotStringMask));
4243 __ Branch(&miss, ne, tmp5, Operand(zero_reg));
4245 // Fast check for identical strings.
4246 Label left_ne_right;
4247 STATIC_ASSERT(EQUAL == 0);
4248 STATIC_ASSERT(kSmiTag == 0);
4249 __ Branch(&left_ne_right, ne, left, Operand(right));
4250 __ Ret(USE_DELAY_SLOT);
4251 __ mov(v0, zero_reg); // In the delay slot.
4252 __ bind(&left_ne_right);
4254 // Handle not identical strings.
4256 // Check that both strings are internalized strings. If they are, we're done
4257   // because we already know they are not identical. We know they are both strings.
4260 ASSERT(GetCondition() == eq);
4261 STATIC_ASSERT(kInternalizedTag == 0);
4262 __ Or(tmp3, tmp1, Operand(tmp2));
4263 __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
4265 __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
4266 // Make sure a0 is non-zero. At this point input operands are
4267 // guaranteed to be non-zero.
4268 ASSERT(right.is(a0));
4269 __ Ret(USE_DELAY_SLOT);
4270 __ mov(v0, a0); // In the delay slot.
4271 __ bind(&is_symbol);
4274 // Check that both strings are sequential ASCII.
4276 __ JumpIfBothInstanceTypesAreNotSequentialAscii(
4277 tmp1, tmp2, tmp3, tmp4, &runtime);
4279 // Compare flat ASCII strings. Returns when done.
4281 StringCompareStub::GenerateFlatAsciiStringEquals(
4282 masm, left, right, tmp1, tmp2, tmp3);
4284 StringCompareStub::GenerateCompareFlatAsciiStrings(
4285 masm, left, right, tmp1, tmp2, tmp3, tmp4);
4288 // Handle more complex cases in runtime.
4290 __ Push(left, right);
4292 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
4294 __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
4302 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
4303 ASSERT(state_ == CompareIC::OBJECT);
4305 __ And(a2, a1, Operand(a0));
4306 __ JumpIfSmi(a2, &miss);
4308 __ GetObjectType(a0, a2, a2);
4309 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
4310 __ GetObjectType(a1, a2, a2);
4311 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
4313 ASSERT(GetCondition() == eq);
4314 __ Ret(USE_DELAY_SLOT);
4315 __ subu(v0, a0, a1);
4322 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
4325 __ JumpIfSmi(a2, &miss);
4326 __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
4327 __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
4328 __ Branch(&miss, ne, a2, Operand(known_map_));
4329 __ Branch(&miss, ne, a3, Operand(known_map_));
4331 __ Ret(USE_DELAY_SLOT);
4332 __ subu(v0, a0, a1);
4339 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
4341 // Call the runtime system in a fresh internal frame.
4342 ExternalReference miss =
4343 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
4344 FrameScope scope(masm, StackFrame::INTERNAL);
4346 __ Push(ra, a1, a0);
4347 __ li(t0, Operand(Smi::FromInt(op_)));
4348 __ addiu(sp, sp, -kPointerSize);
4349 __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
4350 __ sw(t0, MemOperand(sp)); // In the delay slot.
4351 // Compute the entry point of the rewritten stub.
4352 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
4353 // Restore registers.
4360 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4361   // Make room for arguments to fit the C calling convention. Most of the callers
4362 // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
4363 // so they handle stack restoring and we don't have to do that here.
4364 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
4365 // kCArgsSlotsSize stack space after the call.
4366 __ Subu(sp, sp, Operand(kCArgsSlotsSize));
4367 // Place the return address on the stack, making the call
4368 // GC safe. The RegExp backend also relies on this.
4369 __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
4370 __ Call(t9); // Call the C++ function.
4371 __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
4373 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
4374 // In case of an error the return address may point to a memory area
4375 // filled with kZapValue by the GC.
4376 // Dereference the address and check for this.
4377 __ lw(t0, MemOperand(t9));
4378 __ Assert(ne, kReceivedInvalidReturnAddress, t0,
4379 Operand(reinterpret_cast<uint32_t>(kZapValue)));
4385 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4388 reinterpret_cast<intptr_t>(GetCode().location());
4389 __ Move(t9, target);
4390 __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
4395 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4399 Register properties,
4401 Register scratch0) {
4402 ASSERT(name->IsUniqueName());
4403   // If the names of the slots probed (offsets 1 through kProbes - 1 for this
4404   // hash value) all differ from the name, and the kProbes-th slot is unused
4405   // (its name is the undefined value), the hash table is guaranteed not to
4406   // contain the property. This holds even if some slots represent deleted
4407   // properties (their names are the hole value).
4408 for (int i = 0; i < kInlinedProbes; i++) {
4409 // scratch0 points to properties hash.
4410 // Compute the masked index: (hash + i + i * i) & mask.
4411 Register index = scratch0;
4412 // Capacity is smi 2^n.
4413 __ lw(index, FieldMemOperand(properties, kCapacityOffset));
4414 __ Subu(index, index, Operand(1));
4415 __ And(index, index, Operand(
4416 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
4418 // Scale the index by multiplying by the entry size.
4419 ASSERT(NameDictionary::kEntrySize == 3);
4420 __ sll(at, index, 1);
4421 __ Addu(index, index, at);
4423 Register entity_name = scratch0;
4424     // Having undefined at this slot means the name is not in the table.
4425 ASSERT_EQ(kSmiTagSize, 1);
4426 Register tmp = properties;
4427 __ sll(scratch0, index, 1);
4428 __ Addu(tmp, properties, scratch0);
4429 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4431 ASSERT(!tmp.is(entity_name));
4432 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
4433 __ Branch(done, eq, entity_name, Operand(tmp));
4435 // Load the hole ready for use below:
4436 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
4438 // Stop if found the property.
4439 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
4442 __ Branch(&good, eq, entity_name, Operand(tmp));
4444 // Check if the entry name is not a unique name.
4445 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4447 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4448 __ JumpIfNotUniqueName(entity_name, miss);
4451 // Restore the properties.
4453 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4456 const int spill_mask =
4457 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
4458 a2.bit() | a1.bit() | a0.bit() | v0.bit());
4460 __ MultiPush(spill_mask);
4461 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4462 __ li(a1, Operand(Handle<Name>(name)));
4463 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
4466 __ MultiPop(spill_mask);
4468 __ Branch(done, eq, at, Operand(zero_reg));
4469 __ Branch(miss, ne, at, Operand(zero_reg));
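// A sketch of the probing scheme used above (and in GeneratePositiveLookup
// below), following the comments in this function:
//
//   mask = capacity - 1;                 // Capacity is 2^n.
//   for (int i = 0; i < kInlinedProbes; i++) {
//     index = (hash + i + i*i) & mask;   // Quadratic probing.
//     entry = &elements[index * NameDictionary::kEntrySize];  // kEntrySize == 3
//     if (entry->name == undefined) goto done;  // Name is absent.
//     if (entry->name == name) goto miss;       // Name is present.
//   }
//   // Remaining probes are handled by the NameDictionaryLookupStub call
//   // emitted above.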
4473 // Probe the name dictionary in the |elements| register. Jump to the
4474 // |done| label if a property with the given name is found. Jump to
4475 // the |miss| label otherwise.
4476 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
4477 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4483 Register scratch2) {
4484 ASSERT(!elements.is(scratch1));
4485 ASSERT(!elements.is(scratch2));
4486 ASSERT(!name.is(scratch1));
4487 ASSERT(!name.is(scratch2));
4489 __ AssertName(name);
4491 // Compute the capacity mask.
4492 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
4493 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
4494 __ Subu(scratch1, scratch1, Operand(1));
4496 // Generate an unrolled loop that performs a few probes before
4497 // giving up. Measurements done on Gmail indicate that 2 probes
4498 // cover ~93% of loads from dictionaries.
4499 for (int i = 0; i < kInlinedProbes; i++) {
4500 // Compute the masked index: (hash + i + i * i) & mask.
4501 __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4503 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4504 // the hash in a separate instruction. The value hash + i + i * i is right
4505     // shifted by the srl below and then masked in the And instruction.
4506 ASSERT(NameDictionary::GetProbeOffset(i) <
4507 1 << (32 - Name::kHashFieldOffset));
4508 __ Addu(scratch2, scratch2, Operand(
4509 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4511 __ srl(scratch2, scratch2, Name::kHashShift);
4512 __ And(scratch2, scratch1, scratch2);
4514 // Scale the index by multiplying by the element size.
4515 ASSERT(NameDictionary::kEntrySize == 3);
4516 // scratch2 = scratch2 * 3.
4518 __ sll(at, scratch2, 1);
4519 __ Addu(scratch2, scratch2, at);
4521 // Check if the key is identical to the name.
4522 __ sll(at, scratch2, 2);
4523 __ Addu(scratch2, elements, at);
4524 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
4525 __ Branch(done, eq, name, Operand(at));
4528 const int spill_mask =
4529 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
4530 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
4531 ~(scratch1.bit() | scratch2.bit());
4533 __ MultiPush(spill_mask);
4535 ASSERT(!elements.is(a1));
4537 __ Move(a0, elements);
4539 __ Move(a0, elements);
4542 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4544 __ mov(scratch2, a2);
4546 __ MultiPop(spill_mask);
4548 __ Branch(done, ne, at, Operand(zero_reg));
4549 __ Branch(miss, eq, at, Operand(zero_reg));
4553 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4554 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4555 // we cannot call anything that could cause a GC from this stub.
4557   // result: will hold the lookup result.
4559 // dictionary: NameDictionary to probe.
4560 // index: will hold an index of entry if lookup is successful.
4561 // might alias with result_.
4563   //  result_ is zero if lookup failed, non-zero otherwise.
4565 Register result = v0;
4566 Register dictionary = a0;
4568 Register index = a2;
4571 Register undefined = t1;
4572 Register entry_key = t2;
4574 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4576 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
4577 __ sra(mask, mask, kSmiTagSize);
4578 __ Subu(mask, mask, Operand(1));
4580 __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4582 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4584 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4585 // Compute the masked index: (hash + i + i * i) & mask.
4586 // Capacity is smi 2^n.
4588 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4589 // the hash in a separate instruction. The value hash + i + i * i is right
4590     // shifted by the srl below and then masked in the And instruction.
4591 ASSERT(NameDictionary::GetProbeOffset(i) <
4592 1 << (32 - Name::kHashFieldOffset));
4593 __ Addu(index, hash, Operand(
4594 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4596 __ mov(index, hash);
4598 __ srl(index, index, Name::kHashShift);
4599 __ And(index, mask, index);
4601 // Scale the index by multiplying by the entry size.
4602 ASSERT(NameDictionary::kEntrySize == 3);
4605 __ sll(index, index, 1);
4606 __ Addu(index, index, at);
4609 ASSERT_EQ(kSmiTagSize, 1);
4610 __ sll(index, index, 2);
4611 __ Addu(index, index, dictionary);
4612 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
4614     // Having undefined at this slot means the name is not in the table.
4615 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined));
4617 // Stop if found the property.
4618 __ Branch(&in_dictionary, eq, entry_key, Operand(key));
4620 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
4621 // Check if the entry name is not a unique name.
4622 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4624 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4625 __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
4629 __ bind(&maybe_in_dictionary);
4630   // If we are doing a negative lookup, then probing failure should be
4631   // treated as lookup success; for a positive lookup, probing failure
4632   // should be treated as lookup failure.
4633 if (mode_ == POSITIVE_LOOKUP) {
4634 __ Ret(USE_DELAY_SLOT);
4635 __ mov(result, zero_reg);
4638 __ bind(&in_dictionary);
4639 __ Ret(USE_DELAY_SLOT);
4642 __ bind(¬_in_dictionary);
4643 __ Ret(USE_DELAY_SLOT);
4644 __ mov(result, zero_reg);
4648 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4650 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4652 // Hydrogen code stubs need stub2 at snapshot time.
4653 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4658 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
4659 // the value has just been written into the object; now this stub makes sure
4660 // we keep the GC informed. The word in the object where the value has been
4661 // written is in the address register.
4662 void RecordWriteStub::Generate(MacroAssembler* masm) {
4663 Label skip_to_incremental_noncompacting;
4664 Label skip_to_incremental_compacting;
4666 // The first two branch+nop instructions are generated with labels so as to
4667 // get the offset fixed up correctly by the bind(Label*) call. We patch it
4668 // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
4669 // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
4670 // incremental heap marking.
4671 // See RecordWriteStub::Patch for details.
4672 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
4674 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
4677 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4678 __ RememberedSetHelper(object_,
4682 MacroAssembler::kReturnAtEnd);
4686 __ bind(&skip_to_incremental_noncompacting);
4687 GenerateIncremental(masm, INCREMENTAL);
4689 __ bind(&skip_to_incremental_compacting);
4690 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4692 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4693 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4695 PatchBranchIntoNop(masm, 0);
4696 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
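// Sketch of the resulting code layout (see RecordWriteStub::Patch): in the
// initial STORE_BUFFER_ONLY mode the two branches above are patched into
//
//   bne zero_reg, zero_reg, skip_to_incremental_noncompacting  // Never taken.
//   bne zero_reg, zero_reg, skip_to_incremental_compacting     // Never taken.
//
// i.e. nops, so execution falls through to the store-buffer-only path;
// activating incremental marking patches the relevant bne back into a beq.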
4700 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4703 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4704 Label dont_need_remembered_set;
4706 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4707 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4709 &dont_need_remembered_set);
4711 __ CheckPageFlag(regs_.object(),
4713 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4715 &dont_need_remembered_set);
4717 // First notify the incremental marker if necessary, then update the
4719 CheckNeedsToInformIncrementalMarker(
4720 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4721 InformIncrementalMarker(masm);
4722 regs_.Restore(masm);
4723 __ RememberedSetHelper(object_,
4727 MacroAssembler::kReturnAtEnd);
4729 __ bind(&dont_need_remembered_set);
4732 CheckNeedsToInformIncrementalMarker(
4733 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4734 InformIncrementalMarker(masm);
4735 regs_.Restore(masm);
4740 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4741 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
4742 int argument_count = 3;
4743 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4745 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4746 ASSERT(!address.is(regs_.object()));
4747 ASSERT(!address.is(a0));
4748 __ Move(address, regs_.address());
4749 __ Move(a0, regs_.object());
4750 __ Move(a1, address);
4751 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4753 AllowExternalCallThatCantCauseGC scope(masm);
4755 ExternalReference::incremental_marking_record_write_function(isolate()),
4757 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
4761 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4762 MacroAssembler* masm,
4763 OnNoNeedToInformIncrementalMarker on_no_need,
4766 Label need_incremental;
4767 Label need_incremental_pop_scratch;
4769 __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4770 __ lw(regs_.scratch1(),
4771 MemOperand(regs_.scratch0(),
4772 MemoryChunk::kWriteBarrierCounterOffset));
4773 __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
4774 __ sw(regs_.scratch1(),
4775 MemOperand(regs_.scratch0(),
4776 MemoryChunk::kWriteBarrierCounterOffset));
4777 __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
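  // Sketch of the check above: each page carries a counter that rations how
  // many write barrier hits may be serviced before the marker is consulted:
  //
  //   page = object & ~Page::kPageAlignmentMask;
  //   if (--page->write_barrier_counter < 0) goto need_incremental;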
4779   // Let's look at the color of the object: if it is not black, we don't have
4780 // to inform the incremental marker.
4781 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4783 regs_.Restore(masm);
4784 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4785 __ RememberedSetHelper(object_,
4789 MacroAssembler::kReturnAtEnd);
4796 // Get the value from the slot.
4797 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4799 if (mode == INCREMENTAL_COMPACTION) {
4800 Label ensure_not_white;
4802 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4803 regs_.scratch1(), // Scratch.
4804 MemoryChunk::kEvacuationCandidateMask,
4808 __ CheckPageFlag(regs_.object(),
4809 regs_.scratch1(), // Scratch.
4810 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4814 __ bind(&ensure_not_white);
4817 // We need extra registers for this, so we push the object and the address
4818 // register temporarily.
4819 __ Push(regs_.object(), regs_.address());
4820 __ EnsureNotWhite(regs_.scratch0(), // The value.
4821 regs_.scratch1(), // Scratch.
4822 regs_.object(), // Scratch.
4823 regs_.address(), // Scratch.
4824 &need_incremental_pop_scratch);
4825 __ Pop(regs_.object(), regs_.address());
4827 regs_.Restore(masm);
4828 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4829 __ RememberedSetHelper(object_,
4833 MacroAssembler::kReturnAtEnd);
4838 __ bind(&need_incremental_pop_scratch);
4839 __ Pop(regs_.object(), regs_.address());
4841 __ bind(&need_incremental);
4843 // Fall through when we need to inform the incremental marker.
4847 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4848 // ----------- S t a t e -------------
4849 // -- a0 : element value to store
4850 // -- a3 : element index as smi
4851 // -- sp[0] : array literal index in function as smi
4852 // -- sp[4] : array literal
4853 // clobbers a1, a2, t0
4854 // -----------------------------------
4857 Label double_elements;
4859 Label slow_elements;
4860 Label fast_elements;
4862 // Get array literal index, array literal and its map.
4863 __ lw(t0, MemOperand(sp, 0 * kPointerSize));
4864 __ lw(a1, MemOperand(sp, 1 * kPointerSize));
4865 __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
4867 __ CheckFastElements(a2, t1, &double_elements);
4868 // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
4869 __ JumpIfSmi(a0, &smi_element);
4870 __ CheckFastSmiElements(a2, t1, &fast_elements);
4872   // Storing into the array literal requires an elements transition. Call into
4873   // the runtime.
4874 __ bind(&slow_elements);
4876 __ Push(a1, a3, a0);
4877 __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4878 __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
4880 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4882 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4883 __ bind(&fast_elements);
4884 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4885 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4886 __ Addu(t2, t1, t2);
4887 __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4888 __ sw(a0, MemOperand(t2, 0));
4889 // Update the write barrier for the array store.
4890 __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
4891 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4892 __ Ret(USE_DELAY_SLOT);
4895 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4896 // and value is Smi.
4897 __ bind(&smi_element);
4898 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4899 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
4900 __ Addu(t2, t1, t2);
4901 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
4902 __ Ret(USE_DELAY_SLOT);
4905 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
4906 __ bind(&double_elements);
4907 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
4908 __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
4909 __ Ret(USE_DELAY_SLOT);
4914 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4915 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4916 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4917 int parameter_count_offset =
4918 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4919 __ lw(a1, MemOperand(fp, parameter_count_offset));
4920 if (function_mode_ == JS_FUNCTION_STUB_MODE) {
4921 __ Addu(a1, a1, Operand(1));
4923 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4924 __ sll(a1, a1, kPointerSizeLog2);
4925 __ Ret(USE_DELAY_SLOT);
4926 __ Addu(sp, sp, a1);
4930 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4931 if (masm->isolate()->function_entry_hook() != NULL) {
4932 ProfileEntryHookStub stub(masm->isolate());
4940 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4941 // The entry hook is a "push ra" instruction, followed by a call.
4942   // Note: on MIPS "push" is 2 instructions.
4943 const int32_t kReturnAddressDistanceFromFunctionStart =
4944 Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
4946 // This should contain all kJSCallerSaved registers.
4947 const RegList kSavedRegs =
4948 kJSCallerSaved | // Caller saved registers.
4949 s5.bit(); // Saved stack pointer.
4951 // We also save ra, so the count here is one higher than the mask indicates.
4952 const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
4954 // Save all caller-save registers as this may be called from anywhere.
4955 __ MultiPush(kSavedRegs | ra.bit());
4957 // Compute the function's address for the first argument.
4958 __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
4960 // The caller's return address is above the saved temporaries.
4961 // Grab that for the second argument to the hook.
4962 __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
4964 // Align the stack if necessary.
4965 int frame_alignment = masm->ActivationFrameAlignment();
4966 if (frame_alignment > kPointerSize) {
4968 ASSERT(IsPowerOf2(frame_alignment));
4969 __ And(sp, sp, Operand(-frame_alignment));
4971 __ Subu(sp, sp, kCArgsSlotsSize);
4972 #if defined(V8_HOST_ARCH_MIPS)
4973 int32_t entry_hook =
4974 reinterpret_cast<int32_t>(isolate()->function_entry_hook());
4975 __ li(t9, Operand(entry_hook));
4977 // Under the simulator we need to indirect the entry hook through a
4978 // trampoline function at a known address.
4979 // It additionally takes an isolate as a third parameter.
4980 __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4982 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4983 __ li(t9, Operand(ExternalReference(&dispatcher,
4984 ExternalReference::BUILTIN_CALL,
4987   // Call C function through t9 to conform to the ABI for PIC.
4990 // Restore the stack pointer if needed.
4991 if (frame_alignment > kPointerSize) {
4994 __ Addu(sp, sp, kCArgsSlotsSize);
4997 // Also pop ra to get Ret(0).
4998 __ MultiPop(kSavedRegs | ra.bit());
5004 static void CreateArrayDispatch(MacroAssembler* masm,
5005 AllocationSiteOverrideMode mode) {
5006 if (mode == DISABLE_ALLOCATION_SITES) {
5007 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
5008 __ TailCallStub(&stub);
5009 } else if (mode == DONT_OVERRIDE) {
5010 int last_index = GetSequenceIndexFromFastElementsKind(
5011 TERMINAL_FAST_ELEMENTS_KIND);
5012 for (int i = 0; i <= last_index; ++i) {
5013 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5014 T stub(masm->isolate(), kind);
5015 __ TailCallStub(&stub, eq, a3, Operand(kind));
5018 // If we reached this point there is a problem.
5019 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5026 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
5027 AllocationSiteOverrideMode mode) {
5028 // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
5029 // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
5030 // a0 - number of arguments
5031 // a1 - constructor?
5032 // sp[0] - last argument
5033 Label normal_sequence;
5034 if (mode == DONT_OVERRIDE) {
5035 ASSERT(FAST_SMI_ELEMENTS == 0);
5036 ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
5037 ASSERT(FAST_ELEMENTS == 2);
5038 ASSERT(FAST_HOLEY_ELEMENTS == 3);
5039 ASSERT(FAST_DOUBLE_ELEMENTS == 4);
5040 ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
5042 // is the low bit set? If so, we are holey and that is good.
5043 __ And(at, a3, Operand(1));
5044 __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
5047 // look at the first argument
5048 __ lw(t1, MemOperand(sp, 0));
5049 __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
5051 if (mode == DISABLE_ALLOCATION_SITES) {
5052 ElementsKind initial = GetInitialFastElementsKind();
5053 ElementsKind holey_initial = GetHoleyElementsKind(initial);
5055 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
5057 DISABLE_ALLOCATION_SITES);
5058 __ TailCallStub(&stub_holey);
5060 __ bind(&normal_sequence);
5061 ArraySingleArgumentConstructorStub stub(masm->isolate(),
5063 DISABLE_ALLOCATION_SITES);
5064 __ TailCallStub(&stub);
5065 } else if (mode == DONT_OVERRIDE) {
5066 // We are going to create a holey array, but our kind is non-holey.
5067 // Fix kind and retry (only if we have an allocation site in the slot).
5068 __ Addu(a3, a3, Operand(1));
5070 if (FLAG_debug_code) {
5071 __ lw(t1, FieldMemOperand(a2, 0));
5072 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
5073 __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
5076 // Save the resulting elements kind in type info. We can't just store a3
5077 // in the AllocationSite::transition_info field because elements kind is
5078     // restricted to a portion of the field; the upper bits need to be left alone.
5079 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5080 __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5081 __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
5082 __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
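    // In effect (a sketch; ElementsKindBits live in the low bits of the
    // smi-tagged transition_info, so a smi addition adjusts the kind in
    // place):
    //
    //   kind = kind + 1;  // FAST_*_ELEMENTS -> FAST_HOLEY_*_ELEMENTS,
    //                     // see the asserts at the top of this function.
    //   site->transition_info += Smi::FromInt(kFastElementsKindPackedToHoley);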
5085 __ bind(&normal_sequence);
5086 int last_index = GetSequenceIndexFromFastElementsKind(
5087 TERMINAL_FAST_ELEMENTS_KIND);
5088 for (int i = 0; i <= last_index; ++i) {
5089 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5090 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
5091 __ TailCallStub(&stub, eq, a3, Operand(kind));
5094 // If we reached this point there is a problem.
5095 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5103 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
5104 int to_index = GetSequenceIndexFromFastElementsKind(
5105 TERMINAL_FAST_ELEMENTS_KIND);
5106 for (int i = 0; i <= to_index; ++i) {
5107 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5108 T stub(isolate, kind);
5110 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
5111 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
5118 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
5119 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5121 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5123 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5128 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
5130 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
5131 for (int i = 0; i < 2; i++) {
5132 // For internal arrays we only need a few things.
5133 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
5135 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
5137 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
5143 void ArrayConstructorStub::GenerateDispatchToArrayStub(
5144 MacroAssembler* masm,
5145 AllocationSiteOverrideMode mode) {
5146 if (argument_count_ == ANY) {
5147 Label not_zero_case, not_one_case;
5149 __ Branch(¬_zero_case, ne, at, Operand(zero_reg));
5150 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5152 __ bind(¬_zero_case);
5153 __ Branch(¬_one_case, gt, a0, Operand(1));
5154 CreateArrayDispatchOneArgument(masm, mode);
5156 __ bind(¬_one_case);
5157 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5158 } else if (argument_count_ == NONE) {
5159 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5160 } else if (argument_count_ == ONE) {
5161 CreateArrayDispatchOneArgument(masm, mode);
5162 } else if (argument_count_ == MORE_THAN_ONE) {
5163 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : argc (only if argument_count_ == ANY)
  //  -- a1    : constructor
  //  -- a2    : AllocationSite or undefined
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ SmiTst(t0, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
              at, Operand(zero_reg));
    __ GetObjectType(t0, t0, t1);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
              t1, Operand(MAP_TYPE));

    // We should either have undefined in a2 or a valid AllocationSite.
    __ AssertUndefinedOrAllocationSite(a2, t0);
  }

  Label no_info;
  // Get the elements kind and case on that.
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&no_info, eq, a2, Operand(at));

  __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(a3);
  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
  __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}
void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {

  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0, lo, a0, Operand(1));

  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
  __ TailCallStub(&stubN, hi, a0, Operand(1));

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array;
    // look at the first argument.
    __ lw(at, MemOperand(sp, 0));

    InternalArraySingleArgumentConstructorStub
        stub1_holey(isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}
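// In GenerateCase above, the `lo`/`hi` conditions compare the unsigned argc in
// a0 against 1: fewer than one argument tail-calls the no-argument stub, more
// than one tail-calls the N-argument stub, and exactly one falls through to
// the single-argument path. For packed kinds the single length argument is
// reloaded from sp[0]; a non-zero length means the new backing store starts
// out with holes, so the holey variant of the single-argument stub is used
// instead.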
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : argc
  //  -- a1    : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ SmiTst(a3, at);
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
              at, Operand(zero_reg));
    __ GetObjectType(a3, a3, t0);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
              t0, Operand(MAP_TYPE));
  }

  // Figure out the right elements kind.
  __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));

  // Load the map's "bit field 2" into a3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(a3);

  if (FLAG_debug_code) {
    Label done;
    __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
    __ Assert(
        eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
        a3, Operand(FAST_HOLEY_ELEMENTS));
    __ bind(&done);
  }

  Label fast_elements_case;
  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0                  : callee
  //  -- t0                  : call_data
  //  -- a2                  : holder
  //  -- a1                  : api_function_address
  //  -- cp                  : context
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 4]  : first argument
  //  -- sp[argc * 4]        : receiver
  // -----------------------------------
  Register callee = a0;
  Register call_data = t0;
  Register holder = a2;
  Register api_function_address = a1;
  Register context = cp;

  int argc = ArgumentBits::decode(bit_field_);
  bool is_store = IsStoreBits::decode(bit_field_);
  bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kArgsLength == 7);
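  // The pushes below build the FunctionCallbackArguments block on the stack in
  // reverse index order, so that after `__ mov(scratch, sp)` the layout seen
  // through implicit_args_ is (index 0 at the lowest address):
  //   0: holder, 1: isolate, 2: return value default, 3: return value,
  //   4: call data, 5: callee, 6: context save
  // matching the FCA::k*Index constants asserted above.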
  // Save context, callee and call data.
  __ Push(context, callee, call_data);
  // Load context from callee.
  __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));

  Register scratch = call_data;
  if (!call_data_undefined) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // Push return value and default return value.
  __ Push(scratch, scratch);
  __ li(scratch,
        Operand(ExternalReference::isolate_address(isolate())));
  // Push isolate and holder.
  __ Push(scratch, holder);

  // Prepare arguments.
  __ mov(scratch, sp);
  // Allocate the v8::Arguments structure in the arguments' space since
  // it's not controlled by GC.
  const int kApiStackSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  ASSERT(!api_function_address.is(a0) && !scratch.is(a0));
  // a0 = FunctionCallbackInfo&
  // Arguments is after the return address.
  __ Addu(a0, sp, Operand(1 * kPointerSize));
  // FunctionCallbackInfo::implicit_args_
  __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
  // FunctionCallbackInfo::values_
  __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
  __ sw(at, MemOperand(a0, 1 * kPointerSize));
  // FunctionCallbackInfo::length_ = argc
  __ li(at, Operand(argc));
  __ sw(at, MemOperand(a0, 2 * kPointerSize));
  // FunctionCallbackInfo::is_construct_call = 0
  __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
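  // The four words stored above correspond to the fields of the C++-side
  // FunctionCallbackInfo (roughly: implicit_args_, values_, length_ and the
  // is-construct-call flag), where values_ points at the first JS argument,
  // i.e. kArgsLength - 1 + argc slots above implicit_args_.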
  const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first js argument.
  int return_value_offset = 0;
  if (is_store) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);

  __ CallApiFunctionAndReturn(api_function_address,
                              thunk_ref,
                              kStackUnwindSpace,
                              return_value_operand,
                              &context_restore_operand);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- sp[0]                  : name
  //  -- sp[4 - kArgsLength*4]  : PropertyCallbackArguments object
  //  -- ...
  //  -- a2                     : api_function_address
  // -----------------------------------

  Register api_function_address = a2;

  __ mov(a0, sp);  // a0 = Handle<Name>
  __ Addu(a1, a0, Operand(1 * kPointerSize));  // a1 = PCA

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create PropertyAccessorInfo instance on the stack above the exit frame
  // with a1 (internal::Object** args_) as the data.
  __ sw(a1, MemOperand(sp, 1 * kPointerSize));
  __ Addu(a1, sp, Operand(1 * kPointerSize));  // a1 = AccessorInfo&

  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());
  __ CallApiFunctionAndReturn(api_function_address,
                              thunk_ref,
                              kStackUnwindSpace,
                              MemOperand(fp, 6 * kPointerSize),
                              NULL);
}
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS