1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #if V8_TARGET_ARCH_ARM
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
34 #include "regexp-macro-assembler.h"
35 #include "stub-cache.h"
41 void FastNewClosureStub::InitializeInterfaceDescriptor(
43 CodeStubInterfaceDescriptor* descriptor) {
44 static Register registers[] = { r2 };
45 descriptor->register_param_count_ = 1;
46 descriptor->register_params_ = registers;
47 descriptor->deoptimization_handler_ =
48 Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
52 void FastNewContextStub::InitializeInterfaceDescriptor(
54 CodeStubInterfaceDescriptor* descriptor) {
55 static Register registers[] = { r1 };
56 descriptor->register_param_count_ = 1;
57 descriptor->register_params_ = registers;
58 descriptor->deoptimization_handler_ = NULL;
62 void ToNumberStub::InitializeInterfaceDescriptor(
64 CodeStubInterfaceDescriptor* descriptor) {
65 static Register registers[] = { r0 };
66 descriptor->register_param_count_ = 1;
67 descriptor->register_params_ = registers;
68 descriptor->deoptimization_handler_ = NULL;
72 void NumberToStringStub::InitializeInterfaceDescriptor(
74 CodeStubInterfaceDescriptor* descriptor) {
75 static Register registers[] = { r0 };
76 descriptor->register_param_count_ = 1;
77 descriptor->register_params_ = registers;
78 descriptor->deoptimization_handler_ =
79 Runtime::FunctionForId(Runtime::kNumberToString)->entry;
83 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
85 CodeStubInterfaceDescriptor* descriptor) {
86 static Register registers[] = { r3, r2, r1 };
87 descriptor->register_param_count_ = 3;
88 descriptor->register_params_ = registers;
89 descriptor->deoptimization_handler_ =
90 Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
94 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
96 CodeStubInterfaceDescriptor* descriptor) {
97 static Register registers[] = { r3, r2, r1, r0 };
98 descriptor->register_param_count_ = 4;
99 descriptor->register_params_ = registers;
100 descriptor->deoptimization_handler_ =
101 Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
105 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
107 CodeStubInterfaceDescriptor* descriptor) {
108 static Register registers[] = { r2 };
109 descriptor->register_param_count_ = 1;
110 descriptor->register_params_ = registers;
111 descriptor->deoptimization_handler_ = NULL;
115 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
117 CodeStubInterfaceDescriptor* descriptor) {
118 static Register registers[] = { r1, r0 };
119 descriptor->register_param_count_ = 2;
120 descriptor->register_params_ = registers;
121 descriptor->deoptimization_handler_ =
122 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
126 void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
128 CodeStubInterfaceDescriptor* descriptor) {
129 static Register registers[] = { r1, r0 };
130 descriptor->register_param_count_ = 2;
131 descriptor->register_params_ = registers;
132 descriptor->deoptimization_handler_ =
133 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
137 void RegExpConstructResultStub::InitializeInterfaceDescriptor(
139 CodeStubInterfaceDescriptor* descriptor) {
140 static Register registers[] = { r2, r1, r0 };
141 descriptor->register_param_count_ = 3;
142 descriptor->register_params_ = registers;
143 descriptor->deoptimization_handler_ =
144 Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
148 void LoadFieldStub::InitializeInterfaceDescriptor(
150 CodeStubInterfaceDescriptor* descriptor) {
151 static Register registers[] = { r0 };
152 descriptor->register_param_count_ = 1;
153 descriptor->register_params_ = registers;
154 descriptor->deoptimization_handler_ = NULL;
158 void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
160 CodeStubInterfaceDescriptor* descriptor) {
161 static Register registers[] = { r1 };
162 descriptor->register_param_count_ = 1;
163 descriptor->register_params_ = registers;
164 descriptor->deoptimization_handler_ = NULL;
168 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
170 CodeStubInterfaceDescriptor* descriptor) {
171 static Register registers[] = { r2, r1, r0 };
172 descriptor->register_param_count_ = 3;
173 descriptor->register_params_ = registers;
174 descriptor->deoptimization_handler_ =
175 FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
179 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
181 CodeStubInterfaceDescriptor* descriptor) {
182 static Register registers[] = { r0, r1 };
183 descriptor->register_param_count_ = 2;
184 descriptor->register_params_ = registers;
186 Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
187 descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
191 void CompareNilICStub::InitializeInterfaceDescriptor(
193 CodeStubInterfaceDescriptor* descriptor) {
194 static Register registers[] = { r0 };
195 descriptor->register_param_count_ = 1;
196 descriptor->register_params_ = registers;
197 descriptor->deoptimization_handler_ =
198 FUNCTION_ADDR(CompareNilIC_Miss);
199 descriptor->SetMissHandler(
200 ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
204 static void InitializeArrayConstructorDescriptor(
206 CodeStubInterfaceDescriptor* descriptor,
207 int constant_stack_parameter_count) {
209 // r0 -- number of arguments
211 // r2 -- allocation site with elements kind
212 static Register registers_variable_args[] = { r1, r2, r0 };
213 static Register registers_no_args[] = { r1, r2 };
215 if (constant_stack_parameter_count == 0) {
216 descriptor->register_param_count_ = 2;
217 descriptor->register_params_ = registers_no_args;
219 // stack param count needs (constructor pointer, and single argument)
220 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
221 descriptor->stack_parameter_count_ = r0;
222 descriptor->register_param_count_ = 3;
223 descriptor->register_params_ = registers_variable_args;
226 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
227 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
228 descriptor->deoptimization_handler_ =
229 Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
233 static void InitializeInternalArrayConstructorDescriptor(
235 CodeStubInterfaceDescriptor* descriptor,
236 int constant_stack_parameter_count) {
238 // r0 -- number of arguments
239 // r1 -- constructor function
240 static Register registers_variable_args[] = { r1, r0 };
241 static Register registers_no_args[] = { r1 };
243 if (constant_stack_parameter_count == 0) {
244 descriptor->register_param_count_ = 1;
245 descriptor->register_params_ = registers_no_args;
247 // stack param count needs (constructor pointer, and single argument)
248 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
249 descriptor->stack_parameter_count_ = r0;
250 descriptor->register_param_count_ = 2;
251 descriptor->register_params_ = registers_variable_args;
254 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
255 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
256 descriptor->deoptimization_handler_ =
257 Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
261 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
263 CodeStubInterfaceDescriptor* descriptor) {
264 InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
268 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
270 CodeStubInterfaceDescriptor* descriptor) {
271 InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
275 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
277 CodeStubInterfaceDescriptor* descriptor) {
278 InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
282 void ToBooleanStub::InitializeInterfaceDescriptor(
284 CodeStubInterfaceDescriptor* descriptor) {
285 static Register registers[] = { r0 };
286 descriptor->register_param_count_ = 1;
287 descriptor->register_params_ = registers;
288 descriptor->deoptimization_handler_ =
289 FUNCTION_ADDR(ToBooleanIC_Miss);
290 descriptor->SetMissHandler(
291 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
295 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
297 CodeStubInterfaceDescriptor* descriptor) {
298 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
302 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
304 CodeStubInterfaceDescriptor* descriptor) {
305 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
309 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
311 CodeStubInterfaceDescriptor* descriptor) {
312 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
316 void StoreGlobalStub::InitializeInterfaceDescriptor(
318 CodeStubInterfaceDescriptor* descriptor) {
319 static Register registers[] = { r1, r2, r0 };
320 descriptor->register_param_count_ = 3;
321 descriptor->register_params_ = registers;
322 descriptor->deoptimization_handler_ =
323 FUNCTION_ADDR(StoreIC_MissFromStubFailure);
327 void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
329 CodeStubInterfaceDescriptor* descriptor) {
330 static Register registers[] = { r0, r3, r1, r2 };
331 descriptor->register_param_count_ = 4;
332 descriptor->register_params_ = registers;
333 descriptor->deoptimization_handler_ =
334 FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
338 void BinaryOpICStub::InitializeInterfaceDescriptor(
340 CodeStubInterfaceDescriptor* descriptor) {
341 static Register registers[] = { r1, r0 };
342 descriptor->register_param_count_ = 2;
343 descriptor->register_params_ = registers;
344 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
345 descriptor->SetMissHandler(
346 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
350 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
352 CodeStubInterfaceDescriptor* descriptor) {
353 static Register registers[] = { r2, r1, r0 };
354 descriptor->register_param_count_ = 3;
355 descriptor->register_params_ = registers;
356 descriptor->deoptimization_handler_ =
357 FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
361 void StringAddStub::InitializeInterfaceDescriptor(
363 CodeStubInterfaceDescriptor* descriptor) {
364 static Register registers[] = { r1, r0 };
365 descriptor->register_param_count_ = 2;
366 descriptor->register_params_ = registers;
367 descriptor->deoptimization_handler_ =
368 Runtime::FunctionForId(Runtime::kStringAdd)->entry;
372 void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
373 static PlatformCallInterfaceDescriptor default_descriptor =
374 PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
376 static PlatformCallInterfaceDescriptor noInlineDescriptor =
377 PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
380 CallInterfaceDescriptor* descriptor =
381 isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
382 static Register registers[] = { r1, // JSFunction
384 r0, // actual number of arguments
385 r2, // expected number of arguments
387 static Representation representations[] = {
388 Representation::Tagged(), // JSFunction
389 Representation::Tagged(), // context
390 Representation::Integer32(), // actual number of arguments
391 Representation::Integer32(), // expected number of arguments
393 descriptor->register_param_count_ = 4;
394 descriptor->register_params_ = registers;
395 descriptor->param_representations_ = representations;
396 descriptor->platform_specific_descriptor_ = &default_descriptor;
399 CallInterfaceDescriptor* descriptor =
400 isolate->call_descriptor(Isolate::KeyedCall);
401 static Register registers[] = { cp, // context
404 static Representation representations[] = {
405 Representation::Tagged(), // context
406 Representation::Tagged(), // key
408 descriptor->register_param_count_ = 2;
409 descriptor->register_params_ = registers;
410 descriptor->param_representations_ = representations;
411 descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
414 CallInterfaceDescriptor* descriptor =
415 isolate->call_descriptor(Isolate::NamedCall);
416 static Register registers[] = { cp, // context
419 static Representation representations[] = {
420 Representation::Tagged(), // context
421 Representation::Tagged(), // name
423 descriptor->register_param_count_ = 2;
424 descriptor->register_params_ = registers;
425 descriptor->param_representations_ = representations;
426 descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
429 CallInterfaceDescriptor* descriptor =
430 isolate->call_descriptor(Isolate::CallHandler);
431 static Register registers[] = { cp, // context
434 static Representation representations[] = {
435 Representation::Tagged(), // context
436 Representation::Tagged(), // receiver
438 descriptor->register_param_count_ = 2;
439 descriptor->register_params_ = registers;
440 descriptor->param_representations_ = representations;
441 descriptor->platform_specific_descriptor_ = &default_descriptor;
444 CallInterfaceDescriptor* descriptor =
445 isolate->call_descriptor(Isolate::ApiFunctionCall);
446 static Register registers[] = { r0, // callee
449 r1, // api_function_address
452 static Representation representations[] = {
453 Representation::Tagged(), // callee
454 Representation::Tagged(), // call_data
455 Representation::Tagged(), // holder
456 Representation::External(), // api_function_address
457 Representation::Tagged(), // context
459 descriptor->register_param_count_ = 5;
460 descriptor->register_params_ = registers;
461 descriptor->param_representations_ = representations;
462 descriptor->platform_specific_descriptor_ = &default_descriptor;
467 #define __ ACCESS_MASM(masm)
470 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
473 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
479 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
484 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
485 // Update the static counter each time a new code stub is generated.
486 Isolate* isolate = masm->isolate();
487 isolate->counters()->code_stubs()->Increment();
489 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
490 int param_count = descriptor->register_param_count_;
492 // Call the runtime system in a fresh internal frame.
493 FrameScope scope(masm, StackFrame::INTERNAL);
494 ASSERT(descriptor->register_param_count_ == 0 ||
495 r0.is(descriptor->register_params_[param_count - 1]));
497 for (int i = 0; i < param_count; ++i) {
498 __ push(descriptor->register_params_[i]);
500 ExternalReference miss = descriptor->miss_handler();
501 __ CallExternalReference(miss, descriptor->register_param_count_);
508 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
509 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
510 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
511 // scratch register. Destroys the source register. No GC occurs during this
512 // stub so you don't have to set up the frame.
513 class ConvertToDoubleStub : public PlatformCodeStub {
515 ConvertToDoubleStub(Register result_reg_1,
516 Register result_reg_2,
518 Register scratch_reg)
519 : result1_(result_reg_1),
520 result2_(result_reg_2),
522 zeros_(scratch_reg) { }
530 // Minor key encoding in 16 bits.
531 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
532 class OpBits: public BitField<Token::Value, 2, 14> {};
534 Major MajorKey() { return ConvertToDouble; }
536 // Encode the parameters in a unique 16 bit value.
537 return result1_.code() +
538 (result2_.code() << 4) +
539 (source_.code() << 8) +
540 (zeros_.code() << 12);
543 void Generate(MacroAssembler* masm);
547 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
548 Register exponent = result1_;
549 Register mantissa = result2_;
552 __ SmiUntag(source_);
553 // Move sign bit from source to destination. This works because the sign bit
554 // in the exponent word of the double has the same position and polarity as
555 // the 2's complement sign bit in a Smi.
556 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
557 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
558 // Subtract from 0 if source was negative.
559 __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);
561 // We have -1, 0 or 1, which we treat specially. Register source_ contains
562 // absolute value: it is either equal to 1 (special case of -1 and 1),
563 // greater than 1 (not a special case) or less than 1 (special case of 0).
564 __ cmp(source_, Operand(1));
565 __ b(gt, ¬_special);
567 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
568 const uint32_t exponent_word_for_1 =
569 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
570 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
571 // 1, 0 and -1 all have 0 for the second word.
572 __ mov(mantissa, Operand::Zero());
575 __ bind(¬_special);
576 __ clz(zeros_, source_);
577 // Compute exponent and or it into the exponent register.
578 // We use mantissa as a scratch register here. Use a fudge factor to
579 // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
580 // that fit in the ARM's constant field.
582 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
583 __ add(mantissa, mantissa, Operand(fudge));
586 Operand(mantissa, LSL, HeapNumber::kExponentShift));
587 // Shift up the source chopping the top bit off.
588 __ add(zeros_, zeros_, Operand(1));
589 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
590 __ mov(source_, Operand(source_, LSL, zeros_));
591 // Compute lower part of fraction (last 12 bits).
592 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
593 // And the top (top 20 bits).
596 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
601 void DoubleToIStub::Generate(MacroAssembler* masm) {
602 Label out_of_range, only_low, negate, done;
603 Register input_reg = source();
604 Register result_reg = destination();
606 int double_offset = offset();
607 // Account for saved regs if input is sp.
608 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
610 Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
611 Register scratch_low =
612 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
613 Register scratch_high =
614 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
615 LowDwVfpRegister double_scratch = kScratchDoubleReg;
617 __ Push(scratch_high, scratch_low, scratch);
619 if (!skip_fastpath()) {
620 // Load double input.
621 __ vldr(double_scratch, MemOperand(input_reg, double_offset));
622 __ vmov(scratch_low, scratch_high, double_scratch);
624 // Do fast-path convert from double to int.
625 __ vcvt_s32_f64(double_scratch.low(), double_scratch);
626 __ vmov(result_reg, double_scratch.low());
628 // If result is not saturated (0x7fffffff or 0x80000000), we are done.
629 __ sub(scratch, result_reg, Operand(1));
630 __ cmp(scratch, Operand(0x7ffffffe));
633 // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
634 // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
635 if (double_offset == 0) {
636 __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
638 __ ldr(scratch_low, MemOperand(input_reg, double_offset));
639 __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
643 __ Ubfx(scratch, scratch_high,
644 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
645 // Load scratch with exponent - 1. This is faster than loading
646 // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
647 STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
648 __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
649 // If exponent is greater than or equal to 84, the 32 less significant
650 // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
652 // Compare exponent with 84 (compare exponent - 1 with 83).
653 __ cmp(scratch, Operand(83));
654 __ b(ge, &out_of_range);
656 // If we reach this code, 31 <= exponent <= 83.
657 // So, we don't have to handle cases where 0 <= exponent <= 20 for
658 // which we would need to shift right the high part of the mantissa.
659 // Scratch contains exponent - 1.
660 // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
661 __ rsb(scratch, scratch, Operand(51), SetCC);
663 // 21 <= exponent <= 51, shift scratch_low and scratch_high
664 // to generate the result.
665 __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
666 // Scratch contains: 52 - exponent.
667 // We needs: exponent - 20.
668 // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
669 __ rsb(scratch, scratch, Operand(32));
670 __ Ubfx(result_reg, scratch_high,
671 0, HeapNumber::kMantissaBitsInTopWord);
672 // Set the implicit 1 before the mantissa part in scratch_high.
673 __ orr(result_reg, result_reg,
674 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
675 __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
678 __ bind(&out_of_range);
679 __ mov(result_reg, Operand::Zero());
683 // 52 <= exponent <= 83, shift only scratch_low.
684 // On entry, scratch contains: 52 - exponent.
685 __ rsb(scratch, scratch, Operand::Zero());
686 __ mov(result_reg, Operand(scratch_low, LSL, scratch));
689 // If input was positive, scratch_high ASR 31 equals 0 and
690 // scratch_high LSR 31 equals zero.
691 // New result = (result eor 0) + 0 = result.
692 // If the input was negative, we have to negate the result.
693 // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
694 // New result = (result eor 0xffffffff) + 1 = 0 - result.
695 __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
696 __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
700 __ Pop(scratch_high, scratch_low, scratch);
705 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
707 WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
708 WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
709 stub1.GetCode(isolate);
710 stub2.GetCode(isolate);
714 // See comment for class.
715 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
716 Label max_negative_int;
717 // the_int_ has the answer which is a signed int32 but not a Smi.
718 // We test for the special value that has a different exponent. This test
719 // has the neat side effect of setting the flags according to the sign.
720 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
721 __ cmp(the_int_, Operand(0x80000000u));
722 __ b(eq, &max_negative_int);
723 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
724 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
725 uint32_t non_smi_exponent =
726 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
727 __ mov(scratch_, Operand(non_smi_exponent));
728 // Set the sign bit in scratch_ if the value was negative.
729 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
730 // Subtract from 0 if the value was negative.
731 __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
732 // We should be masking the implict first digit of the mantissa away here,
733 // but it just ends up combining harmlessly with the last digit of the
734 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
735 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
736 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
737 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
738 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
739 __ str(scratch_, FieldMemOperand(the_heap_number_,
740 HeapNumber::kExponentOffset));
741 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
742 __ str(scratch_, FieldMemOperand(the_heap_number_,
743 HeapNumber::kMantissaOffset));
746 __ bind(&max_negative_int);
747 // The max negative int32 is stored as a positive number in the mantissa of
748 // a double because it uses a sign bit instead of using two's complement.
749 // The actual mantissa bits stored are all 0 because the implicit most
750 // significant 1 bit is not stored.
751 non_smi_exponent += 1 << HeapNumber::kExponentShift;
752 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
753 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
754 __ mov(ip, Operand::Zero());
755 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
760 // Handle the case where the lhs and rhs are the same object.
761 // Equality is almost reflexive (everything but NaN), so this is a test
762 // for "identity and not NaN".
763 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
767 Label heap_number, return_equal;
769 __ b(ne, ¬_identical);
771 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
772 // so we do the second best thing - test it ourselves.
773 // They are both equal and they are not both Smis so both of them are not
774 // Smis. If it's not a heap number, then return equal.
775 if (cond == lt || cond == gt) {
776 __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
779 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
780 __ b(eq, &heap_number);
781 // Comparing JS objects with <=, >= is complicated.
783 __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
785 // Normally here we fall through to return_equal, but undefined is
786 // special: (undefined == undefined) == true, but
787 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
788 if (cond == le || cond == ge) {
789 __ cmp(r4, Operand(ODDBALL_TYPE));
790 __ b(ne, &return_equal);
791 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
793 __ b(ne, &return_equal);
795 // undefined <= undefined should fail.
796 __ mov(r0, Operand(GREATER));
798 // undefined >= undefined should fail.
799 __ mov(r0, Operand(LESS));
806 __ bind(&return_equal);
808 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
809 } else if (cond == gt) {
810 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
812 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
816 // For less and greater we don't have to check for NaN since the result of
817 // x < x is false regardless. For the others here is some code to check
819 if (cond != lt && cond != gt) {
820 __ bind(&heap_number);
821 // It is a heap number, so return non-equal if it's NaN and equal if it's
824 // The representation of NaN values has all exponent bits (52..62) set,
825 // and not all mantissa bits (0..51) clear.
826 // Read top bits of double representation (second word of value).
827 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
828 // Test that exponent bits are all set.
829 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
830 // NaNs have all-one exponents so they sign extend to -1.
831 __ cmp(r3, Operand(-1));
832 __ b(ne, &return_equal);
834 // Shift out flag and all exponent bits, retaining only mantissa.
835 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
836 // Or with all low-bits of mantissa.
837 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
838 __ orr(r0, r3, Operand(r2), SetCC);
839 // For equal we already have the right value in r0: Return zero (equal)
840 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
841 // not (it's a NaN). For <= and >= we need to load r0 with the failing
842 // value if it's a NaN.
844 // All-zero means Infinity means equal.
847 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
849 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
854 // No fall through here.
856 __ bind(¬_identical);
860 // See comment at call site.
861 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
867 ASSERT((lhs.is(r0) && rhs.is(r1)) ||
868 (lhs.is(r1) && rhs.is(r0)));
871 __ JumpIfSmi(rhs, &rhs_is_smi);
873 // Lhs is a Smi. Check whether the rhs is a heap number.
874 __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
876 // If rhs is not a number and lhs is a Smi then strict equality cannot
877 // succeed. Return non-equal
878 // If rhs is r0 then there is already a non zero value in it.
880 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
884 // Smi compared non-strictly with a non-Smi non-heap-number. Call
889 // Lhs is a smi, rhs is a number.
890 // Convert lhs to a double in d7.
891 __ SmiToDouble(d7, lhs);
892 // Load the double from rhs, tagged HeapNumber r0, to d6.
893 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
895 // We now have both loaded as doubles but we can skip the lhs nan check
899 __ bind(&rhs_is_smi);
900 // Rhs is a smi. Check whether the non-smi lhs is a heap number.
901 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
903 // If lhs is not a number and rhs is a smi then strict equality cannot
904 // succeed. Return non-equal.
905 // If lhs is r0 then there is already a non zero value in it.
907 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
911 // Smi compared non-strictly with a non-smi non-heap-number. Call
916 // Rhs is a smi, lhs is a heap number.
917 // Load the double from lhs, tagged HeapNumber r1, to d7.
918 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
919 // Convert rhs to a double in d6 .
920 __ SmiToDouble(d6, rhs);
921 // Fall through to both_loaded_as_doubles.
925 // See comment at call site.
926 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
929 ASSERT((lhs.is(r0) && rhs.is(r1)) ||
930 (lhs.is(r1) && rhs.is(r0)));
932 // If either operand is a JS object or an oddball value, then they are
933 // not equal since their pointers are different.
934 // There is no test for undetectability in strict equality.
935 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
936 Label first_non_object;
937 // Get the type of the first operand into r2 and compare it with
938 // FIRST_SPEC_OBJECT_TYPE.
939 __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
940 __ b(lt, &first_non_object);
942 // Return non-zero (r0 is not zero)
943 Label return_not_equal;
944 __ bind(&return_not_equal);
947 __ bind(&first_non_object);
948 // Check for oddballs: true, false, null, undefined.
949 __ cmp(r2, Operand(ODDBALL_TYPE));
950 __ b(eq, &return_not_equal);
952 __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
953 __ b(ge, &return_not_equal);
955 // Check for oddballs: true, false, null, undefined.
956 __ cmp(r3, Operand(ODDBALL_TYPE));
957 __ b(eq, &return_not_equal);
959 // Now that we have the types we might as well check for
960 // internalized-internalized.
961 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
962 __ orr(r2, r2, Operand(r3));
963 __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
964 __ b(eq, &return_not_equal);
968 // See comment at call site.
969 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
972 Label* both_loaded_as_doubles,
973 Label* not_heap_numbers,
975 ASSERT((lhs.is(r0) && rhs.is(r1)) ||
976 (lhs.is(r1) && rhs.is(r0)));
978 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
979 __ b(ne, not_heap_numbers);
980 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
982 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
984 // Both are heap numbers. Load them up then jump to the code we have
986 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
987 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
988 __ jmp(both_loaded_as_doubles);
992 // Fast negative check for internalized-to-internalized equality.
993 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
996 Label* possible_strings,
997 Label* not_both_strings) {
998 ASSERT((lhs.is(r0) && rhs.is(r1)) ||
999 (lhs.is(r1) && rhs.is(r0)));
1001 // r2 is object type of rhs.
1003 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
1004 __ tst(r2, Operand(kIsNotStringMask));
1005 __ b(ne, &object_test);
1006 __ tst(r2, Operand(kIsNotInternalizedMask));
1007 __ b(ne, possible_strings);
1008 __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
1009 __ b(ge, not_both_strings);
1010 __ tst(r3, Operand(kIsNotInternalizedMask));
1011 __ b(ne, possible_strings);
1013 // Both are internalized. We already checked they weren't the same pointer
1014 // so they are not equal.
1015 __ mov(r0, Operand(NOT_EQUAL));
1018 __ bind(&object_test);
1019 __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
1020 __ b(lt, not_both_strings);
1021 __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
1022 __ b(lt, not_both_strings);
1023 // If both objects are undetectable, they are equal. Otherwise, they
1024 // are not equal, since they are different objects and an object is not
1025 // equal to undefined.
1026 __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
1027 __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
1028 __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
1029 __ and_(r0, r2, Operand(r3));
1030 __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
1031 __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
1036 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
1039 CompareIC::State expected,
1042 if (expected == CompareIC::SMI) {
1043 __ JumpIfNotSmi(input, fail);
1044 } else if (expected == CompareIC::NUMBER) {
1045 __ JumpIfSmi(input, &ok);
1046 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
1049 // We could be strict about internalized/non-internalized here, but as long as
1050 // hydrogen doesn't care, the stub doesn't have to care either.
1055 // On entry r1 and r2 are the values to be compared.
1056 // On exit r0 is 0, positive or negative to indicate the result of
1058 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
1061 Condition cc = GetCondition();
1064 ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
1065 ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
1067 Label slow; // Call builtin.
1068 Label not_smis, both_loaded_as_doubles, lhs_not_nan;
1070 Label not_two_smis, smi_done;
1072 __ JumpIfNotSmi(r2, ¬_two_smis);
1073 __ mov(r1, Operand(r1, ASR, 1));
1074 __ sub(r0, r1, Operand(r0, ASR, 1));
1076 __ bind(¬_two_smis);
1078 // NOTICE! This code is only reached after a smi-fast-case check, so
1079 // it is certain that at least one operand isn't a smi.
1081 // Handle the case where the objects are identical. Either returns the answer
1082 // or goes to slow. Only falls through if the objects were not identical.
1083 EmitIdenticalObjectComparison(masm, &slow, cc);
1085 // If either is a Smi (we know that not both are), then they can only
1086 // be strictly equal if the other is a HeapNumber.
1087 STATIC_ASSERT(kSmiTag == 0);
1088 ASSERT_EQ(0, Smi::FromInt(0));
1089 __ and_(r2, lhs, Operand(rhs));
1090 __ JumpIfNotSmi(r2, ¬_smis);
1091 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1092 // 1) Return the answer.
1094 // 3) Fall through to both_loaded_as_doubles.
1095 // 4) Jump to lhs_not_nan.
1096 // In cases 3 and 4 we have found out we were dealing with a number-number
1097 // comparison. If VFP3 is supported the double values of the numbers have
1098 // been loaded into d7 and d6. Otherwise, the double values have been loaded
1099 // into r0, r1, r2, and r3.
1100 EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
1102 __ bind(&both_loaded_as_doubles);
1103 // The arguments have been converted to doubles and stored in d6 and d7, if
1104 // VFP3 is supported, or in r0, r1, r2, and r3.
1105 Isolate* isolate = masm->isolate();
1106 __ bind(&lhs_not_nan);
1108 // ARMv7 VFP3 instructions to implement double precision comparison.
1109 __ VFPCompareAndSetFlags(d7, d6);
1112 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
1113 __ mov(r0, Operand(LESS), LeaveCC, lt);
1114 __ mov(r0, Operand(GREATER), LeaveCC, gt);
1118 // If one of the sides was a NaN then the v flag is set. Load r0 with
1119 // whatever it takes to make the comparison fail, since comparisons with NaN
1121 if (cc == lt || cc == le) {
1122 __ mov(r0, Operand(GREATER));
1124 __ mov(r0, Operand(LESS));
1129 // At this point we know we are dealing with two different objects,
1130 // and neither of them is a Smi. The objects are in rhs_ and lhs_.
1132 // This returns non-equal for some object types, or falls through if it
1134 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
1137 Label check_for_internalized_strings;
1138 Label flat_string_check;
1139 // Check for heap-number-heap-number comparison. Can jump to slow case,
1140 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
1141 // that case. If the inputs are not doubles then jumps to
1142 // check_for_internalized_strings.
1143 // In this case r2 will contain the type of rhs_. Never falls through.
1144 EmitCheckForTwoHeapNumbers(masm,
1147 &both_loaded_as_doubles,
1148 &check_for_internalized_strings,
1149 &flat_string_check);
1151 __ bind(&check_for_internalized_strings);
1152 // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
1153 // internalized strings.
1154 if (cc == eq && !strict()) {
1155 // Returns an answer for two internalized strings or two detectable objects.
1156 // Otherwise jumps to string case or not both strings case.
1157 // Assumes that r2 is the type of rhs_ on entry.
1158 EmitCheckForInternalizedStringsOrObjects(
1159 masm, lhs, rhs, &flat_string_check, &slow);
1162 // Check for both being sequential ASCII strings, and inline if that is the
1164 __ bind(&flat_string_check);
1166 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
1168 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
1170 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1177 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1185 // Never falls through to here.
1190 // Figure out which native to call and setup the arguments.
1191 Builtins::JavaScript native;
1193 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1195 native = Builtins::COMPARE;
1196 int ncr; // NaN compare result
1197 if (cc == lt || cc == le) {
1200 ASSERT(cc == gt || cc == ge); // remaining cases
1203 __ mov(r0, Operand(Smi::FromInt(ncr)));
1207 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1208 // tagged as a small integer.
1209 __ InvokeBuiltin(native, JUMP_FUNCTION);
1216 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1217 // We don't allow a GC during a store buffer overflow so there is no need to
1218 // store the registers in any particular way, but we do have to store and
1220 __ stm(db_w, sp, kCallerSaved | lr.bit());
1222 const Register scratch = r1;
1224 if (save_doubles_ == kSaveFPRegs) {
1225 __ SaveFPRegs(sp, scratch);
1227 const int argument_count = 1;
1228 const int fp_argument_count = 0;
1230 AllowExternalCallThatCantCauseGC scope(masm);
1231 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1232 __ mov(r0, Operand(ExternalReference::isolate_address(masm->isolate())));
1234 ExternalReference::store_buffer_overflow_function(masm->isolate()),
1236 if (save_doubles_ == kSaveFPRegs) {
1237 __ RestoreFPRegs(sp, scratch);
1239 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
1243 void MathPowStub::Generate(MacroAssembler* masm) {
1244 const Register base = r1;
1245 const Register exponent = r2;
1246 const Register heapnumbermap = r5;
1247 const Register heapnumber = r0;
1248 const DwVfpRegister double_base = d0;
1249 const DwVfpRegister double_exponent = d1;
1250 const DwVfpRegister double_result = d2;
1251 const DwVfpRegister double_scratch = d3;
1252 const SwVfpRegister single_scratch = s6;
1253 const Register scratch = r9;
1254 const Register scratch2 = r4;
1256 Label call_runtime, done, int_exponent;
1257 if (exponent_type_ == ON_STACK) {
1258 Label base_is_smi, unpack_exponent;
1259 // The exponent and base are supplied as arguments on the stack.
1260 // This can only happen if the stub is called from non-optimized code.
1261 // Load input parameters from stack to double registers.
1262 __ ldr(base, MemOperand(sp, 1 * kPointerSize));
1263 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
1265 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
1267 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
1268 __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
1269 __ cmp(scratch, heapnumbermap);
1270 __ b(ne, &call_runtime);
1272 __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
1273 __ jmp(&unpack_exponent);
1275 __ bind(&base_is_smi);
1276 __ vmov(single_scratch, scratch);
1277 __ vcvt_f64_s32(double_base, single_scratch);
1278 __ bind(&unpack_exponent);
1280 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1282 __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
1283 __ cmp(scratch, heapnumbermap);
1284 __ b(ne, &call_runtime);
1285 __ vldr(double_exponent,
1286 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1287 } else if (exponent_type_ == TAGGED) {
1288 // Base is already in double_base.
1289 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1291 __ vldr(double_exponent,
1292 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1295 if (exponent_type_ != INTEGER) {
1296 Label int_exponent_convert;
1297 // Detect integer exponents stored as double.
1298 __ vcvt_u32_f64(single_scratch, double_exponent);
1299 // We do not check for NaN or Infinity here because comparing numbers on
1300 // ARM correctly distinguishes NaNs. We end up calling the built-in.
1301 __ vcvt_f64_u32(double_scratch, single_scratch);
1302 __ VFPCompareAndSetFlags(double_scratch, double_exponent);
1303 __ b(eq, &int_exponent_convert);
1305 if (exponent_type_ == ON_STACK) {
1306 // Detect square root case. Crankshaft detects constant +/-0.5 at
1307 // compile time and uses DoMathPowHalf instead. We then skip this check
1308 // for non-constant cases of +/-0.5 as these hardly occur.
1309 Label not_plus_half;
1312 __ vmov(double_scratch, 0.5, scratch);
1313 __ VFPCompareAndSetFlags(double_exponent, double_scratch);
1314 __ b(ne, ¬_plus_half);
1316 // Calculates square root of base. Check for the special case of
1317 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
1318 __ vmov(double_scratch, -V8_INFINITY, scratch);
1319 __ VFPCompareAndSetFlags(double_base, double_scratch);
1320 __ vneg(double_result, double_scratch, eq);
1323 // Add +0 to convert -0 to +0.
1324 __ vadd(double_scratch, double_base, kDoubleRegZero);
1325 __ vsqrt(double_result, double_scratch);
1328 __ bind(¬_plus_half);
1329 __ vmov(double_scratch, -0.5, scratch);
1330 __ VFPCompareAndSetFlags(double_exponent, double_scratch);
1331 __ b(ne, &call_runtime);
1333 // Calculates square root of base. Check for the special case of
1334 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
1335 __ vmov(double_scratch, -V8_INFINITY, scratch);
1336 __ VFPCompareAndSetFlags(double_base, double_scratch);
1337 __ vmov(double_result, kDoubleRegZero, eq);
1340 // Add +0 to convert -0 to +0.
1341 __ vadd(double_scratch, double_base, kDoubleRegZero);
1342 __ vmov(double_result, 1.0, scratch);
1343 __ vsqrt(double_scratch, double_scratch);
1344 __ vdiv(double_result, double_result, double_scratch);
1350 AllowExternalCallThatCantCauseGC scope(masm);
1351 __ PrepareCallCFunction(0, 2, scratch);
1352 __ MovToFloatParameters(double_base, double_exponent);
1354 ExternalReference::power_double_double_function(masm->isolate()),
1358 __ MovFromFloatResult(double_result);
1361 __ bind(&int_exponent_convert);
1362 __ vcvt_u32_f64(single_scratch, double_exponent);
1363 __ vmov(scratch, single_scratch);
1366 // Calculate power with integer exponent.
1367 __ bind(&int_exponent);
1369 // Get two copies of exponent in the registers scratch and exponent.
1370 if (exponent_type_ == INTEGER) {
1371 __ mov(scratch, exponent);
1373 // Exponent has previously been stored into scratch as untagged integer.
1374 __ mov(exponent, scratch);
1376 __ vmov(double_scratch, double_base); // Back up base.
1377 __ vmov(double_result, 1.0, scratch2);
1379 // Get absolute value of exponent.
1380 __ cmp(scratch, Operand::Zero());
1381 __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
1382 __ sub(scratch, scratch2, scratch, LeaveCC, mi);
1385 __ bind(&while_true);
1386 __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
1387 __ vmul(double_result, double_result, double_scratch, cs);
1388 __ vmul(double_scratch, double_scratch, double_scratch, ne);
1389 __ b(ne, &while_true);
1391 __ cmp(exponent, Operand::Zero());
1393 __ vmov(double_scratch, 1.0, scratch);
1394 __ vdiv(double_result, double_scratch, double_result);
1395 // Test whether result is zero. Bail out to check for subnormal result.
1396 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1397 __ VFPCompareAndSetFlags(double_result, 0.0);
1399 // double_exponent may not containe the exponent value if the input was a
1400 // smi. We set it with exponent value before bailing out.
1401 __ vmov(single_scratch, exponent);
1402 __ vcvt_f64_s32(double_exponent, single_scratch);
1404 // Returning or bailing out.
1405 Counters* counters = masm->isolate()->counters();
1406 if (exponent_type_ == ON_STACK) {
1407 // The arguments are still on the stack.
1408 __ bind(&call_runtime);
1409 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
1411 // The stub is called from non-optimized code, which expects the result
1412 // as heap number in exponent.
1414 __ AllocateHeapNumber(
1415 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1416 __ vstr(double_result,
1417 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1418 ASSERT(heapnumber.is(r0));
1419 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1424 AllowExternalCallThatCantCauseGC scope(masm);
1425 __ PrepareCallCFunction(0, 2, scratch);
1426 __ MovToFloatParameters(double_base, double_exponent);
1428 ExternalReference::power_double_double_function(masm->isolate()),
1432 __ MovFromFloatResult(double_result);
1435 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1441 bool CEntryStub::NeedsImmovableCode() {
1446 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1447 CEntryStub::GenerateAheadOfTime(isolate);
1448 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
1449 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1450 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1451 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1452 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1453 BinaryOpICStub::GenerateAheadOfTime(isolate);
1454 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1458 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1459 SaveFPRegsMode mode = kSaveFPRegs;
1460 CEntryStub save_doubles(1, mode);
1461 StoreBufferOverflowStub stub(mode);
1462 // These stubs might already be in the snapshot, detect that and don't
1463 // regenerate, which would lead to code stub initialization state being messed
1465 Code* save_doubles_code;
1466 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
1467 save_doubles_code = *save_doubles.GetCode(isolate);
1469 Code* store_buffer_overflow_code;
1470 if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
1471 store_buffer_overflow_code = *stub.GetCode(isolate);
1473 isolate->set_fp_stubs_generated(true);
1477 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1478 CEntryStub stub(1, kDontSaveFPRegs);
1479 stub.GetCode(isolate);
1483 static void JumpIfOOM(MacroAssembler* masm,
1487 STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
1488 STATIC_ASSERT(kFailureTag == 3);
1489 __ and_(scratch, value, Operand(0xf));
1490 __ cmp(scratch, Operand(0xf));
1491 __ b(eq, oom_label);
1495 void CEntryStub::GenerateCore(MacroAssembler* masm,
1496 Label* throw_normal_exception,
1497 Label* throw_termination_exception,
1498 Label* throw_out_of_memory_exception,
1500 bool always_allocate) {
1501 // r0: result parameter for PerformGC, if any
1502 // r4: number of arguments including receiver (C callee-saved)
1503 // r5: pointer to builtin function (C callee-saved)
1504 // r6: pointer to the first argument (C callee-saved)
1505 Isolate* isolate = masm->isolate();
1509 __ PrepareCallCFunction(2, 0, r1);
1510 __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
1511 __ CallCFunction(ExternalReference::perform_gc_function(isolate),
1515 ExternalReference scope_depth =
1516 ExternalReference::heap_always_allocate_scope_depth(isolate);
1517 if (always_allocate) {
1518 __ mov(r0, Operand(scope_depth));
1519 __ ldr(r1, MemOperand(r0));
1520 __ add(r1, r1, Operand(1));
1521 __ str(r1, MemOperand(r0));
1525 // r0 = argc, r1 = argv
1526 __ mov(r0, Operand(r4));
1527 __ mov(r1, Operand(r6));
1529 #if V8_HOST_ARCH_ARM
1530 int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1531 int frame_alignment_mask = frame_alignment - 1;
1532 if (FLAG_debug_code) {
1533 if (frame_alignment > kPointerSize) {
1534 Label alignment_as_expected;
1535 ASSERT(IsPowerOf2(frame_alignment));
1536 __ tst(sp, Operand(frame_alignment_mask));
1537 __ b(eq, &alignment_as_expected);
1538 // Don't use Check here, as it will call Runtime_Abort re-entering here.
1539 __ stop("Unexpected alignment");
1540 __ bind(&alignment_as_expected);
1545 __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
1547 // To let the GC traverse the return address of the exit frames, we need to
1548 // know where the return address is. The CEntryStub is unmovable, so
1549 // we can store the address on the stack to be able to find it again and
1550 // we never have to restore it, because it will not change.
1551 // Compute the return address in lr to return to after the jump below. Pc is
1552 // already at '+ 8' from the current instruction but return is after three
1553 // instructions so add another 4 to pc to get the return address.
1555 // Prevent literal pool emission before return address.
1556 Assembler::BlockConstPoolScope block_const_pool(masm);
1557 masm->add(lr, pc, Operand(4));
1558 __ str(lr, MemOperand(sp, 0));
1562 __ VFPEnsureFPSCRState(r2);
1564 if (always_allocate) {
1565 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
1566 // though (contain the result).
1567 __ mov(r2, Operand(scope_depth));
1568 __ ldr(r3, MemOperand(r2));
1569 __ sub(r3, r3, Operand(1));
1570 __ str(r3, MemOperand(r2));
1573 // check for failure result
1574 Label failure_returned;
1575 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
1576 // Lower 2 bits of r2 are 0 iff r0 has failure tag.
1577 __ add(r2, r0, Operand(1));
1578 __ tst(r2, Operand(kFailureTagMask));
1579 __ b(eq, &failure_returned);
1581 // Exit C frame and return.
1583 // sp: stack pointer
1584 // fp: frame pointer
1585 // Callee-saved register r4 still holds argc.
1586 __ LeaveExitFrame(save_doubles_, r4, true);
1589 // check if we should retry or throw exception
1591 __ bind(&failure_returned);
1592 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
1593 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
1596 // Special handling of out of memory exceptions.
1597 JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
1599 // Retrieve the pending exception.
1600 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1602 __ ldr(r0, MemOperand(ip));
1604 // See if we just retrieved an OOM exception.
1605 JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
1607 // Clear the pending exception.
1608 __ mov(r3, Operand(isolate->factory()->the_hole_value()));
1609 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1611 __ str(r3, MemOperand(ip));
1613 // Special handling of termination exceptions which are uncatchable
1614 // by javascript code.
1615 __ cmp(r0, Operand(isolate->factory()->termination_exception()));
1616 __ b(eq, throw_termination_exception);
1618 // Handle normal exception.
1619 __ jmp(throw_normal_exception);
1621 __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
1625 void CEntryStub::Generate(MacroAssembler* masm) {
1626 // Called from JavaScript; parameters are on stack as if calling JS function
1627 // r0: number of arguments including receiver
1628 // r1: pointer to builtin function
1629 // fp: frame pointer (restored after C call)
1630 // sp: stack pointer (restored as callee's sp after C call)
1631 // cp: current context (C callee-saved)
1633 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1635 // Result returned in r0 or r0+r1 by default.
1637 // NOTE: Invocations of builtins may return failure objects
1638 // instead of a proper result. The builtin entry handles
1639 // this by performing a garbage collection and retrying the
1642 // Compute the argv pointer in a callee-saved register.
1643 __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
1644 __ sub(r6, r6, Operand(kPointerSize));
1646 // Enter the exit frame that transitions from JavaScript to C++.
1647 FrameScope scope(masm, StackFrame::MANUAL);
1648 __ EnterExitFrame(save_doubles_);
1650 // Set up argc and the builtin function in callee-saved registers.
1651 __ mov(r4, Operand(r0));
1652 __ mov(r5, Operand(r1));
1654 // r4: number of arguments (C callee-saved)
1655 // r5: pointer to builtin function (C callee-saved)
1656 // r6: pointer to first argument (C callee-saved)
1658 Label throw_normal_exception;
1659 Label throw_termination_exception;
1660 Label throw_out_of_memory_exception;
1662 // Call into the runtime system.
1664 &throw_normal_exception,
1665 &throw_termination_exception,
1666 &throw_out_of_memory_exception,
1670 // Do space-specific GC and retry runtime call.
1672 &throw_normal_exception,
1673 &throw_termination_exception,
1674 &throw_out_of_memory_exception,
1678 // Do full GC and retry runtime call one final time.
1679 Failure* failure = Failure::InternalError();
1680 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
1682 &throw_normal_exception,
1683 &throw_termination_exception,
1684 &throw_out_of_memory_exception,
1688 __ bind(&throw_out_of_memory_exception);
1689 // Set external caught exception to false.
1690 Isolate* isolate = masm->isolate();
1691 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
1693 __ mov(r0, Operand(false, RelocInfo::NONE32));
1694 __ mov(r2, Operand(external_caught));
1695 __ str(r0, MemOperand(r2));
1697 // Set pending exception and r0 to out of memory exception.
1698 Label already_have_failure;
1699 JumpIfOOM(masm, r0, ip, &already_have_failure);
1700 Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
1701 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
1702 __ bind(&already_have_failure);
1703 __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1705 __ str(r0, MemOperand(r2));
1706 // Fall through to the next label.
1708 __ bind(&throw_termination_exception);
1709 __ ThrowUncatchable(r0);
1711 __ bind(&throw_normal_exception);
1716 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
1723 Label invoke, handler_entry, exit;
1725 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1727 // Called from C, so do not pop argc and args on exit (preserve sp)
1728 // No need to save register-passed args
1729 // Save callee-saved registers (incl. cp and fp), sp, and lr
1730 __ stm(db_w, sp, kCalleeSaved | lr.bit());
1732 // Save callee-saved vfp registers.
1733 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
1734 // Set up the reserved register for 0.0.
1735 __ vmov(kDoubleRegZero, 0.0);
1736 __ VFPEnsureFPSCRState(r4);
1738 // Get address of argv, see stm above.
1744 // Set up argv in r4.
1745 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1746 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
1747 __ ldr(r4, MemOperand(sp, offset_to_argv));
1749 // Push a frame with special values setup to mark it as an entry frame.
1755 Isolate* isolate = masm->isolate();
1756 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
1757 if (FLAG_enable_ool_constant_pool) {
1758 __ mov(r8, Operand(Smi::FromInt(marker)));
1760 __ mov(r7, Operand(Smi::FromInt(marker)));
1761 __ mov(r6, Operand(Smi::FromInt(marker)));
1763 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
1764 __ ldr(r5, MemOperand(r5));
1765 __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1766 __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
1767 (FLAG_enable_ool_constant_pool ? r8.bit() : 0) |
1770 // Set up frame pointer for the frame to be pushed.
1771 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1773 // If this is the outermost JS call, set js_entry_sp value.
1774 Label non_outermost_js;
1775 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1776 __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1777 __ ldr(r6, MemOperand(r5));
1778 __ cmp(r6, Operand::Zero());
1779 __ b(ne, &non_outermost_js);
1780 __ str(fp, MemOperand(r5));
1781 __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1784 __ bind(&non_outermost_js);
1785 __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1789 // Jump to a faked try block that does the invoke, with a faked catch
1790 // block that sets the pending exception.
1793 // Block literal pool emission whilst taking the position of the handler
1794 // entry. This avoids making the assumption that literal pools are always
1795 // emitted after an instruction is emitted, rather than before.
1797 Assembler::BlockConstPoolScope block_const_pool(masm);
1798 __ bind(&handler_entry);
1799 handler_offset_ = handler_entry.pos();
1800 // Caught exception: Store result (exception) in the pending exception
1801 // field in the JSEnv and return a failure sentinel. Coming in here the
1802 // fp will be invalid because the PushTryHandler below sets it to 0 to
1803 // signal the existence of the JSEntry frame.
1804 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1807 __ str(r0, MemOperand(ip));
1808 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
1811 // Invoke: Link this frame into the handler chain. There's only one
1812 // handler block in this code object, so its index is 0.
1814 // Must preserve r0-r4, r5-r6 are available.
1815 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1816 // If an exception not caught by another handler occurs, this handler
1817 // returns control to the code after the bl(&invoke) above, which
1818 // restores all kCalleeSaved registers (including cp and fp) to their
1819 // saved values before returning a failure to C.
1821 // Clear any pending exceptions.
1822 __ mov(r5, Operand(isolate->factory()->the_hole_value()));
1823 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1825 __ str(r5, MemOperand(ip));
1827 // Invoke the function by calling through JS entry trampoline builtin.
1828 // Notice that we cannot store a reference to the trampoline code directly in
1829 // this stub, because runtime stubs are not traversed when doing GC.
1831 // Expected registers by Builtins::JSEntryTrampoline
1838 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1840 __ mov(ip, Operand(construct_entry));
1842 ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
1843 __ mov(ip, Operand(entry));
1845 __ ldr(ip, MemOperand(ip)); // deref address
1847 // Branch and link to JSEntryTrampoline. We don't use the double underscore
1848 // macro for the add instruction because we don't want the coverage tool
1849 // inserting instructions here after we read the pc. We block literal pool
1850 // emission for the same reason.
1852 Assembler::BlockConstPoolScope block_const_pool(masm);
1853 __ mov(lr, Operand(pc));
1854 masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
1857 // Unlink this frame from the handler chain.
1860 __ bind(&exit); // r0 holds result
1861 // Check if the current stack frame is marked as the outermost JS frame.
1862 Label non_outermost_js_2;
1864 __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1865 __ b(ne, &non_outermost_js_2);
1866 __ mov(r6, Operand::Zero());
1867 __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1868 __ str(r6, MemOperand(r5));
1869 __ bind(&non_outermost_js_2);
1871 // Restore the top frame descriptors from the stack.
1874 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
1875 __ str(r3, MemOperand(ip));
1877 // Reset the stack to the callee saved registers.
1878 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1880 // Restore callee-saved registers and return.
1882 if (FLAG_debug_code) {
1883 __ mov(lr, Operand(pc));
1887 // Restore callee-saved vfp registers.
1888 __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
1890 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
1894 // Uses registers r0 to r4.
1895 // Expected input (depending on whether args are in registers or on the stack):
1896 // * object: r0 or at sp + 1 * kPointerSize.
1897 // * function: r1 or at sp.
1899 // An inlined call site may have been generated before calling this stub.
1900 // In this case the offset to the inline site to patch is passed on the stack,
1901 // in the safepoint slot for register r4.
1902 // (See LCodeGen::DoInstanceOfKnownGlobal)
1903 void InstanceofStub::Generate(MacroAssembler* masm) {
1904 // Call site inlining and patching implies arguments in registers.
1905 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1906 // ReturnTrueFalse is only implemented for inlined call sites.
1907 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
1909 // Fixed register usage throughout the stub:
1910 const Register object = r0; // Object (lhs).
1911 Register map = r3; // Map of the object.
1912 const Register function = r1; // Function (rhs).
1913 const Register prototype = r4; // Prototype of the function.
1914 const Register inline_site = r9;
1915 const Register scratch = r2;
1917 const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
1919 Label slow, loop, is_instance, is_not_instance, not_js_object;
1921 if (!HasArgsInRegisters()) {
1922 __ ldr(object, MemOperand(sp, 1 * kPointerSize));
1923 __ ldr(function, MemOperand(sp, 0));
1926 // Check that the left hand is a JS object and load map.
1927 __ JumpIfSmi(object, ¬_js_object);
1928 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
1930 // If there is a call site cache don't look in the global cache, but do the
1931 // real lookup and update the call site cache.
1932 if (!HasCallSiteInlineCheck()) {
1934 __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1936 __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
1938 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1939 __ Ret(HasArgsInRegisters() ? 0 : 2);
1944 // Get the prototype of the function.
1945 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1947 // Check that the function prototype is a JS object.
1948 __ JumpIfSmi(prototype, &slow);
1949 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1951 // Update the global instanceof or call site inlined cache with the current
1952 // map and function. The cached answer will be set when it is known below.
1953 if (!HasCallSiteInlineCheck()) {
1954 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1955 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1957 ASSERT(HasArgsInRegisters());
1958 // Patch the (relocated) inlined map check.
1960 // The offset was stored in r4 safepoint slot.
1961 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
1962 __ LoadFromSafepointRegisterSlot(scratch, r4);
1963 __ sub(inline_site, lr, scratch);
1964 // Get the map location in scratch and patch it.
1965 __ GetRelocatedValueLocation(inline_site, scratch);
1966 __ ldr(scratch, MemOperand(scratch));
1967 __ str(map, FieldMemOperand(scratch, Cell::kValueOffset));
1970 // Register mapping: r3 is object map and r4 is function prototype.
1971 // Get prototype of object into r2.
1972 __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1974 // We don't need map any more. Use it as a scratch register.
1975 Register scratch2 = map;
1978 // Loop through the prototype chain looking for the function prototype.
1979 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1981 __ cmp(scratch, Operand(prototype));
1982 __ b(eq, &is_instance);
1983 __ cmp(scratch, scratch2);
1984 __ b(eq, &is_not_instance);
1985 __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1986 __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1989 __ bind(&is_instance);
1990 if (!HasCallSiteInlineCheck()) {
1991 __ mov(r0, Operand(Smi::FromInt(0)));
1992 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1994 // Patch the call site to return true.
1995 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
1996 __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1997 // Get the boolean result location in scratch and patch it.
1998 __ GetRelocatedValueLocation(inline_site, scratch);
1999 __ str(r0, MemOperand(scratch));
2001 if (!ReturnTrueFalseObject()) {
2002 __ mov(r0, Operand(Smi::FromInt(0)));
2005 __ Ret(HasArgsInRegisters() ? 0 : 2);
2007 __ bind(&is_not_instance);
2008 if (!HasCallSiteInlineCheck()) {
2009 __ mov(r0, Operand(Smi::FromInt(1)));
2010 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
2012 // Patch the call site to return false.
2013 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
2014 __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
2015 // Get the boolean result location in scratch and patch it.
2016 __ GetRelocatedValueLocation(inline_site, scratch);
2017 __ str(r0, MemOperand(scratch));
2019 if (!ReturnTrueFalseObject()) {
2020 __ mov(r0, Operand(Smi::FromInt(1)));
2023 __ Ret(HasArgsInRegisters() ? 0 : 2);
2025 Label object_not_null, object_not_null_or_smi;
2026 __ bind(¬_js_object);
2027 // Before null, smi and string value checks, check that the rhs is a function
2028 // as for a non-function rhs an exception needs to be thrown.
2029 __ JumpIfSmi(function, &slow);
2030 __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
2033 // Null is not instance of anything.
2034 __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
2035 __ b(ne, &object_not_null);
2036 __ mov(r0, Operand(Smi::FromInt(1)));
2037 __ Ret(HasArgsInRegisters() ? 0 : 2);
2039 __ bind(&object_not_null);
2040 // Smi values are not instances of anything.
2041 __ JumpIfNotSmi(object, &object_not_null_or_smi);
2042 __ mov(r0, Operand(Smi::FromInt(1)));
2043 __ Ret(HasArgsInRegisters() ? 0 : 2);
2045 __ bind(&object_not_null_or_smi);
2046 // String values are not instances of anything.
2047 __ IsObjectJSStringType(object, scratch, &slow);
2048 __ mov(r0, Operand(Smi::FromInt(1)));
2049 __ Ret(HasArgsInRegisters() ? 0 : 2);
2051 // Slow-case. Tail call builtin.
2053 if (!ReturnTrueFalseObject()) {
2054 if (HasArgsInRegisters()) {
2057 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
2060 FrameScope scope(masm, StackFrame::INTERNAL);
2062 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2064 __ cmp(r0, Operand::Zero());
2065 __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
2066 __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
2067 __ Ret(HasArgsInRegisters() ? 0 : 2);
2072 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
2075 if (kind() == Code::KEYED_LOAD_IC) {
2076 // ----------- S t a t e -------------
2077 // -- lr : return address
2080 // -----------------------------------
2081 __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string()));
2085 ASSERT(kind() == Code::LOAD_IC);
2086 // ----------- S t a t e -------------
2088 // -- lr : return address
2090 // -- sp[0] : receiver
2091 // -----------------------------------
2095 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss);
2097 StubCompiler::TailCallBuiltin(
2098 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2102 void StringLengthStub::Generate(MacroAssembler* masm) {
2105 if (kind() == Code::KEYED_LOAD_IC) {
2106 // ----------- S t a t e -------------
2107 // -- lr : return address
2110 // -----------------------------------
2111 __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
2115 ASSERT(kind() == Code::LOAD_IC);
2116 // ----------- S t a t e -------------
2118 // -- lr : return address
2120 // -- sp[0] : receiver
2121 // -----------------------------------
2125 StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);
2128 StubCompiler::TailCallBuiltin(
2129 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2133 void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
2134 // This accepts as a receiver anything JSArray::SetElementsLength accepts
2135 // (currently anything except for external arrays which means anything with
2136 // elements of FixedArray type). Value must be a number, but only smis are
2137 // accepted as the most common case.
2142 if (kind() == Code::KEYED_STORE_IC) {
2143 // ----------- S t a t e -------------
2144 // -- lr : return address
2148 // -----------------------------------
2149 __ cmp(r1, Operand(masm->isolate()->factory()->length_string()));
2154 ASSERT(kind() == Code::STORE_IC);
2155 // ----------- S t a t e -------------
2156 // -- lr : return address
2160 // -----------------------------------
2164 Register scratch = r3;
2166 // Check that the receiver isn't a smi.
2167 __ JumpIfSmi(receiver, &miss);
2169 // Check that the object is a JS array.
2170 __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
2173 // Check that elements are FixedArray.
2174 // We rely on StoreIC_ArrayLength below to deal with all types of
2175 // fast elements (including COW).
2176 __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
2177 __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
2180 // Check that the array has fast properties, otherwise the length
2181 // property might have been redefined.
2182 __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
2183 __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
2184 __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
2187 // Check that value is a smi.
2188 __ JumpIfNotSmi(value, &miss);
2190 // Prepare tail call to StoreIC_ArrayLength.
2191 __ Push(receiver, value);
2193 ExternalReference ref =
2194 ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
2195 __ TailCallExternalReference(ref, 2, 1);
2199 StubCompiler::TailCallBuiltin(
2200 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2204 Register InstanceofStub::left() { return r0; }
2207 Register InstanceofStub::right() { return r1; }
2210 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2211 // The displacement is the offset of the last parameter (if any)
2212 // relative to the frame pointer.
2213 const int kDisplacement =
2214 StandardFrameConstants::kCallerSPOffset - kPointerSize;
2216 // Check that the key is a smi.
2218 __ JumpIfNotSmi(r1, &slow);
2220 // Check if the calling frame is an arguments adaptor frame.
2222 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2223 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
2224 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2227 // Check index against formal parameters count limit passed in
2228 // through register r0. Use unsigned comparison to get negative
2233 // Read the argument from the stack and return it.
2235 __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
2236 __ ldr(r0, MemOperand(r3, kDisplacement));
2239 // Arguments adaptor case: Check index against actual arguments
2240 // limit found in the arguments adaptor frame. Use unsigned
2241 // comparison to get negative check for free.
2243 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2247 // Read the argument from the adaptor frame and return it.
2249 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
2250 __ ldr(r0, MemOperand(r3, kDisplacement));
2253 // Slow-case: Handle non-smi or out-of-bounds access to arguments
2254 // by calling the runtime system.
2257 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2261 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
2262 // sp[0] : number of parameters
2263 // sp[4] : receiver displacement
2266 // Check if the calling frame is an arguments adaptor frame.
2268 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2269 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
2270 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2273 // Patch the arguments.length and the parameters pointer in the current frame.
2274 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2275 __ str(r2, MemOperand(sp, 0 * kPointerSize));
2276 __ add(r3, r3, Operand(r2, LSL, 1));
2277 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
2278 __ str(r3, MemOperand(sp, 1 * kPointerSize));
2281 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2285 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
2287 // sp[0] : number of parameters (tagged)
2288 // sp[4] : address of receiver argument
2290 // Registers used over whole function:
2291 // r6 : allocated object (tagged)
2292 // r9 : mapped parameter count (tagged)
2294 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
2295 // r1 = parameter count (tagged)
2297 // Check if the calling frame is an arguments adaptor frame.
2299 Label adaptor_frame, try_allocate;
2300 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2301 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
2302 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2303 __ b(eq, &adaptor_frame);
2305 // No adaptor, parameter count = argument count.
2307 __ b(&try_allocate);
2309 // We have an adaptor frame. Patch the parameters pointer.
2310 __ bind(&adaptor_frame);
2311 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2312 __ add(r3, r3, Operand(r2, LSL, 1));
2313 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
2314 __ str(r3, MemOperand(sp, 1 * kPointerSize));
2316 // r1 = parameter count (tagged)
2317 // r2 = argument count (tagged)
2318 // Compute the mapped parameter count = min(r1, r2) in r1.
2319 __ cmp(r1, Operand(r2));
2320 __ mov(r1, Operand(r2), LeaveCC, gt);
2322 __ bind(&try_allocate);
2324 // Compute the sizes of backing store, parameter map, and arguments object.
2325 // 1. Parameter map, has 2 extra words containing context and backing store.
2326 const int kParameterMapHeaderSize =
2327 FixedArray::kHeaderSize + 2 * kPointerSize;
2328 // If there are no mapped parameters, we do not need the parameter_map.
2329 __ cmp(r1, Operand(Smi::FromInt(0)));
2330 __ mov(r9, Operand::Zero(), LeaveCC, eq);
2331 __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
2332 __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
2334 // 2. Backing store.
2335 __ add(r9, r9, Operand(r2, LSL, 1));
2336 __ add(r9, r9, Operand(FixedArray::kHeaderSize));
2338 // 3. Arguments object.
2339 __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
2341 // Do the allocation of all three objects in one go.
2342 __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
2344 // r0 = address of new object(s) (tagged)
2345 // r2 = argument count (tagged)
2346 // Get the arguments boilerplate from the current native context into r4.
2347 const int kNormalOffset =
2348 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
2349 const int kAliasedOffset =
2350 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
2352 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2353 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
2354 __ cmp(r1, Operand::Zero());
2355 __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
2356 __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
2358 // r0 = address of new object (tagged)
2359 // r1 = mapped parameter count (tagged)
2360 // r2 = argument count (tagged)
2361 // r4 = address of boilerplate object (tagged)
2362 // Copy the JS object part.
2363 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2364 __ ldr(r3, FieldMemOperand(r4, i));
2365 __ str(r3, FieldMemOperand(r0, i));
2368 // Set up the callee in-object property.
2369 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2370 __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
2371 const int kCalleeOffset = JSObject::kHeaderSize +
2372 Heap::kArgumentsCalleeIndex * kPointerSize;
2373 __ str(r3, FieldMemOperand(r0, kCalleeOffset));
2375 // Use the length (smi tagged) and set that as an in-object property too.
2376 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2377 const int kLengthOffset = JSObject::kHeaderSize +
2378 Heap::kArgumentsLengthIndex * kPointerSize;
2379 __ str(r2, FieldMemOperand(r0, kLengthOffset));
2381 // Set up the elements pointer in the allocated arguments object.
2382 // If we allocated a parameter map, r4 will point there, otherwise
2383 // it will point to the backing store.
2384 __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
2385 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
2387 // r0 = address of new object (tagged)
2388 // r1 = mapped parameter count (tagged)
2389 // r2 = argument count (tagged)
2390 // r4 = address of parameter map or backing store (tagged)
2391 // Initialize parameter map. If there are no mapped arguments, we're done.
2392 Label skip_parameter_map;
2393 __ cmp(r1, Operand(Smi::FromInt(0)));
2394 // Move backing store address to r3, because it is
2395 // expected there when filling in the unmapped arguments.
2396 __ mov(r3, r4, LeaveCC, eq);
2397 __ b(eq, &skip_parameter_map);
2399 __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
2400 __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
2401 __ add(r6, r1, Operand(Smi::FromInt(2)));
2402 __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
2403 __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
2404 __ add(r6, r4, Operand(r1, LSL, 1));
2405 __ add(r6, r6, Operand(kParameterMapHeaderSize));
2406 __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
2408 // Copy the parameter slots and the holes in the arguments.
2409 // We need to fill in mapped_parameter_count slots. They index the context,
2410 // where parameters are stored in reverse order, at
2411 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2412 // The mapped parameter thus need to get indices
2413 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
2414 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2415 // We loop from right to left.
2416 Label parameters_loop, parameters_test;
2418 __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
2419 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
2420 __ sub(r9, r9, Operand(r1));
2421 __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
2422 __ add(r3, r4, Operand(r6, LSL, 1));
2423 __ add(r3, r3, Operand(kParameterMapHeaderSize));
2425 // r6 = loop variable (tagged)
2426 // r1 = mapping index (tagged)
2427 // r3 = address of backing store (tagged)
2428 // r4 = address of parameter map (tagged), which is also the address of new
2429 // object + Heap::kArgumentsObjectSize (tagged)
2430 // r0 = temporary scratch (a.o., for address calculation)
2431 // r5 = the hole value
2432 __ jmp(¶meters_test);
2434 __ bind(¶meters_loop);
2435 __ sub(r6, r6, Operand(Smi::FromInt(1)));
2436 __ mov(r0, Operand(r6, LSL, 1));
2437 __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
2438 __ str(r9, MemOperand(r4, r0));
2439 __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
2440 __ str(r5, MemOperand(r3, r0));
2441 __ add(r9, r9, Operand(Smi::FromInt(1)));
2442 __ bind(¶meters_test);
2443 __ cmp(r6, Operand(Smi::FromInt(0)));
2444 __ b(ne, ¶meters_loop);
2446 // Restore r0 = new object (tagged)
2447 __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
2449 __ bind(&skip_parameter_map);
2450 // r0 = address of new object (tagged)
2451 // r2 = argument count (tagged)
2452 // r3 = address of backing store (tagged)
2454 // Copy arguments header and remaining slots (if there are any).
2455 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
2456 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
2457 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
2459 Label arguments_loop, arguments_test;
2461 __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
2462 __ sub(r4, r4, Operand(r9, LSL, 1));
2463 __ jmp(&arguments_test);
2465 __ bind(&arguments_loop);
2466 __ sub(r4, r4, Operand(kPointerSize));
2467 __ ldr(r6, MemOperand(r4, 0));
2468 __ add(r5, r3, Operand(r9, LSL, 1));
2469 __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
2470 __ add(r9, r9, Operand(Smi::FromInt(1)));
2472 __ bind(&arguments_test);
2473 __ cmp(r9, Operand(r2));
2474 __ b(lt, &arguments_loop);
2476 // Return and remove the on-stack parameters.
2477 __ add(sp, sp, Operand(3 * kPointerSize));
2480 // Do the runtime call to allocate the arguments object.
2481 // r0 = address of new object (tagged)
2482 // r2 = argument count (tagged)
2484 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
2485 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2489 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2490 // sp[0] : number of parameters
2491 // sp[4] : receiver displacement
2493 // Check if the calling frame is an arguments adaptor frame.
2494 Label adaptor_frame, try_allocate, runtime;
2495 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2496 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
2497 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2498 __ b(eq, &adaptor_frame);
2500 // Get the length from the frame.
2501 __ ldr(r1, MemOperand(sp, 0));
2502 __ b(&try_allocate);
2504 // Patch the arguments.length and the parameters pointer.
2505 __ bind(&adaptor_frame);
2506 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2507 __ str(r1, MemOperand(sp, 0));
2508 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
2509 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
2510 __ str(r3, MemOperand(sp, 1 * kPointerSize));
2512 // Try the new space allocation. Start out with computing the size
2513 // of the arguments object and the elements array in words.
2514 Label add_arguments_object;
2515 __ bind(&try_allocate);
2516 __ SmiUntag(r1, SetCC);
2517 __ b(eq, &add_arguments_object);
2518 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
2519 __ bind(&add_arguments_object);
2520 __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
2522 // Do the allocation of both objects in one go.
2523 __ Allocate(r1, r0, r2, r3, &runtime,
2524 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2526 // Get the arguments boilerplate from the current native context.
2527 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2528 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
2529 __ ldr(r4, MemOperand(r4, Context::SlotOffset(
2530 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
2532 // Copy the JS object part.
2533 __ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize);
2535 // Get the length (smi tagged) and set that as an in-object property too.
2536 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2537 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
2538 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
2539 Heap::kArgumentsLengthIndex * kPointerSize));
2541 // If there are no actual arguments, we're done.
2543 __ cmp(r1, Operand::Zero());
2546 // Get the parameters pointer from the stack.
2547 __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
2549 // Set up the elements pointer in the allocated arguments object and
2550 // initialize the header in the elements fixed array.
2551 __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
2552 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
2553 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
2554 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
2555 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
2558 // Copy the fixed array slots.
2560 // Set up r4 to point to the first array slot.
2561 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2563 // Pre-decrement r2 with kPointerSize on each iteration.
2564 // Pre-decrement in order to skip receiver.
2565 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
2566 // Post-increment r4 with kPointerSize on each iteration.
2567 __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
2568 __ sub(r1, r1, Operand(1));
2569 __ cmp(r1, Operand::Zero());
2572 // Return and remove the on-stack parameters.
2574 __ add(sp, sp, Operand(3 * kPointerSize));
2577 // Do the runtime call to allocate the arguments object.
2579 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
2583 void RegExpExecStub::Generate(MacroAssembler* masm) {
2584 // Just jump directly to runtime if native RegExp is not selected at compile
2585 // time or if regexp entry in generated code is turned off runtime switch or
2587 #ifdef V8_INTERPRETED_REGEXP
2588 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2589 #else // V8_INTERPRETED_REGEXP
2591 // Stack frame on entry.
2592 // sp[0]: last_match_info (expected JSArray)
2593 // sp[4]: previous index
2594 // sp[8]: subject string
2595 // sp[12]: JSRegExp object
2597 const int kLastMatchInfoOffset = 0 * kPointerSize;
2598 const int kPreviousIndexOffset = 1 * kPointerSize;
2599 const int kSubjectOffset = 2 * kPointerSize;
2600 const int kJSRegExpOffset = 3 * kPointerSize;
2603 // Allocation of registers for this function. These are in callee save
2604 // registers and will be preserved by the call to the native RegExp code, as
2605 // this code is called using the normal C calling convention. When calling
2606 // directly from generated code the native RegExp code will not do a GC and
2607 // therefore the content of these registers are safe to use after the call.
2608 Register subject = r4;
2609 Register regexp_data = r5;
2610 Register last_match_info_elements = no_reg; // will be r6;
2612 // Ensure that a RegExp stack is allocated.
2613 Isolate* isolate = masm->isolate();
2614 ExternalReference address_of_regexp_stack_memory_address =
2615 ExternalReference::address_of_regexp_stack_memory_address(isolate);
2616 ExternalReference address_of_regexp_stack_memory_size =
2617 ExternalReference::address_of_regexp_stack_memory_size(isolate);
2618 __ mov(r0, Operand(address_of_regexp_stack_memory_size));
2619 __ ldr(r0, MemOperand(r0, 0));
2620 __ cmp(r0, Operand::Zero());
2623 // Check that the first argument is a JSRegExp object.
2624 __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
2625 __ JumpIfSmi(r0, &runtime);
2626 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
2629 // Check that the RegExp has been compiled (data contains a fixed array).
2630 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
2631 if (FLAG_debug_code) {
2632 __ SmiTst(regexp_data);
2633 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2634 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
2635 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2638 // regexp_data: RegExp data (FixedArray)
2639 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2640 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2641 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2644 // regexp_data: RegExp data (FixedArray)
2645 // Check that the number of captures fit in the static offsets vector buffer.
2647 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2648 // Check (number_of_captures + 1) * 2 <= offsets vector size
2649 // Or number_of_captures * 2 <= offsets vector size - 2
2650 // Multiplying by 2 comes for free since r2 is smi-tagged.
2651 STATIC_ASSERT(kSmiTag == 0);
2652 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2653 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2654 __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2657 // Reset offset for possibly sliced string.
2658 __ mov(r9, Operand::Zero());
2659 __ ldr(subject, MemOperand(sp, kSubjectOffset));
2660 __ JumpIfSmi(subject, &runtime);
2661 __ mov(r3, subject); // Make a copy of the original subject string.
2662 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2663 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2664 // subject: subject string
2665 // r3: subject string
2666 // r0: subject string instance type
2667 // regexp_data: RegExp data (FixedArray)
2668 // Handle subject string according to its encoding and representation:
2669 // (1) Sequential string? If yes, go to (5).
2670 // (2) Anything but sequential or cons? If yes, go to (6).
2671 // (3) Cons string. If the string is flat, replace subject with first string.
2672 // Otherwise bailout.
2673 // (4) Is subject external? If yes, go to (7).
2674 // (5) Sequential string. Load regexp code according to encoding.
2678 // Deferred code at the end of the stub:
2679 // (6) Not a long external string? If yes, go to (8).
2680 // (7) External string. Make it, offset-wise, look like a sequential string.
2682 // (8) Short external string or not a string? If yes, bail out to runtime.
2683 // (9) Sliced string. Replace subject with parent. Go to (4).
2685 Label seq_string /* 5 */, external_string /* 7 */,
2686 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2687 not_long_external /* 8 */;
2689 // (1) Sequential string? If yes, go to (5).
2692 Operand(kIsNotStringMask |
2693 kStringRepresentationMask |
2694 kShortExternalStringMask),
2696 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2697 __ b(eq, &seq_string); // Go to (5).
2699 // (2) Anything but sequential or cons? If yes, go to (6).
2700 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2701 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2702 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2703 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2704 __ cmp(r1, Operand(kExternalStringTag));
2705 __ b(ge, ¬_seq_nor_cons); // Go to (6).
2707 // (3) Cons string. Check that it's flat.
2708 // Replace subject with first string and reload instance type.
2709 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
2710 __ CompareRoot(r0, Heap::kempty_stringRootIndex);
2712 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2714 // (4) Is subject external? If yes, go to (7).
2715 __ bind(&check_underlying);
2716 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2717 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2718 STATIC_ASSERT(kSeqStringTag == 0);
2719 __ tst(r0, Operand(kStringRepresentationMask));
2720 // The underlying external string is never a short external string.
2721 STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
2722 STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2723 __ b(ne, &external_string); // Go to (7).
2725 // (5) Sequential string. Load regexp code according to encoding.
2726 __ bind(&seq_string);
2727 // subject: sequential subject string (or look-alike, external string)
2728 // r3: original subject string
2729 // Load previous index and check range before r3 is overwritten. We have to
2730 // use r3 instead of subject here because subject might have been only made
2731 // to look like a sequential string when it actually is an external string.
2732 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
2733 __ JumpIfNotSmi(r1, &runtime);
2734 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
2735 __ cmp(r3, Operand(r1));
2739 STATIC_ASSERT(4 == kOneByteStringTag);
2740 STATIC_ASSERT(kTwoByteStringTag == 0);
2741 __ and_(r0, r0, Operand(kStringEncodingMask));
2742 __ mov(r3, Operand(r0, ASR, 2), SetCC);
2743 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
2744 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
2746 // (E) Carry on. String handling is done.
2747 // r6: irregexp code
2748 // Check that the irregexp code has been generated for the actual string
2749 // encoding. If it has, the field contains a code object otherwise it contains
2750 // a smi (code flushing support).
2751 __ JumpIfSmi(r6, &runtime);
2753 // r1: previous index
2754 // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
2756 // subject: Subject string
2757 // regexp_data: RegExp data (FixedArray)
2758 // All checks done. Now push arguments for native regexp code.
2759 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
2761 // Isolates: note we add an additional parameter here (isolate pointer).
2762 const int kRegExpExecuteArguments = 9;
2763 const int kParameterRegisters = 4;
2764 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2766 // Stack pointer now points to cell where return address is to be written.
2767 // Arguments are before that on the stack or in registers.
2769 // Argument 9 (sp[20]): Pass current isolate address.
2770 __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
2771 __ str(r0, MemOperand(sp, 5 * kPointerSize));
2773 // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
2774 __ mov(r0, Operand(1));
2775 __ str(r0, MemOperand(sp, 4 * kPointerSize));
2777 // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
2778 __ mov(r0, Operand(address_of_regexp_stack_memory_address));
2779 __ ldr(r0, MemOperand(r0, 0));
2780 __ mov(r2, Operand(address_of_regexp_stack_memory_size));
2781 __ ldr(r2, MemOperand(r2, 0));
2782 __ add(r0, r0, Operand(r2));
2783 __ str(r0, MemOperand(sp, 3 * kPointerSize));
2785 // Argument 6: Set the number of capture registers to zero to force global
2786 // regexps to behave as non-global. This does not affect non-global regexps.
2787 __ mov(r0, Operand::Zero());
2788 __ str(r0, MemOperand(sp, 2 * kPointerSize));
2790 // Argument 5 (sp[4]): static offsets vector buffer.
2792 Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
2793 __ str(r0, MemOperand(sp, 1 * kPointerSize));
2795 // For arguments 4 and 3 get string length, calculate start of string data and
2796 // calculate the shift of the index (0 for ASCII and 1 for two byte).
2797 __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2798 __ eor(r3, r3, Operand(1));
2799 // Load the length from the original subject string from the previous stack
2800 // frame. Therefore we have to use fp, which points exactly to two pointer
2801 // sizes below the previous sp. (Because creating a new stack frame pushes
2802 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2803 __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2804 // If slice offset is not 0, load the length from the original sliced string.
2805 // Argument 4, r3: End of string data
2806 // Argument 3, r2: Start of string data
2807 // Prepare start and end index of the input.
2808 __ add(r9, r7, Operand(r9, LSL, r3));
2809 __ add(r2, r9, Operand(r1, LSL, r3));
2811 __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
2813 __ add(r3, r9, Operand(r7, LSL, r3));
2815 // Argument 2 (r1): Previous index.
2818 // Argument 1 (r0): Subject string.
2819 __ mov(r0, subject);
2821 // Locate the code entry and call it.
2822 __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
2823 DirectCEntryStub stub;
2824 stub.GenerateCall(masm, r6);
2826 __ LeaveExitFrame(false, no_reg, true);
2828 last_match_info_elements = r6;
2831 // subject: subject string (callee saved)
2832 // regexp_data: RegExp data (callee saved)
2833 // last_match_info_elements: Last match info elements (callee saved)
2834 // Check the result.
2836 __ cmp(r0, Operand(1));
2837 // We expect exactly one result since we force the called regexp to behave
2841 __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
2843 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2844 // If not exception it can only be retry. Handle that in the runtime system.
2846 // Result must now be exception. If there is no pending exception already a
2847 // stack overflow (on the backtrack stack) was detected in RegExp code but
2848 // haven't created the exception yet. Handle that in the runtime system.
2849 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2850 __ mov(r1, Operand(isolate->factory()->the_hole_value()));
2851 __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2853 __ ldr(r0, MemOperand(r2, 0));
2857 __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
2859 // Check if the exception is a termination. If so, throw as uncatchable.
2860 __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
2862 Label termination_exception;
2863 __ b(eq, &termination_exception);
2867 __ bind(&termination_exception);
2868 __ ThrowUncatchable(r0);
2871 // For failure and exception return null.
2872 __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
2873 __ add(sp, sp, Operand(4 * kPointerSize));
2876 // Process the result from the native regexp code.
2879 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2880 // Calculate number of capture registers (number_of_captures + 1) * 2.
2881 // Multiplying by 2 comes for free since r1 is smi-tagged.
2882 STATIC_ASSERT(kSmiTag == 0);
2883 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2884 __ add(r1, r1, Operand(2)); // r1 was a smi.
2886 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2887 __ JumpIfSmi(r0, &runtime);
2888 __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
2890 // Check that the JSArray is in fast case.
2891 __ ldr(last_match_info_elements,
2892 FieldMemOperand(r0, JSArray::kElementsOffset));
2893 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2894 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
2896 // Check that the last match info has space for the capture registers and the
2897 // additional information.
2899 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2900 __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
2901 __ cmp(r2, Operand::SmiUntag(r0));
2904 // r1: number of capture registers
2905 // r4: subject string
2906 // Store the capture count.
2908 __ str(r2, FieldMemOperand(last_match_info_elements,
2909 RegExpImpl::kLastCaptureCountOffset));
2910 // Store last subject and last input.
2912 FieldMemOperand(last_match_info_elements,
2913 RegExpImpl::kLastSubjectOffset));
2914 __ mov(r2, subject);
2915 __ RecordWriteField(last_match_info_elements,
2916 RegExpImpl::kLastSubjectOffset,
2921 __ mov(subject, r2);
2923 FieldMemOperand(last_match_info_elements,
2924 RegExpImpl::kLastInputOffset));
2925 __ RecordWriteField(last_match_info_elements,
2926 RegExpImpl::kLastInputOffset,
2932 // Get the static offsets vector filled by the native regexp code.
2933 ExternalReference address_of_static_offsets_vector =
2934 ExternalReference::address_of_static_offsets_vector(isolate);
2935 __ mov(r2, Operand(address_of_static_offsets_vector));
2937 // r1: number of capture registers
2938 // r2: offsets vector
2939 Label next_capture, done;
2940 // Capture register counter starts from number of capture registers and
2941 // counts down until wraping after zero.
2943 last_match_info_elements,
2944 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2945 __ bind(&next_capture);
2946 __ sub(r1, r1, Operand(1), SetCC);
2948 // Read the value from the static offsets vector buffer.
2949 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
2950 // Store the smi value in the last match info.
2952 __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
2953 __ jmp(&next_capture);
2956 // Return last match info.
2957 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2958 __ add(sp, sp, Operand(4 * kPointerSize));
2961 // Do the runtime call to execute the regexp.
2963 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2965 // Deferred code for string handling.
2966 // (6) Not a long external string? If yes, go to (8).
2967 __ bind(¬_seq_nor_cons);
2968 // Compare flags are still set.
2969 __ b(gt, ¬_long_external); // Go to (8).
2971 // (7) External string. Make it, offset-wise, look like a sequential string.
2972 __ bind(&external_string);
2973 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2974 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2975 if (FLAG_debug_code) {
2976 // Assert that we do not have a cons or slice (indirect strings) here.
2977 // Sequential strings have already been ruled out.
2978 __ tst(r0, Operand(kIsIndirectStringMask));
2979 __ Assert(eq, kExternalStringExpectedButNotFound);
2982 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2983 // Move the pointer so that offset-wise, it looks like a sequential string.
2984 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2987 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
2988 __ jmp(&seq_string); // Go to (5).
2990 // (8) Short external string or not a string? If yes, bail out to runtime.
2991 __ bind(¬_long_external);
2992 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
2993 __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
2996 // (9) Sliced string. Replace subject with parent. Go to (4).
2997 // Load offset into r9 and replace subject string with parent.
2998 __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
3000 __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
3001 __ jmp(&check_underlying); // Go to (4).
3002 #endif // V8_INTERPRETED_REGEXP
3006 static void GenerateRecordCallTarget(MacroAssembler* masm) {
3007 // Cache the called function in a global property cell. Cache states
3008 // are uninitialized, monomorphic (indicated by a JSFunction), and
3010 // r0 : number of arguments to the construct function
3011 // r1 : the function to call
3012 // r2 : cache cell for call target
3013 Label initialize, done, miss, megamorphic, not_array_function;
3015 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
3016 masm->isolate()->heap()->undefined_value());
3017 ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
3018 masm->isolate()->heap()->the_hole_value());
3020 // Load the cache state into r3.
3021 __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
3023 // A monomorphic cache hit or an already megamorphic state: invoke the
3024 // function without changing the state.
3028 // If we came here, we need to see if we are the array function.
3029 // If we didn't have a matching function, and we didn't find the megamorph
3030 // sentinel, then we have in the cell either some other function or an
3031 // AllocationSite. Do a map check on the object in ecx.
3032 __ ldr(r5, FieldMemOperand(r3, 0));
3033 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
3036 // Make sure the function is the Array() function
3037 __ LoadArrayFunction(r3);
3039 __ b(ne, &megamorphic);
3044 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
3046 __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
3047 __ b(eq, &initialize);
3048 // MegamorphicSentinel is an immortal immovable object (undefined) so no
3049 // write-barrier is needed.
3050 __ bind(&megamorphic);
3051 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3052 __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
3055 // An uninitialized cache is patched with the function or sentinel to
3056 // indicate the ElementsKind if function is the Array constructor.
3057 __ bind(&initialize);
3058 // Make sure the function is the Array() function
3059 __ LoadArrayFunction(r3);
3061 __ b(ne, ¬_array_function);
3063 // The target function is the Array constructor,
3064 // Create an AllocationSite if we don't already have it, store it in the cell
3066 FrameScope scope(masm, StackFrame::INTERNAL);
3068 // Arguments register must be smi-tagged to call out.
3070 __ Push(r2, r1, r0);
3072 CreateAllocationSiteStub create_stub;
3073 __ CallStub(&create_stub);
3080 __ bind(¬_array_function);
3081 __ str(r1, FieldMemOperand(r2, Cell::kValueOffset));
3082 // No need for a write barrier here - cells are rescanned.
3088 void CallFunctionStub::Generate(MacroAssembler* masm) {
3089 // r1 : the function to call
3090 // r2 : cache cell for call target
3091 Label slow, non_function, wrap, cont;
3093 if (NeedsChecks()) {
3094 // Check that the function is really a JavaScript function.
3095 // r1: pushed function (to be verified)
3096 __ JumpIfSmi(r1, &non_function);
3098 // Goto slow case if we do not have a function.
3099 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
3102 if (RecordCallTarget()) {
3103 GenerateRecordCallTarget(masm);
3107 // Fast-case: Invoke the function now.
3108 // r1: pushed function
3109 ParameterCount actual(argc_);
3111 if (CallAsMethod()) {
3112 if (NeedsChecks()) {
3113 // Do not transform the receiver for strict mode functions.
3114 __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
3115 __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset));
3116 __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
3120 // Do not transform the receiver for native (Compilerhints already in r3).
3121 __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
3125 // Compute the receiver in non-strict mode.
3126 __ ldr(r3, MemOperand(sp, argc_ * kPointerSize));
3128 if (NeedsChecks()) {
3129 __ JumpIfSmi(r3, &wrap);
3130 __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
3138 __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
3140 if (NeedsChecks()) {
3141 // Slow-case: Non-function called.
3143 if (RecordCallTarget()) {
3144 // If there is a call target cache, mark it megamorphic in the
3145 // non-function case. MegamorphicSentinel is an immortal immovable
3146 // object (undefined) so no write barrier is needed.
3147 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
3148 masm->isolate()->heap()->undefined_value());
3149 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3150 __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
3152 // Check for function proxy.
3153 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
3154 __ b(ne, &non_function);
3155 __ push(r1); // put proxy as additional argument
3156 __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
3157 __ mov(r2, Operand::Zero());
3158 __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
3160 Handle<Code> adaptor =
3161 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3162 __ Jump(adaptor, RelocInfo::CODE_TARGET);
3165 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3166 // of the original receiver from the call site).
3167 __ bind(&non_function);
3168 __ str(r1, MemOperand(sp, argc_ * kPointerSize));
3169 __ mov(r0, Operand(argc_)); // Set up the number of arguments.
3170 __ mov(r2, Operand::Zero());
3171 __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
3172 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3173 RelocInfo::CODE_TARGET);
3176 if (CallAsMethod()) {
3178 // Wrap the receiver and patch it back onto the stack.
3179 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
3181 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
3184 __ str(r0, MemOperand(sp, argc_ * kPointerSize));
3190 void CallConstructStub::Generate(MacroAssembler* masm) {
3191 // r0 : number of arguments
3192 // r1 : the function to call
3193 // r2 : cache cell for call target
3194 Label slow, non_function_call;
3196 // Check that the function is not a smi.
3197 __ JumpIfSmi(r1, &non_function_call);
3198 // Check that the function is a JSFunction.
3199 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
3202 if (RecordCallTarget()) {
3203 GenerateRecordCallTarget(masm);
3206 // Jump to the function-specific construct stub.
3207 Register jmp_reg = r3;
3208 __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
3209 __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
3210 SharedFunctionInfo::kConstructStubOffset));
3211 __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
3213 // r0: number of arguments
3214 // r1: called object
3218 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
3219 __ b(ne, &non_function_call);
3220 __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3223 __ bind(&non_function_call);
3224 __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3226 // Set expected number of arguments to zero (not changing r0).
3227 __ mov(r2, Operand::Zero());
3228 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3229 RelocInfo::CODE_TARGET);
3233 // StringCharCodeAtGenerator
3234 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3237 Label got_char_code;
3238 Label sliced_string;
3240 // If the receiver is a smi trigger the non-string case.
3241 __ JumpIfSmi(object_, receiver_not_string_);
3243 // Fetch the instance type of the receiver into result register.
3244 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3245 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3246 // If the receiver is not a string trigger the non-string case.
3247 __ tst(result_, Operand(kIsNotStringMask));
3248 __ b(ne, receiver_not_string_);
3250 // If the index is non-smi trigger the non-smi case.
3251 __ JumpIfNotSmi(index_, &index_not_smi_);
3252 __ bind(&got_smi_index_);
3254 // Check for index out of range.
3255 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
3256 __ cmp(ip, Operand(index_));
3257 __ b(ls, index_out_of_range_);
3259 __ SmiUntag(index_);
3261 StringCharLoadGenerator::Generate(masm,
3272 void StringCharCodeAtGenerator::GenerateSlow(
3273 MacroAssembler* masm,
3274 const RuntimeCallHelper& call_helper) {
3275 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3277 // Index is not a smi.
3278 __ bind(&index_not_smi_);
3279 // If index is a heap number, try converting it to an integer.
3282 Heap::kHeapNumberMapRootIndex,
3285 call_helper.BeforeCall(masm);
3287 __ push(index_); // Consumed by runtime conversion function.
3288 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3289 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3291 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3292 // NumberToSmi discards numbers that are not exact integers.
3293 __ CallRuntime(Runtime::kNumberToSmi, 1);
3295 // Save the conversion result before the pop instructions below
3296 // have a chance to overwrite it.
3297 __ Move(index_, r0);
3299 // Reload the instance type.
3300 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3301 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3302 call_helper.AfterCall(masm);
3303 // If index is still not a smi, it must be out of range.
3304 __ JumpIfNotSmi(index_, index_out_of_range_);
3305 // Otherwise, return to the fast path.
3306 __ jmp(&got_smi_index_);
3308 // Call runtime. We get here when the receiver is a string and the
3309 // index is a number, but the code of getting the actual character
3310 // is too complex (e.g., when the string needs to be flattened).
3311 __ bind(&call_runtime_);
3312 call_helper.BeforeCall(masm);
3314 __ Push(object_, index_);
3315 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
3316 __ Move(result_, r0);
3317 call_helper.AfterCall(masm);
3320 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3324 // -------------------------------------------------------------------------
3325 // StringCharFromCodeGenerator
3327 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3328 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3329 STATIC_ASSERT(kSmiTag == 0);
3330 STATIC_ASSERT(kSmiShiftSize == 0);
3331 ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
3333 Operand(kSmiTagMask |
3334 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3335 __ b(ne, &slow_case_);
3337 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3338 // At this point code register contains smi tagged ASCII char code.
3339 __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
3340 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3341 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
3342 __ b(eq, &slow_case_);
3347 void StringCharFromCodeGenerator::GenerateSlow(
3348 MacroAssembler* masm,
3349 const RuntimeCallHelper& call_helper) {
3350 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3352 __ bind(&slow_case_);
3353 call_helper.BeforeCall(masm);
3355 __ CallRuntime(Runtime::kCharFromCode, 1);
3356 __ Move(result_, r0);
3357 call_helper.AfterCall(masm);
3360 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3364 enum CopyCharactersFlags {
3366 DEST_ALWAYS_ALIGNED = 2
3370 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
3379 bool ascii = (flags & COPY_ASCII) != 0;
3380 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
3382 if (dest_always_aligned && FLAG_debug_code) {
3383 // Check that destination is actually word aligned if the flag says
3385 __ tst(dest, Operand(kPointerAlignmentMask));
3386 __ Check(eq, kDestinationOfCopyNotAligned);
3389 const int kReadAlignment = 4;
3390 const int kReadAlignmentMask = kReadAlignment - 1;
3391 // Ensure that reading an entire aligned word containing the last character
3392 // of a string will not read outside the allocated area (because we pad up
3393 // to kObjectAlignment).
3394 STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
3395 // Assumes word reads and writes are little endian.
3396 // Nothing to do for zero characters.
3399 __ add(count, count, Operand(count), SetCC);
3401 __ cmp(count, Operand::Zero());
3405 // Assume that you cannot read (or write) unaligned.
3407 // Must copy at least eight bytes, otherwise just do it one byte at a time.
3408 __ cmp(count, Operand(8));
3409 __ add(count, dest, Operand(count));
3410 Register limit = count; // Read until src equals this.
3411 __ b(lt, &byte_loop);
3413 if (!dest_always_aligned) {
3414 // Align dest by byte copying. Copies between zero and three bytes.
3415 __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
3417 __ b(eq, &dest_aligned);
3418 __ cmp(scratch4, Operand(2));
3419 __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
3420 __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
3421 __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
3422 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
3423 __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
3424 __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
3425 __ bind(&dest_aligned);
3430 __ sub(scratch4, dest, Operand(src));
3431 __ and_(scratch4, scratch4, Operand(0x03), SetCC);
3432 __ b(eq, &simple_loop);
3433 // Shift register is number of bits in a source word that
3434 // must be combined with bits in the next source word in order
3435 // to create a destination word.
3437 // Complex loop for src/dst that are not aligned the same way.
3440 __ mov(scratch4, Operand(scratch4, LSL, 3));
3441 Register left_shift = scratch4;
3442 __ and_(src, src, Operand(~3)); // Round down to load previous word.
3443 __ ldr(scratch1, MemOperand(src, 4, PostIndex));
3444 // Store the "shift" most significant bits of scratch in the least
3445 // signficant bits (i.e., shift down by (32-shift)).
3446 __ rsb(scratch2, left_shift, Operand(32));
3447 Register right_shift = scratch2;
3448 __ mov(scratch1, Operand(scratch1, LSR, right_shift));
3451 __ ldr(scratch3, MemOperand(src, 4, PostIndex));
3452 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
3453 __ str(scratch1, MemOperand(dest, 4, PostIndex));
3454 __ mov(scratch1, Operand(scratch3, LSR, right_shift));
3455 // Loop if four or more bytes left to copy.
3456 __ sub(scratch3, limit, Operand(dest));
3457 __ sub(scratch3, scratch3, Operand(4), SetCC);
3460 // There is now between zero and three bytes left to copy (negative that
3461 // number is in scratch3), and between one and three bytes already read into
3462 // scratch1 (eight times that number in scratch4). We may have read past
3463 // the end of the string, but because objects are aligned, we have not read
3464 // past the end of the object.
3465 // Find the minimum of remaining characters to move and preloaded characters
3466 // and write those as bytes.
3467 __ add(scratch3, scratch3, Operand(4), SetCC);
3469 __ cmp(scratch4, Operand(scratch3, LSL, 3), ne);
3470 // Move minimum of bytes read and bytes left to copy to scratch4.
3471 __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt);
3472 // Between one and three (value in scratch3) characters already read into
3473 // scratch ready to write.
3474 __ cmp(scratch3, Operand(2));
3475 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
3476 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
3477 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
3478 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
3479 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
3480 // Copy any remaining bytes.
3484 // Copy words from src to dst, until less than four bytes left.
3485 // Both src and dest are word aligned.
3486 __ bind(&simple_loop);
3490 __ ldr(scratch1, MemOperand(src, 4, PostIndex));
3491 __ sub(scratch3, limit, Operand(dest));
3492 __ str(scratch1, MemOperand(dest, 4, PostIndex));
3493 // Compare to 8, not 4, because we do the substraction before increasing
3495 __ cmp(scratch3, Operand(8));
3499 // Copy bytes from src to dst until dst hits limit.
3500 __ bind(&byte_loop);
3501 __ cmp(dest, Operand(limit));
3502 __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
3504 __ strb(scratch1, MemOperand(dest, 1, PostIndex));
3511 void StringHelper::GenerateHashInit(MacroAssembler* masm,
3513 Register character) {
3514 // hash = character + (character << 10);
3515 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
3516 // Untag smi seed and add the character.
3517 __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
3518 // hash += hash << 10;
3519 __ add(hash, hash, Operand(hash, LSL, 10));
3520 // hash ^= hash >> 6;
3521 __ eor(hash, hash, Operand(hash, LSR, 6));
3525 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3527 Register character) {
3528 // hash += character;
3529 __ add(hash, hash, Operand(character));
3530 // hash += hash << 10;
3531 __ add(hash, hash, Operand(hash, LSL, 10));
3532 // hash ^= hash >> 6;
3533 __ eor(hash, hash, Operand(hash, LSR, 6));
3537 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
3539 // hash += hash << 3;
3540 __ add(hash, hash, Operand(hash, LSL, 3));
3541 // hash ^= hash >> 11;
3542 __ eor(hash, hash, Operand(hash, LSR, 11));
3543 // hash += hash << 15;
3544 __ add(hash, hash, Operand(hash, LSL, 15));
3546 __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
3548 // if (hash == 0) hash = 27;
3549 __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
3553 void SubStringStub::Generate(MacroAssembler* masm) {
3556 // Stack frame on entry.
3557 // lr: return address
3562 // This stub is called from the native-call %_SubString(...), so
3563 // nothing can be assumed about the arguments. It is tested that:
3564 // "string" is a sequential string,
3565 // both "from" and "to" are smis, and
3566 // 0 <= from <= to <= string.length.
3567 // If any of these assumptions fail, we call the runtime system.
3569 const int kToOffset = 0 * kPointerSize;
3570 const int kFromOffset = 1 * kPointerSize;
3571 const int kStringOffset = 2 * kPointerSize;
3573 __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
3574 STATIC_ASSERT(kFromOffset == kToOffset + 4);
3575 STATIC_ASSERT(kSmiTag == 0);
3576 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3578 // Arithmetic shift right by one un-smi-tags. In this case we rotate right
3579 // instead because we bail out on non-smi values: ROR and ASR are equivalent
3580 // for smis but they set the flags in a way that's easier to optimize.
3581 __ mov(r2, Operand(r2, ROR, 1), SetCC);
3582 __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
3583 // If either to or from had the smi tag bit set, then C is set now, and N
3584 // has the same value: we rotated by 1, so the bottom bit is now the top bit.
3585 // We want to bailout to runtime here if From is negative. In that case, the
3586 // next instruction is not executed and we fall through to bailing out to
3588 // Executed if both r2 and r3 are untagged integers.
3589 __ sub(r2, r2, Operand(r3), SetCC, cc);
3590 // One of the above un-smis or the above SUB could have set N==1.
3591 __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to.
3593 // Make sure first argument is a string.
3594 __ ldr(r0, MemOperand(sp, kStringOffset));
3595 // Do a JumpIfSmi, but fold its jump into the subsequent string test.
3597 Condition is_string = masm->IsObjectStringType(r0, r1, ne);
3598 ASSERT(is_string == eq);
3599 __ b(NegateCondition(is_string), &runtime);
3602 __ cmp(r2, Operand(1));
3603 __ b(eq, &single_char);
3605 // Short-cut for the case of trivial substring.
3607 // r0: original string
3608 // r2: result string length
3609 __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
3610 __ cmp(r2, Operand(r4, ASR, 1));
3611 // Return original string.
3612 __ b(eq, &return_r0);
3613 // Longer than original string's length or negative: unsafe arguments.
3615 // Shorter than original string's length: an actual substring.
3617 // Deal with different string types: update the index if necessary
3618 // and put the underlying string into r5.
3619 // r0: original string
3620 // r1: instance type
3622 // r3: from index (untagged)
3623 Label underlying_unpacked, sliced_string, seq_or_external_string;
3624 // If the string is not indirect, it can only be sequential or external.
3625 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3626 STATIC_ASSERT(kIsIndirectStringMask != 0);
3627 __ tst(r1, Operand(kIsIndirectStringMask));
3628 __ b(eq, &seq_or_external_string);
3630 __ tst(r1, Operand(kSlicedNotConsMask));
3631 __ b(ne, &sliced_string);
3632 // Cons string. Check whether it is flat, then fetch first part.
3633 __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
3634 __ CompareRoot(r5, Heap::kempty_stringRootIndex);
3636 __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
3637 // Update instance type.
3638 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
3639 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
3640 __ jmp(&underlying_unpacked);
3642 __ bind(&sliced_string);
3643 // Sliced string. Fetch parent and correct start index by offset.
3644 __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
3645 __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
3646 __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
3647 // Update instance type.
3648 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
3649 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
3650 __ jmp(&underlying_unpacked);
3652 __ bind(&seq_or_external_string);
3653 // Sequential or external string. Just move string to the expected register.
3656 __ bind(&underlying_unpacked);
3658 if (FLAG_string_slices) {
3660 // r5: underlying subject string
3661 // r1: instance type of underlying subject string
3663 // r3: adjusted start index (untagged)
3664 __ cmp(r2, Operand(SlicedString::kMinLength));
3665 // Short slice. Copy instead of slicing.
3666 __ b(lt, ©_routine);
3667 // Allocate new sliced string. At this point we do not reload the instance
3668 // type including the string encoding because we simply rely on the info
3669 // provided by the original string. It does not matter if the original
3670 // string's encoding is wrong because we always have to recheck encoding of
3671 // the newly created string's parent anyways due to externalized strings.
3672 Label two_byte_slice, set_slice_header;
3673 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3674 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3675 __ tst(r1, Operand(kStringEncodingMask));
3676 __ b(eq, &two_byte_slice);
3677 __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
3678 __ jmp(&set_slice_header);
3679 __ bind(&two_byte_slice);
3680 __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
3681 __ bind(&set_slice_header);
3682 __ mov(r3, Operand(r3, LSL, 1));
3683 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
3684 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
3687 __ bind(©_routine);
3690 // r5: underlying subject string
3691 // r1: instance type of underlying subject string
3693 // r3: adjusted start index (untagged)
3694 Label two_byte_sequential, sequential_string, allocate_result;
3695 STATIC_ASSERT(kExternalStringTag != 0);
3696 STATIC_ASSERT(kSeqStringTag == 0);
3697 __ tst(r1, Operand(kExternalStringTag));
3698 __ b(eq, &sequential_string);
3700 // Handle external string.
3701 // Rule out short external strings.
3702 STATIC_CHECK(kShortExternalStringTag != 0);
3703 __ tst(r1, Operand(kShortExternalStringTag));
3705 __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
3706 // r5 already points to the first character of underlying string.
3707 __ jmp(&allocate_result);
3709 __ bind(&sequential_string);
3710 // Locate first character of underlying subject string.
3711 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3712 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3714 __ bind(&allocate_result);
3715 // Sequential acii string. Allocate the result.
3716 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3717 __ tst(r1, Operand(kStringEncodingMask));
3718 __ b(eq, &two_byte_sequential);
3720 // Allocate and copy the resulting ASCII string.
3721 __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
3723 // Locate first character of substring to copy.
3725 // Locate first character of result.
3726 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3728 // r0: result string
3729 // r1: first character of result string
3730 // r2: result string length
3731 // r5: first character of substring to copy
3732 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3733 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9,
3734 COPY_ASCII | DEST_ALWAYS_ALIGNED);
3737 // Allocate and copy the resulting two-byte string.
3738 __ bind(&two_byte_sequential);
3739 __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
3741 // Locate first character of substring to copy.
3742 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3743 __ add(r5, r5, Operand(r3, LSL, 1));
3744 // Locate first character of result.
3745 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3747 // r0: result string.
3748 // r1: first character of result.
3749 // r2: result length.
3750 // r5: first character of substring to copy.
3751 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3752 StringHelper::GenerateCopyCharactersLong(
3753 masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED);
3755 __ bind(&return_r0);
3756 Counters* counters = masm->isolate()->counters();
3757 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
3761 // Just jump to runtime to create the sub string.
3763 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3765 __ bind(&single_char);
3766 // r0: original string
3767 // r1: instance type
3769 // r3: from index (untagged)
3771 StringCharAtGenerator generator(
3772 r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3773 generator.GenerateFast(masm);
3776 generator.SkipSlow(masm, &runtime);
3780 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
3785 Register scratch3) {
3786 Register length = scratch1;
3789 Label strings_not_equal, check_zero_length;
3790 __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
3791 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3792 __ cmp(length, scratch2);
3793 __ b(eq, &check_zero_length);
3794 __ bind(&strings_not_equal);
3795 __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
3798 // Check if the length is zero.
3799 Label compare_chars;
3800 __ bind(&check_zero_length);
3801 STATIC_ASSERT(kSmiTag == 0);
3802 __ cmp(length, Operand::Zero());
3803 __ b(ne, &compare_chars);
3804 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3807 // Compare characters.
3808 __ bind(&compare_chars);
3809 GenerateAsciiCharsCompareLoop(masm,
3810 left, right, length, scratch2, scratch3,
3811 &strings_not_equal);
3813 // Characters are equal.
3814 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3819 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
3825 Register scratch4) {
3826 Label result_not_equal, compare_lengths;
3827 // Find minimum length and length difference.
3828 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
3829 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3830 __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
3831 Register length_delta = scratch3;
3832 __ mov(scratch1, scratch2, LeaveCC, gt);
3833 Register min_length = scratch1;
3834 STATIC_ASSERT(kSmiTag == 0);
3835 __ cmp(min_length, Operand::Zero());
3836 __ b(eq, &compare_lengths);
3839 GenerateAsciiCharsCompareLoop(masm,
3840 left, right, min_length, scratch2, scratch4,
3843 // Compare lengths - strings up to min-length are equal.
3844 __ bind(&compare_lengths);
3845 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3846 // Use length_delta as result if it's zero.
3847 __ mov(r0, Operand(length_delta), SetCC);
3848 __ bind(&result_not_equal);
3849 // Conditionally update the result based either on length_delta or
3850 // the last comparion performed in the loop above.
3851 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
3852 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
3857 void StringCompareStub::GenerateAsciiCharsCompareLoop(
3858 MacroAssembler* masm,
3864 Label* chars_not_equal) {
3865 // Change index to run from -length to -1 by adding length to string
3866 // start. This means that loop ends when index reaches zero, which
3867 // doesn't need an additional compare.
3868 __ SmiUntag(length);
3869 __ add(scratch1, length,
3870 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3871 __ add(left, left, Operand(scratch1));
3872 __ add(right, right, Operand(scratch1));
3873 __ rsb(length, length, Operand::Zero());
3874 Register index = length; // index = -length;
3879 __ ldrb(scratch1, MemOperand(left, index));
3880 __ ldrb(scratch2, MemOperand(right, index));
3881 __ cmp(scratch1, scratch2);
3882 __ b(ne, chars_not_equal);
3883 __ add(index, index, Operand(1), SetCC);
3888 void StringCompareStub::Generate(MacroAssembler* masm) {
3891 Counters* counters = masm->isolate()->counters();
3893 // Stack frame on entry.
3894 // sp[0]: right string
3895 // sp[4]: left string
3896 __ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1.
3900 __ b(ne, ¬_same);
3901 STATIC_ASSERT(EQUAL == 0);
3902 STATIC_ASSERT(kSmiTag == 0);
3903 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3904 __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
3905 __ add(sp, sp, Operand(2 * kPointerSize));
3910 // Check that both objects are sequential ASCII strings.
3911 __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
3913 // Compare flat ASCII strings natively. Remove arguments from stack first.
3914 __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
3915 __ add(sp, sp, Operand(2 * kPointerSize));
3916 GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
3918 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
3919 // tagged as a small integer.
3921 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3925 void ArrayPushStub::Generate(MacroAssembler* masm) {
3926 Register receiver = r0;
3927 Register scratch = r1;
3929 int argc = arguments_count();
3932 // Nothing to do, just return the length.
3933 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
3939 Isolate* isolate = masm->isolate();
3942 __ TailCallExternalReference(
3943 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
3947 Label call_builtin, attempt_to_grow_elements, with_write_barrier;
3949 Register elements = r6;
3950 Register end_elements = r5;
3951 // Get the elements array of the object.
3952 __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
3954 if (IsFastSmiOrObjectElementsKind(elements_kind())) {
3955 // Check that the elements are in fast mode and writable.
3956 __ CheckMap(elements,
3958 Heap::kFixedArrayMapRootIndex,
3963 // Get the array's length into scratch and calculate new length.
3964 __ ldr(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
3965 __ add(scratch, scratch, Operand(Smi::FromInt(argc)));
3967 // Get the elements' length.
3968 __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
3970 // Check if we could survive without allocation.
3971 __ cmp(scratch, r4);
3973 const int kEndElementsOffset =
3974 FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
3976 if (IsFastSmiOrObjectElementsKind(elements_kind())) {
3977 __ b(gt, &attempt_to_grow_elements);
3979 // Check if value is a smi.
3980 __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
3981 __ JumpIfNotSmi(r4, &with_write_barrier);
3984 // We may need a register containing the address end_elements below, so
3985 // write back the value in end_elements.
3986 __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
3987 __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
3989 // Check if we could survive without allocation.
3990 __ cmp(scratch, r4);
3991 __ b(gt, &call_builtin);
3993 __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
3994 __ StoreNumberToDoubleElements(r4, scratch, elements, r5, d0,
3995 &call_builtin, argc * kDoubleSize);
3999 __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
4001 __ mov(r0, scratch);
4004 if (IsFastDoubleElementsKind(elements_kind())) {
4005 __ bind(&call_builtin);
4006 __ TailCallExternalReference(
4007 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4011 __ bind(&with_write_barrier);
4013 if (IsFastSmiElementsKind(elements_kind())) {
4014 if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
4016 __ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset));
4017 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4019 __ b(eq, &call_builtin);
4021 ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
4022 ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4023 __ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
4024 __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
4025 __ ldr(r3, ContextOperand(r3, Context::JS_ARRAY_MAPS_INDEX));
4026 const int header_size = FixedArrayBase::kHeaderSize;
4027 // Verify that the object can be transitioned in place.
4028 const int origin_offset = header_size + elements_kind() * kPointerSize;
4029 __ ldr(r2, FieldMemOperand(receiver, origin_offset));
4030 __ ldr(ip, FieldMemOperand(r3, HeapObject::kMapOffset));
4032 __ b(ne, &call_builtin);
4034 const int target_offset = header_size + target_kind * kPointerSize;
4035 __ ldr(r3, FieldMemOperand(r3, target_offset));
4036 __ mov(r2, receiver);
4037 ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
4038 masm, DONT_TRACK_ALLOCATION_SITE, NULL);
4042 __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
4045 // We may need a register containing the address end_elements below, so write
4046 // back the value in end_elements.
4047 __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
4048 __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
4050 __ RecordWrite(elements,
4055 EMIT_REMEMBERED_SET,
4058 __ mov(r0, scratch);
4061 __ bind(&attempt_to_grow_elements);
4062 // scratch: array's length + 1.
4064 if (!FLAG_inline_new) {
4065 __ bind(&call_builtin);
4066 __ TailCallExternalReference(
4067 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4071 __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
4072 // Growing elements that are SMI-only requires special handling in case the
4073 // new element is non-Smi. For now, delegate to the builtin.
4074 if (IsFastSmiElementsKind(elements_kind())) {
4075 __ JumpIfNotSmi(r2, &call_builtin);
4078 // We could be lucky and the elements array could be at the top of new-space.
4079 // In this case we can just grow it in place by moving the allocation pointer
4081 ExternalReference new_space_allocation_top =
4082 ExternalReference::new_space_allocation_top_address(isolate);
4083 ExternalReference new_space_allocation_limit =
4084 ExternalReference::new_space_allocation_limit_address(isolate);
4086 const int kAllocationDelta = 4;
4087 ASSERT(kAllocationDelta >= argc);
4088 // Load top and check if it is the end of elements.
4089 __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
4090 __ add(end_elements, end_elements, Operand(kEndElementsOffset));
4091 __ mov(r4, Operand(new_space_allocation_top));
4092 __ ldr(r3, MemOperand(r4));
4093 __ cmp(end_elements, r3);
4094 __ b(ne, &call_builtin);
4096 __ mov(r9, Operand(new_space_allocation_limit));
4097 __ ldr(r9, MemOperand(r9));
4098 __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
4100 __ b(hi, &call_builtin);
4102 // We fit and could grow elements.
4103 // Update new_space_allocation_top.
4104 __ str(r3, MemOperand(r4));
4105 // Push the argument.
4106 __ str(r2, MemOperand(end_elements));
4107 // Fill the rest with holes.
4108 __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
4109 for (int i = 1; i < kAllocationDelta; i++) {
4110 __ str(r3, MemOperand(end_elements, i * kPointerSize));
4113 // Update elements' and array's sizes.
4114 __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
4115 __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
4116 __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
4117 __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
4119 // Elements are in new space, so write barrier is not required.
4121 __ mov(r0, scratch);
4124 __ bind(&call_builtin);
4125 __ TailCallExternalReference(
4126 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4130 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
4131 // ----------- S t a t e -------------
4134 // -- lr : return address
4135 // -----------------------------------
4136 Isolate* isolate = masm->isolate();
4138 // Load r2 with the allocation site. We stick an undefined dummy value here
4139 // and replace it with the real allocation site later when we instantiate this
4140 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
4141 __ Move(r2, handle(isolate->heap()->undefined_value()));
4143 // Make sure that we actually patched the allocation site.
4144 if (FLAG_debug_code) {
4145 __ tst(r2, Operand(kSmiTagMask));
4146 __ Assert(ne, kExpectedAllocationSite);
4148 __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
4149 __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
4152 __ Assert(eq, kExpectedAllocationSite);
4155 // Tail call into the stub that handles binary operations with allocation
4157 BinaryOpWithAllocationSiteStub stub(state_);
4158 __ TailCallStub(&stub);
4162 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
4163 ASSERT(state_ == CompareIC::SMI);
4166 __ JumpIfNotSmi(r2, &miss);
4168 if (GetCondition() == eq) {
4169 // For equality we do not care about the sign of the result.
4170 __ sub(r0, r0, r1, SetCC);
4172 // Untag before subtracting to avoid handling overflow.
4174 __ sub(r0, r1, Operand::SmiUntag(r0));
4183 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
4184 ASSERT(state_ == CompareIC::NUMBER);
4187 Label unordered, maybe_undefined1, maybe_undefined2;
4190 if (left_ == CompareIC::SMI) {
4191 __ JumpIfNotSmi(r1, &miss);
4193 if (right_ == CompareIC::SMI) {
4194 __ JumpIfNotSmi(r0, &miss);
4197 // Inlining the double comparison and falling back to the general compare
4198 // stub if NaN is involved.
4199 // Load left and right operand.
4200 Label done, left, left_smi, right_smi;
4201 __ JumpIfSmi(r0, &right_smi);
4202 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
4204 __ sub(r2, r0, Operand(kHeapObjectTag));
4205 __ vldr(d1, r2, HeapNumber::kValueOffset);
4207 __ bind(&right_smi);
4208 __ SmiToDouble(d1, r0);
4211 __ JumpIfSmi(r1, &left_smi);
4212 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
4214 __ sub(r2, r1, Operand(kHeapObjectTag));
4215 __ vldr(d0, r2, HeapNumber::kValueOffset);
4218 __ SmiToDouble(d0, r1);
4221 // Compare operands.
4222 __ VFPCompareAndSetFlags(d0, d1);
4224 // Don't base result on status bits when a NaN is involved.
4225 __ b(vs, &unordered);
4227 // Return a result of -1, 0, or 1, based on status bits.
4228 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
4229 __ mov(r0, Operand(LESS), LeaveCC, lt);
4230 __ mov(r0, Operand(GREATER), LeaveCC, gt);
4233 __ bind(&unordered);
4234 __ bind(&generic_stub);
4235 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
4236 CompareIC::GENERIC);
4237 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
4239 __ bind(&maybe_undefined1);
4240 if (Token::IsOrderedRelationalCompareOp(op_)) {
4241 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
4243 __ JumpIfSmi(r1, &unordered);
4244 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
4245 __ b(ne, &maybe_undefined2);
4249 __ bind(&maybe_undefined2);
4250 if (Token::IsOrderedRelationalCompareOp(op_)) {
4251 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
4252 __ b(eq, &unordered);
4260 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
4261 ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
4264 // Registers containing left and right operands respectively.
4266 Register right = r0;
4270 // Check that both operands are heap objects.
4271 __ JumpIfEitherSmi(left, right, &miss);
4273 // Check that both operands are internalized strings.
4274 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4275 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4276 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4277 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4278 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4279 __ orr(tmp1, tmp1, Operand(tmp2));
4280 __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4283 // Internalized strings are compared by identity.
4284 __ cmp(left, right);
4285 // Make sure r0 is non-zero. At this point input operands are
4286 // guaranteed to be non-zero.
4287 ASSERT(right.is(r0));
4288 STATIC_ASSERT(EQUAL == 0);
4289 STATIC_ASSERT(kSmiTag == 0);
4290 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
4298 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
4299 ASSERT(state_ == CompareIC::UNIQUE_NAME);
4300 ASSERT(GetCondition() == eq);
4303 // Registers containing left and right operands respectively.
4305 Register right = r0;
4309 // Check that both operands are heap objects.
4310 __ JumpIfEitherSmi(left, right, &miss);
4312 // Check that both operands are unique names. This leaves the instance
4313 // types loaded in tmp1 and tmp2.
4314 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4315 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4316 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4317 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4319 __ JumpIfNotUniqueName(tmp1, &miss);
4320 __ JumpIfNotUniqueName(tmp2, &miss);
4322 // Unique names are compared by identity.
4323 __ cmp(left, right);
4324 // Make sure r0 is non-zero. At this point input operands are
4325 // guaranteed to be non-zero.
4326 ASSERT(right.is(r0));
4327 STATIC_ASSERT(EQUAL == 0);
4328 STATIC_ASSERT(kSmiTag == 0);
4329 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
4337 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
4338 ASSERT(state_ == CompareIC::STRING);
4341 bool equality = Token::IsEqualityOp(op_);
4343 // Registers containing left and right operands respectively.
4345 Register right = r0;
4351 // Check that both operands are heap objects.
4352 __ JumpIfEitherSmi(left, right, &miss);
4354 // Check that both operands are strings. This leaves the instance
4355 // types loaded in tmp1 and tmp2.
4356 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4357 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4358 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4359 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4360 STATIC_ASSERT(kNotStringTag != 0);
4361 __ orr(tmp3, tmp1, tmp2);
4362 __ tst(tmp3, Operand(kIsNotStringMask));
4365 // Fast check for identical strings.
4366 __ cmp(left, right);
4367 STATIC_ASSERT(EQUAL == 0);
4368 STATIC_ASSERT(kSmiTag == 0);
4369 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
4372 // Handle not identical strings.
4374 // Check that both strings are internalized strings. If they are, we're done
4375 // because we already know they are not identical. We know they are both
4378 ASSERT(GetCondition() == eq);
4379 STATIC_ASSERT(kInternalizedTag == 0);
4380 __ orr(tmp3, tmp1, Operand(tmp2));
4381 __ tst(tmp3, Operand(kIsNotInternalizedMask));
4382 // Make sure r0 is non-zero. At this point input operands are
4383 // guaranteed to be non-zero.
4384 ASSERT(right.is(r0));
4388 // Check that both strings are sequential ASCII.
4390 __ JumpIfBothInstanceTypesAreNotSequentialAscii(
4391 tmp1, tmp2, tmp3, tmp4, &runtime);
4393 // Compare flat ASCII strings. Returns when done.
4395 StringCompareStub::GenerateFlatAsciiStringEquals(
4396 masm, left, right, tmp1, tmp2, tmp3);
4398 StringCompareStub::GenerateCompareFlatAsciiStrings(
4399 masm, left, right, tmp1, tmp2, tmp3, tmp4);
4402 // Handle more complex cases in runtime.
4404 __ Push(left, right);
4406 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
4408 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
4416 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
4417 ASSERT(state_ == CompareIC::OBJECT);
4419 __ and_(r2, r1, Operand(r0));
4420 __ JumpIfSmi(r2, &miss);
4422 __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
4424 __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
4427 ASSERT(GetCondition() == eq);
4428 __ sub(r0, r0, Operand(r1));
4436 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
4438 __ and_(r2, r1, Operand(r0));
4439 __ JumpIfSmi(r2, &miss);
4440 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4441 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
4442 __ cmp(r2, Operand(known_map_));
4444 __ cmp(r3, Operand(known_map_));
4447 __ sub(r0, r0, Operand(r1));
4456 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
4458 // Call the runtime system in a fresh internal frame.
4459 ExternalReference miss =
4460 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
4462 FrameScope scope(masm, StackFrame::INTERNAL);
4464 __ Push(lr, r1, r0);
4465 __ mov(ip, Operand(Smi::FromInt(op_)));
4467 __ CallExternalReference(miss, 3);
4468 // Compute the entry point of the rewritten stub.
4469 __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
4470 // Restore registers.
4479 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4480 // Place the return address on the stack, making the call
4481 // GC safe. The RegExp backend also relies on this.
4482 __ str(lr, MemOperand(sp, 0));
4483 __ blx(ip); // Call the C++ function.
4484 __ VFPEnsureFPSCRState(r2);
4485 __ ldr(pc, MemOperand(sp, 0));
4489 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4492 reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
4493 __ Move(ip, target);
4494 __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
4495 __ blx(lr); // Call the stub.
4499 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4503 Register properties,
4505 Register scratch0) {
4506 ASSERT(name->IsUniqueName());
4507 // If names of slots in range from 1 to kProbes - 1 for the hash value are
4508 // not equal to the name and kProbes-th slot is not used (its name is the
4509 // undefined value), it guarantees the hash table doesn't contain the
4510 // property. It's true even if some slots represent deleted properties
4511 // (their names are the hole value).
4512 for (int i = 0; i < kInlinedProbes; i++) {
4513 // scratch0 points to properties hash.
4514 // Compute the masked index: (hash + i + i * i) & mask.
4515 Register index = scratch0;
4516 // Capacity is smi 2^n.
4517 __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
4518 __ sub(index, index, Operand(1));
4519 __ and_(index, index, Operand(
4520 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
4522 // Scale the index by multiplying by the entry size.
4523 ASSERT(NameDictionary::kEntrySize == 3);
4524 __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
4526 Register entity_name = scratch0;
4527 // Having undefined at this place means the name is not contained.
4528 ASSERT_EQ(kSmiTagSize, 1);
4529 Register tmp = properties;
4530 __ add(tmp, properties, Operand(index, LSL, 1));
4531 __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4533 ASSERT(!tmp.is(entity_name));
4534 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
4535 __ cmp(entity_name, tmp);
4538 // Load the hole ready for use below:
4539 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
4541 // Stop if found the property.
4542 __ cmp(entity_name, Operand(Handle<Name>(name)));
4546 __ cmp(entity_name, tmp);
4549 // Check if the entry name is not a unique name.
4550 __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4551 __ ldrb(entity_name,
4552 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4553 __ JumpIfNotUniqueName(entity_name, miss);
4556 // Restore the properties.
4558 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4561 const int spill_mask =
4562 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
4563 r2.bit() | r1.bit() | r0.bit());
4565 __ stm(db_w, sp, spill_mask);
4566 __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4567 __ mov(r1, Operand(Handle<Name>(name)));
4568 NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
4570 __ cmp(r0, Operand::Zero());
4571 __ ldm(ia_w, sp, spill_mask);
4578 // Probe the name dictionary in the |elements| register. Jump to the
4579 // |done| label if a property with the given name is found. Jump to
4580 // the |miss| label otherwise.
4581 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
4582 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4588 Register scratch2) {
4589 ASSERT(!elements.is(scratch1));
4590 ASSERT(!elements.is(scratch2));
4591 ASSERT(!name.is(scratch1));
4592 ASSERT(!name.is(scratch2));
4594 __ AssertName(name);
4596 // Compute the capacity mask.
4597 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
4598 __ SmiUntag(scratch1);
4599 __ sub(scratch1, scratch1, Operand(1));
4601 // Generate an unrolled loop that performs a few probes before
4602 // giving up. Measurements done on Gmail indicate that 2 probes
4603 // cover ~93% of loads from dictionaries.
4604 for (int i = 0; i < kInlinedProbes; i++) {
4605 // Compute the masked index: (hash + i + i * i) & mask.
4606 __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4608 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4609 // the hash in a separate instruction. The value hash + i + i * i is right
4610 // shifted in the following and instruction.
4611 ASSERT(NameDictionary::GetProbeOffset(i) <
4612 1 << (32 - Name::kHashFieldOffset));
4613 __ add(scratch2, scratch2, Operand(
4614 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4616 __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
4618 // Scale the index by multiplying by the element size.
4619 ASSERT(NameDictionary::kEntrySize == 3);
4620 // scratch2 = scratch2 * 3.
4621 __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
4623 // Check if the key is identical to the name.
4624 __ add(scratch2, elements, Operand(scratch2, LSL, 2));
4625 __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
4626 __ cmp(name, Operand(ip));
4630 const int spill_mask =
4631 (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
4632 r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
4633 ~(scratch1.bit() | scratch2.bit());
4635 __ stm(db_w, sp, spill_mask);
4637 ASSERT(!elements.is(r1));
4639 __ Move(r0, elements);
4641 __ Move(r0, elements);
4644 NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
4646 __ cmp(r0, Operand::Zero());
4647 __ mov(scratch2, Operand(r2));
4648 __ ldm(ia_w, sp, spill_mask);
4655 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4656 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4657 // we cannot call anything that could cause a GC from this stub.
4659 // result: NameDictionary to probe
4661 // dictionary: NameDictionary to probe.
4662 // index: will hold an index of entry if lookup is successful.
4663 // might alias with result_.
4665 // result_ is zero if lookup failed, non zero otherwise.
4667 Register result = r0;
4668 Register dictionary = r0;
4670 Register index = r2;
4673 Register undefined = r5;
4674 Register entry_key = r6;
4676 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4678 __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
4680 __ sub(mask, mask, Operand(1));
4682 __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4684 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4686 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4687 // Compute the masked index: (hash + i + i * i) & mask.
4688 // Capacity is smi 2^n.
4690 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4691 // the hash in a separate instruction. The value hash + i + i * i is right
4692 // shifted in the following and instruction.
4693 ASSERT(NameDictionary::GetProbeOffset(i) <
4694 1 << (32 - Name::kHashFieldOffset));
4695 __ add(index, hash, Operand(
4696 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4698 __ mov(index, Operand(hash));
4700 __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
4702 // Scale the index by multiplying by the entry size.
4703 ASSERT(NameDictionary::kEntrySize == 3);
4704 __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
4706 ASSERT_EQ(kSmiTagSize, 1);
4707 __ add(index, dictionary, Operand(index, LSL, 2));
4708 __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
4710 // Having undefined at this place means the name is not contained.
4711 __ cmp(entry_key, Operand(undefined));
4712 __ b(eq, ¬_in_dictionary);
4714 // Stop if found the property.
4715 __ cmp(entry_key, Operand(key));
4716 __ b(eq, &in_dictionary);
4718 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
4719 // Check if the entry name is not a unique name.
4720 __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4722 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4723 __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
4727 __ bind(&maybe_in_dictionary);
4728 // If we are doing negative lookup then probing failure should be
4729 // treated as a lookup success. For positive lookup probing failure
4730 // should be treated as lookup failure.
4731 if (mode_ == POSITIVE_LOOKUP) {
4732 __ mov(result, Operand::Zero());
4736 __ bind(&in_dictionary);
4737 __ mov(result, Operand(1));
4740 __ bind(¬_in_dictionary);
4741 __ mov(result, Operand::Zero());
4746 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4748 StoreBufferOverflowStub stub1(kDontSaveFPRegs);
4749 stub1.GetCode(isolate);
4750 // Hydrogen code stubs need stub2 at snapshot time.
4751 StoreBufferOverflowStub stub2(kSaveFPRegs);
4752 stub2.GetCode(isolate);
4756 bool CodeStub::CanUseFPRegisters() {
4757 return true; // VFP2 is a base requirement for V8
4761 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
4762 // the value has just been written into the object, now this stub makes sure
4763 // we keep the GC informed. The word in the object where the value has been
4764 // written is in the address register.
4765 void RecordWriteStub::Generate(MacroAssembler* masm) {
4766 Label skip_to_incremental_noncompacting;
4767 Label skip_to_incremental_compacting;
4769 // The first two instructions are generated with labels so as to get the
4770 // offset fixed up correctly by the bind(Label*) call. We patch it back and
4771 // forth between a compare instructions (a nop in this position) and the
4772 // real branch when we start and stop incremental heap marking.
4773 // See RecordWriteStub::Patch for details.
4775 // Block literal pool emission, as the position of these two instructions
4776 // is assumed by the patching code.
4777 Assembler::BlockConstPoolScope block_const_pool(masm);
4778 __ b(&skip_to_incremental_noncompacting);
4779 __ b(&skip_to_incremental_compacting);
4782 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4783 __ RememberedSetHelper(object_,
4787 MacroAssembler::kReturnAtEnd);
4791 __ bind(&skip_to_incremental_noncompacting);
4792 GenerateIncremental(masm, INCREMENTAL);
4794 __ bind(&skip_to_incremental_compacting);
4795 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4797 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4798 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4799 ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
4800 ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
4801 PatchBranchIntoNop(masm, 0);
4802 PatchBranchIntoNop(masm, Assembler::kInstrSize);
4806 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4809 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4810 Label dont_need_remembered_set;
4812 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
4813 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4815 &dont_need_remembered_set);
4817 __ CheckPageFlag(regs_.object(),
4819 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4821 &dont_need_remembered_set);
4823 // First notify the incremental marker if necessary, then update the
4825 CheckNeedsToInformIncrementalMarker(
4826 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4827 InformIncrementalMarker(masm, mode);
4828 regs_.Restore(masm);
4829 __ RememberedSetHelper(object_,
4833 MacroAssembler::kReturnAtEnd);
4835 __ bind(&dont_need_remembered_set);
4838 CheckNeedsToInformIncrementalMarker(
4839 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4840 InformIncrementalMarker(masm, mode);
4841 regs_.Restore(masm);
4846 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
4847 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
4848 int argument_count = 3;
4849 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4851 r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4852 ASSERT(!address.is(regs_.object()));
4853 ASSERT(!address.is(r0));
4854 __ Move(address, regs_.address());
4855 __ Move(r0, regs_.object());
4856 __ Move(r1, address);
4857 __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
4859 AllowExternalCallThatCantCauseGC scope(masm);
4860 if (mode == INCREMENTAL_COMPACTION) {
4862 ExternalReference::incremental_evacuation_record_write_function(
4866 ASSERT(mode == INCREMENTAL);
4868 ExternalReference::incremental_marking_record_write_function(
4872 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
4876 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4877 MacroAssembler* masm,
4878 OnNoNeedToInformIncrementalMarker on_no_need,
4881 Label need_incremental;
4882 Label need_incremental_pop_scratch;
4884 __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4885 __ ldr(regs_.scratch1(),
4886 MemOperand(regs_.scratch0(),
4887 MemoryChunk::kWriteBarrierCounterOffset));
4888 __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
4889 __ str(regs_.scratch1(),
4890 MemOperand(regs_.scratch0(),
4891 MemoryChunk::kWriteBarrierCounterOffset));
4892 __ b(mi, &need_incremental);
4894 // Let's look at the color of the object: If it is not black we don't have
4895 // to inform the incremental marker.
4896 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4898 regs_.Restore(masm);
4899 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4900 __ RememberedSetHelper(object_,
4904 MacroAssembler::kReturnAtEnd);
4911 // Get the value from the slot.
4912 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
4914 if (mode == INCREMENTAL_COMPACTION) {
4915 Label ensure_not_white;
4917 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4918 regs_.scratch1(), // Scratch.
4919 MemoryChunk::kEvacuationCandidateMask,
4923 __ CheckPageFlag(regs_.object(),
4924 regs_.scratch1(), // Scratch.
4925 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4929 __ bind(&ensure_not_white);
4932 // We need extra registers for this, so we push the object and the address
4933 // register temporarily.
4934 __ Push(regs_.object(), regs_.address());
4935 __ EnsureNotWhite(regs_.scratch0(), // The value.
4936 regs_.scratch1(), // Scratch.
4937 regs_.object(), // Scratch.
4938 regs_.address(), // Scratch.
4939 &need_incremental_pop_scratch);
4940 __ Pop(regs_.object(), regs_.address());
4942 regs_.Restore(masm);
4943 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4944 __ RememberedSetHelper(object_,
4948 MacroAssembler::kReturnAtEnd);
4953 __ bind(&need_incremental_pop_scratch);
4954 __ Pop(regs_.object(), regs_.address());
4956 __ bind(&need_incremental);
4958 // Fall through when we need to inform the incremental marker.
4962 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4963 // ----------- S t a t e -------------
4964 // -- r0 : element value to store
4965 // -- r3 : element index as smi
4966 // -- sp[0] : array literal index in function as smi
4967 // -- sp[4] : array literal
4968 // clobbers r1, r2, r4
4969 // -----------------------------------
4972 Label double_elements;
4974 Label slow_elements;
4975 Label fast_elements;
4977 // Get array literal index, array literal and its map.
4978 __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
4979 __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
4980 __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
4982 __ CheckFastElements(r2, r5, &double_elements);
4983 // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
4984 __ JumpIfSmi(r0, &smi_element);
4985 __ CheckFastSmiElements(r2, r5, &fast_elements);
4987 // Store into the array literal requires a elements transition. Call into
4989 __ bind(&slow_elements);
4991 __ Push(r1, r3, r0);
4992 __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4993 __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
4995 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4997 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4998 __ bind(&fast_elements);
4999 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
5000 __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
5001 __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5002 __ str(r0, MemOperand(r6, 0));
5003 // Update the write barrier for the array store.
5004 __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
5005 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
5008 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
5009 // and value is Smi.
5010 __ bind(&smi_element);
5011 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
5012 __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
5013 __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
5016 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
5017 __ bind(&double_elements);
5018 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
5019 __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
5024 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
5025 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
5026 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
5027 int parameter_count_offset =
5028 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
5029 __ ldr(r1, MemOperand(fp, parameter_count_offset));
5030 if (function_mode_ == JS_FUNCTION_STUB_MODE) {
5031 __ add(r1, r1, Operand(1));
5033 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
5034 __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
5040 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
5041 if (masm->isolate()->function_entry_hook() != NULL) {
5042 PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
5043 ProfileEntryHookStub stub;
5051 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
5052 // The entry hook is a "push lr" instruction, followed by a call.
5053 const int32_t kReturnAddressDistanceFromFunctionStart =
5054 3 * Assembler::kInstrSize;
5056 // This should contain all kCallerSaved registers.
5057 const RegList kSavedRegs =
5064 // We also save lr, so the count here is one higher than the mask indicates.
5065 const int32_t kNumSavedRegs = 7;
5067 ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved);
5069 // Save all caller-save registers as this may be called from anywhere.
5070 __ stm(db_w, sp, kSavedRegs | lr.bit());
5072 // Compute the function's address for the first argument.
5073 __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
5075 // The caller's return address is above the saved temporaries.
5076 // Grab that for the second argument to the hook.
5077 __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
5079 // Align the stack if necessary.
5080 int frame_alignment = masm->ActivationFrameAlignment();
5081 if (frame_alignment > kPointerSize) {
5083 ASSERT(IsPowerOf2(frame_alignment));
5084 __ and_(sp, sp, Operand(-frame_alignment));
5087 #if V8_HOST_ARCH_ARM
5088 int32_t entry_hook =
5089 reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
5090 __ mov(ip, Operand(entry_hook));
5092 // Under the simulator we need to indirect the entry hook through a
5093 // trampoline function at a known address.
5094 // It additionally takes an isolate as a third parameter
5095 __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
5097 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
5098 __ mov(ip, Operand(ExternalReference(&dispatcher,
5099 ExternalReference::BUILTIN_CALL,
5104 // Restore the stack pointer if needed.
5105 if (frame_alignment > kPointerSize) {
5109 // Also pop pc to get Ret(0).
5110 __ ldm(ia_w, sp, kSavedRegs | pc.bit());
5115 static void CreateArrayDispatch(MacroAssembler* masm,
5116 AllocationSiteOverrideMode mode) {
5117 if (mode == DISABLE_ALLOCATION_SITES) {
5118 T stub(GetInitialFastElementsKind(), mode);
5119 __ TailCallStub(&stub);
5120 } else if (mode == DONT_OVERRIDE) {
5121 int last_index = GetSequenceIndexFromFastElementsKind(
5122 TERMINAL_FAST_ELEMENTS_KIND);
5123 for (int i = 0; i <= last_index; ++i) {
5124 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5125 __ cmp(r3, Operand(kind));
5127 __ TailCallStub(&stub, eq);
5130 // If we reached this point there is a problem.
5131 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5138 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
5139 AllocationSiteOverrideMode mode) {
5140 // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
5141 // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
5142 // r0 - number of arguments
5143 // r1 - constructor?
5144 // sp[0] - last argument
5145 Label normal_sequence;
5146 if (mode == DONT_OVERRIDE) {
5147 ASSERT(FAST_SMI_ELEMENTS == 0);
5148 ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
5149 ASSERT(FAST_ELEMENTS == 2);
5150 ASSERT(FAST_HOLEY_ELEMENTS == 3);
5151 ASSERT(FAST_DOUBLE_ELEMENTS == 4);
5152 ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
5154 // is the low bit set? If so, we are holey and that is good.
5155 __ tst(r3, Operand(1));
5156 __ b(ne, &normal_sequence);
5159 // look at the first argument
5160 __ ldr(r5, MemOperand(sp, 0));
5161 __ cmp(r5, Operand::Zero());
5162 __ b(eq, &normal_sequence);
5164 if (mode == DISABLE_ALLOCATION_SITES) {
5165 ElementsKind initial = GetInitialFastElementsKind();
5166 ElementsKind holey_initial = GetHoleyElementsKind(initial);
5168 ArraySingleArgumentConstructorStub stub_holey(holey_initial,
5169 DISABLE_ALLOCATION_SITES);
5170 __ TailCallStub(&stub_holey);
5172 __ bind(&normal_sequence);
5173 ArraySingleArgumentConstructorStub stub(initial,
5174 DISABLE_ALLOCATION_SITES);
5175 __ TailCallStub(&stub);
5176 } else if (mode == DONT_OVERRIDE) {
5177 // We are going to create a holey array, but our kind is non-holey.
5178 // Fix kind and retry (only if we have an allocation site in the cell).
5179 __ add(r3, r3, Operand(1));
5181 if (FLAG_debug_code) {
5182 __ ldr(r5, FieldMemOperand(r2, 0));
5183 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
5184 __ Assert(eq, kExpectedAllocationSite);
5187 // Save the resulting elements kind in type info. We can't just store r3
5188 // in the AllocationSite::transition_info field because elements kind is
5189 // restricted to a portion of the field...upper bits need to be left alone.
5190 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5191 __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
5192 __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
5193 __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
5195 __ bind(&normal_sequence);
5196 int last_index = GetSequenceIndexFromFastElementsKind(
5197 TERMINAL_FAST_ELEMENTS_KIND);
5198 for (int i = 0; i <= last_index; ++i) {
5199 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5200 __ cmp(r3, Operand(kind));
5201 ArraySingleArgumentConstructorStub stub(kind);
5202 __ TailCallStub(&stub, eq);
5205 // If we reached this point there is a problem.
5206 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5214 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
5215 int to_index = GetSequenceIndexFromFastElementsKind(
5216 TERMINAL_FAST_ELEMENTS_KIND);
5217 for (int i = 0; i <= to_index; ++i) {
5218 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5220 stub.GetCode(isolate);
5221 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
5222 T stub1(kind, DISABLE_ALLOCATION_SITES);
5223 stub1.GetCode(isolate);
5229 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
5230 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5232 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5234 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5239 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
5241 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
5242 for (int i = 0; i < 2; i++) {
5243 // For internal arrays we only need a few things
5244 InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
5245 stubh1.GetCode(isolate);
5246 InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
5247 stubh2.GetCode(isolate);
5248 InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
5249 stubh3.GetCode(isolate);
5254 void ArrayConstructorStub::GenerateDispatchToArrayStub(
5255 MacroAssembler* masm,
5256 AllocationSiteOverrideMode mode) {
5257 if (argument_count_ == ANY) {
5258 Label not_zero_case, not_one_case;
5260 __ b(ne, ¬_zero_case);
5261 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5263 __ bind(¬_zero_case);
5264 __ cmp(r0, Operand(1));
5265 __ b(gt, ¬_one_case);
5266 CreateArrayDispatchOneArgument(masm, mode);
5268 __ bind(¬_one_case);
5269 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5270 } else if (argument_count_ == NONE) {
5271 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5272 } else if (argument_count_ == ONE) {
5273 CreateArrayDispatchOneArgument(masm, mode);
5274 } else if (argument_count_ == MORE_THAN_ONE) {
5275 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5282 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5283 // ----------- S t a t e -------------
5284 // -- r0 : argc (only if argument_count_ == ANY)
5285 // -- r1 : constructor
5286 // -- r2 : type info cell
5287 // -- sp[0] : return address
5288 // -- sp[4] : last argument
5289 // -----------------------------------
5290 if (FLAG_debug_code) {
5291 // The array construct code is only set for the global and natives
5292 // builtin Array functions which always have maps.
5294 // Initial map for the builtin Array function should be a map.
5295 __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
5296 // Will both indicate a NULL and a Smi.
5297 __ tst(r3, Operand(kSmiTagMask));
5298 __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
5299 __ CompareObjectType(r3, r3, r4, MAP_TYPE);
5300 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
5302 // We should either have undefined in ebx or a valid cell
5304 Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
5305 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
5306 __ b(eq, &okay_here);
5307 __ ldr(r3, FieldMemOperand(r2, 0));
5308 __ cmp(r3, Operand(cell_map));
5309 __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
5310 __ bind(&okay_here);
5314 // Get the elements kind and case on that.
5315 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
5317 __ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
5319 // If the type cell is undefined, or contains anything other than an
5320 // AllocationSite, call an array constructor that doesn't use AllocationSites.
5321 __ ldr(r4, FieldMemOperand(r2, 0));
5322 __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
5325 __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
5327 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5328 __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
5329 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5332 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5336 void InternalArrayConstructorStub::GenerateCase(
5337 MacroAssembler* masm, ElementsKind kind) {
5338 __ cmp(r0, Operand(1));
5340 InternalArrayNoArgumentConstructorStub stub0(kind);
5341 __ TailCallStub(&stub0, lo);
5343 InternalArrayNArgumentsConstructorStub stubN(kind);
5344 __ TailCallStub(&stubN, hi);
5346 if (IsFastPackedElementsKind(kind)) {
5347 // We might need to create a holey array
5348 // look at the first argument
5349 __ ldr(r3, MemOperand(sp, 0));
5350 __ cmp(r3, Operand::Zero());
5352 InternalArraySingleArgumentConstructorStub
5353 stub1_holey(GetHoleyElementsKind(kind));
5354 __ TailCallStub(&stub1_holey, ne);
5357 InternalArraySingleArgumentConstructorStub stub1(kind);
5358 __ TailCallStub(&stub1);
5362 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5363 // ----------- S t a t e -------------
5365 // -- r1 : constructor
5366 // -- sp[0] : return address
5367 // -- sp[4] : last argument
5368 // -----------------------------------
5370 if (FLAG_debug_code) {
5371 // The array construct code is only set for the global and natives
5372 // builtin Array functions which always have maps.
5374 // Initial map for the builtin Array function should be a map.
5375 __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
5376 // Will both indicate a NULL and a Smi.
5377 __ tst(r3, Operand(kSmiTagMask));
5378 __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
5379 __ CompareObjectType(r3, r3, r4, MAP_TYPE);
5380 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
5383 // Figure out the right elements kind
5384 __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
5385 // Load the map's "bit field 2" into |result|. We only need the first byte,
5386 // but the following bit field extraction takes care of that anyway.
5387 __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
5388 // Retrieve elements_kind from bit field 2.
5389 __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);
5391 if (FLAG_debug_code) {
5393 __ cmp(r3, Operand(FAST_ELEMENTS));
5395 __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
5397 kInvalidElementsKindForInternalArrayOrInternalPackedArray);
5401 Label fast_elements_case;
5402 __ cmp(r3, Operand(FAST_ELEMENTS));
5403 __ b(eq, &fast_elements_case);
5404 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
5406 __ bind(&fast_elements_case);
5407 GenerateCase(masm, FAST_ELEMENTS);
5411 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5412 // ----------- S t a t e -------------
5414 // -- r4 : call_data
5416 // -- r1 : api_function_address
5419 // -- sp[0] : last argument
5421 // -- sp[(argc - 1)* 4] : first argument
5422 // -- sp[argc * 4] : receiver
5423 // -----------------------------------
5425 Register callee = r0;
5426 Register call_data = r4;
5427 Register holder = r2;
5428 Register api_function_address = r1;
5429 Register context = cp;
5431 int argc = ArgumentBits::decode(bit_field_);
5432 bool restore_context = RestoreContextBits::decode(bit_field_);
5433 bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
5435 typedef FunctionCallbackArguments FCA;
5437 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5438 STATIC_ASSERT(FCA::kCalleeIndex == 5);
5439 STATIC_ASSERT(FCA::kDataIndex == 4);
5440 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5441 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5442 STATIC_ASSERT(FCA::kIsolateIndex == 1);
5443 STATIC_ASSERT(FCA::kHolderIndex == 0);
5444 STATIC_ASSERT(FCA::kArgsLength == 7);
5446 Isolate* isolate = masm->isolate();
5450 // load context from callee
5451 __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5459 Register scratch = call_data;
5460 if (!call_data_undefined) {
5461 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5465 // return value default
5469 Operand(ExternalReference::isolate_address(isolate)));
5474 // Prepare arguments.
5475 __ mov(scratch, sp);
5477 // Allocate the v8::Arguments structure in the arguments' space since
5478 // it's not controlled by GC.
5479 const int kApiStackSpace = 4;
5481 FrameScope frame_scope(masm, StackFrame::MANUAL);
5482 __ EnterExitFrame(false, kApiStackSpace);
5484 ASSERT(!api_function_address.is(r0) && !scratch.is(r0));
5485 // r0 = FunctionCallbackInfo&
5486 // Arguments is after the return address.
5487 __ add(r0, sp, Operand(1 * kPointerSize));
5488 // FunctionCallbackInfo::implicit_args_
5489 __ str(scratch, MemOperand(r0, 0 * kPointerSize));
5490 // FunctionCallbackInfo::values_
5491 __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
5492 __ str(ip, MemOperand(r0, 1 * kPointerSize));
5493 // FunctionCallbackInfo::length_ = argc
5494 __ mov(ip, Operand(argc));
5495 __ str(ip, MemOperand(r0, 2 * kPointerSize));
5496 // FunctionCallbackInfo::is_construct_call = 0
5497 __ mov(ip, Operand::Zero());
5498 __ str(ip, MemOperand(r0, 3 * kPointerSize));
5500 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
5501 Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
5502 ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
5503 ApiFunction thunk_fun(thunk_address);
5504 ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
5507 AllowExternalCallThatCantCauseGC scope(masm);
5508 MemOperand context_restore_operand(
5509 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5510 MemOperand return_value_operand(fp,
5511 (2 + FCA::kReturnValueOffset) * kPointerSize);
5513 __ CallApiFunctionAndReturn(api_function_address,
5516 return_value_operand,
5518 &context_restore_operand : NULL);
5522 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5523 // ----------- S t a t e -------------
5525 // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
5527 // -- r2 : api_function_address
5528 // -----------------------------------
5530 Register api_function_address = r2;
5532 __ mov(r0, sp); // r0 = Handle<Name>
5533 __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
5535 const int kApiStackSpace = 1;
5536 FrameScope frame_scope(masm, StackFrame::MANUAL);
5537 __ EnterExitFrame(false, kApiStackSpace);
5539 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5540 // r1 (internal::Object** args_) as the data.
5541 __ str(r1, MemOperand(sp, 1 * kPointerSize));
5542 __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
5544 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5546 Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
5547 ExternalReference::Type thunk_type =
5548 ExternalReference::PROFILING_GETTER_CALL;
5549 ApiFunction thunk_fun(thunk_address);
5550 ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
5552 __ CallApiFunctionAndReturn(api_function_address,
5555 MemOperand(fp, 6 * kPointerSize),
5562 } } // namespace v8::internal
5564 #endif // V8_TARGET_ARCH_ARM