1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "src/bootstrapper.h"
10 #include "src/code-stubs.h"
11 #include "src/codegen.h"
12 #include "src/ic/ic-compiler.h"
13 #include "src/isolate.h"
14 #include "src/jsregexp.h"
15 #include "src/regexp-macro-assembler.h"
16 #include "src/runtime.h"
22 void FastNewClosureStub::InitializeInterfaceDescriptor(
23 CodeStubInterfaceDescriptor* descriptor) {
24 Register registers[] = { cp, r2 };
25 descriptor->Initialize(
26 MajorKey(), ARRAY_SIZE(registers), registers,
27 Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
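// In these descriptors the first register is always cp (the context
// register), followed by the stub's parameters in order; the optional address
// argument is the runtime entry the stub falls back to (its deoptimization or
// miss handler).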
31 void FastNewContextStub::InitializeInterfaceDescriptor(
32 CodeStubInterfaceDescriptor* descriptor) {
33 Register registers[] = { cp, r1 };
34 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
38 void ToNumberStub::InitializeInterfaceDescriptor(
39 CodeStubInterfaceDescriptor* descriptor) {
40 Register registers[] = { cp, r0 };
41 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
45 void NumberToStringStub::InitializeInterfaceDescriptor(
46 CodeStubInterfaceDescriptor* descriptor) {
47 Register registers[] = { cp, r0 };
48 descriptor->Initialize(
49 MajorKey(), ARRAY_SIZE(registers), registers,
50 Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
54 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
55 CodeStubInterfaceDescriptor* descriptor) {
56 Register registers[] = { cp, r3, r2, r1 };
57 Representation representations[] = {
58 Representation::Tagged(),
59 Representation::Tagged(),
60 Representation::Smi(),
61 Representation::Tagged() };
62 descriptor->Initialize(
63 MajorKey(), ARRAY_SIZE(registers), registers,
64 Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
69 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
70 CodeStubInterfaceDescriptor* descriptor) {
71 Register registers[] = { cp, r3, r2, r1, r0 };
72 descriptor->Initialize(
73 MajorKey(), ARRAY_SIZE(registers), registers,
74 Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
78 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
79 CodeStubInterfaceDescriptor* descriptor) {
80 Register registers[] = { cp, r2, r3 };
81 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
85 void CallFunctionStub::InitializeInterfaceDescriptor(
86 CodeStubInterfaceDescriptor* descriptor) {
87 // r1 function the function to call
88 Register registers[] = {cp, r1};
89 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
93 void CallConstructStub::InitializeInterfaceDescriptor(
94 CodeStubInterfaceDescriptor* descriptor) {
95 // r0 : number of arguments
96 // r1 : the function to call
97 // r2 : feedback vector
98 // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
100 // TODO(turbofan): So far we don't gather type feedback and hence skip the
101 // slot parameter, but ArrayConstructStub needs the vector to be undefined.
102 Register registers[] = {cp, r0, r1, r2};
103 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
107 void RegExpConstructResultStub::InitializeInterfaceDescriptor(
108 CodeStubInterfaceDescriptor* descriptor) {
109 Register registers[] = { cp, r2, r1, r0 };
110 descriptor->Initialize(
111 MajorKey(), ARRAY_SIZE(registers), registers,
112 Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
116 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
117 CodeStubInterfaceDescriptor* descriptor) {
118 Register registers[] = { cp, r0, r1 };
119 Address entry =
120 Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
121 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
122 FUNCTION_ADDR(entry));
126 void CompareNilICStub::InitializeInterfaceDescriptor(
127 CodeStubInterfaceDescriptor* descriptor) {
128 Register registers[] = { cp, r0 };
129 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
130 FUNCTION_ADDR(CompareNilIC_Miss));
131 descriptor->SetMissHandler(
132 ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
136 const Register InterfaceDescriptor::ContextRegister() { return cp; }
139 static void InitializeArrayConstructorDescriptor(
140 CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
141 int constant_stack_parameter_count) {
144 // r0 -- number of arguments
146 // r2 -- allocation site with elements kind
147 Address deopt_handler = Runtime::FunctionForId(
148 Runtime::kArrayConstructor)->entry;
150 if (constant_stack_parameter_count == 0) {
151 Register registers[] = { cp, r1, r2 };
152 descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
153 deopt_handler, NULL, constant_stack_parameter_count,
154 JS_FUNCTION_STUB_MODE);
156 // stack param count needs (constructor pointer, and single argument)
157 Register registers[] = { cp, r1, r2, r0 };
158 Representation representations[] = {
159 Representation::Tagged(),
160 Representation::Tagged(),
161 Representation::Tagged(),
162 Representation::Integer32() };
163 descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0,
164 deopt_handler, representations,
165 constant_stack_parameter_count,
166 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
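// When the argument count is not a compile-time constant, r0 is additionally
// passed to Initialize as the dynamic stack parameter count register (note the
// PASS_ARGUMENTS mode), on top of being listed as a register parameter above.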
171 static void InitializeInternalArrayConstructorDescriptor(
172 CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
173 int constant_stack_parameter_count) {
176 // r0 -- number of arguments
177 // r1 -- constructor function
178 Address deopt_handler = Runtime::FunctionForId(
179 Runtime::kInternalArrayConstructor)->entry;
181 if (constant_stack_parameter_count == 0) {
182 Register registers[] = { cp, r1 };
183 descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
184 deopt_handler, NULL, constant_stack_parameter_count,
185 JS_FUNCTION_STUB_MODE);
187 // stack param count needs (constructor pointer, and single argument)
188 Register registers[] = { cp, r1, r0 };
189 Representation representations[] = {
190 Representation::Tagged(),
191 Representation::Tagged(),
192 Representation::Integer32() };
193 descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0,
194 deopt_handler, representations,
195 constant_stack_parameter_count,
196 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
201 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
202 CodeStubInterfaceDescriptor* descriptor) {
203 InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
207 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
208 CodeStubInterfaceDescriptor* descriptor) {
209 InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
213 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
214 CodeStubInterfaceDescriptor* descriptor) {
215 InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
219 void ToBooleanStub::InitializeInterfaceDescriptor(
220 CodeStubInterfaceDescriptor* descriptor) {
221 Register registers[] = { cp, r0 };
222 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
223 FUNCTION_ADDR(ToBooleanIC_Miss));
224 descriptor->SetMissHandler(
225 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
229 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
230 CodeStubInterfaceDescriptor* descriptor) {
231 InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
235 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
236 CodeStubInterfaceDescriptor* descriptor) {
237 InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
241 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
242 CodeStubInterfaceDescriptor* descriptor) {
243 InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
247 void BinaryOpICStub::InitializeInterfaceDescriptor(
248 CodeStubInterfaceDescriptor* descriptor) {
249 Register registers[] = { cp, r1, r0 };
250 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
251 FUNCTION_ADDR(BinaryOpIC_Miss));
252 descriptor->SetMissHandler(
253 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
257 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
258 CodeStubInterfaceDescriptor* descriptor) {
259 Register registers[] = { cp, r2, r1, r0 };
260 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
261 FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
265 void StringAddStub::InitializeInterfaceDescriptor(
266 CodeStubInterfaceDescriptor* descriptor) {
267 Register registers[] = { cp, r1, r0 };
268 descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
269 Runtime::FunctionForId(Runtime::kStringAdd)->entry);
273 void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
274 static PlatformInterfaceDescriptor default_descriptor =
275 PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
277 static PlatformInterfaceDescriptor noInlineDescriptor =
278 PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
281 CallInterfaceDescriptor* descriptor =
282 isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
283 Register registers[] = { cp, // context
285 r0, // actual number of arguments
286 r2, // expected number of arguments
288 Representation representations[] = {
289 Representation::Tagged(), // context
290 Representation::Tagged(), // JSFunction
291 Representation::Integer32(), // actual number of arguments
292 Representation::Integer32(), // expected number of arguments
294 descriptor->Initialize(ARRAY_SIZE(registers), registers,
295 representations, &default_descriptor);
298 CallInterfaceDescriptor* descriptor =
299 isolate->call_descriptor(Isolate::KeyedCall);
300 Register registers[] = { cp, // context
303 Representation representations[] = {
304 Representation::Tagged(), // context
305 Representation::Tagged(), // key
307 descriptor->Initialize(ARRAY_SIZE(registers), registers,
308 representations, &noInlineDescriptor);
311 CallInterfaceDescriptor* descriptor =
312 isolate->call_descriptor(Isolate::NamedCall);
313 Register registers[] = { cp, // context
316 Representation representations[] = {
317 Representation::Tagged(), // context
318 Representation::Tagged(), // name
320 descriptor->Initialize(ARRAY_SIZE(registers), registers,
321 representations, &noInlineDescriptor);
324 CallInterfaceDescriptor* descriptor =
325 isolate->call_descriptor(Isolate::CallHandler);
326 Register registers[] = { cp, // context
329 Representation representations[] = {
330 Representation::Tagged(), // context
331 Representation::Tagged(), // receiver
333 descriptor->Initialize(ARRAY_SIZE(registers), registers,
334 representations, &default_descriptor);
337 CallInterfaceDescriptor* descriptor =
338 isolate->call_descriptor(Isolate::ApiFunctionCall);
339 Register registers[] = { cp, // context
343 r1, // api_function_address
345 Representation representations[] = {
346 Representation::Tagged(), // context
347 Representation::Tagged(), // callee
348 Representation::Tagged(), // call_data
349 Representation::Tagged(), // holder
350 Representation::External(), // api_function_address
352 descriptor->Initialize(ARRAY_SIZE(registers), registers,
353 representations, &default_descriptor);
358 #define __ ACCESS_MASM(masm)
361 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
364 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
370 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
375 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
376 // Update the static counter each time a new code stub is generated.
377 isolate()->counters()->code_stubs()->Increment();
379 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
380 int param_count = descriptor->GetEnvironmentParameterCount();
382 // Call the runtime system in a fresh internal frame.
383 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
384 DCHECK(param_count == 0 ||
385 r0.is(descriptor->GetEnvironmentParameterRegister(
388 for (int i = 0; i < param_count; ++i) {
389 __ push(descriptor->GetEnvironmentParameterRegister(i));
391 ExternalReference miss = descriptor->miss_handler();
392 __ CallExternalReference(miss, param_count);
399 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
400 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
401 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
402 // scratch register. Destroys the source register. No GC occurs during this
403 // stub so you don't have to set up the frame.
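// For illustration of the encoding produced: 1.0 is stored as sign 0, biased
// exponent 1023 (0x3ff) and an all-zero fraction, i.e. high word 0x3ff00000
// and low word 0x00000000, while -2.0 has high word 0xc0000000 and low word
// 0x00000000.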
404 class ConvertToDoubleStub : public PlatformCodeStub {
406 ConvertToDoubleStub(Isolate* isolate,
407 Register result_reg_1,
408 Register result_reg_2,
409 Register source_reg,
410 Register scratch_reg)
411 : PlatformCodeStub(isolate),
412 result1_(result_reg_1),
413 result2_(result_reg_2),
414 source_(source_reg),
415 zeros_(scratch_reg) { }
423 // Minor key encoding in 16 bits.
424 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
425 class OpBits: public BitField<Token::Value, 2, 14> {};
427 Major MajorKey() const { return ConvertToDouble; }
428 int MinorKey() const {
429 // Encode the parameters in a unique 16 bit value.
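// For illustration (hypothetical register choice): result1_ = r2,
// result2_ = r3, source_ = r1 and zeros_ = r4 would encode as
// 2 + (3 << 4) + (1 << 8) + (4 << 12) = 0x4132.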
430 return result1_.code() +
431 (result2_.code() << 4) +
432 (source_.code() << 8) +
433 (zeros_.code() << 12);
436 void Generate(MacroAssembler* masm);
440 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
441 Register exponent = result1_;
442 Register mantissa = result2_;
445 __ SmiUntag(source_);
446 // Move sign bit from source to destination. This works because the sign bit
447 // in the exponent word of the double has the same position and polarity as
448 // the 2's complement sign bit in a Smi.
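// For illustration: a negative input such as -4 (0xfffffffc once untagged)
// leaves 0x80000000 (exactly the double's sign bit) in exponent after the
// and_ below, and SetCC lets the conditional rsb run only in that case (ne).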
449 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
450 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
451 // Subtract from 0 if source was negative.
452 __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);
454 // We have -1, 0 or 1, which we treat specially. Register source_ contains
455 // absolute value: it is either equal to 1 (special case of -1 and 1),
456 // greater than 1 (not a special case) or less than 1 (special case of 0).
457 __ cmp(source_, Operand(1));
458 __ b(gt, &not_special);
460 // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
461 const uint32_t exponent_word_for_1 =
462 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
463 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
464 // 1, 0 and -1 all have 0 for the second word.
465 __ mov(mantissa, Operand::Zero());
468 __ bind(&not_special);
469 __ clz(zeros_, source_);
470 // Compute exponent and or it into the exponent register.
471 // We use mantissa as a scratch register here. Use a fudge factor to
472 // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
473 // that fit in the ARM's constant field.
474 int fudge = 0x400;
475 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
476 __ add(mantissa, mantissa, Operand(fudge));
477 __ orr(exponent,
478 exponent,
479 Operand(mantissa, LSL, HeapNumber::kExponentShift));
480 // Shift up the source chopping the top bit off.
481 __ add(zeros_, zeros_, Operand(1));
482 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
483 __ mov(source_, Operand(source_, LSL, zeros_));
484 // Compute lower part of fraction (last 12 bits).
485 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
486 // And the top (top 20 bits).
487 __ orr(exponent,
488 exponent,
489 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
494 void DoubleToIStub::Generate(MacroAssembler* masm) {
495 Label out_of_range, only_low, negate, done;
496 Register input_reg = source();
497 Register result_reg = destination();
498 DCHECK(is_truncating());
500 int double_offset = offset();
501 // Account for saved regs if input is sp.
502 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
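// (The 3 * kPointerSize matches the three registers pushed just below:
// scratch_high, scratch_low and scratch.)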
504 Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
505 Register scratch_low =
506 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
507 Register scratch_high =
508 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
509 LowDwVfpRegister double_scratch = kScratchDoubleReg;
511 __ Push(scratch_high, scratch_low, scratch);
513 if (!skip_fastpath()) {
514 // Load double input.
515 __ vldr(double_scratch, MemOperand(input_reg, double_offset));
516 __ vmov(scratch_low, scratch_high, double_scratch);
518 // Do fast-path convert from double to int.
519 __ vcvt_s32_f64(double_scratch.low(), double_scratch);
520 __ vmov(result_reg, double_scratch.low());
522 // If result is not saturated (0x7fffffff or 0x80000000), we are done.
523 __ sub(scratch, result_reg, Operand(1));
524 __ cmp(scratch, Operand(0x7ffffffe));
527 // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
528 // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
529 if (double_offset == 0) {
530 __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
532 __ ldr(scratch_low, MemOperand(input_reg, double_offset));
533 __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
537 __ Ubfx(scratch, scratch_high,
538 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
539 // Load scratch with exponent - 1. This is faster than loading
540 // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
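// (An ARM data-processing immediate is an 8-bit value rotated right by an
// even amount; 1024 = 0x400 fits that encoding, while 1023 = 0x3ff does not.)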
541 STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
542 __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
543 // If the exponent is greater than or equal to 84, the 32 least significant
544 // bits are 0s (all 53 significand bits then sit at bit 32 or above), so the
545 // truncated result is 0.
546 // Compare exponent with 84 (compare exponent - 1 with 83).
547 __ cmp(scratch, Operand(83));
548 __ b(ge, &out_of_range);
550 // If we reach this code, 31 <= exponent <= 83.
551 // So, we don't have to handle cases where 0 <= exponent <= 20 for
552 // which we would need to shift right the high part of the mantissa.
553 // Scratch contains exponent - 1.
554 // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
555 __ rsb(scratch, scratch, Operand(51), SetCC);
557 // 21 <= exponent <= 51, shift scratch_low and scratch_high
558 // to generate the result.
559 __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
560 // Scratch contains: 52 - exponent.
561 // We need: exponent - 20.
562 // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
563 __ rsb(scratch, scratch, Operand(32));
564 __ Ubfx(result_reg, scratch_high,
565 0, HeapNumber::kMantissaBitsInTopWord);
566 // Set the implicit 1 before the mantissa part in scratch_high.
567 __ orr(result_reg, result_reg,
568 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
569 __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
572 __ bind(&out_of_range);
573 __ mov(result_reg, Operand::Zero());
577 // 52 <= exponent <= 83, shift only scratch_low.
578 // On entry, scratch contains: 52 - exponent.
579 __ rsb(scratch, scratch, Operand::Zero());
580 __ mov(result_reg, Operand(scratch_low, LSL, scratch));
583 // If input was positive, scratch_high ASR 31 equals 0 and
584 // scratch_high LSR 31 equals zero.
585 // New result = (result eor 0) + 0 = result.
586 // If the input was negative, we have to negate the result.
587 // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
588 // New result = (result eor 0xffffffff) + 1 = 0 - result.
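// For illustration: a result of 5 with a negative input becomes
// (5 eor 0xffffffff) + 1 = 0xfffffffa + 1 = 0xfffffffb, which is -5.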
589 __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
590 __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
594 __ Pop(scratch_high, scratch_low, scratch);
599 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
601 WriteInt32ToHeapNumberStub stub1(isolate, r1, r0, r2);
602 WriteInt32ToHeapNumberStub stub2(isolate, r2, r0, r3);
608 // See comment for class.
609 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
610 Label max_negative_int;
611 // the_int_ has the answer which is a signed int32 but not a Smi.
612 // We test for the special value that has a different exponent. This test
613 // has the neat side effect of setting the flags according to the sign.
614 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
615 __ cmp(the_int_, Operand(0x80000000u));
616 __ b(eq, &max_negative_int);
617 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
618 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
619 uint32_t non_smi_exponent =
620 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
621 __ mov(scratch_, Operand(non_smi_exponent));
622 // Set the sign bit in scratch_ if the value was negative.
623 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
624 // Subtract from 0 if the value was negative.
625 __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
626 // We should be masking the implicit first digit of the mantissa away here,
627 // but it just ends up combining harmlessly with the last digit of the
628 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
629 // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
630 DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
631 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
632 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
633 __ str(scratch_, FieldMemOperand(the_heap_number_,
634 HeapNumber::kExponentOffset));
635 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
636 __ str(scratch_, FieldMemOperand(the_heap_number_,
637 HeapNumber::kMantissaOffset));
640 __ bind(&max_negative_int);
641 // The max negative int32 is stored as a positive number in the mantissa of
642 // a double because it uses a sign bit instead of using two's complement.
643 // The actual mantissa bits stored are all 0 because the implicit most
644 // significant 1 bit is not stored.
645 non_smi_exponent += 1 << HeapNumber::kExponentShift;
646 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
647 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
648 __ mov(ip, Operand::Zero());
649 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
654 // Handle the case where the lhs and rhs are the same object.
655 // Equality is almost reflexive (everything but NaN), so this is a test
656 // for "identity and not NaN".
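// (Per IEEE 754, NaN compares unequal to every value including itself, so
// x == x is false exactly when x is NaN.)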
657 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
661 Label heap_number, return_equal;
663 __ b(ne, &not_identical);
665 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
666 // so we do the second best thing - test it ourselves.
667 // They are both equal and they are not both Smis so both of them are not
668 // Smis. If it's not a heap number, then return equal.
669 if (cond == lt || cond == gt) {
670 __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
673 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
674 __ b(eq, &heap_number);
675 // Comparing JS objects with <=, >= is complicated.
677 __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
679 // Normally here we fall through to return_equal, but undefined is
680 // special: (undefined == undefined) == true, but
681 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
682 if (cond == le || cond == ge) {
683 __ cmp(r4, Operand(ODDBALL_TYPE));
684 __ b(ne, &return_equal);
685 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
687 __ b(ne, &return_equal);
689 // undefined <= undefined should fail.
690 __ mov(r0, Operand(GREATER));
692 // undefined >= undefined should fail.
693 __ mov(r0, Operand(LESS));
700 __ bind(&return_equal);
702 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
703 } else if (cond == gt) {
704 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
706 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
710 // For less and greater we don't have to check for NaN since the result of
711 // x < x is false regardless. For the others here is some code to check
712 // for NaN.
713 if (cond != lt && cond != gt) {
714 __ bind(&heap_number);
715 // It is a heap number, so return non-equal if it's NaN and equal if it's
716 // not NaN.
718 // The representation of NaN values has all exponent bits (52..62) set,
719 // and not all mantissa bits (0..51) clear.
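// For illustration: +Infinity has the top word 0x7ff00000 and a zero
// mantissa, while the canonical quiet NaN has the top word 0x7ff80000.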
720 // Read top bits of double representation (second word of value).
721 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
722 // Test that exponent bits are all set.
723 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
724 // NaNs have all-one exponents so they sign extend to -1.
725 __ cmp(r3, Operand(-1));
726 __ b(ne, &return_equal);
728 // Shift out flag and all exponent bits, retaining only mantissa.
729 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
730 // Or with all low-bits of mantissa.
731 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
732 __ orr(r0, r3, Operand(r2), SetCC);
733 // For equal we already have the right value in r0: Return zero (equal)
734 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
735 // not (it's a NaN). For <= and >= we need to load r0 with the failing
736 // value if it's a NaN.
738 // All-zero means Infinity means equal.
741 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
743 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
748 // No fall through here.
750 __ bind(&not_identical);
754 // See comment at call site.
755 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
761 DCHECK((lhs.is(r0) && rhs.is(r1)) ||
762 (lhs.is(r1) && rhs.is(r0)));
765 __ JumpIfSmi(rhs, &rhs_is_smi);
767 // Lhs is a Smi. Check whether the rhs is a heap number.
768 __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
770 // If rhs is not a number and lhs is a Smi then strict equality cannot
771 // succeed. Return non-equal
772 // If rhs is r0 then there is already a non zero value in it.
774 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
778 // Smi compared non-strictly with a non-Smi non-heap-number. Call
779 // the runtime.
783 // Lhs is a smi, rhs is a number.
784 // Convert lhs to a double in d7.
785 __ SmiToDouble(d7, lhs);
786 // Load the double from rhs, tagged HeapNumber r0, to d6.
787 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
789 // We now have both loaded as doubles but we can skip the lhs nan check
793 __ bind(&rhs_is_smi);
794 // Rhs is a smi. Check whether the non-smi lhs is a heap number.
795 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
797 // If lhs is not a number and rhs is a smi then strict equality cannot
798 // succeed. Return non-equal.
799 // If lhs is r0 then there is already a non zero value in it.
801 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
805 // Smi compared non-strictly with a non-smi non-heap-number. Call
806 // the runtime.
810 // Rhs is a smi, lhs is a heap number.
811 // Load the double from lhs, tagged HeapNumber r1, to d7.
812 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
813 // Convert rhs to a double in d6.
814 __ SmiToDouble(d6, rhs);
815 // Fall through to both_loaded_as_doubles.
819 // See comment at call site.
820 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
823 DCHECK((lhs.is(r0) && rhs.is(r1)) ||
824 (lhs.is(r1) && rhs.is(r0)));
826 // If either operand is a JS object or an oddball value, then they are
827 // not equal since their pointers are different.
828 // There is no test for undetectability in strict equality.
829 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
830 Label first_non_object;
831 // Get the type of the first operand into r2 and compare it with
832 // FIRST_SPEC_OBJECT_TYPE.
833 __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
834 __ b(lt, &first_non_object);
836 // Return non-zero (r0 is not zero)
837 Label return_not_equal;
838 __ bind(&return_not_equal);
841 __ bind(&first_non_object);
842 // Check for oddballs: true, false, null, undefined.
843 __ cmp(r2, Operand(ODDBALL_TYPE));
844 __ b(eq, &return_not_equal);
846 __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
847 __ b(ge, &return_not_equal);
849 // Check for oddballs: true, false, null, undefined.
850 __ cmp(r3, Operand(ODDBALL_TYPE));
851 __ b(eq, &return_not_equal);
853 // Now that we have the types we might as well check for
854 // internalized-internalized.
855 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
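// Because both tags are zero, OR-ing the two instance types leaves the bits
// tested below clear only if both operands are internalized strings, and two
// distinct internalized strings can never be equal.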
856 __ orr(r2, r2, Operand(r3));
857 __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
858 __ b(eq, &return_not_equal);
862 // See comment at call site.
863 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
866 Label* both_loaded_as_doubles,
867 Label* not_heap_numbers,
869 DCHECK((lhs.is(r0) && rhs.is(r1)) ||
870 (lhs.is(r1) && rhs.is(r0)));
872 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
873 __ b(ne, not_heap_numbers);
874 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
876 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
878 // Both are heap numbers. Load them up then jump to the code we have
880 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
881 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
882 __ jmp(both_loaded_as_doubles);
886 // Fast negative check for internalized-to-internalized equality.
887 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
890 Label* possible_strings,
891 Label* not_both_strings) {
892 DCHECK((lhs.is(r0) && rhs.is(r1)) ||
893 (lhs.is(r1) && rhs.is(r0)));
895 // r2 is object type of rhs.
897 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
898 __ tst(r2, Operand(kIsNotStringMask));
899 __ b(ne, &object_test);
900 __ tst(r2, Operand(kIsNotInternalizedMask));
901 __ b(ne, possible_strings);
902 __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
903 __ b(ge, not_both_strings);
904 __ tst(r3, Operand(kIsNotInternalizedMask));
905 __ b(ne, possible_strings);
907 // Both are internalized. We already checked they weren't the same pointer
908 // so they are not equal.
909 __ mov(r0, Operand(NOT_EQUAL));
912 __ bind(&object_test);
913 __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
914 __ b(lt, not_both_strings);
915 __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
916 __ b(lt, not_both_strings);
917 // If both objects are undetectable, they are equal. Otherwise, they
918 // are not equal, since they are different objects and an object is not
919 // equal to undefined.
920 __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
921 __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
922 __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
923 __ and_(r0, r2, Operand(r3));
924 __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
925 __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
930 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
933 CompareIC::State expected,
936 if (expected == CompareIC::SMI) {
937 __ JumpIfNotSmi(input, fail);
938 } else if (expected == CompareIC::NUMBER) {
939 __ JumpIfSmi(input, &ok);
940 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
943 // We could be strict about internalized/non-internalized here, but as long as
944 // hydrogen doesn't care, the stub doesn't have to care either.
949 // On entry r1 and r2 are the values to be compared.
950 // On exit r0 is 0, positive or negative to indicate the result of
952 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
953 Register lhs = r1;
954 Register rhs = r0;
955 Condition cc = GetCondition();
957 Label miss;
958 ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
959 ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
961 Label slow; // Call builtin.
962 Label not_smis, both_loaded_as_doubles, lhs_not_nan;
964 Label not_two_smis, smi_done;
965 __ orr(r2, r1, r0);
966 __ JumpIfNotSmi(r2, &not_two_smis);
967 __ mov(r1, Operand(r1, ASR, 1));
968 __ sub(r0, r1, Operand(r0, ASR, 1));
970 __ bind(&not_two_smis);
972 // NOTICE! This code is only reached after a smi-fast-case check, so
973 // it is certain that at least one operand isn't a smi.
975 // Handle the case where the objects are identical. Either returns the answer
976 // or goes to slow. Only falls through if the objects were not identical.
977 EmitIdenticalObjectComparison(masm, &slow, cc);
979 // If either is a Smi (we know that not both are), then they can only
980 // be strictly equal if the other is a HeapNumber.
981 STATIC_ASSERT(kSmiTag == 0);
982 DCHECK_EQ(0, Smi::FromInt(0));
983 __ and_(r2, lhs, Operand(rhs));
984 __ JumpIfNotSmi(r2, ¬_smis);
985 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
986 // 1) Return the answer.
987 // 2) Go to slow.
988 // 3) Fall through to both_loaded_as_doubles.
989 // 4) Jump to lhs_not_nan.
990 // In cases 3 and 4 we have found out we were dealing with a number-number
991 // comparison. If VFP3 is supported the double values of the numbers have
992 // been loaded into d7 and d6. Otherwise, the double values have been loaded
993 // into r0, r1, r2, and r3.
994 EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
996 __ bind(&both_loaded_as_doubles);
997 // The arguments have been converted to doubles and stored in d6 and d7, if
998 // VFP3 is supported, or in r0, r1, r2, and r3.
999 __ bind(&lhs_not_nan);
1001 // ARMv7 VFP3 instructions to implement double precision comparison.
1002 __ VFPCompareAndSetFlags(d7, d6);
1005 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
1006 __ mov(r0, Operand(LESS), LeaveCC, lt);
1007 __ mov(r0, Operand(GREATER), LeaveCC, gt);
1011 // If one of the sides was a NaN then the v flag is set. Load r0 with
1012 // whatever it takes to make the comparison fail, since comparisons with NaN
1013 // always fail.
1014 if (cc == lt || cc == le) {
1015 __ mov(r0, Operand(GREATER));
1017 __ mov(r0, Operand(LESS));
1022 // At this point we know we are dealing with two different objects,
1023 // and neither of them is a Smi. The objects are in rhs_ and lhs_.
1025 // This returns non-equal for some object types, or falls through if it
1027 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
1030 Label check_for_internalized_strings;
1031 Label flat_string_check;
1032 // Check for heap-number-heap-number comparison. Can jump to slow case,
1033 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
1034 // that case. If the inputs are not doubles then jumps to
1035 // check_for_internalized_strings.
1036 // In this case r2 will contain the type of rhs_. Never falls through.
1037 EmitCheckForTwoHeapNumbers(masm,
1040 &both_loaded_as_doubles,
1041 &check_for_internalized_strings,
1042 &flat_string_check);
1044 __ bind(&check_for_internalized_strings);
1045 // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
1046 // internalized strings.
1047 if (cc == eq && !strict()) {
1048 // Returns an answer for two internalized strings or two detectable objects.
1049 // Otherwise jumps to string case or not both strings case.
1050 // Assumes that r2 is the type of rhs_ on entry.
1051 EmitCheckForInternalizedStringsOrObjects(
1052 masm, lhs, rhs, &flat_string_check, &slow);
1055 // Check for both being sequential ASCII strings, and inline if that is the
1056 // case.
1057 __ bind(&flat_string_check);
1059 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
1061 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
1064 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1071 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1079 // Never falls through to here.
1084 // Figure out which native to call and setup the arguments.
1085 Builtins::JavaScript native;
1087 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1089 native = Builtins::COMPARE;
1090 int ncr; // NaN compare result
1091 if (cc == lt || cc == le) {
1094 DCHECK(cc == gt || cc == ge); // remaining cases
1097 __ mov(r0, Operand(Smi::FromInt(ncr)));
1101 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1102 // tagged as a small integer.
1103 __ InvokeBuiltin(native, JUMP_FUNCTION);
1110 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1111 // We don't allow a GC during a store buffer overflow so there is no need to
1112 // store the registers in any particular way, but we do have to store and
1114 __ stm(db_w, sp, kCallerSaved | lr.bit());
1116 const Register scratch = r1;
1118 if (save_doubles_ == kSaveFPRegs) {
1119 __ SaveFPRegs(sp, scratch);
1121 const int argument_count = 1;
1122 const int fp_argument_count = 0;
1124 AllowExternalCallThatCantCauseGC scope(masm);
1125 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1126 __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
1128 ExternalReference::store_buffer_overflow_function(isolate()),
1130 if (save_doubles_ == kSaveFPRegs) {
1131 __ RestoreFPRegs(sp, scratch);
1133 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
1137 void MathPowStub::Generate(MacroAssembler* masm) {
1138 const Register base = r1;
1139 const Register exponent = r2;
1140 const Register heapnumbermap = r5;
1141 const Register heapnumber = r0;
1142 const DwVfpRegister double_base = d0;
1143 const DwVfpRegister double_exponent = d1;
1144 const DwVfpRegister double_result = d2;
1145 const DwVfpRegister double_scratch = d3;
1146 const SwVfpRegister single_scratch = s6;
1147 const Register scratch = r9;
1148 const Register scratch2 = r4;
1150 Label call_runtime, done, int_exponent;
1151 if (exponent_type_ == ON_STACK) {
1152 Label base_is_smi, unpack_exponent;
1153 // The exponent and base are supplied as arguments on the stack.
1154 // This can only happen if the stub is called from non-optimized code.
1155 // Load input parameters from stack to double registers.
1156 __ ldr(base, MemOperand(sp, 1 * kPointerSize));
1157 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
1159 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
1161 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
1162 __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
1163 __ cmp(scratch, heapnumbermap);
1164 __ b(ne, &call_runtime);
1166 __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
1167 __ jmp(&unpack_exponent);
1169 __ bind(&base_is_smi);
1170 __ vmov(single_scratch, scratch);
1171 __ vcvt_f64_s32(double_base, single_scratch);
1172 __ bind(&unpack_exponent);
1174 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1176 __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
1177 __ cmp(scratch, heapnumbermap);
1178 __ b(ne, &call_runtime);
1179 __ vldr(double_exponent,
1180 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1181 } else if (exponent_type_ == TAGGED) {
1182 // Base is already in double_base.
1183 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1185 __ vldr(double_exponent,
1186 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1189 if (exponent_type_ != INTEGER) {
1190 Label int_exponent_convert;
1191 // Detect integer exponents stored as double.
1192 __ vcvt_u32_f64(single_scratch, double_exponent);
1193 // We do not check for NaN or Infinity here because comparing numbers on
1194 // ARM correctly distinguishes NaNs. We end up calling the built-in.
1195 __ vcvt_f64_u32(double_scratch, single_scratch);
1196 __ VFPCompareAndSetFlags(double_scratch, double_exponent);
1197 __ b(eq, &int_exponent_convert);
1199 if (exponent_type_ == ON_STACK) {
1200 // Detect square root case. Crankshaft detects constant +/-0.5 at
1201 // compile time and uses DoMathPowHalf instead. We then skip this check
1202 // for non-constant cases of +/-0.5 as these hardly occur.
1203 Label not_plus_half;
1206 __ vmov(double_scratch, 0.5, scratch);
1207 __ VFPCompareAndSetFlags(double_exponent, double_scratch);
1208 __ b(ne, &not_plus_half);
1210 // Calculates square root of base. Check for the special case of
1211 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
1212 __ vmov(double_scratch, -V8_INFINITY, scratch);
1213 __ VFPCompareAndSetFlags(double_base, double_scratch);
1214 __ vneg(double_result, double_scratch, eq);
1217 // Add +0 to convert -0 to +0.
1218 __ vadd(double_scratch, double_base, kDoubleRegZero);
1219 __ vsqrt(double_result, double_scratch);
1222 __ bind(&not_plus_half);
1223 __ vmov(double_scratch, -0.5, scratch);
1224 __ VFPCompareAndSetFlags(double_exponent, double_scratch);
1225 __ b(ne, &call_runtime);
1227 // Calculates square root of base. Check for the special case of
1228 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
1229 __ vmov(double_scratch, -V8_INFINITY, scratch);
1230 __ VFPCompareAndSetFlags(double_base, double_scratch);
1231 __ vmov(double_result, kDoubleRegZero, eq);
1234 // Add +0 to convert -0 to +0.
1235 __ vadd(double_scratch, double_base, kDoubleRegZero);
1236 __ vmov(double_result, 1.0, scratch);
1237 __ vsqrt(double_scratch, double_scratch);
1238 __ vdiv(double_result, double_result, double_scratch);
1244 AllowExternalCallThatCantCauseGC scope(masm);
1245 __ PrepareCallCFunction(0, 2, scratch);
1246 __ MovToFloatParameters(double_base, double_exponent);
1248 ExternalReference::power_double_double_function(isolate()),
1252 __ MovFromFloatResult(double_result);
1255 __ bind(&int_exponent_convert);
1256 __ vcvt_u32_f64(single_scratch, double_exponent);
1257 __ vmov(scratch, single_scratch);
1260 // Calculate power with integer exponent.
1261 __ bind(&int_exponent);
1263 // Get two copies of exponent in the registers scratch and exponent.
1264 if (exponent_type_ == INTEGER) {
1265 __ mov(scratch, exponent);
1267 // Exponent has previously been stored into scratch as untagged integer.
1268 __ mov(exponent, scratch);
1270 __ vmov(double_scratch, double_base); // Back up base.
1271 __ vmov(double_result, 1.0, scratch2);
1273 // Get absolute value of exponent.
1274 __ cmp(scratch, Operand::Zero());
1275 __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
1276 __ sub(scratch, scratch2, scratch, LeaveCC, mi);
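// The loop below is exponentiation by squaring: each iteration shifts the
// lowest exponent bit into the carry flag, multiplies the result by the
// current power of the base when that bit was set (cs), and keeps squaring
// the base power while exponent bits remain (ne).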
1279 __ bind(&while_true);
1280 __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
1281 __ vmul(double_result, double_result, double_scratch, cs);
1282 __ vmul(double_scratch, double_scratch, double_scratch, ne);
1283 __ b(ne, &while_true);
1285 __ cmp(exponent, Operand::Zero());
1287 __ vmov(double_scratch, 1.0, scratch);
1288 __ vdiv(double_result, double_scratch, double_result);
1289 // Test whether result is zero. Bail out to check for subnormal result.
1290 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1291 __ VFPCompareAndSetFlags(double_result, 0.0);
1293 // double_exponent may not contain the exponent value if the input was a
1294 // smi. We set it with exponent value before bailing out.
1295 __ vmov(single_scratch, exponent);
1296 __ vcvt_f64_s32(double_exponent, single_scratch);
1298 // Returning or bailing out.
1299 Counters* counters = isolate()->counters();
1300 if (exponent_type_ == ON_STACK) {
1301 // The arguments are still on the stack.
1302 __ bind(&call_runtime);
1303 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
1305 // The stub is called from non-optimized code, which expects the result
1306 // as heap number in exponent.
1308 __ AllocateHeapNumber(
1309 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1310 __ vstr(double_result,
1311 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1312 DCHECK(heapnumber.is(r0));
1313 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1318 AllowExternalCallThatCantCauseGC scope(masm);
1319 __ PrepareCallCFunction(0, 2, scratch);
1320 __ MovToFloatParameters(double_base, double_exponent);
1322 ExternalReference::power_double_double_function(isolate()),
1326 __ MovFromFloatResult(double_result);
1329 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1335 bool CEntryStub::NeedsImmovableCode() {
1340 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1341 CEntryStub::GenerateAheadOfTime(isolate);
1342 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
1343 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1344 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1345 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1346 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1347 BinaryOpICStub::GenerateAheadOfTime(isolate);
1348 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1352 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1353 SaveFPRegsMode mode = kSaveFPRegs;
1354 CEntryStub save_doubles(isolate, 1, mode);
1355 StoreBufferOverflowStub stub(isolate, mode);
1356 // These stubs might already be in the snapshot, detect that and don't
1357 // regenerate, which would lead to code stub initialization state being messed
1358 // up.
1359 Code* save_doubles_code;
1360 if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
1361 save_doubles_code = *save_doubles.GetCode();
1363 Code* store_buffer_overflow_code;
1364 if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
1365 store_buffer_overflow_code = *stub.GetCode();
1367 isolate->set_fp_stubs_generated(true);
1371 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1372 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1377 void CEntryStub::Generate(MacroAssembler* masm) {
1378 // Called from JavaScript; parameters are on stack as if calling JS function.
1379 // r0: number of arguments including receiver
1380 // r1: pointer to builtin function
1381 // fp: frame pointer (restored after C call)
1382 // sp: stack pointer (restored as callee's sp after C call)
1383 // cp: current context (C callee-saved)
1385 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1387 __ mov(r5, Operand(r1));
1389 // Compute the argv pointer in a callee-saved register.
1390 __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
1391 __ sub(r1, r1, Operand(kPointerSize));
1393 // Enter the exit frame that transitions from JavaScript to C++.
1394 FrameScope scope(masm, StackFrame::MANUAL);
1395 __ EnterExitFrame(save_doubles_);
1397 // Store a copy of argc in callee-saved registers for later.
1398 __ mov(r4, Operand(r0));
1400 // r0, r4: number of arguments including receiver (C callee-saved)
1401 // r1: pointer to the first argument (C callee-saved)
1402 // r5: pointer to builtin function (C callee-saved)
1404 // Result returned in r0 or r0+r1 by default.
1406 #if V8_HOST_ARCH_ARM
1407 int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1408 int frame_alignment_mask = frame_alignment - 1;
1409 if (FLAG_debug_code) {
1410 if (frame_alignment > kPointerSize) {
1411 Label alignment_as_expected;
1412 DCHECK(IsPowerOf2(frame_alignment));
1413 __ tst(sp, Operand(frame_alignment_mask));
1414 __ b(eq, &alignment_as_expected);
1415 // Don't use Check here, as it will call Runtime_Abort re-entering here.
1416 __ stop("Unexpected alignment");
1417 __ bind(&alignment_as_expected);
1423 // r0 = argc, r1 = argv
1424 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
1426 // To let the GC traverse the return address of the exit frames, we need to
1427 // know where the return address is. The CEntryStub is unmovable, so
1428 // we can store the address on the stack to be able to find it again and
1429 // we never have to restore it, because it will not change.
1430 // Compute the return address in lr to return to after the jump below. Pc is
1431 // already at '+ 8' from the current instruction but return is after three
1432 // instructions so add another 4 to pc to get the return address.
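// (Each ARM instruction is 4 bytes and reading pc yields the current
// instruction address plus 8, so pc + 4 is the address just past the third
// instruction, i.e. the desired return address.)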
1434 // Prevent literal pool emission before return address.
1435 Assembler::BlockConstPoolScope block_const_pool(masm);
1436 __ add(lr, pc, Operand(4));
1437 __ str(lr, MemOperand(sp, 0));
1441 __ VFPEnsureFPSCRState(r2);
1443 // Runtime functions should not return 'the hole'. Allowing it to escape may
1444 // lead to crashes in the IC code later.
1445 if (FLAG_debug_code) {
1447 __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
1449 __ stop("The hole escaped");
1453 // Check result for exception sentinel.
1454 Label exception_returned;
1455 __ CompareRoot(r0, Heap::kExceptionRootIndex);
1456 __ b(eq, &exception_returned);
1458 ExternalReference pending_exception_address(
1459 Isolate::kPendingExceptionAddress, isolate());
1461 // Check that there is no pending exception, otherwise we
1462 // should have returned the exception sentinel.
1463 if (FLAG_debug_code) {
1465 __ mov(r2, Operand(pending_exception_address));
1466 __ ldr(r2, MemOperand(r2));
1467 __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
1468 // Cannot use Check here, as it attempts to generate a call into the runtime.
1470 __ stop("Unexpected pending exception");
1474 // Exit C frame and return.
1476 // sp: stack pointer
1477 // fp: frame pointer
1478 // Callee-saved register r4 still holds argc.
1479 __ LeaveExitFrame(save_doubles_, r4, true);
1482 // Handling of exception.
1483 __ bind(&exception_returned);
1485 // Retrieve the pending exception.
1486 __ mov(r2, Operand(pending_exception_address));
1487 __ ldr(r0, MemOperand(r2));
1489 // Clear the pending exception.
1490 __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
1491 __ str(r3, MemOperand(r2));
1493 // Special handling of termination exceptions which are uncatchable
1494 // by JavaScript code.
1495 Label throw_termination_exception;
1496 __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
1497 __ b(eq, &throw_termination_exception);
1499 // Handle normal exception.
1502 __ bind(&throw_termination_exception);
1503 __ ThrowUncatchable(r0);
1507 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
1514 Label invoke, handler_entry, exit;
1516 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1518 // Called from C, so do not pop argc and args on exit (preserve sp)
1519 // No need to save register-passed args
1520 // Save callee-saved registers (incl. cp and fp), sp, and lr
1521 __ stm(db_w, sp, kCalleeSaved | lr.bit());
1523 // Save callee-saved vfp registers.
1524 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
1525 // Set up the reserved register for 0.0.
1526 __ vmov(kDoubleRegZero, 0.0);
1527 __ VFPEnsureFPSCRState(r4);
1529 // Get address of argv, see stm above.
1535 // Set up argv in r4.
1536 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1537 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
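// (argv is the fifth C argument, so it is passed on the stack; it sits above
// everything pushed above: the kNumCalleeSaved core registers plus lr, and
// the callee-saved VFP registers.)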
1538 __ ldr(r4, MemOperand(sp, offset_to_argv));
1540 // Push a frame with special values setup to mark it as an entry frame.
1546 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
1547 if (FLAG_enable_ool_constant_pool) {
1548 __ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array()));
1550 __ mov(r7, Operand(Smi::FromInt(marker)));
1551 __ mov(r6, Operand(Smi::FromInt(marker)));
1553 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1554 __ ldr(r5, MemOperand(r5));
1555 __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1556 __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
1557 (FLAG_enable_ool_constant_pool ? r8.bit() : 0) |
1560 // Set up frame pointer for the frame to be pushed.
1561 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1563 // If this is the outermost JS call, set js_entry_sp value.
1564 Label non_outermost_js;
1565 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1566 __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1567 __ ldr(r6, MemOperand(r5));
1568 __ cmp(r6, Operand::Zero());
1569 __ b(ne, &non_outermost_js);
1570 __ str(fp, MemOperand(r5));
1571 __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1574 __ bind(&non_outermost_js);
1575 __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1579 // Jump to a faked try block that does the invoke, with a faked catch
1580 // block that sets the pending exception.
1583 // Block literal pool emission whilst taking the position of the handler
1584 // entry. This avoids making the assumption that literal pools are always
1585 // emitted after an instruction is emitted, rather than before.
1587 Assembler::BlockConstPoolScope block_const_pool(masm);
1588 __ bind(&handler_entry);
1589 handler_offset_ = handler_entry.pos();
1590 // Caught exception: Store result (exception) in the pending exception
1591 // field in the JSEnv and return a failure sentinel. Coming in here the
1592 // fp will be invalid because the PushTryHandler below sets it to 0 to
1593 // signal the existence of the JSEntry frame.
1594 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1597 __ str(r0, MemOperand(ip));
1598 __ LoadRoot(r0, Heap::kExceptionRootIndex);
1601 // Invoke: Link this frame into the handler chain. There's only one
1602 // handler block in this code object, so its index is 0.
1604 // Must preserve r0-r4, r5-r6 are available.
1605 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1606 // If an exception not caught by another handler occurs, this handler
1607 // returns control to the code after the bl(&invoke) above, which
1608 // restores all kCalleeSaved registers (including cp and fp) to their
1609 // saved values before returning a failure to C.
1611 // Clear any pending exceptions.
1612 __ mov(r5, Operand(isolate()->factory()->the_hole_value()));
1613 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1615 __ str(r5, MemOperand(ip));
1617 // Invoke the function by calling through JS entry trampoline builtin.
1618 // Notice that we cannot store a reference to the trampoline code directly in
1619 // this stub, because runtime stubs are not traversed when doing GC.
1621 // Expected registers by Builtins::JSEntryTrampoline
1628 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1630 __ mov(ip, Operand(construct_entry));
1632 ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
1633 __ mov(ip, Operand(entry));
1635 __ ldr(ip, MemOperand(ip)); // deref address
1636 __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
1638 // Branch and link to JSEntryTrampoline.
1641 // Unlink this frame from the handler chain.
1644 __ bind(&exit); // r0 holds result
1645 // Check if the current stack frame is marked as the outermost JS frame.
1646 Label non_outermost_js_2;
1648 __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1649 __ b(ne, &non_outermost_js_2);
1650 __ mov(r6, Operand::Zero());
1651 __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1652 __ str(r6, MemOperand(r5));
1653 __ bind(&non_outermost_js_2);
1655 // Restore the top frame descriptors from the stack.
1658 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1659 __ str(r3, MemOperand(ip));
1661 // Reset the stack to the callee saved registers.
1662 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1664 // Restore callee-saved registers and return.
1666 if (FLAG_debug_code) {
1667 __ mov(lr, Operand(pc));
1671 // Restore callee-saved vfp registers.
1672 __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
1674 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
1678 // Uses registers r0 to r4.
1679 // Expected input (depending on whether args are in registers or on the stack):
1680 // * object: r0 or at sp + 1 * kPointerSize.
1681 // * function: r1 or at sp.
1683 // An inlined call site may have been generated before calling this stub.
1684 // In this case the offsets to the inline sites to patch are passed in r5 and r6.
1685 // (See LCodeGen::DoInstanceOfKnownGlobal)
1686 void InstanceofStub::Generate(MacroAssembler* masm) {
1687 // Call site inlining and patching implies arguments in registers.
1688 DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1690 // Fixed register usage throughout the stub:
1691 const Register object = r0; // Object (lhs).
1692 Register map = r3; // Map of the object.
1693 const Register function = r1; // Function (rhs).
1694 const Register prototype = r4; // Prototype of the function.
1695 const Register scratch = r2;
1697 Label slow, loop, is_instance, is_not_instance, not_js_object;
1699 if (!HasArgsInRegisters()) {
1700 __ ldr(object, MemOperand(sp, 1 * kPointerSize));
1701 __ ldr(function, MemOperand(sp, 0));
1704 // Check that the left hand is a JS object and load map.
1705 __ JumpIfSmi(object, &not_js_object);
1706 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
1708 // If there is a call site cache don't look in the global cache, but do the
1709 // real lookup and update the call site cache.
1710 if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
1712 __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1714 __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
1716 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1717 __ Ret(HasArgsInRegisters() ? 0 : 2);
1722 // Get the prototype of the function.
1723 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1725 // Check that the function prototype is a JS object.
1726 __ JumpIfSmi(prototype, &slow);
1727 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1729 // Update the global instanceof or call site inlined cache with the current
1730 // map and function. The cached answer will be set when it is known below.
1731 if (!HasCallSiteInlineCheck()) {
1732 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1733 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1735 DCHECK(HasArgsInRegisters());
1736 // Patch the (relocated) inlined map check.
1738 // The map_load_offset was stored in r5
1739 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1740 const Register map_load_offset = r5;
1741 __ sub(r9, lr, map_load_offset);
1742 // Get the map location in r5 and patch it.
1743 __ GetRelocatedValueLocation(r9, map_load_offset, scratch);
1744 __ ldr(map_load_offset, MemOperand(map_load_offset));
1745 __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));
1748 // Register mapping: r3 is object map and r4 is function prototype.
1749 // Get prototype of object into r2.
1750 __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1752 // We don't need map any more. Use it as a scratch register.
1753 Register scratch2 = map;
1756 // Loop through the prototype chain looking for the function prototype.
1757 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1759 __ cmp(scratch, Operand(prototype));
1760 __ b(eq, &is_instance);
1761 __ cmp(scratch, scratch2);
1762 __ b(eq, &is_not_instance);
1763 __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1764 __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
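// Note: each pass of the loop compares the current prototype in scratch with
// function.prototype (is_instance) and with null (is_not_instance), then
// follows scratch->map->prototype to the next link of the chain.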
1766 Factory* factory = isolate()->factory();
1768 __ bind(&is_instance);
1769 if (!HasCallSiteInlineCheck()) {
1770 __ mov(r0, Operand(Smi::FromInt(0)));
1771 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1772 if (ReturnTrueFalseObject()) {
1773 __ Move(r0, factory->true_value());
1776 // Patch the call site to return true.
1777 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
1778 // The bool_load_offset was stored in r6
1779 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1780 const Register bool_load_offset = r6;
1781 __ sub(r9, lr, bool_load_offset);
1782 // Get the boolean result location in scratch and patch it.
1783 __ GetRelocatedValueLocation(r9, scratch, scratch2);
1784 __ str(r0, MemOperand(scratch));
1786 if (!ReturnTrueFalseObject()) {
1787 __ mov(r0, Operand(Smi::FromInt(0)));
1790 __ Ret(HasArgsInRegisters() ? 0 : 2);
1792 __ bind(&is_not_instance);
1793 if (!HasCallSiteInlineCheck()) {
1794 __ mov(r0, Operand(Smi::FromInt(1)));
1795 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1796 if (ReturnTrueFalseObject()) {
1797 __ Move(r0, factory->false_value());
1800 // Patch the call site to return false.
1801 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
1802 // The bool_load_offset was stored in r6
1803 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1804 const Register bool_load_offset = r6;
1805 __ sub(r9, lr, bool_load_offset);
1807 // Get the boolean result location in scratch and patch it.
1808 __ GetRelocatedValueLocation(r9, scratch, scratch2);
1809 __ str(r0, MemOperand(scratch));
1811 if (!ReturnTrueFalseObject()) {
1812 __ mov(r0, Operand(Smi::FromInt(1)));
1815 __ Ret(HasArgsInRegisters() ? 0 : 2);
1817 Label object_not_null, object_not_null_or_smi;
1818 __ bind(&not_js_object);
1819 // Before null, smi and string value checks, check that the rhs is a function,
1820 // as for a non-function rhs an exception needs to be thrown.
1821 __ JumpIfSmi(function, &slow);
1822 __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
1825 // Null is not instance of anything.
1826 __ cmp(scratch, Operand(isolate()->factory()->null_value()));
1827 __ b(ne, &object_not_null);
1828 if (ReturnTrueFalseObject()) {
1829 __ Move(r0, factory->false_value());
1831 __ mov(r0, Operand(Smi::FromInt(1)));
1833 __ Ret(HasArgsInRegisters() ? 0 : 2);
1835 __ bind(&object_not_null);
1836 // Smi values are not instances of anything.
1837 __ JumpIfNotSmi(object, &object_not_null_or_smi);
1838 if (ReturnTrueFalseObject()) {
1839 __ Move(r0, factory->false_value());
1841 __ mov(r0, Operand(Smi::FromInt(1)));
1843 __ Ret(HasArgsInRegisters() ? 0 : 2);
1845 __ bind(&object_not_null_or_smi);
1846 // String values are not instances of anything.
1847 __ IsObjectJSStringType(object, scratch, &slow);
1848 if (ReturnTrueFalseObject()) {
1849 __ Move(r0, factory->false_value());
1851 __ mov(r0, Operand(Smi::FromInt(1)));
1853 __ Ret(HasArgsInRegisters() ? 0 : 2);
1855 // Slow-case. Tail call builtin.
1857 if (!ReturnTrueFalseObject()) {
1858 if (HasArgsInRegisters()) {
1861 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
1864 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1866 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1868 __ cmp(r0, Operand::Zero());
1869 __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
1870 __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
1871 __ Ret(HasArgsInRegisters() ? 0 : 2);
1876 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1878 Register receiver = LoadIC::ReceiverRegister();
1880 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3,
1883 PropertyAccessCompiler::TailCallBuiltin(
1884 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1888 Register InstanceofStub::left() { return r0; }
1891 Register InstanceofStub::right() { return r1; }
1894 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1895 // The displacement is the offset of the last parameter (if any)
1896 // relative to the frame pointer.
1897 const int kDisplacement =
1898 StandardFrameConstants::kCallerSPOffset - kPointerSize;
1900 // Check that the key is a smi.
1902 __ JumpIfNotSmi(r1, &slow);
1904 // Check if the calling frame is an arguments adaptor frame.
1906 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1907 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1908 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
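// Note: frames that are not ordinary JS frames keep a smi frame-type marker in
// their context slot; ARGUMENTS_ADAPTOR here means the caller was invoked with
// an argument count different from its formal parameter count, so the actual
// arguments (and their count) live in the adaptor frame.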
1911 // Check index against formal parameters count limit passed in
1912 // through register r0. Use unsigned comparison to get negative check for free.
1917 // Read the argument from the stack and return it.
1919 __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
1920 __ ldr(r0, MemOperand(r3, kDisplacement));
1923 // Arguments adaptor case: Check index against actual arguments
1924 // limit found in the arguments adaptor frame. Use unsigned
1925 // comparison to get negative check for free.
1927 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1931 // Read the argument from the adaptor frame and return it.
1933 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
1934 __ ldr(r0, MemOperand(r3, kDisplacement));
1937 // Slow-case: Handle non-smi or out-of-bounds access to arguments
1938 // by calling the runtime system.
1941 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1945 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1946 // sp[0] : number of parameters
1947 // sp[4] : receiver displacement
1950 // Check if the calling frame is an arguments adaptor frame.
1952 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1953 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
1954 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1957 // Patch the arguments.length and the parameters pointer in the current frame.
1958 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1959 __ str(r2, MemOperand(sp, 0 * kPointerSize));
1960 __ add(r3, r3, Operand(r2, LSL, 1));
1961 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
1962 __ str(r3, MemOperand(sp, 1 * kPointerSize));
1965 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1969 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1971 // sp[0] : number of parameters (tagged)
1972 // sp[4] : address of receiver argument
1974 // Registers used over whole function:
1975 // r6 : allocated object (tagged)
1976 // r9 : mapped parameter count (tagged)
1978 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
1979 // r1 = parameter count (tagged)
1981 // Check if the calling frame is an arguments adaptor frame.
1983 Label adaptor_frame, try_allocate;
1984 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1985 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
1986 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1987 __ b(eq, &adaptor_frame);
1989 // No adaptor, parameter count = argument count.
1991 __ b(&try_allocate);
1993 // We have an adaptor frame. Patch the parameters pointer.
1994 __ bind(&adaptor_frame);
1995 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1996 __ add(r3, r3, Operand(r2, LSL, 1));
1997 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
1998 __ str(r3, MemOperand(sp, 1 * kPointerSize));
2000 // r1 = parameter count (tagged)
2001 // r2 = argument count (tagged)
2002 // Compute the mapped parameter count = min(r1, r2) in r1.
2003 __ cmp(r1, Operand(r2));
2004 __ mov(r1, Operand(r2), LeaveCC, gt);
2006 __ bind(&try_allocate);
2008 // Compute the sizes of backing store, parameter map, and arguments object.
2009 // 1. Parameter map, has 2 extra words containing context and backing store.
2010 const int kParameterMapHeaderSize =
2011 FixedArray::kHeaderSize + 2 * kPointerSize;
2012 // If there are no mapped parameters, we do not need the parameter_map.
2013 __ cmp(r1, Operand(Smi::FromInt(0)));
2014 __ mov(r9, Operand::Zero(), LeaveCC, eq);
2015 __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
2016 __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
2018 // 2. Backing store.
2019 __ add(r9, r9, Operand(r2, LSL, 1));
2020 __ add(r9, r9, Operand(FixedArray::kHeaderSize));
2022 // 3. Arguments object.
2023 __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
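// Note: r9 now holds the total allocation size in bytes (a smi shifted left by
// one is its value times kPointerSize on this 32-bit port):
//   (mapped count > 0 ? kParameterMapHeaderSize + mapped_count * kPointerSize : 0)
//   + FixedArray::kHeaderSize + argument_count * kPointerSize
//   + Heap::kSloppyArgumentsObjectSize.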
2025 // Do the allocation of all three objects in one go.
2026 __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
2028 // r0 = address of new object(s) (tagged)
2029 // r2 = argument count (smi-tagged)
2030 // Get the arguments boilerplate from the current native context into r4.
2031 const int kNormalOffset =
2032 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
2033 const int kAliasedOffset =
2034 Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
2036 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2037 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
2038 __ cmp(r1, Operand::Zero());
2039 __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
2040 __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
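// Note: with no mapped parameters the plain sloppy arguments map is used;
// otherwise the aliased arguments map, whose elements field will point at the
// parameter map built below.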
2042 // r0 = address of new object (tagged)
2043 // r1 = mapped parameter count (tagged)
2044 // r2 = argument count (smi-tagged)
2045 // r4 = address of arguments map (tagged)
2046 __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
2047 __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
2048 __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
2049 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
2051 // Set up the callee in-object property.
2052 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2053 __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
2054 __ AssertNotSmi(r3);
2055 const int kCalleeOffset = JSObject::kHeaderSize +
2056 Heap::kArgumentsCalleeIndex * kPointerSize;
2057 __ str(r3, FieldMemOperand(r0, kCalleeOffset));
2059 // Use the length (smi tagged) and set that as an in-object property too.
2061 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2062 const int kLengthOffset = JSObject::kHeaderSize +
2063 Heap::kArgumentsLengthIndex * kPointerSize;
2064 __ str(r2, FieldMemOperand(r0, kLengthOffset));
2066 // Set up the elements pointer in the allocated arguments object.
2067 // If we allocated a parameter map, r4 will point there, otherwise
2068 // it will point to the backing store.
2069 __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
2070 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
2072 // r0 = address of new object (tagged)
2073 // r1 = mapped parameter count (tagged)
2074 // r2 = argument count (tagged)
2075 // r4 = address of parameter map or backing store (tagged)
2076 // Initialize parameter map. If there are no mapped arguments, we're done.
2077 Label skip_parameter_map;
2078 __ cmp(r1, Operand(Smi::FromInt(0)));
2079 // Move backing store address to r3, because it is
2080 // expected there when filling in the unmapped arguments.
2081 __ mov(r3, r4, LeaveCC, eq);
2082 __ b(eq, &skip_parameter_map);
2084 __ LoadRoot(r6, Heap::kSloppyArgumentsElementsMapRootIndex);
2085 __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
2086 __ add(r6, r1, Operand(Smi::FromInt(2)));
2087 __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
2088 __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
2089 __ add(r6, r4, Operand(r1, LSL, 1));
2090 __ add(r6, r6, Operand(kParameterMapHeaderSize));
2091 __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
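// Note: the parameter map is a FixedArray with the sloppy-arguments-elements
// map. Slot 0 holds the context, slot 1 the backing store address, and the
// remaining mapped_count slots (filled by the loop below) hold context slot
// indices, giving a length of mapped_count + 2.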
2093 // Copy the parameter slots and the holes in the arguments.
2094 // We need to fill in mapped_parameter_count slots. They index the context,
2095 // where parameters are stored in reverse order, at
2096 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2097 // The mapped parameters thus need to get indices
2098 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
2099 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2100 // We loop from right to left.
2101 Label parameters_loop, parameters_test;
2103 __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
2104 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
2105 __ sub(r9, r9, Operand(r1));
2106 __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
2107 __ add(r3, r4, Operand(r6, LSL, 1));
2108 __ add(r3, r3, Operand(kParameterMapHeaderSize));
2110 // r6 = loop variable (tagged)
2111 // r1 = mapping index (tagged)
2112 // r3 = address of backing store (tagged)
2113 // r4 = address of parameter map (tagged), which is also the address of new
2114 // object + Heap::kSloppyArgumentsObjectSize (tagged)
2115 // r0 = temporary scratch (a.o., for address calculation)
2116 // r5 = the hole value
2117 __ jmp(&parameters_test);
2119 __ bind(&parameters_loop);
2120 __ sub(r6, r6, Operand(Smi::FromInt(1)));
2121 __ mov(r0, Operand(r6, LSL, 1));
2122 __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
2123 __ str(r9, MemOperand(r4, r0));
2124 __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
2125 __ str(r5, MemOperand(r3, r0));
2126 __ add(r9, r9, Operand(Smi::FromInt(1)));
2127 __ bind(&parameters_test);
2128 __ cmp(r6, Operand(Smi::FromInt(0)));
2129 __ b(ne, &parameters_loop);
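// Note: each pass above stored the context slot index (kept as a smi in r9)
// into the parameter map entry for one mapped parameter and wrote the hole
// into the matching backing store slot; the remaining (unmapped) arguments are
// copied into the backing store further down.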
2131 // Restore r0 = new object (tagged)
2132 __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
2134 __ bind(&skip_parameter_map);
2135 // r0 = address of new object (tagged)
2136 // r2 = argument count (tagged)
2137 // r3 = address of backing store (tagged)
2139 // Copy arguments header and remaining slots (if there are any).
2140 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
2141 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
2142 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
2144 Label arguments_loop, arguments_test;
2146 __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
2147 __ sub(r4, r4, Operand(r9, LSL, 1));
2148 __ jmp(&arguments_test);
2150 __ bind(&arguments_loop);
2151 __ sub(r4, r4, Operand(kPointerSize));
2152 __ ldr(r6, MemOperand(r4, 0));
2153 __ add(r5, r3, Operand(r9, LSL, 1));
2154 __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
2155 __ add(r9, r9, Operand(Smi::FromInt(1)));
2157 __ bind(&arguments_test);
2158 __ cmp(r9, Operand(r2));
2159 __ b(lt, &arguments_loop);
2161 // Return and remove the on-stack parameters.
2162 __ add(sp, sp, Operand(3 * kPointerSize));
2165 // Do the runtime call to allocate the arguments object.
2166 // r0 = address of new object (tagged)
2167 // r2 = argument count (tagged)
2169 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
2170 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
2174 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2175 // sp[0] : number of parameters
2176 // sp[4] : receiver displacement
2178 // Check if the calling frame is an arguments adaptor frame.
2179 Label adaptor_frame, try_allocate, runtime;
2180 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2181 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
2182 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2183 __ b(eq, &adaptor_frame);
2185 // Get the length from the frame.
2186 __ ldr(r1, MemOperand(sp, 0));
2187 __ b(&try_allocate);
2189 // Patch the arguments.length and the parameters pointer.
2190 __ bind(&adaptor_frame);
2191 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2192 __ str(r1, MemOperand(sp, 0));
2193 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
2194 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
2195 __ str(r3, MemOperand(sp, 1 * kPointerSize));
2197 // Try the new space allocation. Start out with computing the size
2198 // of the arguments object and the elements array in words.
2199 Label add_arguments_object;
2200 __ bind(&try_allocate);
2201 __ SmiUntag(r1, SetCC);
2202 __ b(eq, &add_arguments_object);
2203 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
2204 __ bind(&add_arguments_object);
2205 __ add(r1, r1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
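// Note: r1 now holds the allocation size in words:
// Heap::kStrictArgumentsObjectSize / kPointerSize for the object itself, plus
// FixedArray::kHeaderSize / kPointerSize + argc for the elements array when
// there is at least one argument.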
2207 // Do the allocation of both objects in one go.
2208 __ Allocate(r1, r0, r2, r3, &runtime,
2209 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2211 // Get the arguments boilerplate from the current native context.
2212 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2213 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
2214 __ ldr(r4, MemOperand(
2215 r4, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
2217 __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
2218 __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
2219 __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
2220 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
2222 // Get the length (smi tagged) and set that as an in-object property too.
2223 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2224 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
2226 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
2227 Heap::kArgumentsLengthIndex * kPointerSize));
2229 // If there are no actual arguments, we're done.
2231 __ cmp(r1, Operand::Zero());
2234 // Get the parameters pointer from the stack.
2235 __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
2237 // Set up the elements pointer in the allocated arguments object and
2238 // initialize the header in the elements fixed array.
2239 __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
2240 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
2241 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
2242 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
2243 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
2246 // Copy the fixed array slots.
2248 // Set up r4 to point to the first array slot.
2249 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2251 // Pre-decrement r2 with kPointerSize on each iteration.
2252 // Pre-decrement in order to skip receiver.
2253 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
2254 // Post-increment r4 with kPointerSize on each iteration.
2255 __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
2256 __ sub(r1, r1, Operand(1));
2257 __ cmp(r1, Operand::Zero());
2260 // Return and remove the on-stack parameters.
2262 __ add(sp, sp, Operand(3 * kPointerSize));
2265 // Do the runtime call to allocate the arguments object.
2267 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
2271 void RegExpExecStub::Generate(MacroAssembler* masm) {
2272 // Just jump directly to runtime if native RegExp is not selected at compile
2273 // time, or if the regexp entry in generated code is turned off by a runtime switch or at compilation.
2275 #ifdef V8_INTERPRETED_REGEXP
2276 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2277 #else // V8_INTERPRETED_REGEXP
2279 // Stack frame on entry.
2280 // sp[0]: last_match_info (expected JSArray)
2281 // sp[4]: previous index
2282 // sp[8]: subject string
2283 // sp[12]: JSRegExp object
2285 const int kLastMatchInfoOffset = 0 * kPointerSize;
2286 const int kPreviousIndexOffset = 1 * kPointerSize;
2287 const int kSubjectOffset = 2 * kPointerSize;
2288 const int kJSRegExpOffset = 3 * kPointerSize;
2291 // Allocation of registers for this function. These are in callee save
2292 // registers and will be preserved by the call to the native RegExp code, as
2293 // this code is called using the normal C calling convention. When calling
2294 // directly from generated code the native RegExp code will not do a GC and
2295 // therefore the contents of these registers are safe to use after the call.
2296 Register subject = r4;
2297 Register regexp_data = r5;
2298 Register last_match_info_elements = no_reg; // will be r6;
2300 // Ensure that a RegExp stack is allocated.
2301 ExternalReference address_of_regexp_stack_memory_address =
2302 ExternalReference::address_of_regexp_stack_memory_address(isolate());
2303 ExternalReference address_of_regexp_stack_memory_size =
2304 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2305 __ mov(r0, Operand(address_of_regexp_stack_memory_size));
2306 __ ldr(r0, MemOperand(r0, 0));
2307 __ cmp(r0, Operand::Zero());
2310 // Check that the first argument is a JSRegExp object.
2311 __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
2312 __ JumpIfSmi(r0, &runtime);
2313 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
2316 // Check that the RegExp has been compiled (data contains a fixed array).
2317 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
2318 if (FLAG_debug_code) {
2319 __ SmiTst(regexp_data);
2320 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2321 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
2322 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2325 // regexp_data: RegExp data (FixedArray)
2326 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2327 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2328 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2331 // regexp_data: RegExp data (FixedArray)
2332 // Check that the number of captures fit in the static offsets vector buffer.
2334 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2335 // Check (number_of_captures + 1) * 2 <= offsets vector size
2336 // Or number_of_captures * 2 <= offsets vector size - 2
2337 // Multiplying by 2 comes for free since r2 is smi-tagged.
2338 STATIC_ASSERT(kSmiTag == 0);
2339 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2340 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2341 __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2344 // Reset offset for possibly sliced string.
2345 __ mov(r9, Operand::Zero());
2346 __ ldr(subject, MemOperand(sp, kSubjectOffset));
2347 __ JumpIfSmi(subject, &runtime);
2348 __ mov(r3, subject); // Make a copy of the original subject string.
2349 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2350 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2351 // subject: subject string
2352 // r3: subject string
2353 // r0: subject string instance type
2354 // regexp_data: RegExp data (FixedArray)
2355 // Handle subject string according to its encoding and representation:
2356 // (1) Sequential string? If yes, go to (5).
2357 // (2) Anything but sequential or cons? If yes, go to (6).
2358 // (3) Cons string. If the string is flat, replace subject with first string.
2359 // Otherwise bailout.
2360 // (4) Is subject external? If yes, go to (7).
2361 // (5) Sequential string. Load regexp code according to encoding.
2365 // Deferred code at the end of the stub:
2366 // (6) Not a long external string? If yes, go to (8).
2367 // (7) External string. Make it, offset-wise, look like a sequential string.
2369 // (8) Short external string or not a string? If yes, bail out to runtime.
2370 // (9) Sliced string. Replace subject with parent. Go to (4).
2372 Label seq_string /* 5 */, external_string /* 7 */,
2373 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2374 not_long_external /* 8 */;
2376 // (1) Sequential string? If yes, go to (5).
2379 Operand(kIsNotStringMask |
2380 kStringRepresentationMask |
2381 kShortExternalStringMask),
2383 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2384 __ b(eq, &seq_string); // Go to (5).
2386 // (2) Anything but sequential or cons? If yes, go to (6).
2387 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2388 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2389 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2390 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2391 __ cmp(r1, Operand(kExternalStringTag));
2392 __ b(ge, &not_seq_nor_cons); // Go to (6).
2394 // (3) Cons string. Check that it's flat.
2395 // Replace subject with first string and reload instance type.
2396 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
2397 __ CompareRoot(r0, Heap::kempty_stringRootIndex);
2399 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2401 // (4) Is subject external? If yes, go to (7).
2402 __ bind(&check_underlying);
2403 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2404 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2405 STATIC_ASSERT(kSeqStringTag == 0);
2406 __ tst(r0, Operand(kStringRepresentationMask));
2407 // The underlying external string is never a short external string.
2408 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2409 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2410 __ b(ne, &external_string); // Go to (7).
2412 // (5) Sequential string. Load regexp code according to encoding.
2413 __ bind(&seq_string);
2414 // subject: sequential subject string (or look-alike, external string)
2415 // r3: original subject string
2416 // Load previous index and check range before r3 is overwritten. We have to
2417 // use r3 instead of subject here because subject might have been only made
2418 // to look like a sequential string when it actually is an external string.
2419 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
2420 __ JumpIfNotSmi(r1, &runtime);
2421 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
2422 __ cmp(r3, Operand(r1));
2426 STATIC_ASSERT(4 == kOneByteStringTag);
2427 STATIC_ASSERT(kTwoByteStringTag == 0);
2428 __ and_(r0, r0, Operand(kStringEncodingMask));
2429 __ mov(r3, Operand(r0, ASR, 2), SetCC);
2430 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
2431 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
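// Note: after masking, r0 is kOneByteStringTag (4) for one-byte strings and 0
// for two-byte strings; the arithmetic shift right by 2 leaves 1 or 0 in r3
// and sets the flags, so the two conditional loads above pick the one-byte
// (ne) or two-byte (eq) irregexp code object.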
2433 // (E) Carry on. String handling is done.
2434 // r6: irregexp code
2435 // Check that the irregexp code has been generated for the actual string
2436 // encoding. If it has, the field contains a code object otherwise it contains
2437 // a smi (code flushing support).
2438 __ JumpIfSmi(r6, &runtime);
2440 // r1: previous index
2441 // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
2443 // subject: Subject string
2444 // regexp_data: RegExp data (FixedArray)
2445 // All checks done. Now push arguments for native regexp code.
2446 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);
2448 // Isolates: note we add an additional parameter here (isolate pointer).
2449 const int kRegExpExecuteArguments = 9;
2450 const int kParameterRegisters = 4;
2451 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
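// Note: the first four of the nine arguments are passed in r0-r3 per the ARM
// calling convention; EnterExitFrame reserved stack slots for the remaining
// five, which are stored below.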
2453 // Stack pointer now points to cell where return address is to be written.
2454 // Arguments are before that on the stack or in registers.
2456 // Argument 9 (sp[20]): Pass current isolate address.
2457 __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2458 __ str(r0, MemOperand(sp, 5 * kPointerSize));
2460 // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
2461 __ mov(r0, Operand(1));
2462 __ str(r0, MemOperand(sp, 4 * kPointerSize));
2464 // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
2465 __ mov(r0, Operand(address_of_regexp_stack_memory_address));
2466 __ ldr(r0, MemOperand(r0, 0));
2467 __ mov(r2, Operand(address_of_regexp_stack_memory_size));
2468 __ ldr(r2, MemOperand(r2, 0));
2469 __ add(r0, r0, Operand(r2));
2470 __ str(r0, MemOperand(sp, 3 * kPointerSize));
2472 // Argument 6: Set the number of capture registers to zero to force global
2473 // regexps to behave as non-global. This does not affect non-global regexps.
2474 __ mov(r0, Operand::Zero());
2475 __ str(r0, MemOperand(sp, 2 * kPointerSize));
2477 // Argument 5 (sp[4]): static offsets vector buffer.
2479 Operand(ExternalReference::address_of_static_offsets_vector(
2481 __ str(r0, MemOperand(sp, 1 * kPointerSize));
2483 // For arguments 4 and 3 get string length, calculate start of string data and
2484 // calculate the shift of the index (0 for ASCII and 1 for two byte).
2485 __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2486 __ eor(r3, r3, Operand(1));
2487 // Load the length from the original subject string from the previous stack
2488 // frame. Therefore we have to use fp, which points exactly to two pointer
2489 // sizes below the previous sp. (Because creating a new stack frame pushes
2490 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2491 __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2492 // If slice offset is not 0, load the length from the original sliced string.
2493 // Argument 4, r3: End of string data
2494 // Argument 3, r2: Start of string data
2495 // Prepare start and end index of the input.
2496 __ add(r9, r7, Operand(r9, LSL, r3));
2497 __ add(r2, r9, Operand(r1, LSL, r3));
2499 __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
2501 __ add(r3, r9, Operand(r7, LSL, r3));
2503 // Argument 2 (r1): Previous index.
2506 // Argument 1 (r0): Subject string.
2507 __ mov(r0, subject);
2509 // Locate the code entry and call it.
2510 __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
2511 DirectCEntryStub stub(isolate());
2512 stub.GenerateCall(masm, r6);
2514 __ LeaveExitFrame(false, no_reg, true);
2516 last_match_info_elements = r6;
2519 // subject: subject string (callee saved)
2520 // regexp_data: RegExp data (callee saved)
2521 // last_match_info_elements: Last match info elements (callee saved)
2522 // Check the result.
2524 __ cmp(r0, Operand(1));
2525 // We expect exactly one result since we force the called regexp to behave as non-global.
2529 __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
2531 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2532 // If not exception it can only be retry. Handle that in the runtime system.
2534 // Result must now be exception. If there is no pending exception already, a
2535 // stack overflow (on the backtrack stack) was detected in RegExp code but
2536 // the exception has not been created yet. Handle that in the runtime system.
2537 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2538 __ mov(r1, Operand(isolate()->factory()->the_hole_value()));
2539 __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2541 __ ldr(r0, MemOperand(r2, 0));
2545 __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
2547 // Check if the exception is a termination. If so, throw as uncatchable.
2548 __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
2550 Label termination_exception;
2551 __ b(eq, &termination_exception);
2555 __ bind(&termination_exception);
2556 __ ThrowUncatchable(r0);
2559 // For failure and exception return null.
2560 __ mov(r0, Operand(isolate()->factory()->null_value()));
2561 __ add(sp, sp, Operand(4 * kPointerSize));
2564 // Process the result from the native regexp code.
2567 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2568 // Calculate number of capture registers (number_of_captures + 1) * 2.
2569 // Multiplying by 2 comes for free since r1 is smi-tagged.
2570 STATIC_ASSERT(kSmiTag == 0);
2571 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2572 __ add(r1, r1, Operand(2)); // r1 was a smi.
2574 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2575 __ JumpIfSmi(r0, &runtime);
2576 __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
2578 // Check that the JSArray is in fast case.
2579 __ ldr(last_match_info_elements,
2580 FieldMemOperand(r0, JSArray::kElementsOffset));
2581 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2582 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
2584 // Check that the last match info has space for the capture registers and the
2585 // additional information.
2587 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2588 __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
2589 __ cmp(r2, Operand::SmiUntag(r0));
2592 // r1: number of capture registers
2593 // r4: subject string
2594 // Store the capture count.
2596 __ str(r2, FieldMemOperand(last_match_info_elements,
2597 RegExpImpl::kLastCaptureCountOffset));
2598 // Store last subject and last input.
2600 FieldMemOperand(last_match_info_elements,
2601 RegExpImpl::kLastSubjectOffset));
2602 __ mov(r2, subject);
2603 __ RecordWriteField(last_match_info_elements,
2604 RegExpImpl::kLastSubjectOffset,
2609 __ mov(subject, r2);
2611 FieldMemOperand(last_match_info_elements,
2612 RegExpImpl::kLastInputOffset));
2613 __ RecordWriteField(last_match_info_elements,
2614 RegExpImpl::kLastInputOffset,
2620 // Get the static offsets vector filled by the native regexp code.
2621 ExternalReference address_of_static_offsets_vector =
2622 ExternalReference::address_of_static_offsets_vector(isolate());
2623 __ mov(r2, Operand(address_of_static_offsets_vector));
2625 // r1: number of capture registers
2626 // r2: offsets vector
2627 Label next_capture, done;
2628 // Capture register counter starts from number of capture registers and
2629 // counts down until wrapping after zero.
2631 last_match_info_elements,
2632 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2633 __ bind(&next_capture);
2634 __ sub(r1, r1, Operand(1), SetCC);
2636 // Read the value from the static offsets vector buffer.
2637 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
2638 // Store the smi value in the last match info.
2640 __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
2641 __ jmp(&next_capture);
2644 // Return last match info.
2645 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2646 __ add(sp, sp, Operand(4 * kPointerSize));
2649 // Do the runtime call to execute the regexp.
2651 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2653 // Deferred code for string handling.
2654 // (6) Not a long external string? If yes, go to (8).
2655 __ bind(&not_seq_nor_cons);
2656 // Compare flags are still set.
2657 __ b(gt, &not_long_external); // Go to (8).
2659 // (7) External string. Make it, offset-wise, look like a sequential string.
2660 __ bind(&external_string);
2661 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2662 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2663 if (FLAG_debug_code) {
2664 // Assert that we do not have a cons or slice (indirect strings) here.
2665 // Sequential strings have already been ruled out.
2666 __ tst(r0, Operand(kIsIndirectStringMask));
2667 __ Assert(eq, kExternalStringExpectedButNotFound);
2670 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2671 // Move the pointer so that offset-wise, it looks like a sequential string.
2672 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2675 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
2676 __ jmp(&seq_string); // Go to (5).
2678 // (8) Short external string or not a string? If yes, bail out to runtime.
2679 __ bind(&not_long_external);
2680 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
2681 __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
2684 // (9) Sliced string. Replace subject with parent. Go to (4).
2685 // Load offset into r9 and replace subject string with parent.
2686 __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2688 __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2689 __ jmp(&check_underlying); // Go to (4).
2690 #endif // V8_INTERPRETED_REGEXP
2694 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2695 // Cache the called function in a feedback vector slot. Cache states
2696 // are uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
2698 // r0 : number of arguments to the construct function
2699 // r1 : the function to call
2700 // r2 : Feedback vector
2701 // r3 : slot in feedback vector (Smi)
2702 Label initialize, done, miss, megamorphic, not_array_function;
2704 DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
2705 masm->isolate()->heap()->megamorphic_symbol());
2706 DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
2707 masm->isolate()->heap()->uninitialized_symbol());
2709 // Load the cache state into r4.
2710 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2711 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
2713 // A monomorphic cache hit or an already megamorphic state: invoke the
2714 // function without changing the state.
2718 if (!FLAG_pretenuring_call_new) {
2719 // If we came here, we need to see if we are the array function.
2720 // If we didn't have a matching function, and we didn't find the megamorphic
2721 // sentinel, then we have in the slot either some other function or an
2722 // AllocationSite. Do a map check on the object in r4.
2723 __ ldr(r5, FieldMemOperand(r4, 0));
2724 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2727 // Make sure the function is the Array() function
2728 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2730 __ b(ne, &megamorphic);
2736 // A monomorphic miss (i.e., here the cache is not uninitialized) goes megamorphic.
2738 __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
2739 __ b(eq, &initialize);
2740 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2741 // write-barrier is needed.
2742 __ bind(&megamorphic);
2743 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2744 __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
2745 __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
2748 // An uninitialized cache is patched with the function
2749 __ bind(&initialize);
2751 if (!FLAG_pretenuring_call_new) {
2752 // Make sure the function is the Array() function
2753 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2755 __ b(ne, &not_array_function);
2757 // The target function is the Array constructor.
2758 // Create an AllocationSite if we don't already have it, store it in the slot.
2761 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2763 // Arguments register must be smi-tagged to call out.
2765 __ Push(r3, r2, r1, r0);
2767 CreateAllocationSiteStub create_stub(masm->isolate());
2768 __ CallStub(&create_stub);
2770 __ Pop(r3, r2, r1, r0);
2775 __ bind(&not_array_function);
2778 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2779 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2780 __ str(r1, MemOperand(r4, 0));
2782 __ Push(r4, r2, r1);
2783 __ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
2784 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
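// Note: the write barrier is needed because a JSFunction (r1), a heap object,
// was just stored into the feedback vector (r2), which may live in old space.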
2791 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2792 // Do not transform the receiver for strict mode functions.
2793 __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2794 __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset));
2795 __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
2799 // Do not transform the receiver for native (compiler hints already in r4).
2800 __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
2805 static void EmitSlowCase(MacroAssembler* masm,
2807 Label* non_function) {
2808 // Check for function proxy.
2809 __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
2810 __ b(ne, non_function);
2811 __ push(r1); // put proxy as additional argument
2812 __ mov(r0, Operand(argc + 1, RelocInfo::NONE32));
2813 __ mov(r2, Operand::Zero());
2814 __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
2816 Handle<Code> adaptor =
2817 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2818 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2821 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2822 // of the original receiver from the call site).
2823 __ bind(non_function);
2824 __ str(r1, MemOperand(sp, argc * kPointerSize));
2825 __ mov(r0, Operand(argc)); // Set up the number of arguments.
2826 __ mov(r2, Operand::Zero());
2827 __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
2828 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2829 RelocInfo::CODE_TARGET);
2833 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2834 // Wrap the receiver and patch it back onto the stack.
2835 { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
2837 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2840 __ str(r0, MemOperand(sp, argc * kPointerSize));
2845 static void CallFunctionNoFeedback(MacroAssembler* masm,
2846 int argc, bool needs_checks,
2847 bool call_as_method) {
2848 // r1 : the function to call
2849 Label slow, non_function, wrap, cont;
2852 // Check that the function is really a JavaScript function.
2853 // r1: pushed function (to be verified)
2854 __ JumpIfSmi(r1, &non_function);
2856 // Goto slow case if we do not have a function.
2857 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2861 // Fast-case: Invoke the function now.
2862 // r1: pushed function
2863 ParameterCount actual(argc);
2865 if (call_as_method) {
2867 EmitContinueIfStrictOrNative(masm, &cont);
2870 // Compute the receiver in sloppy mode.
2871 __ ldr(r3, MemOperand(sp, argc * kPointerSize));
2874 __ JumpIfSmi(r3, &wrap);
2875 __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
2884 __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
2887 // Slow-case: Non-function called.
2889 EmitSlowCase(masm, argc, &non_function);
2892 if (call_as_method) {
2894 EmitWrapCase(masm, argc, &cont);
2899 void CallFunctionStub::Generate(MacroAssembler* masm) {
2900 CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
2904 void CallConstructStub::Generate(MacroAssembler* masm) {
2905 // r0 : number of arguments
2906 // r1 : the function to call
2907 // r2 : feedback vector
2908 // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
2910 Label slow, non_function_call;
2912 // Check that the function is not a smi.
2913 __ JumpIfSmi(r1, &non_function_call);
2914 // Check that the function is a JSFunction.
2915 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2918 if (RecordCallTarget()) {
2919 GenerateRecordCallTarget(masm);
2921 __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
2922 if (FLAG_pretenuring_call_new) {
2923 // Put the AllocationSite from the feedback vector into r2.
2924 // By adding kPointerSize we encode that we know the AllocationSite
2925 // entry is at the feedback vector slot given by r3 + 1.
2926 __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize + kPointerSize));
2928 Label feedback_register_initialized;
2929 // Put the AllocationSite from the feedback vector into r2, or undefined.
2930 __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
2931 __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
2932 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2933 __ b(eq, &feedback_register_initialized);
2934 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
2935 __ bind(&feedback_register_initialized);
2938 __ AssertUndefinedOrAllocationSite(r2, r5);
2941 // Jump to the function-specific construct stub.
2942 Register jmp_reg = r4;
2943 __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2944 __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
2945 SharedFunctionInfo::kConstructStubOffset));
2946 __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
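// Note: the construct stub is a Code object, so adding
// Code::kHeaderSize - kHeapObjectTag yields the address of its first
// instruction; writing that into pc performs a tail jump into the stub.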
2948 // r0: number of arguments
2949 // r1: called object
2953 __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
2954 __ b(ne, &non_function_call);
2955 __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2958 __ bind(&non_function_call);
2959 __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2961 // Set expected number of arguments to zero (not changing r0).
2962 __ mov(r2, Operand::Zero());
2963 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2964 RelocInfo::CODE_TARGET);
2968 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2969 __ ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2970 __ ldr(vector, FieldMemOperand(vector,
2971 JSFunction::kSharedFunctionInfoOffset));
2972 __ ldr(vector, FieldMemOperand(vector,
2973 SharedFunctionInfo::kFeedbackVectorOffset));
2977 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2981 int argc = state_.arg_count();
2982 ParameterCount actual(argc);
2984 EmitLoadTypeFeedbackVector(masm, r2);
2986 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2990 __ mov(r0, Operand(arg_count()));
2991 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2992 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
2994 // Verify that r4 contains an AllocationSite
2995 __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
2996 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
3000 ArrayConstructorStub stub(masm->isolate(), arg_count());
3001 __ TailCallStub(&stub);
3004 GenerateMiss(masm, IC::kCallIC_Customization_Miss);
3006 // The slow case, we need this no matter what to complete a call after a miss.
3007 CallFunctionNoFeedback(masm,
3013 __ stop("Unexpected code address");
3017 void CallICStub::Generate(MacroAssembler* masm) {
3019 // r3 - slot id (Smi)
3020 Label extra_checks_or_miss, slow_start;
3021 Label slow, non_function, wrap, cont;
3022 Label have_js_function;
3023 int argc = state_.arg_count();
3024 ParameterCount actual(argc);
3026 EmitLoadTypeFeedbackVector(masm, r2);
3028 // The checks. First, does r1 match the recorded monomorphic target?
3029 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
3030 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
3032 __ b(ne, &extra_checks_or_miss);
3034 __ bind(&have_js_function);
3035 if (state_.CallAsMethod()) {
3036 EmitContinueIfStrictOrNative(masm, &cont);
3037 // Compute the receiver in sloppy mode.
3038 __ ldr(r3, MemOperand(sp, argc * kPointerSize));
3040 __ JumpIfSmi(r3, &wrap);
3041 __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
3047 __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
3050 EmitSlowCase(masm, argc, &non_function);
3052 if (state_.CallAsMethod()) {
3054 EmitWrapCase(masm, argc, &cont);
3057 __ bind(&extra_checks_or_miss);
3060 __ CompareRoot(r4, Heap::kMegamorphicSymbolRootIndex);
3061 __ b(eq, &slow_start);
3062 __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
3065 if (!FLAG_trace_ic) {
3066 // We are going megamorphic. If the feedback is a JSFunction, it is fine
3067 // to handle it here. More complex cases are dealt with in the runtime.
3068 __ AssertNotSmi(r4);
3069 __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
3071 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
3072 __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
3073 __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
3074 __ jmp(&slow_start);
3077 // We are here because tracing is on or we are going monomorphic.
3079 GenerateMiss(masm, IC::kCallIC_Miss);
3082 __ bind(&slow_start);
3083 // Check that the function is really a JavaScript function.
3084 // r1: pushed function (to be verified)
3085 __ JumpIfSmi(r1, &non_function);
3087 // Goto slow case if we do not have a function.
3088 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
3090 __ jmp(&have_js_function);
3094 void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
3095 // Get the receiver of the function from the stack; 1 ~ return address.
3096 __ ldr(r4, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
3099 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
3101 // Push the receiver and the function and feedback info.
3102 __ Push(r4, r1, r2, r3);
3105 ExternalReference miss = ExternalReference(IC_Utility(id),
3107 __ CallExternalReference(miss, 4);
3109 // Move result to r1 and exit the internal frame.
3115 // StringCharCodeAtGenerator
3116 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3119 Label got_char_code;
3120 Label sliced_string;
3122 // If the receiver is a smi trigger the non-string case.
3123 __ JumpIfSmi(object_, receiver_not_string_);
3125 // Fetch the instance type of the receiver into result register.
3126 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3127 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3128 // If the receiver is not a string trigger the non-string case.
3129 __ tst(result_, Operand(kIsNotStringMask));
3130 __ b(ne, receiver_not_string_);
3132 // If the index is non-smi trigger the non-smi case.
3133 __ JumpIfNotSmi(index_, &index_not_smi_);
3134 __ bind(&got_smi_index_);
3136 // Check for index out of range.
3137 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
3138 __ cmp(ip, Operand(index_));
3139 __ b(ls, index_out_of_range_);
3141 __ SmiUntag(index_);
3143 StringCharLoadGenerator::Generate(masm,
3154 void StringCharCodeAtGenerator::GenerateSlow(
3155 MacroAssembler* masm,
3156 const RuntimeCallHelper& call_helper) {
3157 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3159 // Index is not a smi.
3160 __ bind(&index_not_smi_);
3161 // If index is a heap number, try converting it to an integer.
3164 Heap::kHeapNumberMapRootIndex,
3167 call_helper.BeforeCall(masm);
3169 __ push(index_); // Consumed by runtime conversion function.
3170 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3171 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3173 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3174 // NumberToSmi discards numbers that are not exact integers.
3175 __ CallRuntime(Runtime::kNumberToSmi, 1);
3177 // Save the conversion result before the pop instructions below
3178 // have a chance to overwrite it.
3179 __ Move(index_, r0);
3181 // Reload the instance type.
3182 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3183 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3184 call_helper.AfterCall(masm);
3185 // If index is still not a smi, it must be out of range.
3186 __ JumpIfNotSmi(index_, index_out_of_range_);
3187 // Otherwise, return to the fast path.
3188 __ jmp(&got_smi_index_);
3190 // Call runtime. We get here when the receiver is a string and the
3191 // index is a number, but the code of getting the actual character
3192 // is too complex (e.g., when the string needs to be flattened).
3193 __ bind(&call_runtime_);
3194 call_helper.BeforeCall(masm);
3196 __ Push(object_, index_);
3197 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
3198 __ Move(result_, r0);
3199 call_helper.AfterCall(masm);
3202 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3206 // -------------------------------------------------------------------------
3207 // StringCharFromCodeGenerator
3209 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3210 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3211 STATIC_ASSERT(kSmiTag == 0);
3212 STATIC_ASSERT(kSmiShiftSize == 0);
3213 DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1));
3215 Operand(kSmiTagMask |
3216 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3217 __ b(ne, &slow_case_);
3219 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3220 // At this point code register contains smi tagged ASCII char code.
3221 __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
3222 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3223 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
3224 __ b(eq, &slow_case_);
3229 void StringCharFromCodeGenerator::GenerateSlow(
3230 MacroAssembler* masm,
3231 const RuntimeCallHelper& call_helper) {
3232 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3234 __ bind(&slow_case_);
3235 call_helper.BeforeCall(masm);
3237 __ CallRuntime(Runtime::kCharFromCode, 1);
3238 __ Move(result_, r0);
3239 call_helper.AfterCall(masm);
3242 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3246 enum CopyCharactersFlags {
3248 DEST_ALWAYS_ALIGNED = 2
3252 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3257 String::Encoding encoding) {
3258 if (FLAG_debug_code) {
3259 // Check that destination is word aligned.
3260 __ tst(dest, Operand(kPointerAlignmentMask));
3261 __ Check(eq, kDestinationOfCopyNotAligned);
3264 // Assumes word reads and writes are little endian.
3265 // Nothing to do for zero characters.
3267 if (encoding == String::TWO_BYTE_ENCODING) {
3268 __ add(count, count, Operand(count), SetCC);
3271 Register limit = count; // Read until dest equals this.
3272 __ add(limit, dest, Operand(count));
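// Note: count is now a byte count for either encoding (it was doubled above
// for two-byte strings), so the ldrb/strb loop below copies the characters
// byte by byte until dest reaches limit.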
3274 Label loop_entry, loop;
3275 // Copy bytes from src to dest until dest hits limit.
3278 __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
3279 __ strb(scratch, MemOperand(dest, 1, PostIndex));
3280 __ bind(&loop_entry);
3281 __ cmp(dest, Operand(limit));
3288 void StringHelper::GenerateHashInit(MacroAssembler* masm,
3290 Register character) {
3291 // hash = seed + character;
3292 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
3293 // Untag smi seed and add the character.
3294 __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
3295 // hash += hash << 10;
3296 __ add(hash, hash, Operand(hash, LSL, 10));
3297 // hash ^= hash >> 6;
3298 __ eor(hash, hash, Operand(hash, LSR, 6));
3302 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3304 Register character) {
3305 // hash += character;
3306 __ add(hash, hash, Operand(character));
3307 // hash += hash << 10;
3308 __ add(hash, hash, Operand(hash, LSL, 10));
3309 // hash ^= hash >> 6;
3310 __ eor(hash, hash, Operand(hash, LSR, 6));
3314 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
3316 // hash += hash << 3;
3317 __ add(hash, hash, Operand(hash, LSL, 3));
3318 // hash ^= hash >> 11;
3319 __ eor(hash, hash, Operand(hash, LSR, 11));
3320 // hash += hash << 15;
3321 __ add(hash, hash, Operand(hash, LSL, 15));
3323 __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
3325 // if (hash == 0) hash = 27;
3326 __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
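// Note: taken together, GenerateHashInit, GenerateHashAddCharacter and
// GenerateHashGetHash compute (a C-like summary of the code above):
//   hash = seed + c0;  hash += hash << 10;  hash ^= hash >> 6;
//   for each further c: hash += c;  hash += hash << 10;  hash ^= hash >> 6;
//   hash += hash << 3;  hash ^= hash >> 11;  hash += hash << 15;
//   hash &= String::kHashBitMask;
//   if (hash == 0) hash = StringHasher::kZeroHash;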
3330 void SubStringStub::Generate(MacroAssembler* masm) {
3333 // Stack frame on entry.
3334 // lr: return address
3339 // This stub is called from the native-call %_SubString(...), so
3340 // nothing can be assumed about the arguments. It is tested that:
3341 // "string" is a sequential string,
3342 // both "from" and "to" are smis, and
3343 // 0 <= from <= to <= string.length.
3344 // If any of these assumptions fail, we call the runtime system.
3346 const int kToOffset = 0 * kPointerSize;
3347 const int kFromOffset = 1 * kPointerSize;
3348 const int kStringOffset = 2 * kPointerSize;
3350 __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
3351 STATIC_ASSERT(kFromOffset == kToOffset + 4);
3352 STATIC_ASSERT(kSmiTag == 0);
3353 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3355 // Arithmetic shift right by one un-smi-tags. In this case we rotate right
3356 // instead because we bail out on non-smi values: ROR and ASR are equivalent
3357 // for smis but they set the flags in a way that's easier to optimize.
3358 __ mov(r2, Operand(r2, ROR, 1), SetCC);
3359 __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
3360 // If either to or from had the smi tag bit set, then C is set now, and N
3361 // has the same value: we rotated by 1, so the bottom bit is now the top bit.
3362 // We want to bail out to runtime here if From is negative. In that case, the
3363 // next instruction is not executed and we fall through to bailing out to runtime.
3365 // Executed if both r2 and r3 are untagged integers.
3366 __ sub(r2, r2, Operand(r3), SetCC, cc);
3367 // One of the above un-smis or the above SUB could have set N==1.
3368 __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to.
3370 // Make sure first argument is a string.
3371 __ ldr(r0, MemOperand(sp, kStringOffset));
3372 __ JumpIfSmi(r0, &runtime);
3373 Condition is_string = masm->IsObjectStringType(r0, r1);
3374 __ b(NegateCondition(is_string), &runtime);
3377 __ cmp(r2, Operand(1));
3378 __ b(eq, &single_char);
3380 // Short-cut for the case of trivial substring.
3382 // r0: original string
3383 // r2: result string length
3384 __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
3385 __ cmp(r2, Operand(r4, ASR, 1));
3386 // Return original string.
3387 __ b(eq, &return_r0);
3388 // Longer than original string's length or negative: unsafe arguments.
3390 // Shorter than original string's length: an actual substring.
3392 // Deal with different string types: update the index if necessary
3393 // and put the underlying string into r5.
3394 // r0: original string
3395 // r1: instance type
3397 // r3: from index (untagged)
3398 Label underlying_unpacked, sliced_string, seq_or_external_string;
3399 // If the string is not indirect, it can only be sequential or external.
3400 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3401 STATIC_ASSERT(kIsIndirectStringMask != 0);
3402 __ tst(r1, Operand(kIsIndirectStringMask));
3403 __ b(eq, &seq_or_external_string);
3405 __ tst(r1, Operand(kSlicedNotConsMask));
3406 __ b(ne, &sliced_string);
3407 // Cons string. Check whether it is flat, then fetch first part.
3408 __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
3409 __ CompareRoot(r5, Heap::kempty_stringRootIndex);
3411 __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
3412 // Update instance type.
3413 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
3414 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
3415 __ jmp(&underlying_unpacked);
3417 __ bind(&sliced_string);
3418 // Sliced string. Fetch parent and correct start index by offset.
3419 __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
3420 __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
3421 __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
3422 // Update instance type.
3423 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
3424 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
3425 __ jmp(&underlying_unpacked);
3427 __ bind(&seq_or_external_string);
3428 // Sequential or external string. Just move string to the expected register.
3431 __ bind(&underlying_unpacked);
3433 if (FLAG_string_slices) {
3435 // r5: underlying subject string
3436 // r1: instance type of underlying subject string
3438 // r3: adjusted start index (untagged)
3439 __ cmp(r2, Operand(SlicedString::kMinLength));
3440 // Short slice. Copy instead of slicing.
3441 __ b(lt, ©_routine);
3442 // Allocate new sliced string. At this point we do not reload the instance
3443 // type including the string encoding because we simply rely on the info
3444 // provided by the original string. It does not matter if the original
3445 // string's encoding is wrong because we always have to recheck encoding of
3446 // the newly created string's parent anyways due to externalized strings.
3447 Label two_byte_slice, set_slice_header;
3448 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3449 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3450 __ tst(r1, Operand(kStringEncodingMask));
3451 __ b(eq, &two_byte_slice);
3452 __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
3453 __ jmp(&set_slice_header);
3454 __ bind(&two_byte_slice);
3455 __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
3456 __ bind(&set_slice_header);
3457 __ mov(r3, Operand(r3, LSL, 1));
3458 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
3459 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
3462 __ bind(©_routine);
3465 // r5: underlying subject string
3466 // r1: instance type of underlying subject string
3468 // r3: adjusted start index (untagged)
3469 Label two_byte_sequential, sequential_string, allocate_result;
3470 STATIC_ASSERT(kExternalStringTag != 0);
3471 STATIC_ASSERT(kSeqStringTag == 0);
3472 __ tst(r1, Operand(kExternalStringTag));
3473 __ b(eq, &sequential_string);
3475 // Handle external string.
3476 // Rule out short external strings.
3477 STATIC_ASSERT(kShortExternalStringTag != 0);
3478 __ tst(r1, Operand(kShortExternalStringTag));
3480 __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
3481 // r5 already points to the first character of underlying string.
3482 __ jmp(&allocate_result);
3484 __ bind(&sequential_string);
3485 // Locate first character of underlying subject string.
3486 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3487 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3489 __ bind(&allocate_result);
3490 // Sequential acii string. Allocate the result.
3491 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3492 __ tst(r1, Operand(kStringEncodingMask));
3493 __ b(eq, &two_byte_sequential);
3495 // Allocate and copy the resulting ASCII string.
3496 __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
3498 // Locate first character of substring to copy.
3500 // Locate first character of result.
3501 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3503 // r0: result string
3504 // r1: first character of result string
3505 // r2: result string length
3506 // r5: first character of substring to copy
3507 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3508 StringHelper::GenerateCopyCharacters(
3509 masm, r1, r5, r2, r3, String::ONE_BYTE_ENCODING);
3512 // Allocate and copy the resulting two-byte string.
3513 __ bind(&two_byte_sequential);
3514 __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
3516 // Locate first character of substring to copy.
3517 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3518 __ add(r5, r5, Operand(r3, LSL, 1));
3519 // Locate first character of result.
3520 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3522 // r0: result string.
3523 // r1: first character of result.
3524 // r2: result length.
3525 // r5: first character of substring to copy.
3526 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3527 StringHelper::GenerateCopyCharacters(
3528 masm, r1, r5, r2, r3, String::TWO_BYTE_ENCODING);
3530 __ bind(&return_r0);
3531 Counters* counters = isolate()->counters();
3532 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
3536 // Just jump to runtime to create the sub string.
3538 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3540 __ bind(&single_char);
3541 // r0: original string
3542 // r1: instance type
3544 // r3: from index (untagged)
3546 StringCharAtGenerator generator(
3547 r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3548 generator.GenerateFast(masm);
3551 generator.SkipSlow(masm, &runtime);
3555 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
3560 Register scratch3) {
3561 Register length = scratch1;
3564 Label strings_not_equal, check_zero_length;
3565 __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
3566 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3567 __ cmp(length, scratch2);
3568 __ b(eq, &check_zero_length);
3569 __ bind(&strings_not_equal);
3570 __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
3573 // Check if the length is zero.
3574 Label compare_chars;
3575 __ bind(&check_zero_length);
3576 STATIC_ASSERT(kSmiTag == 0);
3577 __ cmp(length, Operand::Zero());
3578 __ b(ne, &compare_chars);
3579 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3582 // Compare characters.
3583 __ bind(&compare_chars);
3584 GenerateAsciiCharsCompareLoop(masm,
3585 left, right, length, scratch2, scratch3,
3586 &strings_not_equal);
3588 // Characters are equal.
3589 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3594 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
3600 Register scratch4) {
3601 Label result_not_equal, compare_lengths;
3602 // Find minimum length and length difference.
3603 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
3604 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3605 __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
3606 Register length_delta = scratch3;
3607 __ mov(scratch1, scratch2, LeaveCC, gt);
3608 Register min_length = scratch1;
3609 STATIC_ASSERT(kSmiTag == 0);
3610 __ cmp(min_length, Operand::Zero());
3611 __ b(eq, &compare_lengths);
3614 GenerateAsciiCharsCompareLoop(masm,
3615 left, right, min_length, scratch2, scratch4,
3618 // Compare lengths - strings up to min-length are equal.
3619 __ bind(&compare_lengths);
3620 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3621 // Use length_delta as result if it's zero.
3622 __ mov(r0, Operand(length_delta), SetCC);
3623 __ bind(&result_not_equal);
3624 // Conditionally update the result based either on length_delta or
3625 // the last comparion performed in the loop above.
3626 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
3627 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
3632 void StringCompareStub::GenerateAsciiCharsCompareLoop(
3633 MacroAssembler* masm,
3639 Label* chars_not_equal) {
3640 // Change index to run from -length to -1 by adding length to string
3641 // start. This means that loop ends when index reaches zero, which
3642 // doesn't need an additional compare.
3643 __ SmiUntag(length);
3644 __ add(scratch1, length,
3645 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3646 __ add(left, left, Operand(scratch1));
3647 __ add(right, right, Operand(scratch1));
3648 __ rsb(length, length, Operand::Zero());
3649 Register index = length; // index = -length;
3654 __ ldrb(scratch1, MemOperand(left, index));
3655 __ ldrb(scratch2, MemOperand(right, index));
3656 __ cmp(scratch1, scratch2);
3657 __ b(ne, chars_not_equal);
3658 __ add(index, index, Operand(1), SetCC);
3663 void StringCompareStub::Generate(MacroAssembler* masm) {
3666 Counters* counters = isolate()->counters();
3668 // Stack frame on entry.
3669 // sp[0]: right string
3670 // sp[4]: left string
3671 __ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1.
3675 __ b(ne, ¬_same);
3676 STATIC_ASSERT(EQUAL == 0);
3677 STATIC_ASSERT(kSmiTag == 0);
3678 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3679 __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
3680 __ add(sp, sp, Operand(2 * kPointerSize));
3685 // Check that both objects are sequential ASCII strings.
3686 __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
3688 // Compare flat ASCII strings natively. Remove arguments from stack first.
3689 __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
3690 __ add(sp, sp, Operand(2 * kPointerSize));
3691 GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
3693 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
3694 // tagged as a small integer.
3696 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3700 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3701 // ----------- S t a t e -------------
3704 // -- lr : return address
3705 // -----------------------------------
3707 // Load r2 with the allocation site. We stick an undefined dummy value here
3708 // and replace it with the real allocation site later when we instantiate this
3709 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3710 __ Move(r2, handle(isolate()->heap()->undefined_value()));
3712 // Make sure that we actually patched the allocation site.
3713 if (FLAG_debug_code) {
3714 __ tst(r2, Operand(kSmiTagMask));
3715 __ Assert(ne, kExpectedAllocationSite);
3717 __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
3718 __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
3721 __ Assert(eq, kExpectedAllocationSite);
3724 // Tail call into the stub that handles binary operations with allocation
3726 BinaryOpWithAllocationSiteStub stub(isolate(), state_);
3727 __ TailCallStub(&stub);
3731 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
3732 DCHECK(state_ == CompareIC::SMI);
3735 __ JumpIfNotSmi(r2, &miss);
3737 if (GetCondition() == eq) {
3738 // For equality we do not care about the sign of the result.
3739 __ sub(r0, r0, r1, SetCC);
3741 // Untag before subtracting to avoid handling overflow.
3743 __ sub(r0, r1, Operand::SmiUntag(r0));
3752 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
3753 DCHECK(state_ == CompareIC::NUMBER);
3756 Label unordered, maybe_undefined1, maybe_undefined2;
3759 if (left_ == CompareIC::SMI) {
3760 __ JumpIfNotSmi(r1, &miss);
3762 if (right_ == CompareIC::SMI) {
3763 __ JumpIfNotSmi(r0, &miss);
3766 // Inlining the double comparison and falling back to the general compare
3767 // stub if NaN is involved.
3768 // Load left and right operand.
3769 Label done, left, left_smi, right_smi;
3770 __ JumpIfSmi(r0, &right_smi);
3771 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3773 __ sub(r2, r0, Operand(kHeapObjectTag));
3774 __ vldr(d1, r2, HeapNumber::kValueOffset);
3776 __ bind(&right_smi);
3777 __ SmiToDouble(d1, r0);
3780 __ JumpIfSmi(r1, &left_smi);
3781 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3783 __ sub(r2, r1, Operand(kHeapObjectTag));
3784 __ vldr(d0, r2, HeapNumber::kValueOffset);
3787 __ SmiToDouble(d0, r1);
3790 // Compare operands.
3791 __ VFPCompareAndSetFlags(d0, d1);
3793 // Don't base result on status bits when a NaN is involved.
3794 __ b(vs, &unordered);
3796 // Return a result of -1, 0, or 1, based on status bits.
3797 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
3798 __ mov(r0, Operand(LESS), LeaveCC, lt);
3799 __ mov(r0, Operand(GREATER), LeaveCC, gt);
3802 __ bind(&unordered);
3803 __ bind(&generic_stub);
3804 ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
3805 CompareIC::GENERIC);
3806 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3808 __ bind(&maybe_undefined1);
3809 if (Token::IsOrderedRelationalCompareOp(op_)) {
3810 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
3812 __ JumpIfSmi(r1, &unordered);
3813 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
3814 __ b(ne, &maybe_undefined2);
3818 __ bind(&maybe_undefined2);
3819 if (Token::IsOrderedRelationalCompareOp(op_)) {
3820 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
3821 __ b(eq, &unordered);
3829 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3830 DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
3833 // Registers containing left and right operands respectively.
3835 Register right = r0;
3839 // Check that both operands are heap objects.
3840 __ JumpIfEitherSmi(left, right, &miss);
3842 // Check that both operands are internalized strings.
3843 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3844 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3845 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3846 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3847 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3848 __ orr(tmp1, tmp1, Operand(tmp2));
3849 __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3852 // Internalized strings are compared by identity.
3853 __ cmp(left, right);
3854 // Make sure r0 is non-zero. At this point input operands are
3855 // guaranteed to be non-zero.
3856 DCHECK(right.is(r0));
3857 STATIC_ASSERT(EQUAL == 0);
3858 STATIC_ASSERT(kSmiTag == 0);
3859 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3867 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
3868 DCHECK(state_ == CompareIC::UNIQUE_NAME);
3869 DCHECK(GetCondition() == eq);
3872 // Registers containing left and right operands respectively.
3874 Register right = r0;
3878 // Check that both operands are heap objects.
3879 __ JumpIfEitherSmi(left, right, &miss);
3881 // Check that both operands are unique names. This leaves the instance
3882 // types loaded in tmp1 and tmp2.
3883 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3884 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3885 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3886 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3888 __ JumpIfNotUniqueName(tmp1, &miss);
3889 __ JumpIfNotUniqueName(tmp2, &miss);
3891 // Unique names are compared by identity.
3892 __ cmp(left, right);
3893 // Make sure r0 is non-zero. At this point input operands are
3894 // guaranteed to be non-zero.
3895 DCHECK(right.is(r0));
3896 STATIC_ASSERT(EQUAL == 0);
3897 STATIC_ASSERT(kSmiTag == 0);
3898 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3906 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
3907 DCHECK(state_ == CompareIC::STRING);
3910 bool equality = Token::IsEqualityOp(op_);
3912 // Registers containing left and right operands respectively.
3914 Register right = r0;
3920 // Check that both operands are heap objects.
3921 __ JumpIfEitherSmi(left, right, &miss);
3923 // Check that both operands are strings. This leaves the instance
3924 // types loaded in tmp1 and tmp2.
3925 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3926 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3927 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3928 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3929 STATIC_ASSERT(kNotStringTag != 0);
3930 __ orr(tmp3, tmp1, tmp2);
3931 __ tst(tmp3, Operand(kIsNotStringMask));
3934 // Fast check for identical strings.
3935 __ cmp(left, right);
3936 STATIC_ASSERT(EQUAL == 0);
3937 STATIC_ASSERT(kSmiTag == 0);
3938 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3941 // Handle not identical strings.
3943 // Check that both strings are internalized strings. If they are, we're done
3944 // because we already know they are not identical. We know they are both
3947 DCHECK(GetCondition() == eq);
3948 STATIC_ASSERT(kInternalizedTag == 0);
3949 __ orr(tmp3, tmp1, Operand(tmp2));
3950 __ tst(tmp3, Operand(kIsNotInternalizedMask));
3951 // Make sure r0 is non-zero. At this point input operands are
3952 // guaranteed to be non-zero.
3953 DCHECK(right.is(r0));
3957 // Check that both strings are sequential ASCII.
3959 __ JumpIfBothInstanceTypesAreNotSequentialAscii(
3960 tmp1, tmp2, tmp3, tmp4, &runtime);
3962 // Compare flat ASCII strings. Returns when done.
3964 StringCompareStub::GenerateFlatAsciiStringEquals(
3965 masm, left, right, tmp1, tmp2, tmp3);
3967 StringCompareStub::GenerateCompareFlatAsciiStrings(
3968 masm, left, right, tmp1, tmp2, tmp3, tmp4);
3971 // Handle more complex cases in runtime.
3973 __ Push(left, right);
3975 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3977 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3985 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
3986 DCHECK(state_ == CompareIC::OBJECT);
3988 __ and_(r2, r1, Operand(r0));
3989 __ JumpIfSmi(r2, &miss);
3991 __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
3993 __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
3996 DCHECK(GetCondition() == eq);
3997 __ sub(r0, r0, Operand(r1));
4005 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
4007 __ and_(r2, r1, Operand(r0));
4008 __ JumpIfSmi(r2, &miss);
4009 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4010 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
4011 __ cmp(r2, Operand(known_map_));
4013 __ cmp(r3, Operand(known_map_));
4016 __ sub(r0, r0, Operand(r1));
4025 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
4027 // Call the runtime system in a fresh internal frame.
4028 ExternalReference miss =
4029 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
4031 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
4033 __ Push(lr, r1, r0);
4034 __ mov(ip, Operand(Smi::FromInt(op_)));
4036 __ CallExternalReference(miss, 3);
4037 // Compute the entry point of the rewritten stub.
4038 __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
4039 // Restore registers.
4048 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4049 // Place the return address on the stack, making the call
4050 // GC safe. The RegExp backend also relies on this.
4051 __ str(lr, MemOperand(sp, 0));
4052 __ blx(ip); // Call the C++ function.
4053 __ VFPEnsureFPSCRState(r2);
4054 __ ldr(pc, MemOperand(sp, 0));
4058 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4061 reinterpret_cast<intptr_t>(GetCode().location());
4062 __ Move(ip, target);
4063 __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
4064 __ blx(lr); // Call the stub.
4068 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4072 Register properties,
4074 Register scratch0) {
4075 DCHECK(name->IsUniqueName());
4076 // If names of slots in range from 1 to kProbes - 1 for the hash value are
4077 // not equal to the name and kProbes-th slot is not used (its name is the
4078 // undefined value), it guarantees the hash table doesn't contain the
4079 // property. It's true even if some slots represent deleted properties
4080 // (their names are the hole value).
4081 for (int i = 0; i < kInlinedProbes; i++) {
4082 // scratch0 points to properties hash.
4083 // Compute the masked index: (hash + i + i * i) & mask.
4084 Register index = scratch0;
4085 // Capacity is smi 2^n.
4086 __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
4087 __ sub(index, index, Operand(1));
4088 __ and_(index, index, Operand(
4089 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
4091 // Scale the index by multiplying by the entry size.
4092 DCHECK(NameDictionary::kEntrySize == 3);
4093 __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
4095 Register entity_name = scratch0;
4096 // Having undefined at this place means the name is not contained.
4097 DCHECK_EQ(kSmiTagSize, 1);
4098 Register tmp = properties;
4099 __ add(tmp, properties, Operand(index, LSL, 1));
4100 __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4102 DCHECK(!tmp.is(entity_name));
4103 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
4104 __ cmp(entity_name, tmp);
4107 // Load the hole ready for use below:
4108 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
4110 // Stop if found the property.
4111 __ cmp(entity_name, Operand(Handle<Name>(name)));
4115 __ cmp(entity_name, tmp);
4118 // Check if the entry name is not a unique name.
4119 __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4120 __ ldrb(entity_name,
4121 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4122 __ JumpIfNotUniqueName(entity_name, miss);
4125 // Restore the properties.
4127 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4130 const int spill_mask =
4131 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
4132 r2.bit() | r1.bit() | r0.bit());
4134 __ stm(db_w, sp, spill_mask);
4135 __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4136 __ mov(r1, Operand(Handle<Name>(name)));
4137 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
4139 __ cmp(r0, Operand::Zero());
4140 __ ldm(ia_w, sp, spill_mask);
4147 // Probe the name dictionary in the |elements| register. Jump to the
4148 // |done| label if a property with the given name is found. Jump to
4149 // the |miss| label otherwise.
4150 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
4151 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4157 Register scratch2) {
4158 DCHECK(!elements.is(scratch1));
4159 DCHECK(!elements.is(scratch2));
4160 DCHECK(!name.is(scratch1));
4161 DCHECK(!name.is(scratch2));
4163 __ AssertName(name);
4165 // Compute the capacity mask.
4166 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
4167 __ SmiUntag(scratch1);
4168 __ sub(scratch1, scratch1, Operand(1));
4170 // Generate an unrolled loop that performs a few probes before
4171 // giving up. Measurements done on Gmail indicate that 2 probes
4172 // cover ~93% of loads from dictionaries.
4173 for (int i = 0; i < kInlinedProbes; i++) {
4174 // Compute the masked index: (hash + i + i * i) & mask.
4175 __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4177 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4178 // the hash in a separate instruction. The value hash + i + i * i is right
4179 // shifted in the following and instruction.
4180 DCHECK(NameDictionary::GetProbeOffset(i) <
4181 1 << (32 - Name::kHashFieldOffset));
4182 __ add(scratch2, scratch2, Operand(
4183 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4185 __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
4187 // Scale the index by multiplying by the element size.
4188 DCHECK(NameDictionary::kEntrySize == 3);
4189 // scratch2 = scratch2 * 3.
4190 __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
4192 // Check if the key is identical to the name.
4193 __ add(scratch2, elements, Operand(scratch2, LSL, 2));
4194 __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
4195 __ cmp(name, Operand(ip));
4199 const int spill_mask =
4200 (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
4201 r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
4202 ~(scratch1.bit() | scratch2.bit());
4204 __ stm(db_w, sp, spill_mask);
4206 DCHECK(!elements.is(r1));
4208 __ Move(r0, elements);
4210 __ Move(r0, elements);
4213 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4215 __ cmp(r0, Operand::Zero());
4216 __ mov(scratch2, Operand(r2));
4217 __ ldm(ia_w, sp, spill_mask);
4224 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4225 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4226 // we cannot call anything that could cause a GC from this stub.
4228 // result: NameDictionary to probe
4230 // dictionary: NameDictionary to probe.
4231 // index: will hold an index of entry if lookup is successful.
4232 // might alias with result_.
4234 // result_ is zero if lookup failed, non zero otherwise.
4236 Register result = r0;
4237 Register dictionary = r0;
4239 Register index = r2;
4242 Register undefined = r5;
4243 Register entry_key = r6;
4245 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4247 __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
4249 __ sub(mask, mask, Operand(1));
4251 __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4253 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4255 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4256 // Compute the masked index: (hash + i + i * i) & mask.
4257 // Capacity is smi 2^n.
4259 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4260 // the hash in a separate instruction. The value hash + i + i * i is right
4261 // shifted in the following and instruction.
4262 DCHECK(NameDictionary::GetProbeOffset(i) <
4263 1 << (32 - Name::kHashFieldOffset));
4264 __ add(index, hash, Operand(
4265 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4267 __ mov(index, Operand(hash));
4269 __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
4271 // Scale the index by multiplying by the entry size.
4272 DCHECK(NameDictionary::kEntrySize == 3);
4273 __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
4275 DCHECK_EQ(kSmiTagSize, 1);
4276 __ add(index, dictionary, Operand(index, LSL, 2));
4277 __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
4279 // Having undefined at this place means the name is not contained.
4280 __ cmp(entry_key, Operand(undefined));
4281 __ b(eq, ¬_in_dictionary);
4283 // Stop if found the property.
4284 __ cmp(entry_key, Operand(key));
4285 __ b(eq, &in_dictionary);
4287 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
4288 // Check if the entry name is not a unique name.
4289 __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4291 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4292 __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
4296 __ bind(&maybe_in_dictionary);
4297 // If we are doing negative lookup then probing failure should be
4298 // treated as a lookup success. For positive lookup probing failure
4299 // should be treated as lookup failure.
4300 if (mode_ == POSITIVE_LOOKUP) {
4301 __ mov(result, Operand::Zero());
4305 __ bind(&in_dictionary);
4306 __ mov(result, Operand(1));
4309 __ bind(¬_in_dictionary);
4310 __ mov(result, Operand::Zero());
4315 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4317 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4319 // Hydrogen code stubs need stub2 at snapshot time.
4320 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4325 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
4326 // the value has just been written into the object, now this stub makes sure
4327 // we keep the GC informed. The word in the object where the value has been
4328 // written is in the address register.
4329 void RecordWriteStub::Generate(MacroAssembler* masm) {
4330 Label skip_to_incremental_noncompacting;
4331 Label skip_to_incremental_compacting;
4333 // The first two instructions are generated with labels so as to get the
4334 // offset fixed up correctly by the bind(Label*) call. We patch it back and
4335 // forth between a compare instructions (a nop in this position) and the
4336 // real branch when we start and stop incremental heap marking.
4337 // See RecordWriteStub::Patch for details.
4339 // Block literal pool emission, as the position of these two instructions
4340 // is assumed by the patching code.
4341 Assembler::BlockConstPoolScope block_const_pool(masm);
4342 __ b(&skip_to_incremental_noncompacting);
4343 __ b(&skip_to_incremental_compacting);
4346 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4347 __ RememberedSetHelper(object_,
4351 MacroAssembler::kReturnAtEnd);
4355 __ bind(&skip_to_incremental_noncompacting);
4356 GenerateIncremental(masm, INCREMENTAL);
4358 __ bind(&skip_to_incremental_compacting);
4359 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4361 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4362 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4363 DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
4364 DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
4365 PatchBranchIntoNop(masm, 0);
4366 PatchBranchIntoNop(masm, Assembler::kInstrSize);
4370 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4373 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4374 Label dont_need_remembered_set;
4376 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
4377 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4379 &dont_need_remembered_set);
4381 __ CheckPageFlag(regs_.object(),
4383 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4385 &dont_need_remembered_set);
4387 // First notify the incremental marker if necessary, then update the
4389 CheckNeedsToInformIncrementalMarker(
4390 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4391 InformIncrementalMarker(masm);
4392 regs_.Restore(masm);
4393 __ RememberedSetHelper(object_,
4397 MacroAssembler::kReturnAtEnd);
4399 __ bind(&dont_need_remembered_set);
4402 CheckNeedsToInformIncrementalMarker(
4403 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4404 InformIncrementalMarker(masm);
4405 regs_.Restore(masm);
4410 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4411 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
4412 int argument_count = 3;
4413 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4415 r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4416 DCHECK(!address.is(regs_.object()));
4417 DCHECK(!address.is(r0));
4418 __ Move(address, regs_.address());
4419 __ Move(r0, regs_.object());
4420 __ Move(r1, address);
4421 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
4423 AllowExternalCallThatCantCauseGC scope(masm);
4425 ExternalReference::incremental_marking_record_write_function(isolate()),
4427 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
4431 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4432 MacroAssembler* masm,
4433 OnNoNeedToInformIncrementalMarker on_no_need,
4436 Label need_incremental;
4437 Label need_incremental_pop_scratch;
4439 __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4440 __ ldr(regs_.scratch1(),
4441 MemOperand(regs_.scratch0(),
4442 MemoryChunk::kWriteBarrierCounterOffset));
4443 __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
4444 __ str(regs_.scratch1(),
4445 MemOperand(regs_.scratch0(),
4446 MemoryChunk::kWriteBarrierCounterOffset));
4447 __ b(mi, &need_incremental);
4449 // Let's look at the color of the object: If it is not black we don't have
4450 // to inform the incremental marker.
4451 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4453 regs_.Restore(masm);
4454 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4455 __ RememberedSetHelper(object_,
4459 MacroAssembler::kReturnAtEnd);
4466 // Get the value from the slot.
4467 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
4469 if (mode == INCREMENTAL_COMPACTION) {
4470 Label ensure_not_white;
4472 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4473 regs_.scratch1(), // Scratch.
4474 MemoryChunk::kEvacuationCandidateMask,
4478 __ CheckPageFlag(regs_.object(),
4479 regs_.scratch1(), // Scratch.
4480 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4484 __ bind(&ensure_not_white);
4487 // We need extra registers for this, so we push the object and the address
4488 // register temporarily.
4489 __ Push(regs_.object(), regs_.address());
4490 __ EnsureNotWhite(regs_.scratch0(), // The value.
4491 regs_.scratch1(), // Scratch.
4492 regs_.object(), // Scratch.
4493 regs_.address(), // Scratch.
4494 &need_incremental_pop_scratch);
4495 __ Pop(regs_.object(), regs_.address());
4497 regs_.Restore(masm);
4498 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4499 __ RememberedSetHelper(object_,
4503 MacroAssembler::kReturnAtEnd);
4508 __ bind(&need_incremental_pop_scratch);
4509 __ Pop(regs_.object(), regs_.address());
4511 __ bind(&need_incremental);
4513 // Fall through when we need to inform the incremental marker.
4517 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4518 // ----------- S t a t e -------------
4519 // -- r0 : element value to store
4520 // -- r3 : element index as smi
4521 // -- sp[0] : array literal index in function as smi
4522 // -- sp[4] : array literal
4523 // clobbers r1, r2, r4
4524 // -----------------------------------
4527 Label double_elements;
4529 Label slow_elements;
4530 Label fast_elements;
4532 // Get array literal index, array literal and its map.
4533 __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
4534 __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
4535 __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
4537 __ CheckFastElements(r2, r5, &double_elements);
4538 // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
4539 __ JumpIfSmi(r0, &smi_element);
4540 __ CheckFastSmiElements(r2, r5, &fast_elements);
4542 // Store into the array literal requires a elements transition. Call into
4544 __ bind(&slow_elements);
4546 __ Push(r1, r3, r0);
4547 __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4548 __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
4550 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4552 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4553 __ bind(&fast_elements);
4554 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4555 __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
4556 __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4557 __ str(r0, MemOperand(r6, 0));
4558 // Update the write barrier for the array store.
4559 __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
4560 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4563 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4564 // and value is Smi.
4565 __ bind(&smi_element);
4566 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4567 __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
4568 __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
4571 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
4572 __ bind(&double_elements);
4573 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4574 __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
4579 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4580 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4581 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4582 int parameter_count_offset =
4583 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4584 __ ldr(r1, MemOperand(fp, parameter_count_offset));
4585 if (function_mode_ == JS_FUNCTION_STUB_MODE) {
4586 __ add(r1, r1, Operand(1));
4588 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4589 __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
4595 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4596 if (masm->isolate()->function_entry_hook() != NULL) {
4597 ProfileEntryHookStub stub(masm->isolate());
4598 int code_size = masm->CallStubSize(&stub) + 2 * Assembler::kInstrSize;
4599 PredictableCodeSizeScope predictable(masm, code_size);
4607 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4608 // The entry hook is a "push lr" instruction, followed by a call.
4609 const int32_t kReturnAddressDistanceFromFunctionStart =
4610 3 * Assembler::kInstrSize;
4612 // This should contain all kCallerSaved registers.
4613 const RegList kSavedRegs =
4620 // We also save lr, so the count here is one higher than the mask indicates.
4621 const int32_t kNumSavedRegs = 7;
4623 DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);
4625 // Save all caller-save registers as this may be called from anywhere.
4626 __ stm(db_w, sp, kSavedRegs | lr.bit());
4628 // Compute the function's address for the first argument.
4629 __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
4631 // The caller's return address is above the saved temporaries.
4632 // Grab that for the second argument to the hook.
4633 __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
4635 // Align the stack if necessary.
4636 int frame_alignment = masm->ActivationFrameAlignment();
4637 if (frame_alignment > kPointerSize) {
4639 DCHECK(IsPowerOf2(frame_alignment));
4640 __ and_(sp, sp, Operand(-frame_alignment));
4643 #if V8_HOST_ARCH_ARM
4644 int32_t entry_hook =
4645 reinterpret_cast<int32_t>(isolate()->function_entry_hook());
4646 __ mov(ip, Operand(entry_hook));
4648 // Under the simulator we need to indirect the entry hook through a
4649 // trampoline function at a known address.
4650 // It additionally takes an isolate as a third parameter
4651 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
4653 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4654 __ mov(ip, Operand(ExternalReference(&dispatcher,
4655 ExternalReference::BUILTIN_CALL,
4660 // Restore the stack pointer if needed.
4661 if (frame_alignment > kPointerSize) {
4665 // Also pop pc to get Ret(0).
4666 __ ldm(ia_w, sp, kSavedRegs | pc.bit());
4671 static void CreateArrayDispatch(MacroAssembler* masm,
4672 AllocationSiteOverrideMode mode) {
4673 if (mode == DISABLE_ALLOCATION_SITES) {
4674 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4675 __ TailCallStub(&stub);
4676 } else if (mode == DONT_OVERRIDE) {
4677 int last_index = GetSequenceIndexFromFastElementsKind(
4678 TERMINAL_FAST_ELEMENTS_KIND);
4679 for (int i = 0; i <= last_index; ++i) {
4680 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4681 __ cmp(r3, Operand(kind));
4682 T stub(masm->isolate(), kind);
4683 __ TailCallStub(&stub, eq);
4686 // If we reached this point there is a problem.
4687 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4694 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4695 AllocationSiteOverrideMode mode) {
4696 // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4697 // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4698 // r0 - number of arguments
4699 // r1 - constructor?
4700 // sp[0] - last argument
4701 Label normal_sequence;
4702 if (mode == DONT_OVERRIDE) {
4703 DCHECK(FAST_SMI_ELEMENTS == 0);
4704 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
4705 DCHECK(FAST_ELEMENTS == 2);
4706 DCHECK(FAST_HOLEY_ELEMENTS == 3);
4707 DCHECK(FAST_DOUBLE_ELEMENTS == 4);
4708 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4710 // is the low bit set? If so, we are holey and that is good.
4711 __ tst(r3, Operand(1));
4712 __ b(ne, &normal_sequence);
4715 // look at the first argument
4716 __ ldr(r5, MemOperand(sp, 0));
4717 __ cmp(r5, Operand::Zero());
4718 __ b(eq, &normal_sequence);
4720 if (mode == DISABLE_ALLOCATION_SITES) {
4721 ElementsKind initial = GetInitialFastElementsKind();
4722 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4724 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4726 DISABLE_ALLOCATION_SITES);
4727 __ TailCallStub(&stub_holey);
4729 __ bind(&normal_sequence);
4730 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4732 DISABLE_ALLOCATION_SITES);
4733 __ TailCallStub(&stub);
4734 } else if (mode == DONT_OVERRIDE) {
4735 // We are going to create a holey array, but our kind is non-holey.
4736 // Fix kind and retry (only if we have an allocation site in the slot).
4737 __ add(r3, r3, Operand(1));
4739 if (FLAG_debug_code) {
4740 __ ldr(r5, FieldMemOperand(r2, 0));
4741 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
4742 __ Assert(eq, kExpectedAllocationSite);
4745 // Save the resulting elements kind in type info. We can't just store r3
4746 // in the AllocationSite::transition_info field because elements kind is
4747 // restricted to a portion of the field...upper bits need to be left alone.
4748 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4749 __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
4750 __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
4751 __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
4753 __ bind(&normal_sequence);
4754 int last_index = GetSequenceIndexFromFastElementsKind(
4755 TERMINAL_FAST_ELEMENTS_KIND);
4756 for (int i = 0; i <= last_index; ++i) {
4757 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4758 __ cmp(r3, Operand(kind));
4759 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4760 __ TailCallStub(&stub, eq);
4763 // If we reached this point there is a problem.
4764 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4772 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4773 int to_index = GetSequenceIndexFromFastElementsKind(
4774 TERMINAL_FAST_ELEMENTS_KIND);
4775 for (int i = 0; i <= to_index; ++i) {
4776 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4777 T stub(isolate, kind);
4779 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4780 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4787 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4788 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4790 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4792 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4797 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4799 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4800 for (int i = 0; i < 2; i++) {
4801 // For internal arrays we only need a few things
4802 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4804 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4806 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4812 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4813 MacroAssembler* masm,
4814 AllocationSiteOverrideMode mode) {
4815 if (argument_count_ == ANY) {
4816 Label not_zero_case, not_one_case;
4818 __ b(ne, ¬_zero_case);
4819 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4821 __ bind(¬_zero_case);
4822 __ cmp(r0, Operand(1));
4823 __ b(gt, ¬_one_case);
4824 CreateArrayDispatchOneArgument(masm, mode);
4826 __ bind(¬_one_case);
4827 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4828 } else if (argument_count_ == NONE) {
4829 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4830 } else if (argument_count_ == ONE) {
4831 CreateArrayDispatchOneArgument(masm, mode);
4832 } else if (argument_count_ == MORE_THAN_ONE) {
4833 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4840 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4841 // ----------- S t a t e -------------
4842 // -- r0 : argc (only if argument_count_ == ANY)
4843 // -- r1 : constructor
4844 // -- r2 : AllocationSite or undefined
4845 // -- sp[0] : return address
4846 // -- sp[4] : last argument
4847 // -----------------------------------
4849 if (FLAG_debug_code) {
4850 // The array construct code is only set for the global and natives
4851 // builtin Array functions which always have maps.
4853 // Initial map for the builtin Array function should be a map.
4854 __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4855 // Will both indicate a NULL and a Smi.
4856 __ tst(r4, Operand(kSmiTagMask));
4857 __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
4858 __ CompareObjectType(r4, r4, r5, MAP_TYPE);
4859 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4861 // We should either have undefined in r2 or a valid AllocationSite
4862 __ AssertUndefinedOrAllocationSite(r2, r4);
4866 // Get the elements kind and case on that.
4867 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
4870 __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
4872 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4873 __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
4874 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4877 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4881 void InternalArrayConstructorStub::GenerateCase(
4882 MacroAssembler* masm, ElementsKind kind) {
4883 __ cmp(r0, Operand(1));
4885 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4886 __ TailCallStub(&stub0, lo);
4888 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4889 __ TailCallStub(&stubN, hi);
4891 if (IsFastPackedElementsKind(kind)) {
4892 // We might need to create a holey array
4893 // look at the first argument
4894 __ ldr(r3, MemOperand(sp, 0));
4895 __ cmp(r3, Operand::Zero());
4897 InternalArraySingleArgumentConstructorStub
4898 stub1_holey(isolate(), GetHoleyElementsKind(kind));
4899 __ TailCallStub(&stub1_holey, ne);
4902 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4903 __ TailCallStub(&stub1);
4907 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4908 // ----------- S t a t e -------------
4910 // -- r1 : constructor
4911 // -- sp[0] : return address
4912 // -- sp[4] : last argument
4913 // -----------------------------------
4915 if (FLAG_debug_code) {
4916 // The array construct code is only set for the global and natives
4917 // builtin Array functions which always have maps.
4919 // Initial map for the builtin Array function should be a map.
4920 __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4921 // Will both indicate a NULL and a Smi.
4922 __ tst(r3, Operand(kSmiTagMask));
4923 __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
4924 __ CompareObjectType(r3, r3, r4, MAP_TYPE);
4925 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4928 // Figure out the right elements kind
4929 __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4930 // Load the map's "bit field 2" into |result|. We only need the first byte,
4931 // but the following bit field extraction takes care of that anyway.
4932 __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
4933 // Retrieve elements_kind from bit field 2.
4934 __ DecodeField<Map::ElementsKindBits>(r3);
4936 if (FLAG_debug_code) {
4938 __ cmp(r3, Operand(FAST_ELEMENTS));
4940 __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
4942 kInvalidElementsKindForInternalArrayOrInternalPackedArray);
4946 Label fast_elements_case;
4947 __ cmp(r3, Operand(FAST_ELEMENTS));
4948 __ b(eq, &fast_elements_case);
4949 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4951 __ bind(&fast_elements_case);
4952 GenerateCase(masm, FAST_ELEMENTS);
4956 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
4957 // ----------- S t a t e -------------
4959 // -- r4 : call_data
4961 // -- r1 : api_function_address
4964 // -- sp[0] : last argument
4966 // -- sp[(argc - 1)* 4] : first argument
4967 // -- sp[argc * 4] : receiver
4968 // -----------------------------------
4970 Register callee = r0;
4971 Register call_data = r4;
4972 Register holder = r2;
4973 Register api_function_address = r1;
4974 Register context = cp;
4976 int argc = ArgumentBits::decode(bit_field_);
4977 bool is_store = IsStoreBits::decode(bit_field_);
4978 bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
4980 typedef FunctionCallbackArguments FCA;
4982 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
4983 STATIC_ASSERT(FCA::kCalleeIndex == 5);
4984 STATIC_ASSERT(FCA::kDataIndex == 4);
4985 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
4986 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
4987 STATIC_ASSERT(FCA::kIsolateIndex == 1);
4988 STATIC_ASSERT(FCA::kHolderIndex == 0);
4989 STATIC_ASSERT(FCA::kArgsLength == 7);
4993 // load context from callee
4994 __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5002 Register scratch = call_data;
5003 if (!call_data_undefined) {
5004 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5008 // return value default
5012 Operand(ExternalReference::isolate_address(isolate())));
5017 // Prepare arguments.
5018 __ mov(scratch, sp);
5020 // Allocate the v8::Arguments structure in the arguments' space since
5021 // it's not controlled by GC.
5022 const int kApiStackSpace = 4;
5024 FrameScope frame_scope(masm, StackFrame::MANUAL);
5025 __ EnterExitFrame(false, kApiStackSpace);
5027 DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
5028 // r0 = FunctionCallbackInfo&
5029 // Arguments is after the return address.
5030 __ add(r0, sp, Operand(1 * kPointerSize));
5031 // FunctionCallbackInfo::implicit_args_
5032 __ str(scratch, MemOperand(r0, 0 * kPointerSize));
5033 // FunctionCallbackInfo::values_
5034 __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
5035 __ str(ip, MemOperand(r0, 1 * kPointerSize));
5036 // FunctionCallbackInfo::length_ = argc
5037 __ mov(ip, Operand(argc));
5038 __ str(ip, MemOperand(r0, 2 * kPointerSize));
5039 // FunctionCallbackInfo::is_construct_call = 0
5040 __ mov(ip, Operand::Zero());
5041 __ str(ip, MemOperand(r0, 3 * kPointerSize));
5043 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
5044 ExternalReference thunk_ref =
5045 ExternalReference::invoke_function_callback(isolate());
5047 AllowExternalCallThatCantCauseGC scope(masm);
5048 MemOperand context_restore_operand(
5049 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5050 // Stores return the first js argument
5051 int return_value_offset = 0;
5053 return_value_offset = 2 + FCA::kArgsLength;
5055 return_value_offset = 2 + FCA::kReturnValueOffset;
5057 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5059 __ CallApiFunctionAndReturn(api_function_address,
5062 return_value_operand,
5063 &context_restore_operand);
5067 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5068 // ----------- S t a t e -------------
5070 // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
5072 // -- r2 : api_function_address
5073 // -----------------------------------
5075 Register api_function_address = r2;
5077 __ mov(r0, sp); // r0 = Handle<Name>
5078 __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
5080 const int kApiStackSpace = 1;
5081 FrameScope frame_scope(masm, StackFrame::MANUAL);
5082 __ EnterExitFrame(false, kApiStackSpace);
5084 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5085 // r1 (internal::Object** args_) as the data.
5086 __ str(r1, MemOperand(sp, 1 * kPointerSize));
5087 __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
5089 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5091 ExternalReference thunk_ref =
5092 ExternalReference::invoke_accessor_getter_callback(isolate());
5093 __ CallApiFunctionAndReturn(api_function_address,
5096 MemOperand(fp, 6 * kPointerSize),
5103 } } // namespace v8::internal
5105 #endif // V8_TARGET_ARCH_ARM