// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/regexp-macro-assembler.h"
#include "src/stub-cache.h"


void FastNewClosureStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r2 };
  descriptor->Initialize(
      MajorKey(), ARRAY_SIZE(registers), registers,
      Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
}


void FastNewContextStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r1 };
  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
}


void ToNumberStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r0 };
  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
}


void NumberToStringStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r0 };
  descriptor->Initialize(
      MajorKey(), ARRAY_SIZE(registers), registers,
      Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
}


void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r3, r2, r1 };
  Representation representations[] = {
    Representation::Tagged(),
    Representation::Tagged(),
    Representation::Smi(),
    Representation::Tagged() };
  descriptor->Initialize(
      MajorKey(), ARRAY_SIZE(registers), registers,
      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry,
      representations);
}


void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r3, r2, r1, r0 };
  descriptor->Initialize(
      MajorKey(), ARRAY_SIZE(registers), registers,
      Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
}


void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r2, r3 };
  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
}


void CallFunctionStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // r1: the function to call
  Register registers[] = {cp, r1};
  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
}


void CallConstructStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  // r0 : number of arguments
  // r1 : the function to call
  // r2 : feedback vector
  // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
  //      vector (Smi)
  // TODO(turbofan): So far we don't gather type feedback and hence skip the
  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
  Register registers[] = {cp, r0, r1, r2};
  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
}


void RegExpConstructResultStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r2, r1, r0 };
  descriptor->Initialize(
      MajorKey(), ARRAY_SIZE(registers), registers,
      Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
}


void TransitionElementsKindStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r0, r1 };
  Address entry =
      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                         FUNCTION_ADDR(entry));
}


void CompareNilICStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r0 };
  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                         FUNCTION_ADDR(CompareNilIC_Miss));
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
}


const Register InterfaceDescriptor::ContextRegister() { return cp; }


static void InitializeArrayConstructorDescriptor(
    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // Register state:
  // cp -- context
  // r0 -- number of arguments
  // r1 -- function
  // r2 -- allocation site with elements kind
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    Register registers[] = { cp, r1, r2 };
    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
                           deopt_handler, NULL, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    // stack param count needs (constructor pointer, and single argument)
    Register registers[] = { cp, r1, r2, r0 };
    Representation representations[] = {
        Representation::Tagged(),
        Representation::Tagged(),
        Representation::Tagged(),
        Representation::Integer32() };
    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0,
                           deopt_handler, representations,
                           constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
  }
}


static void InitializeInternalArrayConstructorDescriptor(
    CodeStub::Major major, CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // Register state:
  // cp -- context
  // r0 -- number of arguments
  // r1 -- constructor function
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kInternalArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    Register registers[] = { cp, r1 };
    descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
                           deopt_handler, NULL, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    // stack param count needs (constructor pointer, and single argument)
    Register registers[] = { cp, r1, r0 };
    Representation representations[] = {
        Representation::Tagged(),
        Representation::Tagged(),
        Representation::Integer32() };
    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0,
                           deopt_handler, representations,
                           constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
  }
}


void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(MajorKey(), descriptor, -1);
}


void ToBooleanStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r0 };
  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                         FUNCTION_ADDR(ToBooleanIC_Miss));
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
}


void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(MajorKey(), descriptor, -1);
}


void BinaryOpICStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r1, r0 };
  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                         FUNCTION_ADDR(BinaryOpIC_Miss));
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
}


void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r2, r1, r0 };
  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                         FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
}


void StringAddStub::InitializeInterfaceDescriptor(
    CodeStubInterfaceDescriptor* descriptor) {
  Register registers[] = { cp, r1, r0 };
  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                         Runtime::FunctionForId(Runtime::kStringAdd)->entry);
}


void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
  static PlatformInterfaceDescriptor default_descriptor =
      PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);

  static PlatformInterfaceDescriptor noInlineDescriptor =
      PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);

  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
    Register registers[] = { cp,  // context
                             r1,  // JSFunction
                             r0,  // actual number of arguments
                             r2,  // expected number of arguments
    };
    Representation representations[] = {
        Representation::Tagged(),     // context
        Representation::Tagged(),     // JSFunction
        Representation::Integer32(),  // actual number of arguments
        Representation::Integer32(),  // expected number of arguments
    };
    descriptor->Initialize(ARRAY_SIZE(registers), registers,
                           representations, &default_descriptor);
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::KeyedCall);
    Register registers[] = { cp,  // context
                             r2,  // key
    };
    Representation representations[] = {
        Representation::Tagged(),  // context
        Representation::Tagged(),  // key
    };
    descriptor->Initialize(ARRAY_SIZE(registers), registers,
                           representations, &noInlineDescriptor);
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::NamedCall);
    Register registers[] = { cp,  // context
                             r2,  // name
    };
    Representation representations[] = {
        Representation::Tagged(),  // context
        Representation::Tagged(),  // name
    };
    descriptor->Initialize(ARRAY_SIZE(registers), registers,
                           representations, &noInlineDescriptor);
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::CallHandler);
    Register registers[] = { cp,  // context
                             r0,  // receiver
    };
    Representation representations[] = {
        Representation::Tagged(),  // context
        Representation::Tagged(),  // receiver
    };
    descriptor->Initialize(ARRAY_SIZE(registers), registers,
                           representations, &default_descriptor);
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::ApiFunctionCall);
    Register registers[] = { cp,  // context
                             r0,  // callee
                             r4,  // call_data
                             r2,  // holder
                             r1,  // api_function_address
    };
    Representation representations[] = {
        Representation::Tagged(),    // context
        Representation::Tagged(),    // callee
        Representation::Tagged(),    // call_data
        Representation::Tagged(),    // holder
        Representation::External(),  // api_function_address
    };
    descriptor->Initialize(ARRAY_SIZE(registers), registers,
                           representations, &default_descriptor);
  }
}


#define __ ACCESS_MASM(masm)


static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
  int param_count = descriptor->GetEnvironmentParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    DCHECK(param_count == 0 ||
           r0.is(descriptor->GetEnvironmentParameterRegister(
               param_count - 1)));
    // Push arguments
    for (int i = 0; i < param_count; ++i) {
      __ push(descriptor->GetEnvironmentParameterRegister(i));
    }
    ExternalReference miss = descriptor->miss_handler();
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}


// Takes a Smi and converts it to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
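//
// A worked example (editor's sketch, not part of the original comment): the
// Smi 5 untags to 5 = 1.25 * 2^2, so the encoded double has sign 0, biased
// exponent 2 + 1023 = 0x401, and fraction 0.25; the two output words are
// 0x40140000 (sign, exponent and top 20 fraction bits) and 0x00000000.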
class ConvertToDoubleStub : public PlatformCodeStub {
 public:
  ConvertToDoubleStub(Isolate* isolate,
                      Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : PlatformCodeStub(isolate),
        result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() const { return ConvertToDouble; }
  int MinorKey() const {
    // Encode the parameters in a unique 16 bit value.
    return result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);
};


void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
  Register exponent = result1_;
  Register mantissa = result2_;

  Label not_special;
  __ SmiUntag(source_);
  // Move sign bit from source to destination. This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
  // Subtract from 0 if source was negative.
  __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ cmp(source_, Operand(1));
  __ b(gt, &not_special);

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
  // 1, 0 and -1 all have 0 for the second word.
  __ mov(mantissa, Operand::Zero());
  __ Ret();

  __ bind(&not_special);
  __ clz(zeros_, source_);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here. Use a fudge factor to
  // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts
  // that fit in the ARM's constant field.
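  // Editor's note (worked arithmetic): with kExponentBias = 1023 the constant
  // is 31 + 1023 = 0x41e, whose significant bits span more than 8 bits and so
  // cannot be encoded as an ARM immediate (an 8-bit value rotated by an even
  // amount), while the two parts 0x400 and 0x1e both can.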
  int fudge = 0x400;
  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
  __ add(mantissa, mantissa, Operand(fudge));
  __ orr(exponent,
         exponent,
         Operand(mantissa, LSL, HeapNumber::kExponentShift));
  // Shift up the source chopping the top bit off.
  __ add(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ mov(source_, Operand(source_, LSL, zeros_));
  // Compute lower part of fraction (last 12 bits).
  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
  // And the top (top 20 bits).
  __ orr(exponent,
         exponent,
         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
  __ Ret();
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();
  DCHECK(is_truncating());

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;

  Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch_low =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch_high =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
  LowDwVfpRegister double_scratch = kScratchDoubleReg;

  __ Push(scratch_high, scratch_low, scratch);

  if (!skip_fastpath()) {
    // Load double input.
    __ vldr(double_scratch, MemOperand(input_reg, double_offset));
    __ vmov(scratch_low, scratch_high, double_scratch);

    // Do fast-path convert from double to int.
    __ vcvt_s32_f64(double_scratch.low(), double_scratch);
    __ vmov(result_reg, double_scratch.low());

    // If result is not saturated (0x7fffffff or 0x80000000), we are done.
    __ sub(scratch, result_reg, Operand(1));
    __ cmp(scratch, Operand(0x7ffffffe));
    __ b(lt, &done);
  } else {
    // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
    // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
    if (double_offset == 0) {
      __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
    } else {
      __ ldr(scratch_low, MemOperand(input_reg, double_offset));
      __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
    }
  }

  __ Ubfx(scratch, scratch_high,
          HeapNumber::kExponentShift, HeapNumber::kExponentBits);
  // Load scratch with exponent - 1. This is faster than loading
  // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
  // If the exponent is greater than or equal to 84, the 32 least significant
  // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits), so the
  // result is 0.
  // Compare exponent with 84 (compare exponent - 1 with 83).
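  // (Editor's note: a double with unbiased exponent e is an integer multiple
  // of 2^(e - 52); for e >= 84 that is a multiple of 2^32, so the low 32 bits
  // of the truncated value are all zero.)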
  __ cmp(scratch, Operand(83));
  __ b(ge, &out_of_range);

  // If we reach this code, 31 <= exponent <= 83.
  // So, we don't have to handle cases where 0 <= exponent <= 20 for
  // which we would need to shift right the high part of the mantissa.
  // Scratch contains exponent - 1.
  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
  __ rsb(scratch, scratch, Operand(51), SetCC);
  __ b(ls, &only_low);
  // 21 <= exponent <= 51, shift scratch_low and scratch_high
  // to generate the result.
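  // Editor's sketch of this path: for the input 2^31 (exponent 31, mantissa
  // zero) scratch holds 52 - 31 = 21; scratch_low LSR 21 contributes 0, and
  // the implicit 1 or'ed in at bit 20 is shifted left by 32 - 21 = 11 to
  // bit 31, giving the expected truncated result 0x80000000.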
  __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
  // Scratch contains: 52 - exponent.
  // We need: exponent - 20.
  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
  __ rsb(scratch, scratch, Operand(32));
  __ Ubfx(result_reg, scratch_high,
          0, HeapNumber::kMantissaBitsInTopWord);
  // Set the implicit 1 before the mantissa part in scratch_high.
  __ orr(result_reg, result_reg,
         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
  __ b(&negate);

  __ bind(&out_of_range);
  __ mov(result_reg, Operand::Zero());
  __ b(&done);

  __ bind(&only_low);
  // 52 <= exponent <= 83, shift only scratch_low.
  // On entry, scratch contains: 52 - exponent.
  __ rsb(scratch, scratch, Operand::Zero());
  __ mov(result_reg, Operand(scratch_low, LSL, scratch));

  __ bind(&negate);
  // If the input was positive, scratch_high ASR 31 equals 0 and
  // scratch_high LSR 31 equals zero.
  // New result = (result eor 0) + 0 = result.
  // If the input was negative, we have to negate the result.
  // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
  // New result = (result eor 0xffffffff) + 1 = 0 - result.
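  // (Editor's example: a magnitude of 3 with a negative input becomes
  // (3 eor 0xffffffff) + 1 = 0xfffffffc + 1 = 0xfffffffd = -3.)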
  __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
  __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));

  __ bind(&done);

  __ Pop(scratch_high, scratch_low, scratch);
  __ Ret();
}


void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  WriteInt32ToHeapNumberStub stub1(isolate, r1, r0, r2);
  WriteInt32ToHeapNumberStub stub2(isolate, r2, r0, r3);
  stub1.GetCode();
  stub2.GetCode();
}


// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent. This test
  // has the neat side effect of setting the flags according to the sign.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ cmp(the_int_, Operand(0x80000000u));
  __ b(eq, &max_negative_int);
  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ mov(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
  // Subtract from 0 if the value was negative.
  __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
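  // Editor's sketch: for the_int_ = 0x40000000 (2^30) the exponent word is
  // (1023 + 30) << 20 = 0x41d00000, and the_int_ LSR 10 = 0x00100000 merges
  // its implicit 1 into the exponent's already-set low bit, leaving
  // 0x41d00000:00000000, i.e. exactly 2^30 as a double.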
  DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kExponentOffset));
  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(ip, Operand::Zero());
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cond) {
  Label not_identical;
  Label heap_number, return_equal;
  __ cmp(r0, r1);
  __ b(ne, &not_identical);

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // They are both equal and they are not both Smis so both of them are not
  // Smis. If it's not a heap number, then return equal.
  if (cond == lt || cond == gt) {
    __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
    __ b(ge, slow);
  } else {
    __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
    __ b(eq, &heap_number);
    // Comparing JS objects with <=, >= is complicated.
    if (cond != eq) {
      __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
      __ b(ge, slow);
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false! See ECMAScript 11.8.5.
      if (cond == le || cond == ge) {
        __ cmp(r4, Operand(ODDBALL_TYPE));
        __ b(ne, &return_equal);
        __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
        __ cmp(r0, r2);
        __ b(ne, &return_equal);
        if (cond == le) {
          // undefined <= undefined should fail.
          __ mov(r0, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ mov(r0, Operand(LESS));
        }
        __ Ret();
      }
    }
  }

  __ bind(&return_equal);
  if (cond == lt) {
    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless. For the others here is some code to check
  // for NaN.
  if (cond != lt && cond != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
    // NaNs have all-one exponents so they sign extend to -1.
    __ cmp(r3, Operand(-1));
    __ b(ne, &return_equal);

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
    // Or with all low-bits of mantissa.
    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
    __ orr(r0, r3, Operand(r2), SetCC);
    // For equal we already have the right value in r0: Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN). For <= and >= we need to load r0 with the failing
    // value if it's a NaN.
    if (cond != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq);
      if (cond == le) {
        __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
      }
    }
    __ Ret();
  }
  // No fall through here.

  __ bind(&not_identical);
}


// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict) {
  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  Label rhs_is_smi;
  __ JumpIfSmi(rhs, &rhs_is_smi);

  // Lhs is a Smi. Check whether the rhs is a heap number.
  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed. Return non-equal.
    // If rhs is r0 then there is already a non zero value in it.
    if (!rhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ b(ne, slow);
  }

  // Lhs is a smi, rhs is a number.
  // Convert lhs to a double in d7.
  __ SmiToDouble(d7, lhs);
  // Load the double from rhs, tagged HeapNumber r0, to d6.
  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ jmp(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi. Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If lhs is not a number and rhs is a smi then strict equality cannot
    // succeed. Return non-equal.
    // If lhs is r0 then there is already a non zero value in it.
    if (!lhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-smi non-heap-number. Call
    // the runtime.
    __ b(ne, slow);
  }

  // Rhs is a smi, lhs is a heap number.
  // Load the double from lhs, tagged HeapNumber r1, to d7.
  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
  // Convert rhs to a double in d6.
  __ SmiToDouble(d6, rhs);
  // Fall through to both_loaded_as_doubles.
}


// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r2 and compare it with
  // FIRST_SPEC_OBJECT_TYPE.
  __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
  __ b(lt, &first_non_object);

  // Return non-zero (r0 is not zero)
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ cmp(r2, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
  __ b(ge, &return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ cmp(r3, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  // Now that we have the types we might as well check for
  // internalized-internalized.
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ orr(r2, r2, Operand(r3));
  __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ b(eq, &return_not_equal);
}


// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
  __ b(ne, not_heap_numbers);
  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ cmp(r2, r3);
  __ b(ne, slow);  // First was a heap number, second wasn't. Go slow case.

  // Both are heap numbers. Load them up then jump to the code we have
  // for that case.
  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs,
                                                     Register rhs,
                                                     Label* possible_strings,
                                                     Label* not_both_strings) {
  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // r2 is object type of rhs.
  Label object_test;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ tst(r2, Operand(kIsNotStringMask));
  __ b(ne, &object_test);
  __ tst(r2, Operand(kIsNotInternalizedMask));
  __ b(ne, possible_strings);
  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
  __ b(ge, not_both_strings);
  __ tst(r3, Operand(kIsNotInternalizedMask));
  __ b(ne, possible_strings);

  // Both are internalized. We already checked they weren't the same pointer
  // so they are not equal.
  __ mov(r0, Operand(NOT_EQUAL));
  __ Ret();

  __ bind(&object_test);
  __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ b(lt, not_both_strings);
  __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
  __ b(lt, not_both_strings);
  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ and_(r0, r2, Operand(r3));
  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
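  // Editor's note on the bit trick above: the and_ leaves Map::kIsUndetectable
  // set in r0 only if *both* bit fields have it set; the eor then inverts that
  // bit, so r0 is 0 (EQUAL) exactly when both objects are undetectable.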
  __ Ret();
}


static void ICCompareStub_CheckInputType(MacroAssembler* masm,
                                         Register input,
                                         Register scratch,
                                         CompareIC::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareIC::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareIC::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/non-internalized here, but as long
  // as hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry r1 and r0 are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = r1;
  Register rhs = r0;
  Condition cc = GetCondition();

  Label miss;
  ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
  ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;
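
  // Editor's note: tagged Smis have a zero low bit, i.e. a tagged Smi is
  // 2 * value, so ASR 1 untags. For example 3 and 5 are tagged as 6 and 10;
  // the shifts below yield 3 and 5 and r0 = 3 - 5 = -2, whose sign already
  // encodes the LESS result that callers expect.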
  Label not_two_smis, smi_done;
  __ orr(r2, r1, r0);
  __ JumpIfNotSmi(r2, &not_two_smis);
  __ mov(r1, Operand(r1, ASR, 1));
  __ sub(r0, r1, Operand(r0, ASR, 1));
  __ Ret();
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(0, Smi::FromInt(0));
  __ and_(r2, lhs, Operand(rhs));
  __ JumpIfNotSmi(r2, &not_smis);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to lhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison. If VFP3 is supported the double values of the numbers have
  // been loaded into d7 and d6. Otherwise, the double values have been loaded
  // into r0, r1, r2, and r3.
  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in d6 and d7, if
  // VFP3 is supported, or in r0, r1, r2, and r3.
  __ bind(&lhs_not_nan);
  Label nan;
  // ARMv7 VFP3 instructions to implement double precision comparison.
  __ VFPCompareAndSetFlags(d7, d6);
  __ b(vs, &nan);
  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
  __ mov(r0, Operand(LESS), LeaveCC, lt);
  __ mov(r0, Operand(GREATER), LeaveCC, gt);
  __ Ret();

  __ bind(&nan);
  // If one of the sides was a NaN then the v flag is set. Load r0 with
  // whatever it takes to make the comparison fail, since comparisons with NaN
  // always fail.
  if (cc == lt || cc == le) {
    __ mov(r0, Operand(GREATER));
  } else {
    __ mov(r0, Operand(LESS));
  }
  __ Ret();

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in rhs_ and lhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case r2 will contain the type of rhs_. Never falls through.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
  // internalized strings.
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two detectable
    // objects. Otherwise jumps to string case or not both strings case.
    // Assumes that r2 is the type of rhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential ASCII strings, and inline if that is the
  // case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
                      r3);
  if (cc == eq) {
    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                     lhs,
                                                     rhs,
                                                     r2,
                                                     r3,
                                                     r4);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
                                                       lhs,
                                                       rhs,
                                                       r2,
                                                       r3,
                                                       r4,
                                                       r5);
  }
  // Never falls through to here.

  __ bind(&slow);

  __ Push(lhs, rhs);
  // Figure out which native to call and set up the arguments.
  Builtins::JavaScript native;
  if (cc == eq) {
    native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    native = Builtins::COMPARE;
    int ncr;  // NaN compare result
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // remaining cases
      ncr = LESS;
    }
    __ mov(r0, Operand(Smi::FromInt(ncr)));
    __ push(r0);
  }

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_FUNCTION);

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ stm(db_w, sp, kCallerSaved | lr.bit());

  const Register scratch = r1;

  if (save_doubles_ == kSaveFPRegs) {
    __ SaveFPRegs(sp, scratch);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  if (save_doubles_ == kSaveFPRegs) {
    __ RestoreFPRegs(sp, scratch);
  }
  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register base = r1;
  const Register exponent = r2;
  const Register heapnumbermap = r5;
  const Register heapnumber = r0;
  const DwVfpRegister double_base = d0;
  const DwVfpRegister double_exponent = d1;
  const DwVfpRegister double_result = d2;
  const DwVfpRegister double_scratch = d3;
  const SwVfpRegister single_scratch = s6;
  const Register scratch = r9;
  const Register scratch2 = r4;

  Label call_runtime, done, int_exponent;
  if (exponent_type_ == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack to double registers.
    __ ldr(base, MemOperand(sp, 1 * kPointerSize));
    __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));

    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);

    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    __ cmp(scratch, heapnumbermap);
    __ b(ne, &call_runtime);

    __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent);

    __ bind(&base_is_smi);
    __ vmov(single_scratch, scratch);
    __ vcvt_f64_s32(double_base, single_scratch);
    __ bind(&unpack_exponent);

    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    __ cmp(scratch, heapnumbermap);
    __ b(ne, &call_runtime);
    __ vldr(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type_ == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ vldr(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type_ != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ vcvt_u32_f64(single_scratch, double_exponent);
    // We do not check for NaN or Infinity here because comparing numbers on
    // ARM correctly distinguishes NaNs. We end up calling the built-in.
    __ vcvt_f64_u32(double_scratch, single_scratch);
    __ VFPCompareAndSetFlags(double_scratch, double_exponent);
    __ b(eq, &int_exponent_convert);

    if (exponent_type_ == ON_STACK) {
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label not_plus_half;

      // Test for 0.5.
      __ vmov(double_scratch, 0.5, scratch);
      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
      __ b(ne, &not_plus_half);

      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      __ vmov(double_scratch, -V8_INFINITY, scratch);
      __ VFPCompareAndSetFlags(double_base, double_scratch);
      __ vneg(double_result, double_scratch, eq);
      __ b(eq, &done);

      // Add +0 to convert -0 to +0.
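      // (Editor's note: without this, vsqrt(-0) would yield -0, while ES5
      // 15.8.2.13 requires Math.pow(-0, 0.5) to be +0; under the default
      // rounding mode -0 + +0 is +0.)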
      __ vadd(double_scratch, double_base, kDoubleRegZero);
      __ vsqrt(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&not_plus_half);
      __ vmov(double_scratch, -0.5, scratch);
      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
      __ b(ne, &call_runtime);

      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      __ vmov(double_scratch, -V8_INFINITY, scratch);
      __ VFPCompareAndSetFlags(double_base, double_scratch);
      __ vmov(double_result, kDoubleRegZero, eq);
      __ b(eq, &done);

      // Add +0 to convert -0 to +0.
      __ vadd(double_scratch, double_base, kDoubleRegZero);
      __ vmov(double_result, 1.0, scratch);
      __ vsqrt(double_scratch, double_scratch);
      __ vdiv(double_result, double_result, double_scratch);
      __ jmp(&done);
    }

    __ push(lr);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(lr);
    __ MovFromFloatResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
    __ vcvt_u32_f64(single_scratch, double_exponent);
    __ vmov(scratch, single_scratch);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type_ == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }
  __ vmov(double_scratch, double_base);  // Back up base.
  __ vmov(double_result, 1.0, scratch2);

  // Get absolute value of exponent.
  __ cmp(scratch, Operand::Zero());
  __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
  __ sub(scratch, scratch2, scratch, LeaveCC, mi);

  Label while_true;
  __ bind(&while_true);
  __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
  __ vmul(double_result, double_result, double_scratch, cs);
  __ vmul(double_scratch, double_scratch, double_scratch, ne);
  __ b(ne, &while_true);
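  // Editor's sketch of the exponentiation-by-squaring loop above: for
  // |exponent| = 5 (binary 101) it runs three times. Pass one shifts out a 1
  // (carry set) so result *= base and base is squared; pass two shifts out a
  // 0 so only base is squared (base^4); pass three shifts out the final 1 so
  // result *= base^4, giving base^5.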

  __ cmp(exponent, Operand::Zero());
  __ b(ge, &done);
  __ vmov(double_scratch, 1.0, scratch);
  __ vdiv(double_result, double_scratch, double_result);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ VFPCompareAndSetFlags(double_result, 0.0);
  __ b(ne, &done);
  // double_exponent may not contain the exponent value if the input was a
  // smi. We set it with exponent value before bailing out.
  __ vmov(single_scratch, exponent);
  __ vcvt_f64_s32(double_exponent, single_scratch);

  // Returning or bailing out.
  Counters* counters = isolate()->counters();
  if (exponent_type_ == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in exponent.
    __ bind(&done);
    __ AllocateHeapNumber(
        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
    __ vstr(double_result,
            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    DCHECK(heapnumber.is(r0));
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ Ret(2);
  } else {
    __ push(lr);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(lr);
    __ MovFromFloatResult(double_result);

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ Ret();
  }
}


bool CEntryStub::NeedsImmovableCode() {
  return true;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub save_doubles(isolate, 1, mode);
  StoreBufferOverflowStub stub(isolate, mode);
  // These stubs might already be in the snapshot, detect that and don't
  // regenerate, which would lead to code stub initialization state being
  // messed up.
  Code* save_doubles_code;
  if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
    save_doubles_code = *save_doubles.GetCode();
  }
  Code* store_buffer_overflow_code;
  if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
    store_buffer_overflow_code = *stub.GetCode();
  }
  isolate->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function.
  // r0: number of arguments including receiver
  // r1: pointer to builtin function
  // fp: frame pointer (restored after C call)
  // sp: stack pointer (restored as callee's sp after C call)
  // cp: current context (C callee-saved)

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  __ mov(r5, Operand(r1));

  // Compute the argv pointer in a callee-saved register.
  __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
  __ sub(r1, r1, Operand(kPointerSize));

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles_);

  // Store a copy of argc in callee-saved registers for later.
  __ mov(r4, Operand(r0));

  // r0, r4: number of arguments including receiver (C callee-saved)
  // r1: pointer to the first argument (C callee-saved)
  // r5: pointer to builtin function (C callee-saved)

  // Result returned in r0 or r0+r1 by default.

#if V8_HOST_ARCH_ARM
  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (FLAG_debug_code) {
    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      DCHECK(IsPowerOf2(frame_alignment));
      __ tst(sp, Operand(frame_alignment_mask));
      __ b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort re-entering here.
      __ stop("Unexpected alignment");
      __ bind(&alignment_as_expected);
    }
  }
#endif

  // Call C built-in.
  // r0 = argc, r1 = argv
  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  // Compute the return address in lr to return to after the jump below. Pc is
  // already at '+ 8' from the current instruction but return is after three
  // instructions so add another 4 to pc to get the return address.
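  // (Editor's note: on ARM, reading pc yields the address of the current
  // instruction plus 8, so "add lr, pc, #4" below computes the address 12
  // bytes past the add itself, i.e. the instruction immediately after the
  // three-instruction add/str/Call sequence.)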
  {
    // Prevent literal pool emission before return address.
    Assembler::BlockConstPoolScope block_const_pool(masm);
    __ add(lr, pc, Operand(4));
    __ str(lr, MemOperand(sp, 0));
    __ Call(r5);
  }

  __ VFPEnsureFPSCRState(r2);

  // Runtime functions should not return 'the hole'. Allowing it to escape may
  // lead to crashes in the IC code later.
  if (FLAG_debug_code) {
    Label okay;
    __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
    __ b(ne, &okay);
    __ stop("The hole escaped");
    __ bind(&okay);
  }

  // Check result for exception sentinel.
  Label exception_returned;
  __ CompareRoot(r0, Heap::kExceptionRootIndex);
  __ b(eq, &exception_returned);

  ExternalReference pending_exception_address(
      Isolate::kPendingExceptionAddress, isolate());

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    __ mov(r2, Operand(pending_exception_address));
    __ ldr(r2, MemOperand(r2));
    __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
    // Cannot use check here as it attempts to generate call into runtime.
    __ b(eq, &okay);
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // r0:r1: result
  // sp: stack pointer
  // fp: frame pointer
  // Callee-saved register r4 still holds argc.
  __ LeaveExitFrame(save_doubles_, r4, true);
  __ mov(pc, lr);

  // Handling of exception.
  __ bind(&exception_returned);

  // Retrieve the pending exception.
  __ mov(r2, Operand(pending_exception_address));
  __ ldr(r0, MemOperand(r2));

  // Clear the pending exception.
  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
  __ str(r3, MemOperand(r2));

  // Special handling of termination exceptions which are uncatchable
  // by javascript code.
  Label throw_termination_exception;
  __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
  __ b(eq, &throw_termination_exception);

  // Handle normal exception.
  __ Throw(r0);

  __ bind(&throw_termination_exception);
  __ ThrowUncatchable(r0);
}


void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // [sp+0]: argv

  Label invoke, handler_entry, exit;

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Called from C, so do not pop argc and args on exit (preserve sp).
  // No need to save register-passed args.
  // Save callee-saved registers (incl. cp and fp), sp, and lr.
  __ stm(db_w, sp, kCalleeSaved | lr.bit());

  // Save callee-saved vfp registers.
  __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
  // Set up the reserved register for 0.0.
  __ vmov(kDoubleRegZero, 0.0);
  __ VFPEnsureFPSCRState(r4);

  // Get address of argv, see stm above.
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc

  // Set up argv in r4.
  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
  offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
  __ ldr(r4, MemOperand(sp, offset_to_argv));

  // Push a frame with special values set up to mark it as an entry frame.
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
  if (FLAG_enable_ool_constant_pool) {
    __ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array()));
  }
  __ mov(r7, Operand(Smi::FromInt(marker)));
  __ mov(r6, Operand(Smi::FromInt(marker)));
  __ mov(r5,
         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ ldr(r5, MemOperand(r5));
  __ mov(ip, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
                   (FLAG_enable_ool_constant_pool ? r8.bit() : 0) |
                   ip.bit());

  // Set up frame pointer for the frame to be pushed.
  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
  __ ldr(r6, MemOperand(r5));
  __ cmp(r6, Operand::Zero());
  __ b(ne, &non_outermost_js);
  __ str(fp, MemOperand(r5));
  __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  Label cont;
  __ b(&cont);
  __ bind(&non_outermost_js);
  __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
  __ bind(&cont);
  __ push(ip);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);

  // Block literal pool emission whilst taking the position of the handler
  // entry. This avoids making the assumption that literal pools are always
  // emitted after an instruction is emitted, rather than before.
  {
    Assembler::BlockConstPoolScope block_const_pool(masm);
    __ bind(&handler_entry);
    handler_offset_ = handler_entry.pos();
    // Caught exception: Store result (exception) in the pending exception
    // field in the JSEnv and return a failure sentinel. Coming in here the
    // fp will be invalid because the PushTryHandler below sets it to 0 to
    // signal the existence of the JSEntry frame.
    __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                         isolate())));
  }
  __ str(r0, MemOperand(ip));
  __ LoadRoot(r0, Heap::kExceptionRootIndex);
  __ b(&exit);

  // Invoke: Link this frame into the handler chain. There's only one
  // handler block in this code object, so its index is 0.
  __ bind(&invoke);
  // Must preserve r0-r4, r5-r6 are available.
  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the bl(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Clear any pending exceptions.
  __ mov(r5, Operand(isolate()->factory()->the_hole_value()));
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate())));
  __ str(r5, MemOperand(ip));

  // Invoke the function by calling through the JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Expected registers by Builtins::JSEntryTrampoline
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  if (is_construct) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate());
    __ mov(ip, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
    __ mov(ip, Operand(entry));
  }
  __ ldr(ip, MemOperand(ip));  // deref address
  __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));

  // Branch and link to JSEntryTrampoline.
  __ Call(ip);

  // Unlink this frame from the handler chain.
  __ PopTryHandler();

  __ bind(&exit);  // r0 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(r5);
  __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ b(ne, &non_outermost_js_2);
  __ mov(r6, Operand::Zero());
  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
  __ str(r6, MemOperand(r5));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(r3);
  __ mov(ip,
         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ str(r3, MemOperand(ip));

  // Reset the stack to the callee saved registers.
  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // Restore callee-saved registers and return.
#ifdef DEBUG
  if (FLAG_debug_code) {
    __ mov(lr, Operand(pc));
  }
#endif

  // Restore callee-saved vfp registers.
  __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);

  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
1674 // Uses registers r0 to r4.
1675 // Expected input (depending on whether args are in registers or on the stack):
1676 // * object: r0 or at sp + 1 * kPointerSize.
1677 // * function: r1 or at sp.
1679 // An inlined call site may have been generated before calling this stub.
1680 // In this case the offset to the inline sites to patch are passed in r5 and r6.
1681 // (See LCodeGen::DoInstanceOfKnownGlobal)
1682 void InstanceofStub::Generate(MacroAssembler* masm) {
1683 // Call site inlining and patching implies arguments in registers.
1684 DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1686 // Fixed register usage throughout the stub:
1687 const Register object = r0; // Object (lhs).
1688 Register map = r3; // Map of the object.
1689 const Register function = r1; // Function (rhs).
1690 const Register prototype = r4; // Prototype of the function.
1691 const Register scratch = r2;
1693 Label slow, loop, is_instance, is_not_instance, not_js_object;
1695 if (!HasArgsInRegisters()) {
1696 __ ldr(object, MemOperand(sp, 1 * kPointerSize));
1697 __ ldr(function, MemOperand(sp, 0));
1700 // Check that the left hand is a JS object and load map.
1701 __ JumpIfSmi(object, ¬_js_object);
1702 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
1704 // If there is a call site cache don't look in the global cache, but do the
1705 // real lookup and update the call site cache.
1706 if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
1708 __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1710 __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
1712 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1713 __ Ret(HasArgsInRegisters() ? 0 : 2);
1718 // Get the prototype of the function.
1719 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1721 // Check that the function prototype is a JS object.
1722 __ JumpIfSmi(prototype, &slow);
1723 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1725 // Update the global instanceof or call site inlined cache with the current
1726 // map and function. The cached answer will be set when it is known below.
1727 if (!HasCallSiteInlineCheck()) {
1728 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1729 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1731 DCHECK(HasArgsInRegisters());
1732 // Patch the (relocated) inlined map check.
1734 // The map_load_offset was stored in r5
1735 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1736 const Register map_load_offset = r5;
1737 __ sub(r9, lr, map_load_offset);
1738 // Get the map location in r5 and patch it.
1739 __ GetRelocatedValueLocation(r9, map_load_offset, scratch);
1740 __ ldr(map_load_offset, MemOperand(map_load_offset));
1741 __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));
1744 // Register mapping: r3 is object map and r4 is function prototype.
1745 // Get prototype of object into r2.
1746 __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1748 // We don't need map any more. Use it as a scratch register.
1749 Register scratch2 = map;
1752 // Loop through the prototype chain looking for the function prototype.
1753 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1755 __ cmp(scratch, Operand(prototype));
1756 __ b(eq, &is_instance);
1757 __ cmp(scratch, scratch2);
1758 __ b(eq, &is_not_instance);
1759 __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1760 __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1762 Factory* factory = isolate()->factory();
1764 __ bind(&is_instance);
1765 if (!HasCallSiteInlineCheck()) {
1766 __ mov(r0, Operand(Smi::FromInt(0)));
1767 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1768 if (ReturnTrueFalseObject()) {
1769 __ Move(r0, factory->true_value());
1772 // Patch the call site to return true.
1773 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
1774 // The bool_load_offset was stored in r6
1775 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1776 const Register bool_load_offset = r6;
1777 __ sub(r9, lr, bool_load_offset);
1778 // Get the boolean result location in scratch and patch it.
1779 __ GetRelocatedValueLocation(r9, scratch, scratch2);
1780 __ str(r0, MemOperand(scratch));
1782 if (!ReturnTrueFalseObject()) {
1783 __ mov(r0, Operand(Smi::FromInt(0)));
1786 __ Ret(HasArgsInRegisters() ? 0 : 2);
1788 __ bind(&is_not_instance);
1789 if (!HasCallSiteInlineCheck()) {
1790 __ mov(r0, Operand(Smi::FromInt(1)));
1791 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1792 if (ReturnTrueFalseObject()) {
1793 __ Move(r0, factory->false_value());
1796 // Patch the call site to return false.
1797 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
1798 // The bool_load_offset was stored in r6
1799 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1800 const Register bool_load_offset = r6;
1801 __ sub(r9, lr, bool_load_offset);
1803 // Get the boolean result location in scratch and patch it.
1804 __ GetRelocatedValueLocation(r9, scratch, scratch2);
1805 __ str(r0, MemOperand(scratch));
1807 if (!ReturnTrueFalseObject()) {
1808 __ mov(r0, Operand(Smi::FromInt(1)));
1811 __ Ret(HasArgsInRegisters() ? 0 : 2);
1813 Label object_not_null, object_not_null_or_smi;
1814 __ bind(¬_js_object);
1815 // Before the null, smi and string value checks, check that the rhs is a
1816 // function; for a non-function rhs an exception needs to be thrown.
1817 __ JumpIfSmi(function, &slow);
1818 __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
1821 // Null is not instance of anything.
1822 __ cmp(scratch, Operand(isolate()->factory()->null_value()));
1823 __ b(ne, &object_not_null);
1824 if (ReturnTrueFalseObject()) {
1825 __ Move(r0, factory->false_value());
1827 __ mov(r0, Operand(Smi::FromInt(1)));
1829 __ Ret(HasArgsInRegisters() ? 0 : 2);
1831 __ bind(&object_not_null);
1832 // Smi values are not instances of anything.
1833 __ JumpIfNotSmi(object, &object_not_null_or_smi);
1834 if (ReturnTrueFalseObject()) {
1835 __ Move(r0, factory->false_value());
1837 __ mov(r0, Operand(Smi::FromInt(1)));
1839 __ Ret(HasArgsInRegisters() ? 0 : 2);
1841 __ bind(&object_not_null_or_smi);
1842 // String values are not instances of anything.
1843 __ IsObjectJSStringType(object, scratch, &slow);
1844 if (ReturnTrueFalseObject()) {
1845 __ Move(r0, factory->false_value());
1847 __ mov(r0, Operand(Smi::FromInt(1)));
1849 __ Ret(HasArgsInRegisters() ? 0 : 2);
1851 // Slow-case. Tail call builtin.
1853 if (!ReturnTrueFalseObject()) {
1854 if (HasArgsInRegisters()) {
1857 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
1860 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1862 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1864 __ cmp(r0, Operand::Zero());
1865 __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
1866 __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
1867 __ Ret(HasArgsInRegisters() ? 0 : 2);
1872 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1874 Register receiver = LoadIC::ReceiverRegister();
1876 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3,
1879 PropertyAccessCompiler::TailCallBuiltin(
1880 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1884 Register InstanceofStub::left() { return r0; }
1887 Register InstanceofStub::right() { return r1; }
1890 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1891 // The displacement is the offset of the last parameter (if any)
1892 // relative to the frame pointer.
1893 const int kDisplacement =
1894 StandardFrameConstants::kCallerSPOffset - kPointerSize;
1896 // Check that the key is a smi.
1898 __ JumpIfNotSmi(r1, &slow);
1900 // Check if the calling frame is an arguments adaptor frame.
1902 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1903 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1904 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1907 // Check index against formal parameters count limit passed in
1908 // through register r0. Use unsigned comparison to get negative check for free.
1913 // Read the argument from the stack and return it.
1915 __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
1916 __ ldr(r0, MemOperand(r3, kDisplacement));
1919 // Arguments adaptor case: Check index against actual arguments
1920 // limit found in the arguments adaptor frame. Use unsigned
1921 // comparison to get negative check for free.
1923 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1927 // Read the argument from the adaptor frame and return it.
1929 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
1930 __ ldr(r0, MemOperand(r3, kDisplacement));
1933 // Slow-case: Handle non-smi or out-of-bounds access to arguments
1934 // by calling the runtime system.
1937 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1941 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1942 // sp[0] : number of parameters
1943 // sp[4] : receiver displacement
1946 // Check if the calling frame is an arguments adaptor frame.
1948 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1949 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
1950 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1953 // Patch the arguments.length and the parameters pointer in the current frame.
1954 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1955 __ str(r2, MemOperand(sp, 0 * kPointerSize));
1956 __ add(r3, r3, Operand(r2, LSL, 1));
1957 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
1958 __ str(r3, MemOperand(sp, 1 * kPointerSize));
1961 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1965 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1967 // sp[0] : number of parameters (tagged)
1968 // sp[4] : address of receiver argument
1970 // Registers used over whole function:
1971 // r6 : allocated object (tagged)
1972 // r9 : mapped parameter count (tagged)
1974 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
1975 // r1 = parameter count (tagged)
1977 // Check if the calling frame is an arguments adaptor frame.
1979 Label adaptor_frame, try_allocate;
1980 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1981 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
1982 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1983 __ b(eq, &adaptor_frame);
1985 // No adaptor, parameter count = argument count.
1987 __ b(&try_allocate);
1989 // We have an adaptor frame. Patch the parameters pointer.
1990 __ bind(&adaptor_frame);
1991 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1992 __ add(r3, r3, Operand(r2, LSL, 1));
1993 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
1994 __ str(r3, MemOperand(sp, 1 * kPointerSize));
1996 // r1 = parameter count (tagged)
1997 // r2 = argument count (tagged)
1998 // Compute the mapped parameter count = min(r1, r2) in r1.
1999 __ cmp(r1, Operand(r2));
2000 __ mov(r1, Operand(r2), LeaveCC, gt);
2002 __ bind(&try_allocate);
2004 // Compute the sizes of backing store, parameter map, and arguments object.
2005 // 1. Parameter map: has 2 extra words containing the context and backing store.
2006 const int kParameterMapHeaderSize =
2007 FixedArray::kHeaderSize + 2 * kPointerSize;
2008 // If there are no mapped parameters, we do not need the parameter_map.
2009 __ cmp(r1, Operand(Smi::FromInt(0)));
2010 __ mov(r9, Operand::Zero(), LeaveCC, eq);
2011 __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
2012 __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
2014 // 2. Backing store.
2015 __ add(r9, r9, Operand(r2, LSL, 1));
2016 __ add(r9, r9, Operand(FixedArray::kHeaderSize));
2018 // 3. Arguments object.
2019 __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
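// Worked size example (a sketch assuming the usual 32-bit layouts: 8-byte
// FixedArray header, 20-byte sloppy arguments object): with 2 mapped
// parameters and 3 arguments, r9 = (16 + 8) + (8 + 12) + 20 = 64 bytes,
// allocated as one chunk below.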
2021 // Do the allocation of all three objects in one go.
2022 __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
2024 // r0 = address of new object(s) (tagged)
2025 // r2 = argument count (smi-tagged)
2026 // Get the arguments boilerplate from the current native context into r4.
2027 const int kNormalOffset =
2028 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
2029 const int kAliasedOffset =
2030 Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
2032 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2033 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
2034 __ cmp(r1, Operand::Zero());
2035 __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
2036 __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
2038 // r0 = address of new object (tagged)
2039 // r1 = mapped parameter count (tagged)
2040 // r2 = argument count (smi-tagged)
2041 // r4 = address of arguments map (tagged)
2042 __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
2043 __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
2044 __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
2045 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
2047 // Set up the callee in-object property.
2048 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2049 __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
2050 __ AssertNotSmi(r3);
2051 const int kCalleeOffset = JSObject::kHeaderSize +
2052 Heap::kArgumentsCalleeIndex * kPointerSize;
2053 __ str(r3, FieldMemOperand(r0, kCalleeOffset));
2055 // Use the length (smi tagged) and set that as an in-object property too.
2057 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2058 const int kLengthOffset = JSObject::kHeaderSize +
2059 Heap::kArgumentsLengthIndex * kPointerSize;
2060 __ str(r2, FieldMemOperand(r0, kLengthOffset));
2062 // Set up the elements pointer in the allocated arguments object.
2063 // If we allocated a parameter map, r4 will point there, otherwise
2064 // it will point to the backing store.
2065 __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
2066 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
2068 // r0 = address of new object (tagged)
2069 // r1 = mapped parameter count (tagged)
2070 // r2 = argument count (tagged)
2071 // r4 = address of parameter map or backing store (tagged)
2072 // Initialize parameter map. If there are no mapped arguments, we're done.
2073 Label skip_parameter_map;
2074 __ cmp(r1, Operand(Smi::FromInt(0)));
2075 // Move backing store address to r3, because it is
2076 // expected there when filling in the unmapped arguments.
2077 __ mov(r3, r4, LeaveCC, eq);
2078 __ b(eq, &skip_parameter_map);
2080 __ LoadRoot(r6, Heap::kSloppyArgumentsElementsMapRootIndex);
2081 __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
2082 __ add(r6, r1, Operand(Smi::FromInt(2)));
2083 __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
2084 __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
2085 __ add(r6, r4, Operand(r1, LSL, 1));
2086 __ add(r6, r6, Operand(kParameterMapHeaderSize));
2087 __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
2089 // Copy the parameter slots and the holes in the arguments.
2090 // We need to fill in mapped_parameter_count slots. They index the context,
2091 // where parameters are stored in reverse order, at
2092 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2093 // The mapped parameters thus need to get indices
2094 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
2095 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2096 // We loop from right to left.
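// Worked example: if MIN_CONTEXT_SLOTS is 4, parameter_count is 3 and
// mapped_parameter_count is 2, the mapped parameters receive context
// indices 6 down to 5 (4+3-1 .. 4+3-2), filled in from right to left.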
2097 Label parameters_loop, parameters_test;
2099 __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
2100 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
2101 __ sub(r9, r9, Operand(r1));
2102 __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
2103 __ add(r3, r4, Operand(r6, LSL, 1));
2104 __ add(r3, r3, Operand(kParameterMapHeaderSize));
2106 // r6 = loop variable (tagged)
2107 // r1 = mapping index (tagged)
2108 // r3 = address of backing store (tagged)
2109 // r4 = address of parameter map (tagged), which is also the address of new
2110 // object + Heap::kSloppyArgumentsObjectSize (tagged)
2111 // r0 = temporary scratch (among others, for address calculation)
2112 // r5 = the hole value
2113 __ jmp(¶meters_test);
2115 __ bind(¶meters_loop);
2116 __ sub(r6, r6, Operand(Smi::FromInt(1)));
2117 __ mov(r0, Operand(r6, LSL, 1));
2118 __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
2119 __ str(r9, MemOperand(r4, r0));
2120 __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
2121 __ str(r5, MemOperand(r3, r0));
2122 __ add(r9, r9, Operand(Smi::FromInt(1)));
2123 __ bind(¶meters_test);
2124 __ cmp(r6, Operand(Smi::FromInt(0)));
2125 __ b(ne, ¶meters_loop);
2127 // Restore r0 = new object (tagged)
2128 __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
2130 __ bind(&skip_parameter_map);
2131 // r0 = address of new object (tagged)
2132 // r2 = argument count (tagged)
2133 // r3 = address of backing store (tagged)
2135 // Copy arguments header and remaining slots (if there are any).
2136 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
2137 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
2138 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
2140 Label arguments_loop, arguments_test;
2142 __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
2143 __ sub(r4, r4, Operand(r9, LSL, 1));
2144 __ jmp(&arguments_test);
2146 __ bind(&arguments_loop);
2147 __ sub(r4, r4, Operand(kPointerSize));
2148 __ ldr(r6, MemOperand(r4, 0));
2149 __ add(r5, r3, Operand(r9, LSL, 1));
2150 __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
2151 __ add(r9, r9, Operand(Smi::FromInt(1)));
2153 __ bind(&arguments_test);
2154 __ cmp(r9, Operand(r2));
2155 __ b(lt, &arguments_loop);
2157 // Return and remove the on-stack parameters.
2158 __ add(sp, sp, Operand(3 * kPointerSize));
2161 // Do the runtime call to allocate the arguments object.
2162 // r0 = address of new object (tagged)
2163 // r2 = argument count (tagged)
2165 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
2166 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
2170 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2171 // sp[0] : number of parameters
2172 // sp[4] : receiver displacement
2174 // Check if the calling frame is an arguments adaptor frame.
2175 Label adaptor_frame, try_allocate, runtime;
2176 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2177 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
2178 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2179 __ b(eq, &adaptor_frame);
2181 // Get the length from the frame.
2182 __ ldr(r1, MemOperand(sp, 0));
2183 __ b(&try_allocate);
2185 // Patch the arguments.length and the parameters pointer.
2186 __ bind(&adaptor_frame);
2187 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2188 __ str(r1, MemOperand(sp, 0));
2189 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
2190 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
2191 __ str(r3, MemOperand(sp, 1 * kPointerSize));
2193 // Try the new space allocation. Start out with computing the size
2194 // of the arguments object and the elements array in words.
2195 Label add_arguments_object;
2196 __ bind(&try_allocate);
2197 __ SmiUntag(r1, SetCC);
2198 __ b(eq, &add_arguments_object);
2199 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
2200 __ bind(&add_arguments_object);
2201 __ add(r1, r1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
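// E.g. (a sketch assuming the usual 32-bit layouts: 2-word FixedArray
// header, 4-word strict arguments object): 2 arguments give
// r1 = 2 + 2 + 4 = 8 words for the single allocation below.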
2203 // Do the allocation of both objects in one go.
2204 __ Allocate(r1, r0, r2, r3, &runtime,
2205 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2207 // Get the arguments boilerplate from the current native context.
2208 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2209 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
2210 __ ldr(r4, MemOperand(
2211 r4, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
2213 __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
2214 __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
2215 __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
2216 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
2218 // Get the length (smi tagged) and set that as an in-object property too.
2219 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2220 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
2222 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
2223 Heap::kArgumentsLengthIndex * kPointerSize));
2225 // If there are no actual arguments, we're done.
2227 __ cmp(r1, Operand::Zero());
2230 // Get the parameters pointer from the stack.
2231 __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
2233 // Set up the elements pointer in the allocated arguments object and
2234 // initialize the header in the elements fixed array.
2235 __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
2236 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
2237 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
2238 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
2239 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
2242 // Copy the fixed array slots.
2244 // Set up r4 to point to the first array slot.
2245 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2247 // Pre-decrement r2 with kPointerSize on each iteration.
2248 // Pre-decrement in order to skip receiver.
2249 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
2250 // Post-increment r4 with kPointerSize on each iteration.
2251 __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
2252 __ sub(r1, r1, Operand(1));
2253 __ cmp(r1, Operand::Zero());
2256 // Return and remove the on-stack parameters.
2258 __ add(sp, sp, Operand(3 * kPointerSize));
2261 // Do the runtime call to allocate the arguments object.
2263 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
2267 void RegExpExecStub::Generate(MacroAssembler* masm) {
2268 // Just jump directly to the runtime if native RegExp is not selected at
2269 // compile time, or if the regexp entry in generated code is disabled by a
2270 // runtime switch or at compilation.
2271 #ifdef V8_INTERPRETED_REGEXP
2272 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2273 #else // V8_INTERPRETED_REGEXP
2275 // Stack frame on entry.
2276 // sp[0]: last_match_info (expected JSArray)
2277 // sp[4]: previous index
2278 // sp[8]: subject string
2279 // sp[12]: JSRegExp object
2281 const int kLastMatchInfoOffset = 0 * kPointerSize;
2282 const int kPreviousIndexOffset = 1 * kPointerSize;
2283 const int kSubjectOffset = 2 * kPointerSize;
2284 const int kJSRegExpOffset = 3 * kPointerSize;
2287 // Allocation of registers for this function. These are in callee save
2288 // registers and will be preserved by the call to the native RegExp code, as
2289 // this code is called using the normal C calling convention. When calling
2290 // directly from generated code the native RegExp code will not do a GC and
2291 // therefore the contents of these registers are safe to use after the call.
2292 Register subject = r4;
2293 Register regexp_data = r5;
2294 Register last_match_info_elements = no_reg; // will be r6;
2296 // Ensure that a RegExp stack is allocated.
2297 ExternalReference address_of_regexp_stack_memory_address =
2298 ExternalReference::address_of_regexp_stack_memory_address(isolate());
2299 ExternalReference address_of_regexp_stack_memory_size =
2300 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2301 __ mov(r0, Operand(address_of_regexp_stack_memory_size));
2302 __ ldr(r0, MemOperand(r0, 0));
2303 __ cmp(r0, Operand::Zero());
2306 // Check that the first argument is a JSRegExp object.
2307 __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
2308 __ JumpIfSmi(r0, &runtime);
2309 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
2312 // Check that the RegExp has been compiled (data contains a fixed array).
2313 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
2314 if (FLAG_debug_code) {
2315 __ SmiTst(regexp_data);
2316 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2317 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
2318 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2321 // regexp_data: RegExp data (FixedArray)
2322 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2323 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2324 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2327 // regexp_data: RegExp data (FixedArray)
2328 // Check that the number of captures fits in the static offsets vector buffer.
2329 __ ldr(r2,
2330 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2331 // Check (number_of_captures + 1) * 2 <= offsets vector size
2332 // Or number_of_captures * 2 <= offsets vector size - 2
2333 // Multiplying by 2 comes for free since r2 is smi-tagged.
2334 STATIC_ASSERT(kSmiTag == 0);
2335 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2336 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2337 __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2340 // Reset offset for possibly sliced string.
2341 __ mov(r9, Operand::Zero());
2342 __ ldr(subject, MemOperand(sp, kSubjectOffset));
2343 __ JumpIfSmi(subject, &runtime);
2344 __ mov(r3, subject); // Make a copy of the original subject string.
2345 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2346 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2347 // subject: subject string
2348 // r3: subject string
2349 // r0: subject string instance type
2350 // regexp_data: RegExp data (FixedArray)
2351 // Handle subject string according to its encoding and representation:
2352 // (1) Sequential string? If yes, go to (5).
2353 // (2) Anything but sequential or cons? If yes, go to (6).
2354 // (3) Cons string. If the string is flat, replace subject with first string.
2355 // Otherwise bail out.
2356 // (4) Is subject external? If yes, go to (7).
2357 // (5) Sequential string. Load regexp code according to encoding.
2361 // Deferred code at the end of the stub:
2362 // (6) Not a long external string? If yes, go to (8).
2363 // (7) External string. Make it, offset-wise, look like a sequential string.
2365 // (8) Short external string or not a string? If yes, bail out to runtime.
2366 // (9) Sliced string. Replace subject with parent. Go to (4).
2368 Label seq_string /* 5 */, external_string /* 7 */,
2369 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2370 not_long_external /* 8 */;
2372 // (1) Sequential string? If yes, go to (5).
2374 __ and_(r1, r0,
2375 Operand(kIsNotStringMask |
2376 kStringRepresentationMask |
2377 kShortExternalStringMask),
2378 SetCC);
2379 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2380 __ b(eq, &seq_string); // Go to (5).
2382 // (2) Anything but sequential or cons? If yes, go to (6).
2383 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2384 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2385 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2386 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2387 __ cmp(r1, Operand(kExternalStringTag));
2388 __ b(ge, ¬_seq_nor_cons); // Go to (6).
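// The STATIC_ASSERTs above order the tags so that everything that is neither
// sequential nor cons (external, sliced, short external, non-string)
// compares >= kExternalStringTag, letting this one cmp/ge branch route all
// of them to the deferred code.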
2390 // (3) Cons string. Check that it's flat.
2391 // Replace subject with first string and reload instance type.
2392 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
2393 __ CompareRoot(r0, Heap::kempty_stringRootIndex);
2395 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2397 // (4) Is subject external? If yes, go to (7).
2398 __ bind(&check_underlying);
2399 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2400 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2401 STATIC_ASSERT(kSeqStringTag == 0);
2402 __ tst(r0, Operand(kStringRepresentationMask));
2403 // The underlying external string is never a short external string.
2404 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2405 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2406 __ b(ne, &external_string); // Go to (7).
2408 // (5) Sequential string. Load regexp code according to encoding.
2409 __ bind(&seq_string);
2410 // subject: sequential subject string (or look-alike, external string)
2411 // r3: original subject string
2412 // Load previous index and check range before r3 is overwritten. We have to
2413 // use r3 instead of subject here because subject might have been only made
2414 // to look like a sequential string when it actually is an external string.
2415 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
2416 __ JumpIfNotSmi(r1, &runtime);
2417 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
2418 __ cmp(r3, Operand(r1));
2422 STATIC_ASSERT(4 == kOneByteStringTag);
2423 STATIC_ASSERT(kTwoByteStringTag == 0);
2424 __ and_(r0, r0, Operand(kStringEncodingMask));
2425 __ mov(r3, Operand(r0, ASR, 2), SetCC);
2426 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
2427 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
2429 // (E) Carry on. String handling is done.
2430 // r6: irregexp code
2431 // Check that the irregexp code has been generated for the actual string
2432 // encoding. If it has, the field contains a code object; otherwise it contains
2433 // a smi (code flushing support).
2434 __ JumpIfSmi(r6, &runtime);
2436 // r1: previous index
2437 // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
2439 // subject: Subject string
2440 // regexp_data: RegExp data (FixedArray)
2441 // All checks done. Now push arguments for native regexp code.
2442 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);
2444 // Isolates: note we add an additional parameter here (isolate pointer).
2445 const int kRegExpExecuteArguments = 9;
2446 const int kParameterRegisters = 4;
2447 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
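// Per the ARM calling convention the first four arguments go in r0-r3, so
// arguments 5 through 9 are stored to the stack slots at
// sp + 1..5 * kPointerSize below; sp + 0 is the return-address cell.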
2449 // Stack pointer now points to cell where return address is to be written.
2450 // Arguments are before that on the stack or in registers.
2452 // Argument 9 (sp[20]): Pass current isolate address.
2453 __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2454 __ str(r0, MemOperand(sp, 5 * kPointerSize));
2456 // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
2457 __ mov(r0, Operand(1));
2458 __ str(r0, MemOperand(sp, 4 * kPointerSize));
2460 // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
2461 __ mov(r0, Operand(address_of_regexp_stack_memory_address));
2462 __ ldr(r0, MemOperand(r0, 0));
2463 __ mov(r2, Operand(address_of_regexp_stack_memory_size));
2464 __ ldr(r2, MemOperand(r2, 0));
2465 __ add(r0, r0, Operand(r2));
2466 __ str(r0, MemOperand(sp, 3 * kPointerSize));
2468 // Argument 6: Set the number of capture registers to zero to force global
2469 // regexps to behave as non-global. This does not affect non-global regexps.
2470 __ mov(r0, Operand::Zero());
2471 __ str(r0, MemOperand(sp, 2 * kPointerSize));
2473 // Argument 5 (sp[4]): static offsets vector buffer.
2474 __ mov(r0,
2475 Operand(ExternalReference::address_of_static_offsets_vector(
2476 isolate())));
2477 __ str(r0, MemOperand(sp, 1 * kPointerSize));
2479 // For arguments 4 and 3, get the string length, calculate the start of the
2480 // string data, and calculate the shift of the index (0 for ASCII, 1 for two-byte).
2481 __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2482 __ eor(r3, r3, Operand(1));
2483 // Load the length from the original subject string from the previous stack
2484 // frame. Therefore we have to use fp, which points exactly to two pointer
2485 // sizes below the previous sp. (Because creating a new stack frame pushes
2486 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2487 __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2488 // If slice offset is not 0, load the length from the original sliced string.
2489 // Argument 4, r3: End of string data
2490 // Argument 3, r2: Start of string data
2491 // Prepare start and end index of the input.
2492 __ add(r9, r7, Operand(r9, LSL, r3));
2493 __ add(r2, r9, Operand(r1, LSL, r3));
2495 __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
2497 __ add(r3, r9, Operand(r7, LSL, r3));
2499 // Argument 2 (r1): Previous index.
2502 // Argument 1 (r0): Subject string.
2503 __ mov(r0, subject);
2505 // Locate the code entry and call it.
2506 __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
2507 DirectCEntryStub stub(isolate());
2508 stub.GenerateCall(masm, r6);
2510 __ LeaveExitFrame(false, no_reg, true);
2512 last_match_info_elements = r6;
2515 // subject: subject string (callee saved)
2516 // regexp_data: RegExp data (callee saved)
2517 // last_match_info_elements: Last match info elements (callee saved)
2518 // Check the result.
2520 __ cmp(r0, Operand(1));
2521 // We expect exactly one result since we force the called regexp to behave
2522 // as non-global.
2525 __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
2527 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2528 // If not an exception, it can only be retry. Handle that in the runtime system.
2530 // Result must now be exception. If there is no pending exception already, a
2531 // stack overflow (on the backtrack stack) was detected in RegExp code, but
2532 // the exception has not been created yet. Handle that in the runtime system.
2533 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2534 __ mov(r1, Operand(isolate()->factory()->the_hole_value()));
2535 __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2536 isolate())));
2537 __ ldr(r0, MemOperand(r2, 0));
2541 __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
2543 // Check if the exception is a termination. If so, throw as uncatchable.
2544 __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
2546 Label termination_exception;
2547 __ b(eq, &termination_exception);
2551 __ bind(&termination_exception);
2552 __ ThrowUncatchable(r0);
2555 // For failure and exception return null.
2556 __ mov(r0, Operand(isolate()->factory()->null_value()));
2557 __ add(sp, sp, Operand(4 * kPointerSize));
2560 // Process the result from the native regexp code.
2562 __ ldr(r1,
2563 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2564 // Calculate number of capture registers (number_of_captures + 1) * 2.
2565 // Multiplying by 2 comes for free since r1 is smi-tagged.
2566 STATIC_ASSERT(kSmiTag == 0);
2567 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2568 __ add(r1, r1, Operand(2)); // r1 was a smi.
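// The smi representation is the value shifted left by one, so r1 already
// held number_of_captures * 2; adding 2 yields (number_of_captures + 1) * 2
// as an untagged integer.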
2570 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2571 __ JumpIfSmi(r0, &runtime);
2572 __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
2574 // Check that the JSArray is in fast case.
2575 __ ldr(last_match_info_elements,
2576 FieldMemOperand(r0, JSArray::kElementsOffset));
2577 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2578 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
2580 // Check that the last match info has space for the capture registers and the
2581 // additional information.
2582 __ ldr(r0,
2583 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2584 __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
2585 __ cmp(r2, Operand::SmiUntag(r0));
2588 // r1: number of capture registers
2589 // r4: subject string
2590 // Store the capture count.
2591 __ SmiTag(r2, r1);
2592 __ str(r2, FieldMemOperand(last_match_info_elements,
2593 RegExpImpl::kLastCaptureCountOffset));
2594 // Store last subject and last input.
2595 __ str(subject,
2596 FieldMemOperand(last_match_info_elements,
2597 RegExpImpl::kLastSubjectOffset));
2598 __ mov(r2, subject);
2599 __ RecordWriteField(last_match_info_elements,
2600 RegExpImpl::kLastSubjectOffset,
2605 __ mov(subject, r2);
2606 __ str(subject,
2607 FieldMemOperand(last_match_info_elements,
2608 RegExpImpl::kLastInputOffset));
2609 __ RecordWriteField(last_match_info_elements,
2610 RegExpImpl::kLastInputOffset,
2616 // Get the static offsets vector filled by the native regexp code.
2617 ExternalReference address_of_static_offsets_vector =
2618 ExternalReference::address_of_static_offsets_vector(isolate());
2619 __ mov(r2, Operand(address_of_static_offsets_vector));
2621 // r1: number of capture registers
2622 // r2: offsets vector
2623 Label next_capture, done;
2624 // Capture register counter starts from number of capture registers and
2625 // counts down until wrapping after zero.
2626 __ add(r0,
2627 last_match_info_elements,
2628 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2629 __ bind(&next_capture);
2630 __ sub(r1, r1, Operand(1), SetCC);
2631 __ b(mi, &done);
2632 // Read the value from the static offsets vector buffer.
2633 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
2634 // Store the smi value in the last match info.
2635 __ SmiTag(r3);
2636 __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
2637 __ jmp(&next_capture);
2640 // Return last match info.
2641 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2642 __ add(sp, sp, Operand(4 * kPointerSize));
2645 // Do the runtime call to execute the regexp.
2647 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2649 // Deferred code for string handling.
2650 // (6) Not a long external string? If yes, go to (8).
2651 __ bind(¬_seq_nor_cons);
2652 // Compare flags are still set.
2653 __ b(gt, ¬_long_external); // Go to (8).
2655 // (7) External string. Make it, offset-wise, look like a sequential string.
2656 __ bind(&external_string);
2657 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2658 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2659 if (FLAG_debug_code) {
2660 // Assert that we do not have a cons or slice (indirect strings) here.
2661 // Sequential strings have already been ruled out.
2662 __ tst(r0, Operand(kIsIndirectStringMask));
2663 __ Assert(eq, kExternalStringExpectedButNotFound);
2665 __ ldr(subject,
2666 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2667 // Move the pointer so that offset-wise, it looks like a sequential string.
2668 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2669 __ sub(subject,
2670 subject,
2671 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
2672 __ jmp(&seq_string); // Go to (5).
2674 // (8) Short external string or not a string? If yes, bail out to runtime.
2675 __ bind(¬_long_external);
2676 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
2677 __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
2680 // (9) Sliced string. Replace subject with parent. Go to (4).
2681 // Load offset into r9 and replace subject string with parent.
2682 __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2684 __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2685 __ jmp(&check_underlying); // Go to (4).
2686 #endif // V8_INTERPRETED_REGEXP
2690 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2691 // Cache the called function in a feedback vector slot. Cache states
2692 // are uninitialized, monomorphic (indicated by a JSFunction), and
2693 // megamorphic.
2694 // r0 : number of arguments to the construct function
2695 // r1 : the function to call
2696 // r2 : Feedback vector
2697 // r3 : slot in feedback vector (Smi)
2698 Label initialize, done, miss, megamorphic, not_array_function;
2700 DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
2701 masm->isolate()->heap()->megamorphic_symbol());
2702 DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
2703 masm->isolate()->heap()->uninitialized_symbol());
2705 // Load the cache state into r4.
2706 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2707 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
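// The slot index in r3 is a smi (index << 1 on 32-bit), so
// PointerOffsetFromSmiKey only needs to shift it left once more to form the
// byte offset index * kPointerSize into the vector's elements.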
2709 // A monomorphic cache hit or an already megamorphic state: invoke the
2710 // function without changing the state.
2714 if (!FLAG_pretenuring_call_new) {
2715 // If we came here, we need to see if we are the array function.
2716 // If we didn't have a matching function, and we didn't find the megamorphic
2717 // sentinel, then we have in the slot either some other function or an
2718 // AllocationSite. Do a map check on the object in r4.
2719 __ ldr(r5, FieldMemOperand(r4, 0));
2720 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2723 // Make sure the function is the Array() function
2724 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2726 __ b(ne, &megamorphic);
2732 // A monomorphic miss (i.e., here the cache is not uninitialized) goes
2733 // megamorphic.
2734 __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
2735 __ b(eq, &initialize);
2736 // MegamorphicSentinel is an immortal immovable object (a symbol) so no
2737 // write-barrier is needed.
2738 __ bind(&megamorphic);
2739 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2740 __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
2741 __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
2744 // An uninitialized cache is patched with the function
2745 __ bind(&initialize);
2747 if (!FLAG_pretenuring_call_new) {
2748 // Make sure the function is the Array() function
2749 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2751 __ b(ne, ¬_array_function);
2753 // The target function is the Array constructor.
2754 // Create an AllocationSite if we don't already have it, store it in the
2755 // slot.
2757 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2759 // Arguments register must be smi-tagged to call out.
2760 __ SmiTag(r0);
2761 __ Push(r3, r2, r1, r0);
2763 CreateAllocationSiteStub create_stub(masm->isolate());
2764 __ CallStub(&create_stub);
2766 __ Pop(r3, r2, r1, r0);
2767 __ SmiUntag(r0);
2771 __ bind(¬_array_function);
2774 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2775 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2776 __ str(r1, MemOperand(r4, 0));
2778 __ Push(r4, r2, r1);
2779 __ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
2780 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
2787 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2788 // Do not transform the receiver for strict mode functions.
2789 __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2790 __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset));
2791 __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
2795 // Do not transform the receiver for native functions (the compiler hints are already in r4).
2796 __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
2801 static void EmitSlowCase(MacroAssembler* masm,
2803 Label* non_function) {
2804 // Check for function proxy.
2805 __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
2806 __ b(ne, non_function);
2807 __ push(r1); // put proxy as additional argument
2808 __ mov(r0, Operand(argc + 1, RelocInfo::NONE32));
2809 __ mov(r2, Operand::Zero());
2810 __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
2812 Handle<Code> adaptor =
2813 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2814 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2817 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2818 // of the original receiver from the call site).
2819 __ bind(non_function);
2820 __ str(r1, MemOperand(sp, argc * kPointerSize));
2821 __ mov(r0, Operand(argc)); // Set up the number of arguments.
2822 __ mov(r2, Operand::Zero());
2823 __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
2824 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2825 RelocInfo::CODE_TARGET);
2829 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2830 // Wrap the receiver and patch it back onto the stack.
2831 { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
2833 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2836 __ str(r0, MemOperand(sp, argc * kPointerSize));
2841 static void CallFunctionNoFeedback(MacroAssembler* masm,
2842 int argc, bool needs_checks,
2843 bool call_as_method) {
2844 // r1 : the function to call
2845 Label slow, non_function, wrap, cont;
2848 // Check that the function is really a JavaScript function.
2849 // r1: pushed function (to be verified)
2850 __ JumpIfSmi(r1, &non_function);
2852 // Goto slow case if we do not have a function.
2853 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2857 // Fast-case: Invoke the function now.
2858 // r1: pushed function
2859 ParameterCount actual(argc);
2861 if (call_as_method) {
2863 EmitContinueIfStrictOrNative(masm, &cont);
2866 // Compute the receiver in sloppy mode.
2867 __ ldr(r3, MemOperand(sp, argc * kPointerSize));
2870 __ JumpIfSmi(r3, &wrap);
2871 __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
2880 __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
2883 // Slow-case: Non-function called.
2885 EmitSlowCase(masm, argc, &non_function);
2888 if (call_as_method) {
2890 EmitWrapCase(masm, argc, &cont);
2895 void CallFunctionStub::Generate(MacroAssembler* masm) {
2896 CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
2900 void CallConstructStub::Generate(MacroAssembler* masm) {
2901 // r0 : number of arguments
2902 // r1 : the function to call
2903 // r2 : feedback vector
2904 // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
2906 Label slow, non_function_call;
2908 // Check that the function is not a smi.
2909 __ JumpIfSmi(r1, &non_function_call);
2910 // Check that the function is a JSFunction.
2911 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2914 if (RecordCallTarget()) {
2915 GenerateRecordCallTarget(masm);
2917 __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
2918 if (FLAG_pretenuring_call_new) {
2919 // Put the AllocationSite from the feedback vector into r2.
2920 // By adding kPointerSize we encode that we know the AllocationSite
2921 // entry is at the feedback vector slot given by r3 + 1.
2922 __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize + kPointerSize));
2924 Label feedback_register_initialized;
2925 // Put the AllocationSite from the feedback vector into r2, or undefined.
2926 __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
2927 __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
2928 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2929 __ b(eq, &feedback_register_initialized);
2930 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
2931 __ bind(&feedback_register_initialized);
2934 __ AssertUndefinedOrAllocationSite(r2, r5);
2937 // Jump to the function-specific construct stub.
2938 Register jmp_reg = r4;
2939 __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2940 __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
2941 SharedFunctionInfo::kConstructStubOffset));
2942 __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); // Writing pc performs the jump.
2944 // r0: number of arguments
2945 // r1: called object
2949 __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
2950 __ b(ne, &non_function_call);
2951 __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2954 __ bind(&non_function_call);
2955 __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2957 // Set expected number of arguments to zero (not changing r0).
2958 __ mov(r2, Operand::Zero());
2959 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2960 RelocInfo::CODE_TARGET);
2964 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2965 __ ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2966 __ ldr(vector, FieldMemOperand(vector,
2967 JSFunction::kSharedFunctionInfoOffset));
2968 __ ldr(vector, FieldMemOperand(vector,
2969 SharedFunctionInfo::kFeedbackVectorOffset));
2973 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2977 int argc = state_.arg_count();
2978 ParameterCount actual(argc);
2980 EmitLoadTypeFeedbackVector(masm, r2);
2982 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2986 __ mov(r0, Operand(arg_count()));
2987 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2988 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
2990 // Verify that r4 contains an AllocationSite
2991 __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
2992 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2996 ArrayConstructorStub stub(masm->isolate(), arg_count());
2997 __ TailCallStub(&stub);
3000 GenerateMiss(masm, IC::kCallIC_Customization_Miss);
3002 // The slow case; we need this no matter what to complete a call after a miss.
3003 CallFunctionNoFeedback(masm,
3009 __ stop("Unexpected code address");
3013 void CallICStub::Generate(MacroAssembler* masm) {
3015 // r3 - slot id (Smi)
3016 Label extra_checks_or_miss, slow_start;
3017 Label slow, non_function, wrap, cont;
3018 Label have_js_function;
3019 int argc = state_.arg_count();
3020 ParameterCount actual(argc);
3022 EmitLoadTypeFeedbackVector(masm, r2);
3024 // The checks. First, does r1 match the recorded monomorphic target?
3025 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
3026 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
3028 __ b(ne, &extra_checks_or_miss);
3030 __ bind(&have_js_function);
3031 if (state_.CallAsMethod()) {
3032 EmitContinueIfStrictOrNative(masm, &cont);
3033 // Compute the receiver in sloppy mode.
3034 __ ldr(r3, MemOperand(sp, argc * kPointerSize));
3036 __ JumpIfSmi(r3, &wrap);
3037 __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
3043 __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
3046 EmitSlowCase(masm, argc, &non_function);
3048 if (state_.CallAsMethod()) {
3050 EmitWrapCase(masm, argc, &cont);
3053 __ bind(&extra_checks_or_miss);
3056 __ CompareRoot(r4, Heap::kMegamorphicSymbolRootIndex);
3057 __ b(eq, &slow_start);
3058 __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
3061 if (!FLAG_trace_ic) {
3062 // We are going megamorphic. If the feedback is a JSFunction, it is fine
3063 // to handle it here. More complex cases are dealt with in the runtime.
3064 __ AssertNotSmi(r4);
3065 __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
3067 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
3068 __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
3069 __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
3070 __ jmp(&slow_start);
3073 // We are here because tracing is on or we are going monomorphic.
3075 GenerateMiss(masm, IC::kCallIC_Miss);
3078 __ bind(&slow_start);
3079 // Check that the function is really a JavaScript function.
3080 // r1: pushed function (to be verified)
3081 __ JumpIfSmi(r1, &non_function);
3083 // Goto slow case if we do not have a function.
3084 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
3086 __ jmp(&have_js_function);
3090 void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
3091 // Get the receiver of the function from the stack; 1 ~ return address.
3092 __ ldr(r4, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
3095 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
3097 // Push the receiver and the function and feedback info.
3098 __ Push(r4, r1, r2, r3);
3101 ExternalReference miss = ExternalReference(IC_Utility(id),
3102 masm->isolate());
3103 __ CallExternalReference(miss, 4);
3105 // Move result to r1 and exit the internal frame.
3111 // StringCharCodeAtGenerator
3112 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3115 Label got_char_code;
3116 Label sliced_string;
3118 // If the receiver is a smi trigger the non-string case.
3119 __ JumpIfSmi(object_, receiver_not_string_);
3121 // Fetch the instance type of the receiver into result register.
3122 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3123 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3124 // If the receiver is not a string trigger the non-string case.
3125 __ tst(result_, Operand(kIsNotStringMask));
3126 __ b(ne, receiver_not_string_);
3128 // If the index is non-smi trigger the non-smi case.
3129 __ JumpIfNotSmi(index_, &index_not_smi_);
3130 __ bind(&got_smi_index_);
3132 // Check for index out of range.
3133 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
3134 __ cmp(ip, Operand(index_));
3135 __ b(ls, index_out_of_range_);
3137 __ SmiUntag(index_);
3139 StringCharLoadGenerator::Generate(masm,
3140 object_,
3141 index_,
3142 result_,
3143 &call_runtime_);
3150 void StringCharCodeAtGenerator::GenerateSlow(
3151 MacroAssembler* masm,
3152 const RuntimeCallHelper& call_helper) {
3153 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3155 // Index is not a smi.
3156 __ bind(&index_not_smi_);
3157 // If index is a heap number, try converting it to an integer.
3158 __ CheckMap(index_,
3159 result_,
3160 Heap::kHeapNumberMapRootIndex,
3161 index_not_number_,
3162 DONT_DO_SMI_CHECK);
3163 call_helper.BeforeCall(masm);
3165 __ push(index_); // Consumed by runtime conversion function.
3166 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3167 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3169 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3170 // NumberToSmi discards numbers that are not exact integers.
3171 __ CallRuntime(Runtime::kNumberToSmi, 1);
3173 // Save the conversion result before the pop instructions below
3174 // have a chance to overwrite it.
3175 __ Move(index_, r0);
3177 // Reload the instance type.
3178 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3179 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3180 call_helper.AfterCall(masm);
3181 // If index is still not a smi, it must be out of range.
3182 __ JumpIfNotSmi(index_, index_out_of_range_);
3183 // Otherwise, return to the fast path.
3184 __ jmp(&got_smi_index_);
3186 // Call runtime. We get here when the receiver is a string and the
3187 // index is a number, but the code of getting the actual character
3188 // is too complex (e.g., when the string needs to be flattened).
3189 __ bind(&call_runtime_);
3190 call_helper.BeforeCall(masm);
3192 __ Push(object_, index_);
3193 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
3194 __ Move(result_, r0);
3195 call_helper.AfterCall(masm);
3198 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3202 // -------------------------------------------------------------------------
3203 // StringCharFromCodeGenerator
3205 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3206 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3207 STATIC_ASSERT(kSmiTag == 0);
3208 STATIC_ASSERT(kSmiShiftSize == 0);
3209 DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1));
3210 __ tst(code_,
3211 Operand(kSmiTagMask |
3212 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3213 __ b(ne, &slow_case_);
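// The tst above is non-zero (ne) exactly when code_ is either not a smi
// (tag bit set) or encodes a character above String::kMaxOneByteCharCode,
// so a single branch filters both conditions into the slow case.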
3215 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3216 // At this point the code register contains a smi-tagged ASCII char code.
3217 __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
3218 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3219 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
3220 __ b(eq, &slow_case_);
3225 void StringCharFromCodeGenerator::GenerateSlow(
3226 MacroAssembler* masm,
3227 const RuntimeCallHelper& call_helper) {
3228 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3230 __ bind(&slow_case_);
3231 call_helper.BeforeCall(masm);
3233 __ CallRuntime(Runtime::kCharFromCode, 1);
3234 __ Move(result_, r0);
3235 call_helper.AfterCall(masm);
3238 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3242 enum CopyCharactersFlags {
3244 DEST_ALWAYS_ALIGNED = 2
3248 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3249 Register dest,
3250 Register src,
3251 Register count,
3252 Register scratch,
3253 String::Encoding encoding) {
3254 if (FLAG_debug_code) {
3255 // Check that destination is word aligned.
3256 __ tst(dest, Operand(kPointerAlignmentMask));
3257 __ Check(eq, kDestinationOfCopyNotAligned);
3260 // Assumes word reads and writes are little endian.
3261 // Nothing to do for zero characters.
3263 if (encoding == String::TWO_BYTE_ENCODING) {
3264 __ add(count, count, Operand(count), SetCC); // Two bytes per character.
3265 }
3267 Register limit = count; // Read until dest equals this.
3268 __ add(limit, dest, Operand(count));
3270 Label loop_entry, loop;
3271 // Copy bytes from src to dest until dest hits limit.
3272 __ b(&loop_entry);
3273 __ bind(&loop);
3274 __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
3275 __ strb(scratch, MemOperand(dest, 1, PostIndex));
3276 __ bind(&loop_entry);
3277 __ cmp(dest, Operand(limit));
3278 __ b(lt, &loop);
3284 void StringHelper::GenerateHashInit(MacroAssembler* masm,
3285 Register hash,
3286 Register character) {
3287 // hash = character + (character << 10);
3288 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
3289 // Untag smi seed and add the character.
3290 __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
3291 // hash += hash << 10;
3292 __ add(hash, hash, Operand(hash, LSL, 10));
3293 // hash ^= hash >> 6;
3294 __ eor(hash, hash, Operand(hash, LSR, 6));
3298 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3299 Register hash,
3300 Register character) {
3301 // hash += character;
3302 __ add(hash, hash, Operand(character));
3303 // hash += hash << 10;
3304 __ add(hash, hash, Operand(hash, LSL, 10));
3305 // hash ^= hash >> 6;
3306 __ eor(hash, hash, Operand(hash, LSR, 6));
3310 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
3311 Register hash) {
3312 // hash += hash << 3;
3313 __ add(hash, hash, Operand(hash, LSL, 3));
3314 // hash ^= hash >> 11;
3315 __ eor(hash, hash, Operand(hash, LSR, 11));
3316 // hash += hash << 15;
3317 __ add(hash, hash, Operand(hash, LSL, 15));
3319 __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
3321 // if (hash == 0) hash = 27;
3322 __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
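// For reference, the three hash helpers above compute the following running
// hash (a plain sketch; StringHasher is the canonical implementation):
//
//   hash = seed + c0; hash += hash << 10; hash ^= hash >> 6;
//   for each later character c: hash += c; hash += hash << 10; hash ^= hash >> 6;
//   hash += hash << 3; hash ^= hash >> 11; hash += hash << 15;
//   hash &= String::kHashBitMask;
//   if (hash == 0) hash = StringHasher::kZeroHash;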
3326 void SubStringStub::Generate(MacroAssembler* masm) {
3329 // Stack frame on entry.
3330 // lr: return address
3335 // This stub is called from the native-call %_SubString(...), so
3336 // nothing can be assumed about the arguments. It is tested that:
3337 // "string" is a sequential string,
3338 // both "from" and "to" are smis, and
3339 // 0 <= from <= to <= string.length.
3340 // If any of these assumptions fail, we call the runtime system.
3342 const int kToOffset = 0 * kPointerSize;
3343 const int kFromOffset = 1 * kPointerSize;
3344 const int kStringOffset = 2 * kPointerSize;
3346 __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
3347 STATIC_ASSERT(kFromOffset == kToOffset + 4);
3348 STATIC_ASSERT(kSmiTag == 0);
3349 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3351 // Arithmetic shift right by one un-smi-tags. In this case we rotate right
3352 // instead because we bail out on non-smi values: ROR and ASR are equivalent
3353 // for smis but they set the flags in a way that's easier to optimize.
3354 __ mov(r2, Operand(r2, ROR, 1), SetCC);
3355 __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
3356 // If either to or from had the smi tag bit set, then C is set now, and N
3357 // has the same value: we rotated by 1, so the bottom bit is now the top bit.
3358 // We want to bail out to the runtime here if From is negative. In that
3359 // case, the next instruction is not executed and we fall through to
3360 // bailing out to the runtime.
3361 // Executed if both r2 and r3 are untagged integers.
3362 __ sub(r2, r2, Operand(r3), SetCC, cc);
3363 // One of the above un-smis or the above SUB could have set N==1.
3364 __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to.
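// Worked example: the smi 5 is encoded as 0b1010, and ROR #1 yields 5
// with C clear. A heap pointer has bit 0 set, so ROR #1 rotates that
// bit into both N and C, which the mi branch above catches.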
3366 // Make sure first argument is a string.
3367 __ ldr(r0, MemOperand(sp, kStringOffset));
3368 __ JumpIfSmi(r0, &runtime);
3369 Condition is_string = masm->IsObjectStringType(r0, r1);
3370 __ b(NegateCondition(is_string), &runtime);
3373 __ cmp(r2, Operand(1));
3374 __ b(eq, &single_char);
3376 // Short-cut for the case of trivial substring.
3377 Label return_r0;
3378 // r0: original string
3379 // r2: result string length
3380 __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
3381 __ cmp(r2, Operand(r4, ASR, 1));
3382 // Return original string.
3383 __ b(eq, &return_r0);
3384 // Longer than original string's length or negative: unsafe arguments.
3385 __ b(hi, &runtime);
3386 // Shorter than original string's length: an actual substring.
3388 // Deal with different string types: update the index if necessary
3389 // and put the underlying string into r5.
3390 // r0: original string
3391 // r1: instance type
3393 // r3: from index (untagged)
3394 Label underlying_unpacked, sliced_string, seq_or_external_string;
3395 // If the string is not indirect, it can only be sequential or external.
3396 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3397 STATIC_ASSERT(kIsIndirectStringMask != 0);
3398 __ tst(r1, Operand(kIsIndirectStringMask));
3399 __ b(eq, &seq_or_external_string);
3401 __ tst(r1, Operand(kSlicedNotConsMask));
3402 __ b(ne, &sliced_string);
3403 // Cons string. Check whether it is flat, then fetch first part.
3404 __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
3405 __ CompareRoot(r5, Heap::kempty_stringRootIndex);
3406 __ b(ne, &runtime);
3407 __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
3408 // Update instance type.
3409 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
3410 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
3411 __ jmp(&underlying_unpacked);
3413 __ bind(&sliced_string);
3414 // Sliced string. Fetch parent and correct start index by offset.
3415 __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
3416 __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
3417 __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
3418 // Update instance type.
3419 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
3420 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
3421 __ jmp(&underlying_unpacked);
3423 __ bind(&seq_or_external_string);
3424 // Sequential or external string. Just move string to the expected register.
3425 __ mov(r5, r0);
3427 __ bind(&underlying_unpacked);
3429 if (FLAG_string_slices) {
3430 Label copy_routine;
3431 // r5: underlying subject string
3432 // r1: instance type of underlying subject string
3434 // r3: adjusted start index (untagged)
3435 __ cmp(r2, Operand(SlicedString::kMinLength));
3436 // Short slice. Copy instead of slicing.
3437 __ b(lt, &copy_routine);
3438 // Allocate new sliced string. At this point we do not reload the instance
3439 // type including the string encoding because we simply rely on the info
3440 // provided by the original string. It does not matter if the original
3441 // string's encoding is wrong because we always have to recheck encoding of
3442 // the newly created string's parent anyways due to externalized strings.
3443 Label two_byte_slice, set_slice_header;
3444 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3445 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3446 __ tst(r1, Operand(kStringEncodingMask));
3447 __ b(eq, &two_byte_slice);
3448 __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
3449 __ jmp(&set_slice_header);
3450 __ bind(&two_byte_slice);
3451 __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
3452 __ bind(&set_slice_header);
3453 __ mov(r3, Operand(r3, LSL, 1));
3454 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
3455 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
3456 __ jmp(&return_r0);
3458 __ bind(&copy_routine);
3459 }
3461 // r5: underlying subject string
3462 // r1: instance type of underlying subject string
3464 // r3: adjusted start index (untagged)
3465 Label two_byte_sequential, sequential_string, allocate_result;
3466 STATIC_ASSERT(kExternalStringTag != 0);
3467 STATIC_ASSERT(kSeqStringTag == 0);
3468 __ tst(r1, Operand(kExternalStringTag));
3469 __ b(eq, &sequential_string);
3471 // Handle external string.
3472 // Rule out short external strings.
3473 STATIC_ASSERT(kShortExternalStringTag != 0);
3474 __ tst(r1, Operand(kShortExternalStringTag));
3475 __ b(ne, &runtime);
3476 __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
3477 // r5 already points to the first character of underlying string.
3478 __ jmp(&allocate_result);
3480 __ bind(&sequential_string);
3481 // Locate first character of underlying subject string.
3482 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3483 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3485 __ bind(&allocate_result);
3486 // Sequential ASCII string. Allocate the result.
3487 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3488 __ tst(r1, Operand(kStringEncodingMask));
3489 __ b(eq, &two_byte_sequential);
3491 // Allocate and copy the resulting ASCII string.
3492 __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
3494 // Locate first character of substring to copy.
3495 __ add(r5, r5, r3);
3496 // Locate first character of result.
3497 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3499 // r0: result string
3500 // r1: first character of result string
3501 // r2: result string length
3502 // r5: first character of substring to copy
3503 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3504 StringHelper::GenerateCopyCharacters(
3505 masm, r1, r5, r2, r3, String::ONE_BYTE_ENCODING);
3506 __ jmp(&return_r0);
3508 // Allocate and copy the resulting two-byte string.
3509 __ bind(&two_byte_sequential);
3510 __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
3512 // Locate first character of substring to copy.
3513 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3514 __ add(r5, r5, Operand(r3, LSL, 1));
3515 // Locate first character of result.
3516 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3518 // r0: result string.
3519 // r1: first character of result.
3520 // r2: result length.
3521 // r5: first character of substring to copy.
3522 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3523 StringHelper::GenerateCopyCharacters(
3524 masm, r1, r5, r2, r3, String::TWO_BYTE_ENCODING);
3526 __ bind(&return_r0);
3527 Counters* counters = isolate()->counters();
3528 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
3529 __ Drop(3);
3530 __ Ret();
3532 // Just jump to runtime to create the sub string.
3533 __ bind(&runtime);
3534 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3536 __ bind(&single_char);
3537 // r0: original string
3538 // r1: instance type
3539 // r2: length
3540 // r3: from index (untagged)
3541 __ SmiTag(r3, r3);
3542 StringCharAtGenerator generator(
3543 r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3544 generator.GenerateFast(masm);
3545 __ Drop(3);
3546 __ Ret();
3547 generator.SkipSlow(masm, &runtime);
3551 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
3552 Register left,
3553 Register right,
3554 Register scratch1,
3555 Register scratch2,
3556 Register scratch3) {
3557 Register length = scratch1;
3560 Label strings_not_equal, check_zero_length;
3561 __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
3562 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3563 __ cmp(length, scratch2);
3564 __ b(eq, &check_zero_length);
3565 __ bind(&strings_not_equal);
3566 __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
3567 __ Ret();
3569 // Check if the length is zero.
3570 Label compare_chars;
3571 __ bind(&check_zero_length);
3572 STATIC_ASSERT(kSmiTag == 0);
3573 __ cmp(length, Operand::Zero());
3574 __ b(ne, &compare_chars);
3575 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3576 __ Ret();
3578 // Compare characters.
3579 __ bind(&compare_chars);
3580 GenerateAsciiCharsCompareLoop(masm,
3581 left, right, length, scratch2, scratch3,
3582 &strings_not_equal);
3584 // Characters are equal.
3585 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3590 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
3591 Register left,
3592 Register right,
3593 Register scratch1,
3594 Register scratch2,
3595 Register scratch3,
3596 Register scratch4) {
3597 Label result_not_equal, compare_lengths;
3598 // Find minimum length and length difference.
3599 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
3600 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3601 __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
3602 Register length_delta = scratch3;
3603 __ mov(scratch1, scratch2, LeaveCC, gt);
3604 Register min_length = scratch1;
3605 STATIC_ASSERT(kSmiTag == 0);
3606 __ cmp(min_length, Operand::Zero());
3607 __ b(eq, &compare_lengths);
3610 GenerateAsciiCharsCompareLoop(masm,
3611 left, right, min_length, scratch2, scratch4,
3612 &result_not_equal);
3614 // Compare lengths - strings up to min-length are equal.
3615 __ bind(&compare_lengths);
3616 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3617 // Use length_delta as result if it's zero.
3618 __ mov(r0, Operand(length_delta), SetCC);
3619 __ bind(&result_not_equal);
3620 // Conditionally update the result based either on length_delta or
3621 // the last comparison performed in the loop above.
3622 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
3623 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
3628 void StringCompareStub::GenerateAsciiCharsCompareLoop(
3629 MacroAssembler* masm,
3630 Register left,
3631 Register right,
3632 Register length,
3633 Register scratch1,
3634 Register scratch2,
3635 Label* chars_not_equal) {
3636 // Change index to run from -length to -1 by adding length to string
3637 // start. This means that loop ends when index reaches zero, which
3638 // doesn't need an additional compare.
3639 __ SmiUntag(length);
3640 __ add(scratch1, length,
3641 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3642 __ add(left, left, Operand(scratch1));
3643 __ add(right, right, Operand(scratch1));
3644 __ rsb(length, length, Operand::Zero());
3645 Register index = length; // index = -length;
3647 // Compare loop.
3648 Label loop;
3649 __ bind(&loop);
3650 __ ldrb(scratch1, MemOperand(left, index));
3651 __ ldrb(scratch2, MemOperand(right, index));
3652 __ cmp(scratch1, scratch2);
3653 __ b(ne, chars_not_equal);
3654 __ add(index, index, Operand(1), SetCC);
3655 __ b(ne, &loop);
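// Equivalent C sketch of this loop (illustrative): with left_end and
// right_end pointing one past the last character,
//   for (int i = -length; i != 0; i++)
//     if (left_end[i] != right_end[i]) goto chars_not_equal;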
3659 void StringCompareStub::Generate(MacroAssembler* masm) {
3660 Label runtime;
3662 Counters* counters = isolate()->counters();
3664 // Stack frame on entry.
3665 // sp[0]: right string
3666 // sp[4]: left string
3667 __ Ldrd(r0, r1, MemOperand(sp)); // Load right in r0, left in r1.
3669 Label not_same;
3670 __ cmp(r0, r1);
3671 __ b(ne, &not_same);
3672 STATIC_ASSERT(EQUAL == 0);
3673 STATIC_ASSERT(kSmiTag == 0);
3674 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3675 __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
3676 __ add(sp, sp, Operand(2 * kPointerSize));
3677 __ Ret();
3679 __ bind(&not_same);
3681 // Check that both objects are sequential ASCII strings.
3682 __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
3684 // Compare flat ASCII strings natively. Remove arguments from stack first.
3685 __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
3686 __ add(sp, sp, Operand(2 * kPointerSize));
3687 GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
3689 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
3690 // tagged as a small integer.
3691 __ bind(&runtime);
3692 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3696 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3697 // ----------- S t a t e -------------
3700 // -- lr : return address
3701 // -----------------------------------
3703 // Load r2 with the allocation site. We stick an undefined dummy value here
3704 // and replace it with the real allocation site later when we instantiate this
3705 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3706 __ Move(r2, handle(isolate()->heap()->undefined_value()));
3708 // Make sure that we actually patched the allocation site.
3709 if (FLAG_debug_code) {
3710 __ tst(r2, Operand(kSmiTagMask));
3711 __ Assert(ne, kExpectedAllocationSite);
3713 __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
3714 __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
3715 __ cmp(r2, ip);
3717 __ Assert(eq, kExpectedAllocationSite);
3718 }
3720 // Tail call into the stub that handles binary operations with allocation
3721 // sites.
3722 BinaryOpWithAllocationSiteStub stub(isolate(), state_);
3723 __ TailCallStub(&stub);
3727 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
3728 DCHECK(state_ == CompareIC::SMI);
3729 Label miss;
3730 __ orr(r2, r1, r0);
3731 __ JumpIfNotSmi(r2, &miss);
3733 if (GetCondition() == eq) {
3734 // For equality we do not care about the sign of the result.
3735 __ sub(r0, r0, r1, SetCC);
3736 } else {
3737 // Untag before subtracting to avoid handling overflow.
3738 __ SmiUntag(r1);
3739 __ sub(r0, r1, Operand::SmiUntag(r0));
3748 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
3749 DCHECK(state_ == CompareIC::NUMBER);
3751 Label generic_stub;
3752 Label unordered, maybe_undefined1, maybe_undefined2;
3753 Label miss;
3755 if (left_ == CompareIC::SMI) {
3756 __ JumpIfNotSmi(r1, &miss);
3757 }
3758 if (right_ == CompareIC::SMI) {
3759 __ JumpIfNotSmi(r0, &miss);
3760 }
3762 // Inlining the double comparison and falling back to the general compare
3763 // stub if NaN is involved.
3764 // Load left and right operand.
3765 Label done, left, left_smi, right_smi;
3766 __ JumpIfSmi(r0, &right_smi);
3767 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3768 DONT_DO_SMI_CHECK);
3769 __ sub(r2, r0, Operand(kHeapObjectTag));
3770 __ vldr(d1, r2, HeapNumber::kValueOffset);
3771 __ b(&left);
3772 __ bind(&right_smi);
3773 __ SmiToDouble(d1, r0);
3775 __ bind(&left);
3776 __ JumpIfSmi(r1, &left_smi);
3777 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3778 DONT_DO_SMI_CHECK);
3779 __ sub(r2, r1, Operand(kHeapObjectTag));
3780 __ vldr(d0, r2, HeapNumber::kValueOffset);
3781 __ b(&done);
3782 __ bind(&left_smi);
3783 __ SmiToDouble(d0, r1);
3785 __ bind(&done);
3786 // Compare operands.
3787 __ VFPCompareAndSetFlags(d0, d1);
3789 // Don't base result on status bits when a NaN is involved.
3790 __ b(vs, &unordered);
3792 // Return a result of -1, 0, or 1, based on status bits.
3793 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
3794 __ mov(r0, Operand(LESS), LeaveCC, lt);
3795 __ mov(r0, Operand(GREATER), LeaveCC, gt);
3796 __ Ret();
3798 __ bind(&unordered);
3799 __ bind(&generic_stub);
3800 ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
3801 CompareIC::GENERIC);
3802 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3804 __ bind(&maybe_undefined1);
3805 if (Token::IsOrderedRelationalCompareOp(op_)) {
3806 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
3807 __ b(ne, &miss);
3808 __ JumpIfSmi(r1, &unordered);
3809 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
3810 __ b(ne, &maybe_undefined2);
3811 __ jmp(&unordered);
3812 }
3814 __ bind(&maybe_undefined2);
3815 if (Token::IsOrderedRelationalCompareOp(op_)) {
3816 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
3817 __ b(eq, &unordered);
3818 }
3825 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3826 DCHECK(state_ == CompareIC::INTERNALIZED_STRING);
3828 Label miss;
3829 // Registers containing left and right operands respectively.
3830 Register left = r1;
3831 Register right = r0;
3832 Register tmp1 = r2;
3833 Register tmp2 = r3;
3835 // Check that both operands are heap objects.
3836 __ JumpIfEitherSmi(left, right, &miss);
3838 // Check that both operands are internalized strings.
3839 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3840 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3841 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3842 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3843 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3844 __ orr(tmp1, tmp1, Operand(tmp2));
3845 __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3846 __ b(ne, &miss);
3848 // Internalized strings are compared by identity.
3849 __ cmp(left, right);
3850 // Make sure r0 is non-zero. At this point input operands are
3851 // guaranteed to be non-zero.
3852 DCHECK(right.is(r0));
3853 STATIC_ASSERT(EQUAL == 0);
3854 STATIC_ASSERT(kSmiTag == 0);
3855 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3856 __ Ret();
3863 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
3864 DCHECK(state_ == CompareIC::UNIQUE_NAME);
3865 DCHECK(GetCondition() == eq);
3867 Label miss;
3868 // Registers containing left and right operands respectively.
3869 Register left = r1;
3870 Register right = r0;
3871 Register tmp1 = r2;
3872 Register tmp2 = r3;
3874 // Check that both operands are heap objects.
3875 __ JumpIfEitherSmi(left, right, &miss);
3877 // Check that both operands are unique names. This leaves the instance
3878 // types loaded in tmp1 and tmp2.
3879 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3880 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3881 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3882 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3884 __ JumpIfNotUniqueName(tmp1, &miss);
3885 __ JumpIfNotUniqueName(tmp2, &miss);
3887 // Unique names are compared by identity.
3888 __ cmp(left, right);
3889 // Make sure r0 is non-zero. At this point input operands are
3890 // guaranteed to be non-zero.
3891 DCHECK(right.is(r0));
3892 STATIC_ASSERT(EQUAL == 0);
3893 STATIC_ASSERT(kSmiTag == 0);
3894 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3895 __ Ret();
3902 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
3903 DCHECK(state_ == CompareIC::STRING);
3904 Label miss;
3906 bool equality = Token::IsEqualityOp(op_);
3908 // Registers containing left and right operands respectively.
3909 Register left = r1;
3910 Register right = r0;
3911 Register tmp1 = r2;
3912 Register tmp2 = r3;
3913 Register tmp3 = r4;
3914 Register tmp4 = r5;
3916 // Check that both operands are heap objects.
3917 __ JumpIfEitherSmi(left, right, &miss);
3919 // Check that both operands are strings. This leaves the instance
3920 // types loaded in tmp1 and tmp2.
3921 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3922 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3923 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3924 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3925 STATIC_ASSERT(kNotStringTag != 0);
3926 __ orr(tmp3, tmp1, tmp2);
3927 __ tst(tmp3, Operand(kIsNotStringMask));
3928 __ b(ne, &miss);
3930 // Fast check for identical strings.
3931 __ cmp(left, right);
3932 STATIC_ASSERT(EQUAL == 0);
3933 STATIC_ASSERT(kSmiTag == 0);
3934 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3935 __ Ret(eq);
3937 // Handle not identical strings.
3939 // Check that both strings are internalized strings. If they are, we're done
3940 // because we already know they are not identical. We know they are both
3941 // flat ASCII strings.
3942 if (equality) {
3943 DCHECK(GetCondition() == eq);
3944 STATIC_ASSERT(kInternalizedTag == 0);
3945 __ orr(tmp3, tmp1, Operand(tmp2));
3946 __ tst(tmp3, Operand(kIsNotInternalizedMask));
3947 // Make sure r0 is non-zero. At this point input operands are
3948 // guaranteed to be non-zero.
3949 DCHECK(right.is(r0));
3950 __ Ret(eq);
3951 }
3953 // Check that both strings are sequential ASCII.
3954 Label runtime;
3955 __ JumpIfBothInstanceTypesAreNotSequentialAscii(
3956 tmp1, tmp2, tmp3, tmp4, &runtime);
3958 // Compare flat ASCII strings. Returns when done.
3959 if (equality) {
3960 StringCompareStub::GenerateFlatAsciiStringEquals(
3961 masm, left, right, tmp1, tmp2, tmp3);
3962 } else {
3963 StringCompareStub::GenerateCompareFlatAsciiStrings(
3964 masm, left, right, tmp1, tmp2, tmp3, tmp4);
3965 }
3967 // Handle more complex cases in runtime.
3968 __ bind(&runtime);
3969 __ Push(left, right);
3970 if (equality) {
3971 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3972 } else {
3973 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3974 }
3981 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
3982 DCHECK(state_ == CompareIC::OBJECT);
3983 Label miss;
3984 __ and_(r2, r1, Operand(r0));
3985 __ JumpIfSmi(r2, &miss);
3987 __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
3988 __ b(ne, &miss);
3989 __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
3990 __ b(ne, &miss);
3992 DCHECK(GetCondition() == eq);
3993 __ sub(r0, r0, Operand(r1));
4001 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
4002 Label miss;
4003 __ and_(r2, r1, Operand(r0));
4004 __ JumpIfSmi(r2, &miss);
4005 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4006 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
4007 __ cmp(r2, Operand(known_map_));
4008 __ b(ne, &miss);
4009 __ cmp(r3, Operand(known_map_));
4010 __ b(ne, &miss);
4012 __ sub(r0, r0, Operand(r1));
4021 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
4022 {
4023 // Call the runtime system in a fresh internal frame.
4024 ExternalReference miss =
4025 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
4027 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
4028 __ Push(r1, r0);
4029 __ Push(lr, r1, r0);
4030 __ mov(ip, Operand(Smi::FromInt(op_)));
4031 __ push(ip);
4032 __ CallExternalReference(miss, 3);
4033 // Compute the entry point of the rewritten stub.
4034 __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
4035 // Restore registers.
4036 __ pop(lr);
4037 __ Pop(r1, r0);
4038 }
4040 __ Jump(r2);
4044 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4045 // Place the return address on the stack, making the call
4046 // GC safe. The RegExp backend also relies on this.
4047 __ str(lr, MemOperand(sp, 0));
4048 __ blx(ip); // Call the C++ function.
4049 __ VFPEnsureFPSCRState(r2);
4050 __ ldr(pc, MemOperand(sp, 0));
4054 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4055 Register target) {
4056 intptr_t code =
4057 reinterpret_cast<intptr_t>(GetCode().location());
4058 __ Move(ip, target);
4059 __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
4060 __ blx(lr); // Call the stub.
4064 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4065 Label* miss,
4066 Label* done,
4067 Register receiver,
4068 Register properties,
4069 Handle<Name> name,
4070 Register scratch0) {
4071 DCHECK(name->IsUniqueName());
4072 // If names of slots in range from 1 to kProbes - 1 for the hash value are
4073 // not equal to the name and kProbes-th slot is not used (its name is the
4074 // undefined value), it guarantees the hash table doesn't contain the
4075 // property. It's true even if some slots represent deleted properties
4076 // (their names are the hole value).
4077 for (int i = 0; i < kInlinedProbes; i++) {
4078 // scratch0 points to properties hash.
4079 // Compute the masked index: (hash + i + i * i) & mask.
4080 Register index = scratch0;
4081 // Capacity is smi 2^n.
4082 __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
4083 __ sub(index, index, Operand(1));
4084 __ and_(index, index, Operand(
4085 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
4087 // Scale the index by multiplying by the entry size.
4088 DCHECK(NameDictionary::kEntrySize == 3);
4089 __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
4091 Register entity_name = scratch0;
4092 // Having undefined at this place means the name is not contained.
4093 DCHECK_EQ(kSmiTagSize, 1);
4094 Register tmp = properties;
4095 __ add(tmp, properties, Operand(index, LSL, 1));
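// Note: index is a smi holding 3n here, so its raw value is already
// 2 * 3n; the extra LSL 1 above turns that into 4 * 3n, the byte offset
// of entry n with kEntrySize == 3 pointer-sized fields.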
4096 __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4098 DCHECK(!tmp.is(entity_name));
4099 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
4100 __ cmp(entity_name, tmp);
4101 __ b(eq, done);
4103 // Load the hole ready for use below:
4104 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
4106 // Stop if found the property.
4107 __ cmp(entity_name, Operand(Handle<Name>(name)));
4108 __ b(eq, miss);
4110 Label good;
4111 __ cmp(entity_name, tmp);
4112 __ b(eq, &good);
4114 // Check if the entry name is not a unique name.
4115 __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4116 __ ldrb(entity_name,
4117 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4118 __ JumpIfNotUniqueName(entity_name, miss);
4119 __ bind(&good);
4121 // Restore the properties.
4122 __ ldr(properties,
4123 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4124 }
4126 const int spill_mask =
4127 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
4128 r2.bit() | r1.bit() | r0.bit());
4130 __ stm(db_w, sp, spill_mask);
4131 __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4132 __ mov(r1, Operand(Handle<Name>(name)));
4133 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
4134 __ CallStub(&stub);
4135 __ cmp(r0, Operand::Zero());
4136 __ ldm(ia_w, sp, spill_mask);
4138 __ b(eq, done);
4139 __ b(ne, miss);
4143 // Probe the name dictionary in the |elements| register. Jump to the
4144 // |done| label if a property with the given name is found. Jump to
4145 // the |miss| label otherwise.
4146 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
4147 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4148 Label* miss,
4149 Label* done,
4150 Register elements,
4151 Register name,
4152 Register scratch1,
4153 Register scratch2) {
4154 DCHECK(!elements.is(scratch1));
4155 DCHECK(!elements.is(scratch2));
4156 DCHECK(!name.is(scratch1));
4157 DCHECK(!name.is(scratch2));
4159 __ AssertName(name);
4161 // Compute the capacity mask.
4162 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
4163 __ SmiUntag(scratch1);
4164 __ sub(scratch1, scratch1, Operand(1));
4166 // Generate an unrolled loop that performs a few probes before
4167 // giving up. Measurements done on Gmail indicate that 2 probes
4168 // cover ~93% of loads from dictionaries.
4169 for (int i = 0; i < kInlinedProbes; i++) {
4170 // Compute the masked index: (hash + i + i * i) & mask.
4171 __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4172 if (i > 0) {
4173 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4174 // the hash in a separate instruction. The value hash + i + i * i is right
4175 // shifted in the following and instruction.
4176 DCHECK(NameDictionary::GetProbeOffset(i) <
4177 1 << (32 - Name::kHashFieldOffset));
4178 __ add(scratch2, scratch2, Operand(
4179 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4180 }
4181 __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
4183 // Scale the index by multiplying by the element size.
4184 DCHECK(NameDictionary::kEntrySize == 3);
4185 // scratch2 = scratch2 * 3.
4186 __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
4188 // Check if the key is identical to the name.
4189 __ add(scratch2, elements, Operand(scratch2, LSL, 2));
4190 __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
4191 __ cmp(name, Operand(ip));
4192 __ b(eq, done);
4193 }
4195 const int spill_mask =
4196 (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
4197 r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
4198 ~(scratch1.bit() | scratch2.bit());
4200 __ stm(db_w, sp, spill_mask);
4201 if (name.is(r0)) {
4202 DCHECK(!elements.is(r1));
4203 __ Move(r1, name);
4204 __ Move(r0, elements);
4205 } else {
4206 __ Move(r0, elements);
4207 __ Move(r1, name);
4208 }
4209 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4210 __ CallStub(&stub);
4211 __ cmp(r0, Operand::Zero());
4212 __ mov(scratch2, Operand(r2));
4213 __ ldm(ia_w, sp, spill_mask);
4215 __ b(ne, done);
4216 __ b(eq, miss);
4220 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4221 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4222 // we cannot call anything that could cause a GC from this stub.
4224 // result: NameDictionary to probe
4226 // dictionary: NameDictionary to probe.
4227 // index: will hold an index of entry if lookup is successful.
4228 // might alias with result_.
4230 // result_ is zero if lookup failed, non zero otherwise.
4232 Register result = r0;
4233 Register dictionary = r0;
4234 Register key = r1;
4235 Register index = r2;
4236 Register mask = r3;
4237 Register hash = r4;
4238 Register undefined = r5;
4239 Register entry_key = r6;
4241 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4243 __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
4244 __ SmiUntag(mask);
4245 __ sub(mask, mask, Operand(1));
4247 __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4249 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4251 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4252 // Compute the masked index: (hash + i + i * i) & mask.
4253 // Capacity is smi 2^n.
4254 if (i > 0) {
4255 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4256 // the hash in a separate instruction. The value hash + i + i * i is right
4257 // shifted in the following and instruction.
4258 DCHECK(NameDictionary::GetProbeOffset(i) <
4259 1 << (32 - Name::kHashFieldOffset));
4260 __ add(index, hash, Operand(
4261 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4262 } else {
4263 __ mov(index, Operand(hash));
4264 }
4265 __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
4267 // Scale the index by multiplying by the entry size.
4268 DCHECK(NameDictionary::kEntrySize == 3);
4269 __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
4271 DCHECK_EQ(kSmiTagSize, 1);
4272 __ add(index, dictionary, Operand(index, LSL, 2));
4273 __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
4275 // Having undefined at this place means the name is not contained.
4276 __ cmp(entry_key, Operand(undefined));
4277 __ b(eq, &not_in_dictionary);
4279 // Stop if found the property.
4280 __ cmp(entry_key, Operand(key));
4281 __ b(eq, &in_dictionary);
4283 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
4284 // Check if the entry name is not a unique name.
4285 __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4286 __ ldrb(entry_key,
4287 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4288 __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
4289 }
4290 }
4292 __ bind(&maybe_in_dictionary);
4293 // If we are doing negative lookup then probing failure should be
4294 // treated as a lookup success. For positive lookup probing failure
4295 // should be treated as lookup failure.
4296 if (mode_ == POSITIVE_LOOKUP) {
4297 __ mov(result, Operand::Zero());
4298 __ Ret();
4299 }
4301 __ bind(&in_dictionary);
4302 __ mov(result, Operand(1));
4303 __ Ret();
4305 __ bind(&not_in_dictionary);
4306 __ mov(result, Operand::Zero());
4307 __ Ret();
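// The file's comments give the probe formula; a C sketch of the whole
// lookup (illustrative only):
//   for (int i = 0; i < kTotalProbes; i++) {
//     uint32_t entry = (hash + probe_offset(i)) & mask;  // quadratic probing
//     Object* slot_key = elements[entry * kEntrySize];
//     if (slot_key == undefined_value) return kNotFound;
//     if (slot_key == key) return entry;
//   }
//   // Probes exhausted: treated as found for NEGATIVE_LOOKUP and as a
//   // miss for POSITIVE_LOOKUP (see maybe_in_dictionary above).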
4311 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4312 Isolate* isolate) {
4313 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4314 stub1.GetCode();
4315 // Hydrogen code stubs need stub2 at snapshot time.
4316 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4317 stub2.GetCode();
4318 }
4321 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
4322 // the value has just been written into the object, now this stub makes sure
4323 // we keep the GC informed. The word in the object where the value has been
4324 // written is in the address register.
4325 void RecordWriteStub::Generate(MacroAssembler* masm) {
4326 Label skip_to_incremental_noncompacting;
4327 Label skip_to_incremental_compacting;
4329 // The first two instructions are generated with labels so as to get the
4330 // offset fixed up correctly by the bind(Label*) call. We patch it back and
4331 // forth between a compare instructions (a nop in this position) and the
4332 // real branch when we start and stop incremental heap marking.
4333 // See RecordWriteStub::Patch for details.
4334 {
4335 // Block literal pool emission, as the position of these two instructions
4336 // is assumed by the patching code.
4337 Assembler::BlockConstPoolScope block_const_pool(masm);
4338 __ b(&skip_to_incremental_noncompacting);
4339 __ b(&skip_to_incremental_compacting);
4340 }
4342 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4343 __ RememberedSetHelper(object_,
4344 address_,
4345 value_,
4346 save_fp_regs_mode_,
4347 MacroAssembler::kReturnAtEnd);
4348 }
4349 __ Ret();
4351 __ bind(&skip_to_incremental_noncompacting);
4352 GenerateIncremental(masm, INCREMENTAL);
4354 __ bind(&skip_to_incremental_compacting);
4355 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4357 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4358 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4359 DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
4360 DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
4361 PatchBranchIntoNop(masm, 0);
4362 PatchBranchIntoNop(masm, Assembler::kInstrSize);
4366 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4367 regs_.Save(masm);
4369 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4370 Label dont_need_remembered_set;
4372 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
4373 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4374 regs_.scratch0(), // Scratch.
4375 &dont_need_remembered_set);
4377 __ CheckPageFlag(regs_.object(),
4378 regs_.scratch0(), // Scratch.
4379 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4380 ne,
4381 &dont_need_remembered_set);
4383 // First notify the incremental marker if necessary, then update the
4384 // remembered set.
4385 CheckNeedsToInformIncrementalMarker(
4386 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4387 InformIncrementalMarker(masm);
4388 regs_.Restore(masm);
4389 __ RememberedSetHelper(object_,
4390 address_,
4391 value_,
4392 save_fp_regs_mode_,
4393 MacroAssembler::kReturnAtEnd);
4395 __ bind(&dont_need_remembered_set);
4396 }
4398 CheckNeedsToInformIncrementalMarker(
4399 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4400 InformIncrementalMarker(masm);
4401 regs_.Restore(masm);
4402 __ Ret();
4403 }
4406 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4407 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
4408 int argument_count = 3;
4409 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4410 Register address =
4411 r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4412 DCHECK(!address.is(regs_.object()));
4413 DCHECK(!address.is(r0));
4414 __ Move(address, regs_.address());
4415 __ Move(r0, regs_.object());
4416 __ Move(r1, address);
4417 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
4419 AllowExternalCallThatCantCauseGC scope(masm);
4420 __ CallCFunction(
4421 ExternalReference::incremental_marking_record_write_function(isolate()),
4422 argument_count);
4423 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
4427 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4428 MacroAssembler* masm,
4429 OnNoNeedToInformIncrementalMarker on_no_need,
4430 Mode mode) {
4431 Label on_black;
4432 Label need_incremental;
4433 Label need_incremental_pop_scratch;
4435 __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4436 __ ldr(regs_.scratch1(),
4437 MemOperand(regs_.scratch0(),
4438 MemoryChunk::kWriteBarrierCounterOffset));
4439 __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
4440 __ str(regs_.scratch1(),
4441 MemOperand(regs_.scratch0(),
4442 MemoryChunk::kWriteBarrierCounterOffset));
4443 __ b(mi, &need_incremental);
4445 // Let's look at the color of the object: If it is not black we don't have
4446 // to inform the incremental marker.
4447 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4449 regs_.Restore(masm);
4450 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4451 __ RememberedSetHelper(object_,
4452 address_,
4453 value_,
4454 save_fp_regs_mode_,
4455 MacroAssembler::kReturnAtEnd);
4456 } else {
4457 __ Ret();
4458 }
4460 __ bind(&on_black);
4462 // Get the value from the slot.
4463 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
4465 if (mode == INCREMENTAL_COMPACTION) {
4466 Label ensure_not_white;
4468 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4469 regs_.scratch1(), // Scratch.
4470 MemoryChunk::kEvacuationCandidateMask,
4471 eq,
4472 &ensure_not_white);
4474 __ CheckPageFlag(regs_.object(),
4475 regs_.scratch1(), // Scratch.
4476 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4477 eq,
4478 &need_incremental);
4480 __ bind(&ensure_not_white);
4481 }
4483 // We need extra registers for this, so we push the object and the address
4484 // register temporarily.
4485 __ Push(regs_.object(), regs_.address());
4486 __ EnsureNotWhite(regs_.scratch0(), // The value.
4487 regs_.scratch1(), // Scratch.
4488 regs_.object(), // Scratch.
4489 regs_.address(), // Scratch.
4490 &need_incremental_pop_scratch);
4491 __ Pop(regs_.object(), regs_.address());
4493 regs_.Restore(masm);
4494 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4495 __ RememberedSetHelper(object_,
4496 address_,
4497 value_,
4498 save_fp_regs_mode_,
4499 MacroAssembler::kReturnAtEnd);
4500 } else {
4501 __ Ret();
4502 }
4504 __ bind(&need_incremental_pop_scratch);
4505 __ Pop(regs_.object(), regs_.address());
4507 __ bind(&need_incremental);
4509 // Fall through when we need to inform the incremental marker.
4513 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4514 // ----------- S t a t e -------------
4515 // -- r0 : element value to store
4516 // -- r3 : element index as smi
4517 // -- sp[0] : array literal index in function as smi
4518 // -- sp[4] : array literal
4519 // clobbers r1, r2, r4
4520 // -----------------------------------
4523 Label double_elements;
4524 Label smi_element;
4525 Label slow_elements;
4526 Label fast_elements;
4528 // Get array literal index, array literal and its map.
4529 __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
4530 __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
4531 __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
4533 __ CheckFastElements(r2, r5, &double_elements);
4534 // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
4535 __ JumpIfSmi(r0, &smi_element);
4536 __ CheckFastSmiElements(r2, r5, &fast_elements);
4538 // Store into the array literal requires an elements transition. Call into
4539 // the runtime.
4540 __ bind(&slow_elements);
4542 __ Push(r1, r3, r0);
4543 __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4544 __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
4545 __ Push(r5, r4);
4546 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4548 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4549 __ bind(&fast_elements);
4550 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4551 __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
4552 __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4553 __ str(r0, MemOperand(r6, 0));
4554 // Update the write barrier for the array store.
4555 __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
4556 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4557 __ Ret();
4559 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4560 // and value is Smi.
4561 __ bind(&smi_element);
4562 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4563 __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
4564 __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
4565 __ Ret();
4567 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
4568 __ bind(&double_elements);
4569 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4570 __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
4571 __ Ret();
4575 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4576 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4577 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4578 int parameter_count_offset =
4579 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4580 __ ldr(r1, MemOperand(fp, parameter_count_offset));
4581 if (function_mode_ == JS_FUNCTION_STUB_MODE) {
4582 __ add(r1, r1, Operand(1));
4583 }
4584 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4585 __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
4586 __ add(sp, sp, r1);
4587 __ Ret();
4591 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4592 if (masm->isolate()->function_entry_hook() != NULL) {
4593 ProfileEntryHookStub stub(masm->isolate());
4594 int code_size = masm->CallStubSize(&stub) + 2 * Assembler::kInstrSize;
4595 PredictableCodeSizeScope predictable(masm, code_size);
4596 __ push(lr);
4597 __ CallStub(&stub);
4598 __ pop(lr);
4603 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4604 // The entry hook is a "push lr" instruction, followed by a call.
4605 const int32_t kReturnAddressDistanceFromFunctionStart =
4606 3 * Assembler::kInstrSize;
4608 // This should contain all kCallerSaved registers.
4609 const RegList kSavedRegs =
4610 1 << 0 | // r0
4611 1 << 1 | // r1
4612 1 << 2 | // r2
4613 1 << 3 | // r3
4614 1 << 5 | // r5
4615 1 << 9; // r9
4616 // We also save lr, so the count here is one higher than the mask indicates.
4617 const int32_t kNumSavedRegs = 7;
4619 DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);
4621 // Save all caller-save registers as this may be called from anywhere.
4622 __ stm(db_w, sp, kSavedRegs | lr.bit());
4624 // Compute the function's address for the first argument.
4625 __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
4627 // The caller's return address is above the saved temporaries.
4628 // Grab that for the second argument to the hook.
4629 __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
4631 // Align the stack if necessary.
4632 int frame_alignment = masm->ActivationFrameAlignment();
4633 if (frame_alignment > kPointerSize) {
4634 __ mov(r4, sp);
4635 DCHECK(IsPowerOf2(frame_alignment));
4636 __ and_(sp, sp, Operand(-frame_alignment));
4637 }
4639 #if V8_HOST_ARCH_ARM
4640 int32_t entry_hook =
4641 reinterpret_cast<int32_t>(isolate()->function_entry_hook());
4642 __ mov(ip, Operand(entry_hook));
4643 #else
4644 // Under the simulator we need to indirect the entry hook through a
4645 // trampoline function at a known address.
4646 // It additionally takes an isolate as a third parameter
4647 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
4649 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4650 __ mov(ip, Operand(ExternalReference(&dispatcher,
4651 ExternalReference::BUILTIN_CALL,
4652 isolate())));
4653 #endif
4654 __ Call(ip);
4656 // Restore the stack pointer if needed.
4657 if (frame_alignment > kPointerSize) {
4658 __ mov(sp, r4);
4659 }
4661 // Also pop pc to get Ret(0).
4662 __ ldm(ia_w, sp, kSavedRegs | pc.bit());
4666 template<class T>
4667 static void CreateArrayDispatch(MacroAssembler* masm,
4668 AllocationSiteOverrideMode mode) {
4669 if (mode == DISABLE_ALLOCATION_SITES) {
4670 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4671 __ TailCallStub(&stub);
4672 } else if (mode == DONT_OVERRIDE) {
4673 int last_index = GetSequenceIndexFromFastElementsKind(
4674 TERMINAL_FAST_ELEMENTS_KIND);
4675 for (int i = 0; i <= last_index; ++i) {
4676 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4677 __ cmp(r3, Operand(kind));
4678 T stub(masm->isolate(), kind);
4679 __ TailCallStub(&stub, eq);
4680 }
4682 // If we reached this point there is a problem.
4683 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4684 } else {
4685 UNREACHABLE();
4686 }
4687 }
4690 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4691 AllocationSiteOverrideMode mode) {
4692 // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4693 // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4694 // r0 - number of arguments
4695 // r1 - constructor?
4696 // sp[0] - last argument
4697 Label normal_sequence;
4698 if (mode == DONT_OVERRIDE) {
4699 DCHECK(FAST_SMI_ELEMENTS == 0);
4700 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
4701 DCHECK(FAST_ELEMENTS == 2);
4702 DCHECK(FAST_HOLEY_ELEMENTS == 3);
4703 DCHECK(FAST_DOUBLE_ELEMENTS == 4);
4704 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
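// As the assertions above show, packed element kinds are even and their
// holey variants odd (packed kind + 1); the low-bit test below relies
// on exactly that encoding.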
4706 // is the low bit set? If so, we are holey and that is good.
4707 __ tst(r3, Operand(1));
4708 __ b(ne, &normal_sequence);
4709 }
4711 // look at the first argument
4712 __ ldr(r5, MemOperand(sp, 0));
4713 __ cmp(r5, Operand::Zero());
4714 __ b(eq, &normal_sequence);
4716 if (mode == DISABLE_ALLOCATION_SITES) {
4717 ElementsKind initial = GetInitialFastElementsKind();
4718 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4720 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4721 holey_initial,
4722 DISABLE_ALLOCATION_SITES);
4723 __ TailCallStub(&stub_holey);
4725 __ bind(&normal_sequence);
4726 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4727 initial,
4728 DISABLE_ALLOCATION_SITES);
4729 __ TailCallStub(&stub);
4730 } else if (mode == DONT_OVERRIDE) {
4731 // We are going to create a holey array, but our kind is non-holey.
4732 // Fix kind and retry (only if we have an allocation site in the slot).
4733 __ add(r3, r3, Operand(1));
4735 if (FLAG_debug_code) {
4736 __ ldr(r5, FieldMemOperand(r2, 0));
4737 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
4738 __ Assert(eq, kExpectedAllocationSite);
4739 }
4741 // Save the resulting elements kind in type info. We can't just store r3
4742 // in the AllocationSite::transition_info field because elements kind is
4743 // restricted to a portion of the field...upper bits need to be left alone.
4744 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4745 __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
4746 __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
4747 __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
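// transition_info stays a tagged smi throughout: adding the smi-encoded
// delta bumps only the elements-kind bits at the bottom of the payload
// and leaves the upper bits of the field alone, as required above.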
4749 __ bind(&normal_sequence);
4750 int last_index = GetSequenceIndexFromFastElementsKind(
4751 TERMINAL_FAST_ELEMENTS_KIND);
4752 for (int i = 0; i <= last_index; ++i) {
4753 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4754 __ cmp(r3, Operand(kind));
4755 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4756 __ TailCallStub(&stub, eq);
4757 }
4759 // If we reached this point there is a problem.
4760 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4761 } else {
4762 UNREACHABLE();
4763 }
4764 }
4767 template<class T>
4768 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4769 int to_index = GetSequenceIndexFromFastElementsKind(
4770 TERMINAL_FAST_ELEMENTS_KIND);
4771 for (int i = 0; i <= to_index; ++i) {
4772 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4773 T stub(isolate, kind);
4774 stub.GetCode();
4775 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4776 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4777 stub1.GetCode();
4783 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4784 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4785 isolate);
4786 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4787 isolate);
4788 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4789 isolate);
4793 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4794 Isolate* isolate) {
4795 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4796 for (int i = 0; i < 2; i++) {
4797 // For internal arrays we only need a few things
4798 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4799 stubh1.GetCode();
4800 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4801 stubh2.GetCode();
4802 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4803 stubh3.GetCode();
4808 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4809 MacroAssembler* masm,
4810 AllocationSiteOverrideMode mode) {
4811 if (argument_count_ == ANY) {
4812 Label not_zero_case, not_one_case;
4813 __ tst(r0, r0);
4814 __ b(ne, &not_zero_case);
4815 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4817 __ bind(&not_zero_case);
4818 __ cmp(r0, Operand(1));
4819 __ b(gt, &not_one_case);
4820 CreateArrayDispatchOneArgument(masm, mode);
4822 __ bind(&not_one_case);
4823 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4824 } else if (argument_count_ == NONE) {
4825 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4826 } else if (argument_count_ == ONE) {
4827 CreateArrayDispatchOneArgument(masm, mode);
4828 } else if (argument_count_ == MORE_THAN_ONE) {
4829 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4836 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4837 // ----------- S t a t e -------------
4838 // -- r0 : argc (only if argument_count_ == ANY)
4839 // -- r1 : constructor
4840 // -- r2 : AllocationSite or undefined
4841 // -- sp[0] : return address
4842 // -- sp[4] : last argument
4843 // -----------------------------------
4845 if (FLAG_debug_code) {
4846 // The array construct code is only set for the global and natives
4847 // builtin Array functions which always have maps.
4849 // Initial map for the builtin Array function should be a map.
4850 __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4851 // Will both indicate a NULL and a Smi.
4852 __ tst(r4, Operand(kSmiTagMask));
4853 __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
4854 __ CompareObjectType(r4, r4, r5, MAP_TYPE);
4855 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4857 // We should either have undefined in r2 or a valid AllocationSite
4858 __ AssertUndefinedOrAllocationSite(r2, r4);
4859 }
4861 Label no_info;
4862 // Get the elements kind and case on that.
4863 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
4864 __ b(eq, &no_info);
4866 __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
4867 __ SmiUntag(r3);
4868 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4869 __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
4870 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4872 __ bind(&no_info);
4873 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4877 void InternalArrayConstructorStub::GenerateCase(
4878 MacroAssembler* masm, ElementsKind kind) {
4879 __ cmp(r0, Operand(1));
4881 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4882 __ TailCallStub(&stub0, lo);
4884 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4885 __ TailCallStub(&stubN, hi);
4887 if (IsFastPackedElementsKind(kind)) {
4888 // We might need to create a holey array
4889 // look at the first argument
4890 __ ldr(r3, MemOperand(sp, 0));
4891 __ cmp(r3, Operand::Zero());
4893 InternalArraySingleArgumentConstructorStub
4894 stub1_holey(isolate(), GetHoleyElementsKind(kind));
4895 __ TailCallStub(&stub1_holey, ne);
4896 }
4898 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4899 __ TailCallStub(&stub1);
4903 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4904 // ----------- S t a t e -------------
4905 // -- r0 : argc
4906 // -- r1 : constructor
4907 // -- sp[0] : return address
4908 // -- sp[4] : last argument
4909 // -----------------------------------
4911 if (FLAG_debug_code) {
4912 // The array construct code is only set for the global and natives
4913 // builtin Array functions which always have maps.
4915 // Initial map for the builtin Array function should be a map.
4916 __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4917 // Will both indicate a NULL and a Smi.
4918 __ tst(r3, Operand(kSmiTagMask));
4919 __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
4920 __ CompareObjectType(r3, r3, r4, MAP_TYPE);
4921 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4923 }
4924 // Figure out the right elements kind
4925 __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4926 // Load the map's "bit field 2" into |result|. We only need the first byte,
4927 // but the following bit field extraction takes care of that anyway.
4928 __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
4929 // Retrieve elements_kind from bit field 2.
4930 __ DecodeField<Map::ElementsKindBits>(r3);
4932 if (FLAG_debug_code) {
4933 Label done;
4934 __ cmp(r3, Operand(FAST_ELEMENTS));
4935 __ b(eq, &done);
4936 __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
4937 __ Assert(eq,
4938 kInvalidElementsKindForInternalArrayOrInternalPackedArray);
4939 __ bind(&done);
4940 }
4942 Label fast_elements_case;
4943 __ cmp(r3, Operand(FAST_ELEMENTS));
4944 __ b(eq, &fast_elements_case);
4945 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4947 __ bind(&fast_elements_case);
4948 GenerateCase(masm, FAST_ELEMENTS);
4952 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
4953 // ----------- S t a t e -------------
4954 // -- r0 : callee
4955 // -- r4 : call_data
4956 // -- r2 : holder
4957 // -- r1 : api_function_address
4958 // -- cp : context
4959 // --
4960 // -- sp[0] : last argument
4961 // -- ...
4962 // -- sp[(argc - 1)* 4] : first argument
4963 // -- sp[argc * 4] : receiver
4964 // -----------------------------------
4966 Register callee = r0;
4967 Register call_data = r4;
4968 Register holder = r2;
4969 Register api_function_address = r1;
4970 Register context = cp;
4972 int argc = ArgumentBits::decode(bit_field_);
4973 bool is_store = IsStoreBits::decode(bit_field_);
4974 bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
4976 typedef FunctionCallbackArguments FCA;
4978 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
4979 STATIC_ASSERT(FCA::kCalleeIndex == 5);
4980 STATIC_ASSERT(FCA::kDataIndex == 4);
4981 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
4982 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
4983 STATIC_ASSERT(FCA::kIsolateIndex == 1);
4984 STATIC_ASSERT(FCA::kHolderIndex == 0);
4985 STATIC_ASSERT(FCA::kArgsLength == 7);
4987 // context save
4988 __ push(context);
4989 // load context from callee
4990 __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
4992 // callee
4993 __ push(callee);
4995 // call data
4996 __ push(call_data);
4998 Register scratch = call_data;
4999 if (!call_data_undefined) {
5000 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5001 }
5002 // return value
5003 __ push(scratch);
5004 // return value default
5005 __ push(scratch);
5006 // isolate
5007 __ mov(scratch,
5008 Operand(ExternalReference::isolate_address(isolate())));
5009 __ push(scratch);
5010 // holder
5011 __ push(holder);
5013 // Prepare arguments.
5014 __ mov(scratch, sp);
5016 // Allocate the v8::Arguments structure in the arguments' space since
5017 // it's not controlled by GC.
5018 const int kApiStackSpace = 4;
5020 FrameScope frame_scope(masm, StackFrame::MANUAL);
5021 __ EnterExitFrame(false, kApiStackSpace);
5023 DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
5024 // r0 = FunctionCallbackInfo&
5025 // Arguments is after the return address.
5026 __ add(r0, sp, Operand(1 * kPointerSize));
5027 // FunctionCallbackInfo::implicit_args_
5028 __ str(scratch, MemOperand(r0, 0 * kPointerSize));
5029 // FunctionCallbackInfo::values_
5030 __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
5031 __ str(ip, MemOperand(r0, 1 * kPointerSize));
5032 // FunctionCallbackInfo::length_ = argc
5033 __ mov(ip, Operand(argc));
5034 __ str(ip, MemOperand(r0, 2 * kPointerSize));
5035 // FunctionCallbackInfo::is_construct_call = 0
5036 __ mov(ip, Operand::Zero());
5037 __ str(ip, MemOperand(r0, 3 * kPointerSize));
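// The four stores above lay out the FunctionCallbackInfo fields in
// order: [0] implicit_args_, [1] values_, [2] length_, and
// [3] is_construct_call_, which this stub always leaves as zero.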
5039 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
5040 ExternalReference thunk_ref =
5041 ExternalReference::invoke_function_callback(isolate());
5043 AllowExternalCallThatCantCauseGC scope(masm);
5044 MemOperand context_restore_operand(
5045 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5046 // Stores return the first js argument
5047 int return_value_offset = 0;
5048 if (is_store) {
5049 return_value_offset = 2 + FCA::kArgsLength;
5050 } else {
5051 return_value_offset = 2 + FCA::kReturnValueOffset;
5052 }
5053 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5055 __ CallApiFunctionAndReturn(api_function_address,
5056 thunk_ref,
5057 kStackUnwindSpace,
5058 return_value_operand,
5059 &context_restore_operand);
5063 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5064 // ----------- S t a t e -------------
5065 // -- sp[0] : name
5066 // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
5067 // -- ...
5068 // -- r2 : api_function_address
5069 // -----------------------------------
5071 Register api_function_address = r2;
5073 __ mov(r0, sp); // r0 = Handle<Name>
5074 __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
5076 const int kApiStackSpace = 1;
5077 FrameScope frame_scope(masm, StackFrame::MANUAL);
5078 __ EnterExitFrame(false, kApiStackSpace);
5080 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5081 // r1 (internal::Object** args_) as the data.
5082 __ str(r1, MemOperand(sp, 1 * kPointerSize));
5083 __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
5085 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5087 ExternalReference thunk_ref =
5088 ExternalReference::invoke_accessor_getter_callback(isolate());
5089 __ CallApiFunctionAndReturn(api_function_address,
5090 thunk_ref,
5091 kStackUnwindSpace,
5092 MemOperand(fp, 6 * kPointerSize),
5093 NULL);
5099 } } // namespace v8::internal
5101 #endif // V8_TARGET_ARCH_ARM