1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "src/base/bits.h"
10 #include "src/bootstrapper.h"
11 #include "src/code-stubs.h"
12 #include "src/codegen.h"
13 #include "src/ic/handler-compiler.h"
14 #include "src/ic/ic.h"
15 #include "src/isolate.h"
16 #include "src/jsregexp.h"
17 #include "src/regexp-macro-assembler.h"
18 #include "src/runtime/runtime.h"
24 static void InitializeArrayConstructorDescriptor(
25 Isolate* isolate, CodeStubDescriptor* descriptor,
26 int constant_stack_parameter_count) {
27 Address deopt_handler = Runtime::FunctionForId(
28 Runtime::kArrayConstructor)->entry;
30 if (constant_stack_parameter_count == 0) {
31 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
32 JS_FUNCTION_STUB_MODE);
34 descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
35 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
40 static void InitializeInternalArrayConstructorDescriptor(
41 Isolate* isolate, CodeStubDescriptor* descriptor,
42 int constant_stack_parameter_count) {
43 Address deopt_handler = Runtime::FunctionForId(
44 Runtime::kInternalArrayConstructor)->entry;
46 if (constant_stack_parameter_count == 0) {
47 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
48 JS_FUNCTION_STUB_MODE);
50 descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
51 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
56 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
57 CodeStubDescriptor* descriptor) {
58 InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
62 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
63 CodeStubDescriptor* descriptor) {
64 InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
68 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
69 CodeStubDescriptor* descriptor) {
70 InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
74 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
75 CodeStubDescriptor* descriptor) {
76 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
80 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
81 CodeStubDescriptor* descriptor) {
82 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
86 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
87 CodeStubDescriptor* descriptor) {
88 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
92 #define __ ACCESS_MASM(masm)
95 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
98 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
104 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
109 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
110 ExternalReference miss) {
111 // Update the static counter each time a new code stub is generated.
112 isolate()->counters()->code_stubs()->Increment();
114 CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
115 int param_count = descriptor.GetEnvironmentParameterCount();
117 // Call the runtime system in a fresh internal frame.
118 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
119 DCHECK(param_count == 0 ||
120 r0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
122 for (int i = 0; i < param_count; ++i) {
123 __ push(descriptor.GetEnvironmentParameterRegister(i));
125 __ CallExternalReference(miss, param_count);
132 void DoubleToIStub::Generate(MacroAssembler* masm) {
133 Label out_of_range, only_low, negate, done;
134 Register input_reg = source();
135 Register result_reg = destination();
136 DCHECK(is_truncating());
138 int double_offset = offset();
139 // Account for saved regs if input is sp.
140 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
142 Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
143 Register scratch_low =
144 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
145 Register scratch_high =
146 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
147 LowDwVfpRegister double_scratch = kScratchDoubleReg;
149 __ Push(scratch_high, scratch_low, scratch);
151 if (!skip_fastpath()) {
152 // Load double input.
153 __ vldr(double_scratch, MemOperand(input_reg, double_offset));
154 __ vmov(scratch_low, scratch_high, double_scratch);
156 // Do fast-path convert from double to int.
157 __ vcvt_s32_f64(double_scratch.low(), double_scratch);
158 __ vmov(result_reg, double_scratch.low());
160 // If result is not saturated (0x7fffffff or 0x80000000), we are done.
161 __ sub(scratch, result_reg, Operand(1));
162 __ cmp(scratch, Operand(0x7ffffffe));
165 // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
166 // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
167 if (double_offset == 0) {
168 __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
170 __ ldr(scratch_low, MemOperand(input_reg, double_offset));
171 __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
175 __ Ubfx(scratch, scratch_high,
176 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
177 // Load scratch with exponent - 1. This is faster than loading
178 // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
179 STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
180 __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
181 // If exponent is greater than or equal to 84, the 32 less significant
182 // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
184 // Compare exponent with 84 (compare exponent - 1 with 83).
185 __ cmp(scratch, Operand(83));
186 __ b(ge, &out_of_range);
188 // If we reach this code, 31 <= exponent <= 83.
189 // So, we don't have to handle cases where 0 <= exponent <= 20 for
190 // which we would need to shift right the high part of the mantissa.
191 // Scratch contains exponent - 1.
192 // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
193 __ rsb(scratch, scratch, Operand(51), SetCC);
195 // 21 <= exponent <= 51, shift scratch_low and scratch_high
196 // to generate the result.
197 __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
198 // Scratch contains: 52 - exponent.
199 // We needs: exponent - 20.
200 // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
201 __ rsb(scratch, scratch, Operand(32));
202 __ Ubfx(result_reg, scratch_high,
203 0, HeapNumber::kMantissaBitsInTopWord);
204 // Set the implicit 1 before the mantissa part in scratch_high.
205 __ orr(result_reg, result_reg,
206 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
207 __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
210 __ bind(&out_of_range);
211 __ mov(result_reg, Operand::Zero());
215 // 52 <= exponent <= 83, shift only scratch_low.
216 // On entry, scratch contains: 52 - exponent.
217 __ rsb(scratch, scratch, Operand::Zero());
218 __ mov(result_reg, Operand(scratch_low, LSL, scratch));
221 // If input was positive, scratch_high ASR 31 equals 0 and
222 // scratch_high LSR 31 equals zero.
223 // New result = (result eor 0) + 0 = result.
224 // If the input was negative, we have to negate the result.
225 // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
226 // New result = (result eor 0xffffffff) + 1 = 0 - result.
227 __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
228 __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
232 __ Pop(scratch_high, scratch_low, scratch);
237 // Handle the case where the lhs and rhs are the same object.
238 // Equality is almost reflexive (everything but NaN), so this is a test
239 // for "identity and not NaN".
240 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
244 Label heap_number, return_equal;
246 __ b(ne, ¬_identical);
248 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
249 // so we do the second best thing - test it ourselves.
250 // They are both equal and they are not both Smis so both of them are not
251 // Smis. If it's not a heap number, then return equal.
252 if (cond == lt || cond == gt) {
253 __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
256 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
257 __ b(eq, &heap_number);
258 // Comparing JS objects with <=, >= is complicated.
260 __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
262 // Normally here we fall through to return_equal, but undefined is
263 // special: (undefined == undefined) == true, but
264 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
265 if (cond == le || cond == ge) {
266 __ cmp(r4, Operand(ODDBALL_TYPE));
267 __ b(ne, &return_equal);
268 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
270 __ b(ne, &return_equal);
272 // undefined <= undefined should fail.
273 __ mov(r0, Operand(GREATER));
275 // undefined >= undefined should fail.
276 __ mov(r0, Operand(LESS));
283 __ bind(&return_equal);
285 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
286 } else if (cond == gt) {
287 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
289 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
293 // For less and greater we don't have to check for NaN since the result of
294 // x < x is false regardless. For the others here is some code to check
296 if (cond != lt && cond != gt) {
297 __ bind(&heap_number);
298 // It is a heap number, so return non-equal if it's NaN and equal if it's
301 // The representation of NaN values has all exponent bits (52..62) set,
302 // and not all mantissa bits (0..51) clear.
303 // Read top bits of double representation (second word of value).
304 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
305 // Test that exponent bits are all set.
306 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
307 // NaNs have all-one exponents so they sign extend to -1.
308 __ cmp(r3, Operand(-1));
309 __ b(ne, &return_equal);
311 // Shift out flag and all exponent bits, retaining only mantissa.
312 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
313 // Or with all low-bits of mantissa.
314 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
315 __ orr(r0, r3, Operand(r2), SetCC);
316 // For equal we already have the right value in r0: Return zero (equal)
317 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
318 // not (it's a NaN). For <= and >= we need to load r0 with the failing
319 // value if it's a NaN.
321 // All-zero means Infinity means equal.
324 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
326 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
331 // No fall through here.
333 __ bind(¬_identical);
337 // See comment at call site.
338 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
344 DCHECK((lhs.is(r0) && rhs.is(r1)) ||
345 (lhs.is(r1) && rhs.is(r0)));
348 __ JumpIfSmi(rhs, &rhs_is_smi);
350 // Lhs is a Smi. Check whether the rhs is a heap number.
351 __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
353 // If rhs is not a number and lhs is a Smi then strict equality cannot
354 // succeed. Return non-equal
355 // If rhs is r0 then there is already a non zero value in it.
357 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
361 // Smi compared non-strictly with a non-Smi non-heap-number. Call
366 // Lhs is a smi, rhs is a number.
367 // Convert lhs to a double in d7.
368 __ SmiToDouble(d7, lhs);
369 // Load the double from rhs, tagged HeapNumber r0, to d6.
370 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
372 // We now have both loaded as doubles but we can skip the lhs nan check
376 __ bind(&rhs_is_smi);
377 // Rhs is a smi. Check whether the non-smi lhs is a heap number.
378 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
380 // If lhs is not a number and rhs is a smi then strict equality cannot
381 // succeed. Return non-equal.
382 // If lhs is r0 then there is already a non zero value in it.
384 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
388 // Smi compared non-strictly with a non-smi non-heap-number. Call
393 // Rhs is a smi, lhs is a heap number.
394 // Load the double from lhs, tagged HeapNumber r1, to d7.
395 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
396 // Convert rhs to a double in d6 .
397 __ SmiToDouble(d6, rhs);
398 // Fall through to both_loaded_as_doubles.
402 // See comment at call site.
403 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
406 DCHECK((lhs.is(r0) && rhs.is(r1)) ||
407 (lhs.is(r1) && rhs.is(r0)));
409 // If either operand is a JS object or an oddball value, then they are
410 // not equal since their pointers are different.
411 // There is no test for undetectability in strict equality.
412 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
413 Label first_non_object;
414 // Get the type of the first operand into r2 and compare it with
415 // FIRST_SPEC_OBJECT_TYPE.
416 __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
417 __ b(lt, &first_non_object);
419 // Return non-zero (r0 is not zero)
420 Label return_not_equal;
421 __ bind(&return_not_equal);
424 __ bind(&first_non_object);
425 // Check for oddballs: true, false, null, undefined.
426 __ cmp(r2, Operand(ODDBALL_TYPE));
427 __ b(eq, &return_not_equal);
429 __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
430 __ b(ge, &return_not_equal);
432 // Check for oddballs: true, false, null, undefined.
433 __ cmp(r3, Operand(ODDBALL_TYPE));
434 __ b(eq, &return_not_equal);
436 // Now that we have the types we might as well check for
437 // internalized-internalized.
438 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
439 __ orr(r2, r2, Operand(r3));
440 __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
441 __ b(eq, &return_not_equal);
445 // See comment at call site.
446 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
449 Label* both_loaded_as_doubles,
450 Label* not_heap_numbers,
452 DCHECK((lhs.is(r0) && rhs.is(r1)) ||
453 (lhs.is(r1) && rhs.is(r0)));
455 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
456 __ b(ne, not_heap_numbers);
457 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
459 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
461 // Both are heap numbers. Load them up then jump to the code we have
463 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
464 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
465 __ jmp(both_loaded_as_doubles);
469 // Fast negative check for internalized-to-internalized equality.
470 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
473 Label* possible_strings,
474 Label* not_both_strings) {
475 DCHECK((lhs.is(r0) && rhs.is(r1)) ||
476 (lhs.is(r1) && rhs.is(r0)));
478 // r2 is object type of rhs.
480 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
481 __ tst(r2, Operand(kIsNotStringMask));
482 __ b(ne, &object_test);
483 __ tst(r2, Operand(kIsNotInternalizedMask));
484 __ b(ne, possible_strings);
485 __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
486 __ b(ge, not_both_strings);
487 __ tst(r3, Operand(kIsNotInternalizedMask));
488 __ b(ne, possible_strings);
490 // Both are internalized. We already checked they weren't the same pointer
491 // so they are not equal.
492 __ mov(r0, Operand(NOT_EQUAL));
495 __ bind(&object_test);
496 __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
497 __ b(lt, not_both_strings);
498 __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
499 __ b(lt, not_both_strings);
500 // If both objects are undetectable, they are equal. Otherwise, they
501 // are not equal, since they are different objects and an object is not
502 // equal to undefined.
503 __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
504 __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
505 __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
506 __ and_(r0, r2, Operand(r3));
507 __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
508 __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
513 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
515 CompareICState::State expected,
518 if (expected == CompareICState::SMI) {
519 __ JumpIfNotSmi(input, fail);
520 } else if (expected == CompareICState::NUMBER) {
521 __ JumpIfSmi(input, &ok);
522 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
525 // We could be strict about internalized/non-internalized here, but as long as
526 // hydrogen doesn't care, the stub doesn't have to care either.
531 // On entry r1 and r2 are the values to be compared.
532 // On exit r0 is 0, positive or negative to indicate the result of
534 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
537 Condition cc = GetCondition();
540 CompareICStub_CheckInputType(masm, lhs, r2, left(), &miss);
541 CompareICStub_CheckInputType(masm, rhs, r3, right(), &miss);
543 Label slow; // Call builtin.
544 Label not_smis, both_loaded_as_doubles, lhs_not_nan;
546 Label not_two_smis, smi_done;
548 __ JumpIfNotSmi(r2, ¬_two_smis);
549 __ mov(r1, Operand(r1, ASR, 1));
550 __ sub(r0, r1, Operand(r0, ASR, 1));
552 __ bind(¬_two_smis);
554 // NOTICE! This code is only reached after a smi-fast-case check, so
555 // it is certain that at least one operand isn't a smi.
557 // Handle the case where the objects are identical. Either returns the answer
558 // or goes to slow. Only falls through if the objects were not identical.
559 EmitIdenticalObjectComparison(masm, &slow, cc);
561 // If either is a Smi (we know that not both are), then they can only
562 // be strictly equal if the other is a HeapNumber.
563 STATIC_ASSERT(kSmiTag == 0);
564 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
565 __ and_(r2, lhs, Operand(rhs));
566 __ JumpIfNotSmi(r2, ¬_smis);
567 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
568 // 1) Return the answer.
570 // 3) Fall through to both_loaded_as_doubles.
571 // 4) Jump to lhs_not_nan.
572 // In cases 3 and 4 we have found out we were dealing with a number-number
573 // comparison. If VFP3 is supported the double values of the numbers have
574 // been loaded into d7 and d6. Otherwise, the double values have been loaded
575 // into r0, r1, r2, and r3.
576 EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
578 __ bind(&both_loaded_as_doubles);
579 // The arguments have been converted to doubles and stored in d6 and d7, if
580 // VFP3 is supported, or in r0, r1, r2, and r3.
581 __ bind(&lhs_not_nan);
583 // ARMv7 VFP3 instructions to implement double precision comparison.
584 __ VFPCompareAndSetFlags(d7, d6);
587 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
588 __ mov(r0, Operand(LESS), LeaveCC, lt);
589 __ mov(r0, Operand(GREATER), LeaveCC, gt);
593 // If one of the sides was a NaN then the v flag is set. Load r0 with
594 // whatever it takes to make the comparison fail, since comparisons with NaN
596 if (cc == lt || cc == le) {
597 __ mov(r0, Operand(GREATER));
599 __ mov(r0, Operand(LESS));
604 // At this point we know we are dealing with two different objects,
605 // and neither of them is a Smi. The objects are in rhs_ and lhs_.
607 // This returns non-equal for some object types, or falls through if it
609 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
612 Label check_for_internalized_strings;
613 Label flat_string_check;
614 // Check for heap-number-heap-number comparison. Can jump to slow case,
615 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
616 // that case. If the inputs are not doubles then jumps to
617 // check_for_internalized_strings.
618 // In this case r2 will contain the type of rhs_. Never falls through.
619 EmitCheckForTwoHeapNumbers(masm,
622 &both_loaded_as_doubles,
623 &check_for_internalized_strings,
626 __ bind(&check_for_internalized_strings);
627 // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
628 // internalized strings.
629 if (cc == eq && !strict()) {
630 // Returns an answer for two internalized strings or two detectable objects.
631 // Otherwise jumps to string case or not both strings case.
632 // Assumes that r2 is the type of rhs_ on entry.
633 EmitCheckForInternalizedStringsOrObjects(
634 masm, lhs, rhs, &flat_string_check, &slow);
637 // Check for both being sequential one-byte strings,
638 // and inline if that is the case.
639 __ bind(&flat_string_check);
641 __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r2, r3, &slow);
643 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
646 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r2, r3, r4);
648 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r2, r3, r4,
651 // Never falls through to here.
656 // Figure out which native to call and setup the arguments.
657 Builtins::JavaScript native;
659 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
661 native = Builtins::COMPARE;
662 int ncr; // NaN compare result
663 if (cc == lt || cc == le) {
666 DCHECK(cc == gt || cc == ge); // remaining cases
669 __ mov(r0, Operand(Smi::FromInt(ncr)));
673 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
674 // tagged as a small integer.
675 __ InvokeBuiltin(native, JUMP_FUNCTION);
682 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
683 // We don't allow a GC during a store buffer overflow so there is no need to
684 // store the registers in any particular way, but we do have to store and
686 __ stm(db_w, sp, kCallerSaved | lr.bit());
688 const Register scratch = r1;
690 if (save_doubles()) {
691 __ SaveFPRegs(sp, scratch);
693 const int argument_count = 1;
694 const int fp_argument_count = 0;
696 AllowExternalCallThatCantCauseGC scope(masm);
697 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
698 __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
700 ExternalReference::store_buffer_overflow_function(isolate()),
702 if (save_doubles()) {
703 __ RestoreFPRegs(sp, scratch);
705 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
709 void MathPowStub::Generate(MacroAssembler* masm) {
710 const Register base = r1;
711 const Register exponent = MathPowTaggedDescriptor::exponent();
712 DCHECK(exponent.is(r2));
713 const Register heapnumbermap = r5;
714 const Register heapnumber = r0;
715 const DwVfpRegister double_base = d0;
716 const DwVfpRegister double_exponent = d1;
717 const DwVfpRegister double_result = d2;
718 const DwVfpRegister double_scratch = d3;
719 const SwVfpRegister single_scratch = s6;
720 const Register scratch = r9;
721 const Register scratch2 = r4;
723 Label call_runtime, done, int_exponent;
724 if (exponent_type() == ON_STACK) {
725 Label base_is_smi, unpack_exponent;
726 // The exponent and base are supplied as arguments on the stack.
727 // This can only happen if the stub is called from non-optimized code.
728 // Load input parameters from stack to double registers.
729 __ ldr(base, MemOperand(sp, 1 * kPointerSize));
730 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
732 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
734 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
735 __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
736 __ cmp(scratch, heapnumbermap);
737 __ b(ne, &call_runtime);
739 __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
740 __ jmp(&unpack_exponent);
742 __ bind(&base_is_smi);
743 __ vmov(single_scratch, scratch);
744 __ vcvt_f64_s32(double_base, single_scratch);
745 __ bind(&unpack_exponent);
747 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
749 __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
750 __ cmp(scratch, heapnumbermap);
751 __ b(ne, &call_runtime);
752 __ vldr(double_exponent,
753 FieldMemOperand(exponent, HeapNumber::kValueOffset));
754 } else if (exponent_type() == TAGGED) {
755 // Base is already in double_base.
756 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
758 __ vldr(double_exponent,
759 FieldMemOperand(exponent, HeapNumber::kValueOffset));
762 if (exponent_type() != INTEGER) {
763 Label int_exponent_convert;
764 // Detect integer exponents stored as double.
765 __ vcvt_u32_f64(single_scratch, double_exponent);
766 // We do not check for NaN or Infinity here because comparing numbers on
767 // ARM correctly distinguishes NaNs. We end up calling the built-in.
768 __ vcvt_f64_u32(double_scratch, single_scratch);
769 __ VFPCompareAndSetFlags(double_scratch, double_exponent);
770 __ b(eq, &int_exponent_convert);
772 if (exponent_type() == ON_STACK) {
773 // Detect square root case. Crankshaft detects constant +/-0.5 at
774 // compile time and uses DoMathPowHalf instead. We then skip this check
775 // for non-constant cases of +/-0.5 as these hardly occur.
779 __ vmov(double_scratch, 0.5, scratch);
780 __ VFPCompareAndSetFlags(double_exponent, double_scratch);
781 __ b(ne, ¬_plus_half);
783 // Calculates square root of base. Check for the special case of
784 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
785 __ vmov(double_scratch, -V8_INFINITY, scratch);
786 __ VFPCompareAndSetFlags(double_base, double_scratch);
787 __ vneg(double_result, double_scratch, eq);
790 // Add +0 to convert -0 to +0.
791 __ vadd(double_scratch, double_base, kDoubleRegZero);
792 __ vsqrt(double_result, double_scratch);
795 __ bind(¬_plus_half);
796 __ vmov(double_scratch, -0.5, scratch);
797 __ VFPCompareAndSetFlags(double_exponent, double_scratch);
798 __ b(ne, &call_runtime);
800 // Calculates square root of base. Check for the special case of
801 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
802 __ vmov(double_scratch, -V8_INFINITY, scratch);
803 __ VFPCompareAndSetFlags(double_base, double_scratch);
804 __ vmov(double_result, kDoubleRegZero, eq);
807 // Add +0 to convert -0 to +0.
808 __ vadd(double_scratch, double_base, kDoubleRegZero);
809 __ vmov(double_result, 1.0, scratch);
810 __ vsqrt(double_scratch, double_scratch);
811 __ vdiv(double_result, double_result, double_scratch);
817 AllowExternalCallThatCantCauseGC scope(masm);
818 __ PrepareCallCFunction(0, 2, scratch);
819 __ MovToFloatParameters(double_base, double_exponent);
821 ExternalReference::power_double_double_function(isolate()),
825 __ MovFromFloatResult(double_result);
828 __ bind(&int_exponent_convert);
829 __ vcvt_u32_f64(single_scratch, double_exponent);
830 __ vmov(scratch, single_scratch);
833 // Calculate power with integer exponent.
834 __ bind(&int_exponent);
836 // Get two copies of exponent in the registers scratch and exponent.
837 if (exponent_type() == INTEGER) {
838 __ mov(scratch, exponent);
840 // Exponent has previously been stored into scratch as untagged integer.
841 __ mov(exponent, scratch);
843 __ vmov(double_scratch, double_base); // Back up base.
844 __ vmov(double_result, 1.0, scratch2);
846 // Get absolute value of exponent.
847 __ cmp(scratch, Operand::Zero());
848 __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
849 __ sub(scratch, scratch2, scratch, LeaveCC, mi);
852 __ bind(&while_true);
853 __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
854 __ vmul(double_result, double_result, double_scratch, cs);
855 __ vmul(double_scratch, double_scratch, double_scratch, ne);
856 __ b(ne, &while_true);
858 __ cmp(exponent, Operand::Zero());
860 __ vmov(double_scratch, 1.0, scratch);
861 __ vdiv(double_result, double_scratch, double_result);
862 // Test whether result is zero. Bail out to check for subnormal result.
863 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
864 __ VFPCompareAndSetFlags(double_result, 0.0);
866 // double_exponent may not containe the exponent value if the input was a
867 // smi. We set it with exponent value before bailing out.
868 __ vmov(single_scratch, exponent);
869 __ vcvt_f64_s32(double_exponent, single_scratch);
871 // Returning or bailing out.
872 Counters* counters = isolate()->counters();
873 if (exponent_type() == ON_STACK) {
874 // The arguments are still on the stack.
875 __ bind(&call_runtime);
876 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
878 // The stub is called from non-optimized code, which expects the result
879 // as heap number in exponent.
881 __ AllocateHeapNumber(
882 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
883 __ vstr(double_result,
884 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
885 DCHECK(heapnumber.is(r0));
886 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
891 AllowExternalCallThatCantCauseGC scope(masm);
892 __ PrepareCallCFunction(0, 2, scratch);
893 __ MovToFloatParameters(double_base, double_exponent);
895 ExternalReference::power_double_double_function(isolate()),
899 __ MovFromFloatResult(double_result);
902 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
908 bool CEntryStub::NeedsImmovableCode() {
913 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
914 CEntryStub::GenerateAheadOfTime(isolate);
915 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
916 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
917 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
918 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
919 CreateWeakCellStub::GenerateAheadOfTime(isolate);
920 BinaryOpICStub::GenerateAheadOfTime(isolate);
921 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
925 void CodeStub::GenerateFPStubs(Isolate* isolate) {
926 // Generate if not already in cache.
927 SaveFPRegsMode mode = kSaveFPRegs;
928 CEntryStub(isolate, 1, mode).GetCode();
929 StoreBufferOverflowStub(isolate, mode).GetCode();
930 isolate->set_fp_stubs_generated(true);
934 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
935 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
940 void CEntryStub::Generate(MacroAssembler* masm) {
941 // Called from JavaScript; parameters are on stack as if calling JS function.
942 // r0: number of arguments including receiver
943 // r1: pointer to builtin function
944 // fp: frame pointer (restored after C call)
945 // sp: stack pointer (restored as callee's sp after C call)
946 // cp: current context (C callee-saved)
948 ProfileEntryHookStub::MaybeCallEntryHook(masm);
950 __ mov(r5, Operand(r1));
952 // Compute the argv pointer in a callee-saved register.
953 __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
954 __ sub(r1, r1, Operand(kPointerSize));
956 // Enter the exit frame that transitions from JavaScript to C++.
957 FrameScope scope(masm, StackFrame::MANUAL);
958 __ EnterExitFrame(save_doubles());
960 // Store a copy of argc in callee-saved registers for later.
961 __ mov(r4, Operand(r0));
963 // r0, r4: number of arguments including receiver (C callee-saved)
964 // r1: pointer to the first argument (C callee-saved)
965 // r5: pointer to builtin function (C callee-saved)
967 // Result returned in r0 or r0+r1 by default.
970 int frame_alignment = MacroAssembler::ActivationFrameAlignment();
971 int frame_alignment_mask = frame_alignment - 1;
972 if (FLAG_debug_code) {
973 if (frame_alignment > kPointerSize) {
974 Label alignment_as_expected;
975 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
976 __ tst(sp, Operand(frame_alignment_mask));
977 __ b(eq, &alignment_as_expected);
978 // Don't use Check here, as it will call Runtime_Abort re-entering here.
979 __ stop("Unexpected alignment");
980 __ bind(&alignment_as_expected);
986 // r0 = argc, r1 = argv
987 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
989 // To let the GC traverse the return address of the exit frames, we need to
990 // know where the return address is. The CEntryStub is unmovable, so
991 // we can store the address on the stack to be able to find it again and
992 // we never have to restore it, because it will not change.
993 // Compute the return address in lr to return to after the jump below. Pc is
994 // already at '+ 8' from the current instruction but return is after three
995 // instructions so add another 4 to pc to get the return address.
997 // Prevent literal pool emission before return address.
998 Assembler::BlockConstPoolScope block_const_pool(masm);
999 __ add(lr, pc, Operand(4));
1000 __ str(lr, MemOperand(sp, 0));
1004 __ VFPEnsureFPSCRState(r2);
1006 // Runtime functions should not return 'the hole'. Allowing it to escape may
1007 // lead to crashes in the IC code later.
1008 if (FLAG_debug_code) {
1010 __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
1012 __ stop("The hole escaped");
1016 // Check result for exception sentinel.
1017 Label exception_returned;
1018 __ CompareRoot(r0, Heap::kExceptionRootIndex);
1019 __ b(eq, &exception_returned);
1021 ExternalReference pending_exception_address(
1022 Isolate::kPendingExceptionAddress, isolate());
1024 // Check that there is no pending exception, otherwise we
1025 // should have returned the exception sentinel.
1026 if (FLAG_debug_code) {
1028 __ mov(r2, Operand(pending_exception_address));
1029 __ ldr(r2, MemOperand(r2));
1030 __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
1031 // Cannot use check here as it attempts to generate call into runtime.
1033 __ stop("Unexpected pending exception");
1037 // Exit C frame and return.
1039 // sp: stack pointer
1040 // fp: frame pointer
1041 // Callee-saved register r4 still holds argc.
1042 __ LeaveExitFrame(save_doubles(), r4, true);
1045 // Handling of exception.
1046 __ bind(&exception_returned);
1048 // Retrieve the pending exception.
1049 __ mov(r2, Operand(pending_exception_address));
1050 __ ldr(r0, MemOperand(r2));
1052 // Clear the pending exception.
1053 __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
1054 __ str(r3, MemOperand(r2));
1056 // Special handling of termination exceptions which are uncatchable
1057 // by javascript code.
1058 Label throw_termination_exception;
1059 __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
1060 __ b(eq, &throw_termination_exception);
1062 // Handle normal exception.
1065 __ bind(&throw_termination_exception);
1066 __ ThrowUncatchable(r0);
1070 void JSEntryStub::Generate(MacroAssembler* masm) {
1077 Label invoke, handler_entry, exit;
1079 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1081 // Called from C, so do not pop argc and args on exit (preserve sp)
1082 // No need to save register-passed args
1083 // Save callee-saved registers (incl. cp and fp), sp, and lr
1084 __ stm(db_w, sp, kCalleeSaved | lr.bit());
1086 // Save callee-saved vfp registers.
1087 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
1088 // Set up the reserved register for 0.0.
1089 __ vmov(kDoubleRegZero, 0.0);
1090 __ VFPEnsureFPSCRState(r4);
1092 // Get address of argv, see stm above.
1098 // Set up argv in r4.
1099 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1100 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
1101 __ ldr(r4, MemOperand(sp, offset_to_argv));
1103 // Push a frame with special values setup to mark it as an entry frame.
1109 int marker = type();
1110 if (FLAG_enable_ool_constant_pool) {
1111 __ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array()));
1113 __ mov(r7, Operand(Smi::FromInt(marker)));
1114 __ mov(r6, Operand(Smi::FromInt(marker)));
1116 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1117 __ ldr(r5, MemOperand(r5));
1118 __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1119 __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
1120 (FLAG_enable_ool_constant_pool ? r8.bit() : 0) |
1123 // Set up frame pointer for the frame to be pushed.
1124 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1126 // If this is the outermost JS call, set js_entry_sp value.
1127 Label non_outermost_js;
1128 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1129 __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1130 __ ldr(r6, MemOperand(r5));
1131 __ cmp(r6, Operand::Zero());
1132 __ b(ne, &non_outermost_js);
1133 __ str(fp, MemOperand(r5));
1134 __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1137 __ bind(&non_outermost_js);
1138 __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1142 // Jump to a faked try block that does the invoke, with a faked catch
1143 // block that sets the pending exception.
1146 // Block literal pool emission whilst taking the position of the handler
1147 // entry. This avoids making the assumption that literal pools are always
1148 // emitted after an instruction is emitted, rather than before.
1150 Assembler::BlockConstPoolScope block_const_pool(masm);
1151 __ bind(&handler_entry);
1152 handler_offset_ = handler_entry.pos();
1153 // Caught exception: Store result (exception) in the pending exception
1154 // field in the JSEnv and return a failure sentinel. Coming in here the
1155 // fp will be invalid because the PushTryHandler below sets it to 0 to
1156 // signal the existence of the JSEntry frame.
1157 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1160 __ str(r0, MemOperand(ip));
1161 __ LoadRoot(r0, Heap::kExceptionRootIndex);
1164 // Invoke: Link this frame into the handler chain. There's only one
1165 // handler block in this code object, so its index is 0.
1167 // Must preserve r0-r4, r5-r6 are available.
1168 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1169 // If an exception not caught by another handler occurs, this handler
1170 // returns control to the code after the bl(&invoke) above, which
1171 // restores all kCalleeSaved registers (including cp and fp) to their
1172 // saved values before returning a failure to C.
1174 // Clear any pending exceptions.
1175 __ mov(r5, Operand(isolate()->factory()->the_hole_value()));
1176 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1178 __ str(r5, MemOperand(ip));
1180 // Invoke the function by calling through JS entry trampoline builtin.
1181 // Notice that we cannot store a reference to the trampoline code directly in
1182 // this stub, because runtime stubs are not traversed when doing GC.
1184 // Expected registers by Builtins::JSEntryTrampoline
1190 if (type() == StackFrame::ENTRY_CONSTRUCT) {
1191 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1193 __ mov(ip, Operand(construct_entry));
1195 ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
1196 __ mov(ip, Operand(entry));
1198 __ ldr(ip, MemOperand(ip)); // deref address
1199 __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
1201 // Branch and link to JSEntryTrampoline.
1204 // Unlink this frame from the handler chain.
1207 __ bind(&exit); // r0 holds result
1208 // Check if the current stack frame is marked as the outermost JS frame.
1209 Label non_outermost_js_2;
1211 __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1212 __ b(ne, &non_outermost_js_2);
1213 __ mov(r6, Operand::Zero());
1214 __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1215 __ str(r6, MemOperand(r5));
1216 __ bind(&non_outermost_js_2);
1218 // Restore the top frame descriptors from the stack.
1221 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1222 __ str(r3, MemOperand(ip));
1224 // Reset the stack to the callee saved registers.
1225 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1227 // Restore callee-saved registers and return.
1229 if (FLAG_debug_code) {
1230 __ mov(lr, Operand(pc));
1234 // Restore callee-saved vfp registers.
1235 __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
1237 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
1241 // Uses registers r0 to r4.
1242 // Expected input (depending on whether args are in registers or on the stack):
1243 // * object: r0 or at sp + 1 * kPointerSize.
1244 // * function: r1 or at sp.
1246 // An inlined call site may have been generated before calling this stub.
1247 // In this case the offset to the inline sites to patch are passed in r5 and r6.
1248 // (See LCodeGen::DoInstanceOfKnownGlobal)
1249 void InstanceofStub::Generate(MacroAssembler* masm) {
1250 // Call site inlining and patching implies arguments in registers.
1251 DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1253 // Fixed register usage throughout the stub:
1254 const Register object = r0; // Object (lhs).
1255 Register map = r3; // Map of the object.
1256 const Register function = r1; // Function (rhs).
1257 const Register prototype = r4; // Prototype of the function.
1258 const Register scratch = r2;
1260 Label slow, loop, is_instance, is_not_instance, not_js_object;
1262 if (!HasArgsInRegisters()) {
1263 __ ldr(object, MemOperand(sp, 1 * kPointerSize));
1264 __ ldr(function, MemOperand(sp, 0));
1267 // Check that the left hand is a JS object and load map.
1268 __ JumpIfSmi(object, ¬_js_object);
1269 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
1271 // If there is a call site cache don't look in the global cache, but do the
1272 // real lookup and update the call site cache.
1273 if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
1275 __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1277 __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
1279 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1280 __ Ret(HasArgsInRegisters() ? 0 : 2);
1285 // Get the prototype of the function.
1286 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1288 // Check that the function prototype is a JS object.
1289 __ JumpIfSmi(prototype, &slow);
1290 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1292 // Update the global instanceof or call site inlined cache with the current
1293 // map and function. The cached answer will be set when it is known below.
1294 if (!HasCallSiteInlineCheck()) {
1295 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1296 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1298 DCHECK(HasArgsInRegisters());
1299 // Patch the (relocated) inlined map check.
1301 // The map_load_offset was stored in r5
1302 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1303 const Register map_load_offset = r5;
1304 __ sub(r9, lr, map_load_offset);
1305 // Get the map location in r5 and patch it.
1306 __ GetRelocatedValueLocation(r9, map_load_offset, scratch);
1307 __ ldr(map_load_offset, MemOperand(map_load_offset));
1308 __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));
1311 // Register mapping: r3 is object map and r4 is function prototype.
1312 // Get prototype of object into r2.
1313 __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1315 // We don't need map any more. Use it as a scratch register.
1316 Register scratch2 = map;
1319 // Loop through the prototype chain looking for the function prototype.
1320 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1322 __ cmp(scratch, Operand(prototype));
1323 __ b(eq, &is_instance);
1324 __ cmp(scratch, scratch2);
1325 __ b(eq, &is_not_instance);
1326 __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1327 __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1329 Factory* factory = isolate()->factory();
1331 __ bind(&is_instance);
1332 if (!HasCallSiteInlineCheck()) {
1333 __ mov(r0, Operand(Smi::FromInt(0)));
1334 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1335 if (ReturnTrueFalseObject()) {
1336 __ Move(r0, factory->true_value());
1339 // Patch the call site to return true.
1340 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
1341 // The bool_load_offset was stored in r6
1342 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1343 const Register bool_load_offset = r6;
1344 __ sub(r9, lr, bool_load_offset);
1345 // Get the boolean result location in scratch and patch it.
1346 __ GetRelocatedValueLocation(r9, scratch, scratch2);
1347 __ str(r0, MemOperand(scratch));
1349 if (!ReturnTrueFalseObject()) {
1350 __ mov(r0, Operand(Smi::FromInt(0)));
1353 __ Ret(HasArgsInRegisters() ? 0 : 2);
1355 __ bind(&is_not_instance);
1356 if (!HasCallSiteInlineCheck()) {
1357 __ mov(r0, Operand(Smi::FromInt(1)));
1358 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1359 if (ReturnTrueFalseObject()) {
1360 __ Move(r0, factory->false_value());
1363 // Patch the call site to return false.
1364 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
1365 // The bool_load_offset was stored in r6
1366 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1367 const Register bool_load_offset = r6;
1368 __ sub(r9, lr, bool_load_offset);
1370 // Get the boolean result location in scratch and patch it.
1371 __ GetRelocatedValueLocation(r9, scratch, scratch2);
1372 __ str(r0, MemOperand(scratch));
1374 if (!ReturnTrueFalseObject()) {
1375 __ mov(r0, Operand(Smi::FromInt(1)));
1378 __ Ret(HasArgsInRegisters() ? 0 : 2);
1380 Label object_not_null, object_not_null_or_smi;
1381 __ bind(¬_js_object);
1382 // Before null, smi and string value checks, check that the rhs is a function
1383 // as for a non-function rhs an exception needs to be thrown.
1384 __ JumpIfSmi(function, &slow);
1385 __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
1388 // Null is not instance of anything.
1389 __ cmp(object, Operand(isolate()->factory()->null_value()));
1390 __ b(ne, &object_not_null);
1391 if (ReturnTrueFalseObject()) {
1392 __ Move(r0, factory->false_value());
1394 __ mov(r0, Operand(Smi::FromInt(1)));
1396 __ Ret(HasArgsInRegisters() ? 0 : 2);
1398 __ bind(&object_not_null);
1399 // Smi values are not instances of anything.
1400 __ JumpIfNotSmi(object, &object_not_null_or_smi);
1401 if (ReturnTrueFalseObject()) {
1402 __ Move(r0, factory->false_value());
1404 __ mov(r0, Operand(Smi::FromInt(1)));
1406 __ Ret(HasArgsInRegisters() ? 0 : 2);
1408 __ bind(&object_not_null_or_smi);
1409 // String values are not instances of anything.
1410 __ IsObjectJSStringType(object, scratch, &slow);
1411 if (ReturnTrueFalseObject()) {
1412 __ Move(r0, factory->false_value());
1414 __ mov(r0, Operand(Smi::FromInt(1)));
1416 __ Ret(HasArgsInRegisters() ? 0 : 2);
1418 // Slow-case. Tail call builtin.
1420 if (!ReturnTrueFalseObject()) {
1421 if (HasArgsInRegisters()) {
1424 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
1427 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1429 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1431 __ cmp(r0, Operand::Zero());
1432 __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
1433 __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
1434 __ Ret(HasArgsInRegisters() ? 0 : 2);
1439 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1441 Register receiver = LoadDescriptor::ReceiverRegister();
1442 // Ensure that the vector and slot registers won't be clobbered before
1443 // calling the miss handler.
1444 DCHECK(!FLAG_vector_ics ||
1445 !AreAliased(r4, r5, VectorLoadICDescriptor::VectorRegister(),
1446 VectorLoadICDescriptor::SlotRegister()));
1448 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
1451 PropertyAccessCompiler::TailCallBuiltin(
1452 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1456 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
1457 // Return address is in lr.
1460 Register receiver = LoadDescriptor::ReceiverRegister();
1461 Register index = LoadDescriptor::NameRegister();
1462 Register scratch = r5;
1463 Register result = r0;
1464 DCHECK(!scratch.is(receiver) && !scratch.is(index));
1465 DCHECK(!FLAG_vector_ics ||
1466 (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
1467 result.is(VectorLoadICDescriptor::SlotRegister())));
1469 // StringCharAtGenerator doesn't use the result register until it's passed
1470 // the different miss possibilities. If it did, we would have a conflict
1471 // when FLAG_vector_ics is true.
1472 StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
1473 &miss, // When not a string.
1474 &miss, // When not a number.
1475 &miss, // When index out of range.
1476 STRING_INDEX_IS_ARRAY_INDEX,
1477 RECEIVER_IS_STRING);
1478 char_at_generator.GenerateFast(masm);
1481 StubRuntimeCallHelper call_helper;
1482 char_at_generator.GenerateSlow(masm, call_helper);
1485 PropertyAccessCompiler::TailCallBuiltin(
1486 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1490 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1491 CHECK(!has_new_target());
1492 // The displacement is the offset of the last parameter (if any)
1493 // relative to the frame pointer.
1494 const int kDisplacement =
1495 StandardFrameConstants::kCallerSPOffset - kPointerSize;
1496 DCHECK(r1.is(ArgumentsAccessReadDescriptor::index()));
1497 DCHECK(r0.is(ArgumentsAccessReadDescriptor::parameter_count()));
1499 // Check that the key is a smi.
1501 __ JumpIfNotSmi(r1, &slow);
1503 // Check if the calling frame is an arguments adaptor frame.
1505 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1506 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1507 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1510 // Check index against formal parameters count limit passed in
1511 // through register r0. Use unsigned comparison to get negative
1516 // Read the argument from the stack and return it.
1518 __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
1519 __ ldr(r0, MemOperand(r3, kDisplacement));
1522 // Arguments adaptor case: Check index against actual arguments
1523 // limit found in the arguments adaptor frame. Use unsigned
1524 // comparison to get negative check for free.
1526 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1530 // Read the argument from the adaptor frame and return it.
1532 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
1533 __ ldr(r0, MemOperand(r3, kDisplacement));
1536 // Slow-case: Handle non-smi or out-of-bounds access to arguments
1537 // by calling the runtime system.
1540 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1544 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1545 // sp[0] : number of parameters
1546 // sp[4] : receiver displacement
1549 CHECK(!has_new_target());
1551 // Check if the calling frame is an arguments adaptor frame.
1553 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1554 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
1555 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1558 // Patch the arguments.length and the parameters pointer in the current frame.
1559 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1560 __ str(r2, MemOperand(sp, 0 * kPointerSize));
1561 __ add(r3, r3, Operand(r2, LSL, 1));
1562 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
1563 __ str(r3, MemOperand(sp, 1 * kPointerSize));
1566 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1570 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1572 // sp[0] : number of parameters (tagged)
1573 // sp[4] : address of receiver argument
1575 // Registers used over whole function:
1576 // r6 : allocated object (tagged)
1577 // r9 : mapped parameter count (tagged)
1579 CHECK(!has_new_target());
1581 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
1582 // r1 = parameter count (tagged)
1584 // Check if the calling frame is an arguments adaptor frame.
1586 Label adaptor_frame, try_allocate;
1587 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1588 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
1589 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1590 __ b(eq, &adaptor_frame);
1592 // No adaptor, parameter count = argument count.
1594 __ b(&try_allocate);
1596 // We have an adaptor frame. Patch the parameters pointer.
1597 __ bind(&adaptor_frame);
1598 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1599 __ add(r3, r3, Operand(r2, LSL, 1));
1600 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
1601 __ str(r3, MemOperand(sp, 1 * kPointerSize));
1603 // r1 = parameter count (tagged)
1604 // r2 = argument count (tagged)
1605 // Compute the mapped parameter count = min(r1, r2) in r1.
1606 __ cmp(r1, Operand(r2));
1607 __ mov(r1, Operand(r2), LeaveCC, gt);
1609 __ bind(&try_allocate);
1611 // Compute the sizes of backing store, parameter map, and arguments object.
1612 // 1. Parameter map, has 2 extra words containing context and backing store.
1613 const int kParameterMapHeaderSize =
1614 FixedArray::kHeaderSize + 2 * kPointerSize;
1615 // If there are no mapped parameters, we do not need the parameter_map.
1616 __ cmp(r1, Operand(Smi::FromInt(0)));
1617 __ mov(r9, Operand::Zero(), LeaveCC, eq);
1618 __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
1619 __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
1621 // 2. Backing store.
1622 __ add(r9, r9, Operand(r2, LSL, 1));
1623 __ add(r9, r9, Operand(FixedArray::kHeaderSize));
1625 // 3. Arguments object.
1626 __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
1628 // Do the allocation of all three objects in one go.
1629 __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
1631 // r0 = address of new object(s) (tagged)
1632 // r2 = argument count (smi-tagged)
1633 // Get the arguments boilerplate from the current native context into r4.
1634 const int kNormalOffset =
1635 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
1636 const int kAliasedOffset =
1637 Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
1639 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1640 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
1641 __ cmp(r1, Operand::Zero());
1642 __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
1643 __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
1645 // r0 = address of new object (tagged)
1646 // r1 = mapped parameter count (tagged)
1647 // r2 = argument count (smi-tagged)
1648 // r4 = address of arguments map (tagged)
1649 __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
1650 __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
1651 __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
1652 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
1654 // Set up the callee in-object property.
1655 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1656 __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
1657 __ AssertNotSmi(r3);
1658 const int kCalleeOffset = JSObject::kHeaderSize +
1659 Heap::kArgumentsCalleeIndex * kPointerSize;
1660 __ str(r3, FieldMemOperand(r0, kCalleeOffset));
1662 // Use the length (smi tagged) and set that as an in-object property too.
1664 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1665 const int kLengthOffset = JSObject::kHeaderSize +
1666 Heap::kArgumentsLengthIndex * kPointerSize;
1667 __ str(r2, FieldMemOperand(r0, kLengthOffset));
1669 // Set up the elements pointer in the allocated arguments object.
1670 // If we allocated a parameter map, r4 will point there, otherwise
1671 // it will point to the backing store.
1672 __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
1673 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
1675 // r0 = address of new object (tagged)
1676 // r1 = mapped parameter count (tagged)
1677 // r2 = argument count (tagged)
1678 // r4 = address of parameter map or backing store (tagged)
1679 // Initialize parameter map. If there are no mapped arguments, we're done.
1680 Label skip_parameter_map;
1681 __ cmp(r1, Operand(Smi::FromInt(0)));
1682 // Move backing store address to r3, because it is
1683 // expected there when filling in the unmapped arguments.
1684 __ mov(r3, r4, LeaveCC, eq);
1685 __ b(eq, &skip_parameter_map);
1687 __ LoadRoot(r6, Heap::kSloppyArgumentsElementsMapRootIndex);
1688 __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
1689 __ add(r6, r1, Operand(Smi::FromInt(2)));
1690 __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
1691 __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
1692 __ add(r6, r4, Operand(r1, LSL, 1));
1693 __ add(r6, r6, Operand(kParameterMapHeaderSize));
1694 __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
1696 // Copy the parameter slots and the holes in the arguments.
1697 // We need to fill in mapped_parameter_count slots. They index the context,
1698 // where parameters are stored in reverse order, at
1699 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
1700 // The mapped parameter thus need to get indices
1701 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
1702 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
1703 // We loop from right to left.
1704 Label parameters_loop, parameters_test;
1706 __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
1707 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
1708 __ sub(r9, r9, Operand(r1));
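// r9 now holds the lowest context slot index used for the mapped parameters
// (smi-tagged): MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count.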
1709 __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
1710 __ add(r3, r4, Operand(r6, LSL, 1));
1711 __ add(r3, r3, Operand(kParameterMapHeaderSize));
1713 // r6 = loop variable (tagged)
1714 // r1 = mapping index (tagged)
1715 // r3 = address of backing store (tagged)
1716 // r4 = address of parameter map (tagged), which is also the address of new
1717 // object + Heap::kSloppyArgumentsObjectSize (tagged)
1718 // r0 = temporary scratch (a.o., for address calculation)
1719 // r5 = the hole value
1720 __ jmp(&parameters_test);
1722 __ bind(&parameters_loop);
1723 __ sub(r6, r6, Operand(Smi::FromInt(1)));
1724 __ mov(r0, Operand(r6, LSL, 1));
1725 __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
1726 __ str(r9, MemOperand(r4, r0));
1727 __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
1728 __ str(r5, MemOperand(r3, r0));
1729 __ add(r9, r9, Operand(Smi::FromInt(1)));
1730 __ bind(&parameters_test);
1731 __ cmp(r6, Operand(Smi::FromInt(0)));
1732 __ b(ne, &parameters_loop);
1734 // Restore r0 = new object (tagged)
1735 __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
1737 __ bind(&skip_parameter_map);
1738 // r0 = address of new object (tagged)
1739 // r2 = argument count (tagged)
1740 // r3 = address of backing store (tagged)
1742 // Copy arguments header and remaining slots (if there are any).
1743 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
1744 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
1745 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
1747 Label arguments_loop, arguments_test;
1749 __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
1750 __ sub(r4, r4, Operand(r9, LSL, 1));
1751 __ jmp(&arguments_test);
1753 __ bind(&arguments_loop);
1754 __ sub(r4, r4, Operand(kPointerSize));
1755 __ ldr(r6, MemOperand(r4, 0));
1756 __ add(r5, r3, Operand(r9, LSL, 1));
1757 __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
1758 __ add(r9, r9, Operand(Smi::FromInt(1)));
1760 __ bind(&arguments_test);
1761 __ cmp(r9, Operand(r2));
1762 __ b(lt, &arguments_loop);
1764 // Return and remove the on-stack parameters.
1765 __ add(sp, sp, Operand(3 * kPointerSize));
1768 // Do the runtime call to allocate the arguments object.
1769 // r0 = address of new object (tagged)
1770 // r2 = argument count (tagged)
1772 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
1773 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1777 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1778 // Return address is in lr.
1781 Register receiver = LoadDescriptor::ReceiverRegister();
1782 Register key = LoadDescriptor::NameRegister();
1784 // Check that the key is an array index, that is Uint32.
1785 __ NonNegativeSmiTst(key);
1788 // Everything is fine, call runtime.
1789 __ Push(receiver, key); // Receiver, key.
1791 // Perform tail call to the entry.
1792 __ TailCallExternalReference(
1793 ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
1798 PropertyAccessCompiler::TailCallBuiltin(
1799 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1803 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1804 // sp[0] : number of parameters
1805 // sp[4] : receiver displacement
1807 // Check if the calling frame is an arguments adaptor frame.
1808 Label adaptor_frame, try_allocate, runtime;
1809 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1810 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1811 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1812 __ b(eq, &adaptor_frame);
1814 // Get the length from the frame.
1815 __ ldr(r1, MemOperand(sp, 0));
1816 __ b(&try_allocate);
1818 // Patch the arguments.length and the parameters pointer.
1819 __ bind(&adaptor_frame);
1820 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1821 if (has_new_target()) {
1822 // Subtract 1 from smi-tagged arguments count.
1823 __ sub(r1, r1, Operand(2));  // 2 is the raw encoding of Smi::FromInt(1).
1825 __ str(r1, MemOperand(sp, 0));
1826 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
1827 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
1828 __ str(r3, MemOperand(sp, 1 * kPointerSize));
1830 // Try the new space allocation. Start out with computing the size
1831 // of the arguments object and the elements array in words.
1832 Label add_arguments_object;
1833 __ bind(&try_allocate);
1834 __ SmiUntag(r1, SetCC);
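// If there are no arguments, no elements array is allocated, so skip adding
// its header size.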
1835 __ b(eq, &add_arguments_object);
1836 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
1837 __ bind(&add_arguments_object);
1838 __ add(r1, r1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
1840 // Do the allocation of both objects in one go.
1841 __ Allocate(r1, r0, r2, r3, &runtime,
1842 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
1844 // Get the arguments boilerplate from the current native context.
1845 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1846 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
1847 __ ldr(r4, MemOperand(
1848 r4, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
1850 __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
1851 __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
1852 __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
1853 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
1855 // Get the length (smi tagged) and set that as an in-object property too.
1856 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1857 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
1859 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
1860 Heap::kArgumentsLengthIndex * kPointerSize));
1862 // If there are no actual arguments, we're done.
1864 __ cmp(r1, Operand::Zero());
1867 // Get the parameters pointer from the stack.
1868 __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
1870 // Set up the elements pointer in the allocated arguments object and
1871 // initialize the header in the elements fixed array.
1872 __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
1873 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
1874 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
1875 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
1876 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
1879 // Copy the fixed array slots.
1881 // Set up r4 to point to the first array slot.
1882 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1884 // Pre-decrement r2 with kPointerSize on each iteration.
1885 // Pre-decrement in order to skip receiver.
1886 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
1887 // Post-increment r4 with kPointerSize on each iteration.
1888 __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
1889 __ sub(r1, r1, Operand(1));
1890 __ cmp(r1, Operand::Zero());
1893 // Return and remove the on-stack parameters.
1895 __ add(sp, sp, Operand(3 * kPointerSize));
1898 // Do the runtime call to allocate the arguments object.
1900 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
1904 void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
1905 // Stack layout on entry.
1906 // sp[0] : index of rest parameter
1907 // sp[4] : number of parameters
1908 // sp[8] : receiver displacement
1911 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1912 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1913 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1916 // Patch the arguments.length and the parameters pointer.
1917 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1918 __ str(r1, MemOperand(sp, 1 * kPointerSize));
1919 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
1920 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
1921 __ str(r3, MemOperand(sp, 2 * kPointerSize));
1924 __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
1928 void RegExpExecStub::Generate(MacroAssembler* masm) {
1929 // Just jump directly to runtime if native RegExp is not selected at compile
1930 // time, or if the regexp entry in generated code is turned off by a runtime
1931 // switch or at compilation.
1932 #ifdef V8_INTERPRETED_REGEXP
1933 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
1934 #else // V8_INTERPRETED_REGEXP
1936 // Stack frame on entry.
1937 // sp[0]: last_match_info (expected JSArray)
1938 // sp[4]: previous index
1939 // sp[8]: subject string
1940 // sp[12]: JSRegExp object
1942 const int kLastMatchInfoOffset = 0 * kPointerSize;
1943 const int kPreviousIndexOffset = 1 * kPointerSize;
1944 const int kSubjectOffset = 2 * kPointerSize;
1945 const int kJSRegExpOffset = 3 * kPointerSize;
1948 // Allocation of registers for this function. These are in callee save
1949 // registers and will be preserved by the call to the native RegExp code, as
1950 // this code is called using the normal C calling convention. When calling
1951 // directly from generated code the native RegExp code will not do a GC and
1952 // therefore the contents of these registers are safe to use after the call.
1953 Register subject = r4;
1954 Register regexp_data = r5;
1955 Register last_match_info_elements = no_reg; // will be r6;
1957 // Ensure that a RegExp stack is allocated.
1958 ExternalReference address_of_regexp_stack_memory_address =
1959 ExternalReference::address_of_regexp_stack_memory_address(isolate());
1960 ExternalReference address_of_regexp_stack_memory_size =
1961 ExternalReference::address_of_regexp_stack_memory_size(isolate());
1962 __ mov(r0, Operand(address_of_regexp_stack_memory_size));
1963 __ ldr(r0, MemOperand(r0, 0));
1964 __ cmp(r0, Operand::Zero());
1967 // Check that the first argument is a JSRegExp object.
1968 __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
1969 __ JumpIfSmi(r0, &runtime);
1970 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
1973 // Check that the RegExp has been compiled (data contains a fixed array).
1974 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
1975 if (FLAG_debug_code) {
1976 __ SmiTst(regexp_data);
1977 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1978 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
1979 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1982 // regexp_data: RegExp data (FixedArray)
1983 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1984 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
1985 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
1988 // regexp_data: RegExp data (FixedArray)
1989 // Check that the number of captures fits in the static offsets vector buffer.
1990 __ ldr(r2,
1991 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1992 // Check (number_of_captures + 1) * 2 <= offsets vector size
1993 // Or number_of_captures * 2 <= offsets vector size - 2
1994 // Multiplying by 2 comes for free since r2 is smi-tagged.
1995 STATIC_ASSERT(kSmiTag == 0);
1996 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
1997 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
1998 __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2001 // Reset offset for possibly sliced string.
2002 __ mov(r9, Operand::Zero());
2003 __ ldr(subject, MemOperand(sp, kSubjectOffset));
2004 __ JumpIfSmi(subject, &runtime);
2005 __ mov(r3, subject); // Make a copy of the original subject string.
2006 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2007 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2008 // subject: subject string
2009 // r3: subject string
2010 // r0: subject string instance type
2011 // regexp_data: RegExp data (FixedArray)
2012 // Handle subject string according to its encoding and representation:
2013 // (1) Sequential string? If yes, go to (5).
2014 // (2) Anything but sequential or cons? If yes, go to (6).
2015 // (3) Cons string. If the string is flat, replace subject with first string.
2016 // Otherwise bailout.
2017 // (4) Is subject external? If yes, go to (7).
2018 // (5) Sequential string. Load regexp code according to encoding.
2022 // Deferred code at the end of the stub:
2023 // (6) Not a long external string? If yes, go to (8).
2024 // (7) External string. Make it, offset-wise, look like a sequential string.
2026 // (8) Short external string or not a string? If yes, bail out to runtime.
2027 // (9) Sliced string. Replace subject with parent. Go to (4).
2029 Label seq_string /* 5 */, external_string /* 7 */,
2030 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2031 not_long_external /* 8 */;
2033 // (1) Sequential string? If yes, go to (5).
2036 Operand(kIsNotStringMask |
2037 kStringRepresentationMask |
2038 kShortExternalStringMask),
2040 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2041 __ b(eq, &seq_string); // Go to (5).
2043 // (2) Anything but sequential or cons? If yes, go to (6).
2044 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2045 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2046 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2047 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2048 __ cmp(r1, Operand(kExternalStringTag));
2049 __ b(ge, &not_seq_nor_cons); // Go to (6).
2051 // (3) Cons string. Check that it's flat.
2052 // Replace subject with first string and reload instance type.
2053 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
2054 __ CompareRoot(r0, Heap::kempty_stringRootIndex);
2056 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2058 // (4) Is subject external? If yes, go to (7).
2059 __ bind(&check_underlying);
2060 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2061 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2062 STATIC_ASSERT(kSeqStringTag == 0);
2063 __ tst(r0, Operand(kStringRepresentationMask));
2064 // The underlying external string is never a short external string.
2065 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2066 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2067 __ b(ne, &external_string); // Go to (7).
2069 // (5) Sequential string. Load regexp code according to encoding.
2070 __ bind(&seq_string);
2071 // subject: sequential subject string (or look-alike, external string)
2072 // r3: original subject string
2073 // Load previous index and check range before r3 is overwritten. We have to
2074 // use r3 instead of subject here because subject might have been only made
2075 // to look like a sequential string when it actually is an external string.
2076 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
2077 __ JumpIfNotSmi(r1, &runtime);
2078 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
2079 __ cmp(r3, Operand(r1));
2083 STATIC_ASSERT(4 == kOneByteStringTag);
2084 STATIC_ASSERT(kTwoByteStringTag == 0);
2085 __ and_(r0, r0, Operand(kStringEncodingMask));
2086 __ mov(r3, Operand(r0, ASR, 2), SetCC);
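// r3 is now 1 for a one-byte subject and 0 for a two-byte subject (see the
// STATIC_ASSERTs above); the flags from SetCC select which code object the
// conditional loads below pick up.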
2087 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
2088 ne);
2089 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
2091 // (E) Carry on. String handling is done.
2092 // r6: irregexp code
2093 // Check that the irregexp code has been generated for the actual string
2094 // encoding. If it has, the field contains a code object; otherwise it contains
2095 // a smi (code flushing support).
2096 __ JumpIfSmi(r6, &runtime);
2098 // r1: previous index
2099 // r3: encoding of subject string (1 if one_byte, 0 if two_byte);
2101 // subject: Subject string
2102 // regexp_data: RegExp data (FixedArray)
2103 // All checks done. Now push arguments for native regexp code.
2104 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);
2106 // Isolates: note we add an additional parameter here (isolate pointer).
2107 const int kRegExpExecuteArguments = 9;
2108 const int kParameterRegisters = 4;
2109 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2111 // Stack pointer now points to cell where return address is to be written.
2112 // Arguments are before that on the stack or in registers.
2114 // Argument 9 (sp[20]): Pass current isolate address.
2115 __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2116 __ str(r0, MemOperand(sp, 5 * kPointerSize));
2118 // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
2119 __ mov(r0, Operand(1));
2120 __ str(r0, MemOperand(sp, 4 * kPointerSize));
2122 // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
2123 __ mov(r0, Operand(address_of_regexp_stack_memory_address));
2124 __ ldr(r0, MemOperand(r0, 0));
2125 __ mov(r2, Operand(address_of_regexp_stack_memory_size));
2126 __ ldr(r2, MemOperand(r2, 0));
2127 __ add(r0, r0, Operand(r2));
2128 __ str(r0, MemOperand(sp, 3 * kPointerSize));
2130 // Argument 6: Set the number of capture registers to zero to force global
2131 // regexps to behave as non-global. This does not affect non-global regexps.
2132 __ mov(r0, Operand::Zero());
2133 __ str(r0, MemOperand(sp, 2 * kPointerSize));
2135 // Argument 5 (sp[4]): static offsets vector buffer.
2136 __ mov(r0,
2137 Operand(ExternalReference::address_of_static_offsets_vector(
2138 isolate())));
2139 __ str(r0, MemOperand(sp, 1 * kPointerSize));
2141 // For arguments 4 and 3 get string length, calculate start of string data and
2142 // calculate the shift of the index (0 for one-byte and 1 for two-byte).
2143 __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2144 __ eor(r3, r3, Operand(1));
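// r3 now holds the character size shift: 0 for one-byte, 1 for two-byte.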
2145 // Load the length from the original subject string from the previous stack
2146 // frame. Therefore we have to use fp, which points exactly to two pointer
2147 // sizes below the previous sp. (Because creating a new stack frame pushes
2148 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2149 __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2150 // If slice offset is not 0, load the length from the original sliced string.
2151 // Argument 4, r3: End of string data
2152 // Argument 3, r2: Start of string data
2153 // Prepare start and end index of the input.
2154 __ add(r9, r7, Operand(r9, LSL, r3));
2155 __ add(r2, r9, Operand(r1, LSL, r3));
2157 __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
2159 __ add(r3, r9, Operand(r7, LSL, r3));
2161 // Argument 2 (r1): Previous index.
2164 // Argument 1 (r0): Subject string.
2165 __ mov(r0, subject);
2167 // Locate the code entry and call it.
2168 __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
2169 DirectCEntryStub stub(isolate());
2170 stub.GenerateCall(masm, r6);
2172 __ LeaveExitFrame(false, no_reg, true);
2174 last_match_info_elements = r6;
2177 // subject: subject string (callee saved)
2178 // regexp_data: RegExp data (callee saved)
2179 // last_match_info_elements: Last match info elements (callee saved)
2180 // Check the result.
2182 __ cmp(r0, Operand(1));
2183 // We expect exactly one result since we force the called regexp to behave
2184 // as non-global.
2187 __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
2189 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2190 // If it is not an exception, it can only be a retry. Handle that in the runtime system.
2192 // Result must now be exception. If there is no pending exception already, a
2193 // stack overflow (on the backtrack stack) was detected in RegExp code, but
2194 // the exception has not been created yet. Handle that in the runtime system.
2195 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2196 __ mov(r1, Operand(isolate()->factory()->the_hole_value()));
2197 __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2198 isolate())));
2199 __ ldr(r0, MemOperand(r2, 0));
2203 __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
2205 // Check if the exception is a termination. If so, throw as uncatchable.
2206 __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
2208 Label termination_exception;
2209 __ b(eq, &termination_exception);
2213 __ bind(&termination_exception);
2214 __ ThrowUncatchable(r0);
2217 // For failure and exception return null.
2218 __ mov(r0, Operand(isolate()->factory()->null_value()));
2219 __ add(sp, sp, Operand(4 * kPointerSize));
2222 // Process the result from the native regexp code.
2224 __ ldr(r1,
2225 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2226 // Calculate number of capture registers (number_of_captures + 1) * 2.
2227 // Multiplying by 2 comes for free since r1 is smi-tagged.
2228 STATIC_ASSERT(kSmiTag == 0);
2229 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2230 __ add(r1, r1, Operand(2)); // r1 was a smi.
2232 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2233 __ JumpIfSmi(r0, &runtime);
2234 __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
2236 // Check that the JSArray is in fast case.
2237 __ ldr(last_match_info_elements,
2238 FieldMemOperand(r0, JSArray::kElementsOffset));
2239 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2240 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
2242 // Check that the last match info has space for the capture registers and the
2243 // additional information.
2244 __ ldr(r0,
2245 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2246 __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
2247 __ cmp(r2, Operand::SmiUntag(r0));
2250 // r1: number of capture registers
2251 // r4: subject string
2252 // Store the capture count.
2254 __ str(r2, FieldMemOperand(last_match_info_elements,
2255 RegExpImpl::kLastCaptureCountOffset));
2256 // Store last subject and last input.
2257 __ str(subject,
2258 FieldMemOperand(last_match_info_elements,
2259 RegExpImpl::kLastSubjectOffset));
2260 __ mov(r2, subject);
2261 __ RecordWriteField(last_match_info_elements,
2262 RegExpImpl::kLastSubjectOffset,
2267 __ mov(subject, r2);
2268 __ str(subject,
2269 FieldMemOperand(last_match_info_elements,
2270 RegExpImpl::kLastInputOffset));
2271 __ RecordWriteField(last_match_info_elements,
2272 RegExpImpl::kLastInputOffset,
2278 // Get the static offsets vector filled by the native regexp code.
2279 ExternalReference address_of_static_offsets_vector =
2280 ExternalReference::address_of_static_offsets_vector(isolate());
2281 __ mov(r2, Operand(address_of_static_offsets_vector));
2283 // r1: number of capture registers
2284 // r2: offsets vector
2285 Label next_capture, done;
2286 // Capture register counter starts from number of capture registers and
2287 // counts down until wrapping after zero.
2288 __ add(r0,
2289 last_match_info_elements,
2290 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2291 __ bind(&next_capture);
2292 __ sub(r1, r1, Operand(1), SetCC);
2294 // Read the value from the static offsets vector buffer.
2295 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
2296 // Store the smi value in the last match info.
2298 __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
2299 __ jmp(&next_capture);
2302 // Return last match info.
2303 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2304 __ add(sp, sp, Operand(4 * kPointerSize));
2307 // Do the runtime call to execute the regexp.
2309 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2311 // Deferred code for string handling.
2312 // (6) Not a long external string? If yes, go to (8).
2313 __ bind(&not_seq_nor_cons);
2314 // Compare flags are still set.
2315 __ b(gt, &not_long_external); // Go to (8).
2317 // (7) External string. Make it, offset-wise, look like a sequential string.
2318 __ bind(&external_string);
2319 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2320 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2321 if (FLAG_debug_code) {
2322 // Assert that we do not have a cons or slice (indirect strings) here.
2323 // Sequential strings have already been ruled out.
2324 __ tst(r0, Operand(kIsIndirectStringMask));
2325 __ Assert(eq, kExternalStringExpectedButNotFound);
2328 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2329 // Move the pointer so that offset-wise, it looks like a sequential string.
2330 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2333 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
2334 __ jmp(&seq_string); // Go to (5).
2336 // (8) Short external string or not a string? If yes, bail out to runtime.
2337 __ bind(&not_long_external);
2338 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
2339 __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
2342 // (9) Sliced string. Replace subject with parent. Go to (4).
2343 // Load offset into r9 and replace subject string with parent.
2344 __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2346 __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2347 __ jmp(&check_underlying); // Go to (4).
2348 #endif // V8_INTERPRETED_REGEXP
2352 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2353 // Cache the called function in a feedback vector slot. Cache states
2354 // are uninitialized, monomorphic (indicated by a JSFunction), and
2355 // megamorphic.
2356 // r0 : number of arguments to the construct function
2357 // r1 : the function to call
2358 // r2 : Feedback vector
2359 // r3 : slot in feedback vector (Smi)
2360 Label initialize, done, miss, megamorphic, not_array_function;
2362 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2363 masm->isolate()->heap()->megamorphic_symbol());
2364 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2365 masm->isolate()->heap()->uninitialized_symbol());
2367 // Load the cache state into r4.
2368 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2369 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
2371 // A monomorphic cache hit or an already megamorphic state: invoke the
2372 // function without changing the state.
2376 if (!FLAG_pretenuring_call_new) {
2377 // If we came here, we need to see if we are the array function.
2378 // If we didn't have a matching function, and we didn't find the megamorphic
2379 // sentinel, then we have in the slot either some other function or an
2380 // AllocationSite. Do a map check on the object in r4.
2381 __ ldr(r5, FieldMemOperand(r4, 0));
2382 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2385 // Make sure the function is the Array() function
2386 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2387 __ cmp(r1, r4);
2388 __ b(ne, &megamorphic);
2394 // A monomorphic miss (i.e., here the cache is not uninitialized) goes
2395 // megamorphic.
2396 __ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
2397 __ b(eq, &initialize);
2398 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2399 // write-barrier is needed.
2400 __ bind(&megamorphic);
2401 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2402 __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
2403 __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
2406 // An uninitialized cache is patched with the function
2407 __ bind(&initialize);
2409 if (!FLAG_pretenuring_call_new) {
2410 // Make sure the function is the Array() function
2411 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2412 __ cmp(r1, r4);
2413 __ b(ne, &not_array_function);
2415 // The target function is the Array constructor.
2416 // Create an AllocationSite if we don't already have it, store it in the
2417 // slot.
2419 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2421 // Arguments register must be smi-tagged to call out.
2423 __ Push(r3, r2, r1, r0);
2425 CreateAllocationSiteStub create_stub(masm->isolate());
2426 __ CallStub(&create_stub);
2428 __ Pop(r3, r2, r1, r0);
2433 __ bind(&not_array_function);
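// Patch the feedback vector slot with the called function and update the
// write barrier.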
2436 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2437 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2438 __ str(r1, MemOperand(r4, 0));
2440 __ Push(r4, r2, r1);
2441 __ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
2442 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
2449 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2450 // Do not transform the receiver for strict mode functions.
2451 __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2452 __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset));
2453 __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
2457 // Do not transform the receiver for native (Compilerhints already in r3).
2458 __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
2463 static void EmitSlowCase(MacroAssembler* masm,
2465 Label* non_function) {
2466 // Check for function proxy.
2467 __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
2468 __ b(ne, non_function);
2469 __ push(r1); // put proxy as additional argument
2470 __ mov(r0, Operand(argc + 1, RelocInfo::NONE32));
2471 __ mov(r2, Operand::Zero());
2472 __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
2474 Handle<Code> adaptor =
2475 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2476 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2479 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2480 // of the original receiver from the call site).
2481 __ bind(non_function);
2482 __ str(r1, MemOperand(sp, argc * kPointerSize));
2483 __ mov(r0, Operand(argc)); // Set up the number of arguments.
2484 __ mov(r2, Operand::Zero());
2485 __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
2486 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2487 RelocInfo::CODE_TARGET);
2491 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2492 // Wrap the receiver and patch it back onto the stack.
2493 { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
2495 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2498 __ str(r0, MemOperand(sp, argc * kPointerSize));
2503 static void CallFunctionNoFeedback(MacroAssembler* masm,
2504 int argc, bool needs_checks,
2505 bool call_as_method) {
2506 // r1 : the function to call
2507 Label slow, non_function, wrap, cont;
2510 // Check that the function is really a JavaScript function.
2511 // r1: pushed function (to be verified)
2512 __ JumpIfSmi(r1, &non_function);
2514 // Goto slow case if we do not have a function.
2515 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2519 // Fast-case: Invoke the function now.
2520 // r1: pushed function
2521 ParameterCount actual(argc);
2523 if (call_as_method) {
2525 EmitContinueIfStrictOrNative(masm, &cont);
2528 // Compute the receiver in sloppy mode.
2529 __ ldr(r3, MemOperand(sp, argc * kPointerSize));
2532 __ JumpIfSmi(r3, &wrap);
2533 __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
2542 __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
2545 // Slow-case: Non-function called.
2547 EmitSlowCase(masm, argc, &non_function);
2550 if (call_as_method) {
2552 EmitWrapCase(masm, argc, &cont);
2557 void CallFunctionStub::Generate(MacroAssembler* masm) {
2558 CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2562 void CallConstructStub::Generate(MacroAssembler* masm) {
2563 // r0 : number of arguments
2564 // r1 : the function to call
2565 // r2 : feedback vector
2566 // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
2567 // vector (Smi)
2568 Label slow, non_function_call;
2570 // Check that the function is not a smi.
2571 __ JumpIfSmi(r1, &non_function_call);
2572 // Check that the function is a JSFunction.
2573 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2576 if (RecordCallTarget()) {
2577 GenerateRecordCallTarget(masm);
2579 __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
2580 if (FLAG_pretenuring_call_new) {
2581 // Put the AllocationSite from the feedback vector into r2.
2582 // By adding kPointerSize we encode that we know the AllocationSite
2583 // entry is at the feedback vector slot given by r3 + 1.
2584 __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize + kPointerSize));
2586 Label feedback_register_initialized;
2587 // Put the AllocationSite from the feedback vector into r2, or undefined.
2588 __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
2589 __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
2590 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2591 __ b(eq, &feedback_register_initialized);
2592 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
2593 __ bind(&feedback_register_initialized);
2596 __ AssertUndefinedOrAllocationSite(r2, r5);
2599 // Pass function as original constructor.
2600 if (IsSuperConstructorCall()) {
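// For a super call, the original constructor sits on the stack at
// sp[(argc + 1) * kPointerSize]; load it into r3.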
2601 __ mov(r4, Operand(1 * kPointerSize));
2602 __ add(r4, r4, Operand(r0, LSL, kPointerSizeLog2));
2603 __ ldr(r3, MemOperand(sp, r4));
2608 // Jump to the function-specific construct stub.
2609 Register jmp_reg = r4;
2610 __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2611 __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
2612 SharedFunctionInfo::kConstructStubOffset));
2613 __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
2615 // r0: number of arguments
2616 // r1: called object
2620 __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
2621 __ b(ne, &non_function_call);
2622 __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2625 __ bind(&non_function_call);
2626 __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2628 // Set expected number of arguments to zero (not changing r0).
2629 __ mov(r2, Operand::Zero());
2630 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2631 RelocInfo::CODE_TARGET);
2635 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2636 __ ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2637 __ ldr(vector, FieldMemOperand(vector,
2638 JSFunction::kSharedFunctionInfoOffset));
2639 __ ldr(vector, FieldMemOperand(vector,
2640 SharedFunctionInfo::kFeedbackVectorOffset));
2644 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2649 int argc = arg_count();
2650 ParameterCount actual(argc);
2652 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2656 __ mov(r0, Operand(arg_count()));
2657 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2658 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
2660 // Verify that r4 contains an AllocationSite
2661 __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
2662 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2667 ArrayConstructorStub stub(masm->isolate(), arg_count());
2668 __ TailCallStub(&stub);
2673 // The slow case, we need this no matter what to complete a call after a miss.
2674 CallFunctionNoFeedback(masm,
2680 __ stop("Unexpected code address");
2684 void CallICStub::Generate(MacroAssembler* masm) {
2686 // r3 - slot id (Smi)
2688 const int with_types_offset =
2689 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
2690 const int generic_offset =
2691 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
2692 Label extra_checks_or_miss, slow_start;
2693 Label slow, non_function, wrap, cont;
2694 Label have_js_function;
2695 int argc = arg_count();
2696 ParameterCount actual(argc);
2698 // The checks. First, does r1 match the recorded monomorphic target?
2699 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2700 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
2702 // We don't know that we have a weak cell. We might have a private symbol
2703 // or an AllocationSite, but the memory is safe to examine.
2704 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
2706 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
2707 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
2708 // computed, meaning that it can't appear to be a pointer. If the low bit is
2709 // 0, then hash is computed, but the 0 bit prevents the field from appearing
2710 // to be a pointer.
2711 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
2712 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
2713 WeakCell::kValueOffset &&
2714 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
2716 __ ldr(r5, FieldMemOperand(r4, WeakCell::kValueOffset));
2717 __ cmp(r1, r5);
2718 __ b(ne, &extra_checks_or_miss);
2720 // The compare above could have been a SMI/SMI comparison. Guard against this
2721 // convincing us that we have a monomorphic JSFunction.
2722 __ JumpIfSmi(r1, &extra_checks_or_miss);
2724 __ bind(&have_js_function);
2725 if (CallAsMethod()) {
2726 EmitContinueIfStrictOrNative(masm, &cont);
2727 // Compute the receiver in sloppy mode.
2728 __ ldr(r3, MemOperand(sp, argc * kPointerSize));
2730 __ JumpIfSmi(r3, &wrap);
2731 __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
2737 __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
2740 EmitSlowCase(masm, argc, &non_function);
2742 if (CallAsMethod()) {
2744 EmitWrapCase(masm, argc, &cont);
2747 __ bind(&extra_checks_or_miss);
2748 Label uninitialized, miss;
2750 __ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
2751 __ b(eq, &slow_start);
2753 // The following cases attempt to handle MISS cases without going to the
2754 // runtime.
2755 if (FLAG_trace_ic) {
2759 __ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
2760 __ b(eq, &uninitialized);
2762 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2763 // to handle it here. More complex cases are dealt with in the runtime.
2764 __ AssertNotSmi(r4);
2765 __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
2767 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2768 __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
2769 __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
2770 // We have to update statistics for runtime profiling.
2771 __ ldr(r4, FieldMemOperand(r2, with_types_offset));
2772 __ sub(r4, r4, Operand(Smi::FromInt(1)));
2773 __ str(r4, FieldMemOperand(r2, with_types_offset));
2774 __ ldr(r4, FieldMemOperand(r2, generic_offset));
2775 __ add(r4, r4, Operand(Smi::FromInt(1)));
2776 __ str(r4, FieldMemOperand(r2, generic_offset));
2777 __ jmp(&slow_start);
2779 __ bind(&uninitialized);
2781 // We are going monomorphic, provided we actually have a JSFunction.
2782 __ JumpIfSmi(r1, &miss);
2784 // Goto miss case if we do not have a function.
2785 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2788 // Make sure the function is not the Array() function, which requires special
2789 // behavior on MISS.
2790 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2795 __ ldr(r4, FieldMemOperand(r2, with_types_offset));
2796 __ add(r4, r4, Operand(Smi::FromInt(1)));
2797 __ str(r4, FieldMemOperand(r2, with_types_offset));
2799 // Store the function. Use a stub since we need a frame for allocation.
2804 FrameScope scope(masm, StackFrame::INTERNAL);
2805 CreateWeakCellStub create_stub(masm->isolate());
2807 __ CallStub(&create_stub);
2811 __ jmp(&have_js_function);
2813 // We are here because tracing is on or we encountered a MISS case we can't
2814 // handle here.
2819 __ bind(&slow_start);
2820 // Check that the function is really a JavaScript function.
2821 // r1: pushed function (to be verified)
2822 __ JumpIfSmi(r1, &non_function);
2824 // Goto slow case if we do not have a function.
2825 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2827 __ jmp(&have_js_function);
2831 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2832 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2834 // Push the receiver and the function and feedback info.
2835 __ Push(r1, r2, r3);
2838 IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
2839 : IC::kCallIC_Customization_Miss;
2841 ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
2842 __ CallExternalReference(miss, 3);
2844 // Move result to r1 and exit the internal frame.
2849 // StringCharCodeAtGenerator
2850 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2851 // If the receiver is a smi trigger the non-string case.
2852 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2853 __ JumpIfSmi(object_, receiver_not_string_);
2855 // Fetch the instance type of the receiver into result register.
2856 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2857 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2858 // If the receiver is not a string trigger the non-string case.
2859 __ tst(result_, Operand(kIsNotStringMask));
2860 __ b(ne, receiver_not_string_);
2863 // If the index is non-smi trigger the non-smi case.
2864 __ JumpIfNotSmi(index_, &index_not_smi_);
2865 __ bind(&got_smi_index_);
2867 // Check for index out of range.
2868 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
2869 __ cmp(ip, Operand(index_));
2870 __ b(ls, index_out_of_range_);
2872 __ SmiUntag(index_);
2874 StringCharLoadGenerator::Generate(masm,
2885 void StringCharCodeAtGenerator::GenerateSlow(
2886 MacroAssembler* masm,
2887 const RuntimeCallHelper& call_helper) {
2888 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2890 // Index is not a smi.
2891 __ bind(&index_not_smi_);
2892 // If index is a heap number, try converting it to an integer.
2895 Heap::kHeapNumberMapRootIndex,
2898 call_helper.BeforeCall(masm);
2900 __ push(index_); // Consumed by runtime conversion function.
2901 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
2902 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
2904 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
2905 // NumberToSmi discards numbers that are not exact integers.
2906 __ CallRuntime(Runtime::kNumberToSmi, 1);
2908 // Save the conversion result before the pop instructions below
2909 // have a chance to overwrite it.
2910 __ Move(index_, r0);
2912 // Reload the instance type.
2913 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2914 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2915 call_helper.AfterCall(masm);
2916 // If index is still not a smi, it must be out of range.
2917 __ JumpIfNotSmi(index_, index_out_of_range_);
2918 // Otherwise, return to the fast path.
2919 __ jmp(&got_smi_index_);
2921 // Call runtime. We get here when the receiver is a string and the
2922 // index is a number, but the code for getting the actual character
2923 // is too complex (e.g., when the string needs to be flattened).
2924 __ bind(&call_runtime_);
2925 call_helper.BeforeCall(masm);
2927 __ Push(object_, index_);
2928 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
2929 __ Move(result_, r0);
2930 call_helper.AfterCall(masm);
2933 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2937 // -------------------------------------------------------------------------
2938 // StringCharFromCodeGenerator
2940 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2941 // Fast case of Heap::LookupSingleCharacterStringFromCode.
2942 STATIC_ASSERT(kSmiTag == 0);
2943 STATIC_ASSERT(kSmiShiftSize == 0);
2944 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
2945 __ tst(code_,
2946 Operand(kSmiTagMask |
2947 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
2948 __ b(ne, &slow_case_);
2950 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2951 // At this point code register contains smi tagged one-byte char code.
2952 __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
2953 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
2954 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
2955 __ b(eq, &slow_case_);
2960 void StringCharFromCodeGenerator::GenerateSlow(
2961 MacroAssembler* masm,
2962 const RuntimeCallHelper& call_helper) {
2963 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2965 __ bind(&slow_case_);
2966 call_helper.BeforeCall(masm);
2968 __ CallRuntime(Runtime::kCharFromCode, 1);
2969 __ Move(result_, r0);
2970 call_helper.AfterCall(masm);
2973 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2977 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
2980 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
2985 String::Encoding encoding) {
2986 if (FLAG_debug_code) {
2987 // Check that destination is word aligned.
2988 __ tst(dest, Operand(kPointerAlignmentMask));
2989 __ Check(eq, kDestinationOfCopyNotAligned);
2992 // Assumes word reads and writes are little endian.
2993 // Nothing to do for zero characters.
2995 if (encoding == String::TWO_BYTE_ENCODING) {
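// Double the character count to get the number of bytes to copy.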
2996 __ add(count, count, Operand(count), SetCC);
2999 Register limit = count; // Read until dest equals this.
3000 __ add(limit, dest, Operand(count));
3002 Label loop_entry, loop;
3003 // Copy bytes from src to dest until dest hits limit.
3006 __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
3007 __ strb(scratch, MemOperand(dest, 1, PostIndex));
3008 __ bind(&loop_entry);
3009 __ cmp(dest, Operand(limit));
3016 void SubStringStub::Generate(MacroAssembler* masm) {
3019 // Stack frame on entry.
3020 // lr: return address
3025 // This stub is called from the native-call %_SubString(...), so
3026 // nothing can be assumed about the arguments. It is tested that:
3027 // "string" is a sequential string,
3028 // both "from" and "to" are smis, and
3029 // 0 <= from <= to <= string.length.
3030 // If any of these assumptions fail, we call the runtime system.
3032 const int kToOffset = 0 * kPointerSize;
3033 const int kFromOffset = 1 * kPointerSize;
3034 const int kStringOffset = 2 * kPointerSize;
3036 __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
3037 STATIC_ASSERT(kFromOffset == kToOffset + 4);
3038 STATIC_ASSERT(kSmiTag == 0);
3039 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3041 // Arithmetic shift right by one un-smi-tags. In this case we rotate right
3042 // instead because we bail out on non-smi values: ROR and ASR are equivalent
3043 // for smis but they set the flags in a way that's easier to optimize.
3044 __ mov(r2, Operand(r2, ROR, 1), SetCC);
3045 __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
3046 // If either to or from had the smi tag bit set, then C is set now, and N
3047 // has the same value: we rotated by 1, so the bottom bit is now the top bit.
3048 // We want to bail out to runtime here if "from" is negative. In that case,
3049 // the next instruction is not executed and we fall through to bailing out
3050 // to runtime.
3051 // Executed if both r2 and r3 are untagged integers.
3052 __ sub(r2, r2, Operand(r3), SetCC, cc);
3053 // One of the above un-smis or the above SUB could have set N==1.
3054 __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to.
3056 // Make sure first argument is a string.
3057 __ ldr(r0, MemOperand(sp, kStringOffset));
3058 __ JumpIfSmi(r0, &runtime);
3059 Condition is_string = masm->IsObjectStringType(r0, r1);
3060 __ b(NegateCondition(is_string), &runtime);
3063 __ cmp(r2, Operand(1));
3064 __ b(eq, &single_char);
3066 // Short-cut for the case of trivial substring.
3068 // r0: original string
3069 // r2: result string length
3070 __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
3071 __ cmp(r2, Operand(r4, ASR, 1));
3072 // Return original string.
3073 __ b(eq, &return_r0);
3074 // Longer than original string's length or negative: unsafe arguments.
3076 // Shorter than original string's length: an actual substring.
3078 // Deal with different string types: update the index if necessary
3079 // and put the underlying string into r5.
3080 // r0: original string
3081 // r1: instance type
3083 // r3: from index (untagged)
3084 Label underlying_unpacked, sliced_string, seq_or_external_string;
3085 // If the string is not indirect, it can only be sequential or external.
3086 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3087 STATIC_ASSERT(kIsIndirectStringMask != 0);
3088 __ tst(r1, Operand(kIsIndirectStringMask));
3089 __ b(eq, &seq_or_external_string);
3091 __ tst(r1, Operand(kSlicedNotConsMask));
3092 __ b(ne, &sliced_string);
3093 // Cons string. Check whether it is flat, then fetch first part.
3094 __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
3095 __ CompareRoot(r5, Heap::kempty_stringRootIndex);
3097 __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
3098 // Update instance type.
3099 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
3100 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
3101 __ jmp(&underlying_unpacked);
3103 __ bind(&sliced_string);
3104 // Sliced string. Fetch parent and correct start index by offset.
3105 __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
3106 __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
3107 __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
3108 // Update instance type.
3109 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
3110 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
3111 __ jmp(&underlying_unpacked);
3113 __ bind(&seq_or_external_string);
3114 // Sequential or external string. Just move string to the expected register.
3117 __ bind(&underlying_unpacked);
3119 if (FLAG_string_slices) {
3121 // r5: underlying subject string
3122 // r1: instance type of underlying subject string
3124 // r3: adjusted start index (untagged)
3125 __ cmp(r2, Operand(SlicedString::kMinLength));
3126 // Short slice. Copy instead of slicing.
3127 __ b(lt, &copy_routine);
3128 // Allocate new sliced string. At this point we do not reload the instance
3129 // type including the string encoding because we simply rely on the info
3130 // provided by the original string. It does not matter if the original
3131 // string's encoding is wrong because we always have to recheck encoding of
3132 // the newly created string's parent anyway due to externalized strings.
3133 Label two_byte_slice, set_slice_header;
3134 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3135 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3136 __ tst(r1, Operand(kStringEncodingMask));
3137 __ b(eq, &two_byte_slice);
3138 __ AllocateOneByteSlicedString(r0, r2, r6, r4, &runtime);
3139 __ jmp(&set_slice_header);
3140 __ bind(&two_byte_slice);
3141 __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
3142 __ bind(&set_slice_header);
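// Re-tag the start index as a smi before storing it in the offset field.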
3143 __ mov(r3, Operand(r3, LSL, 1));
3144 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
3145 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
3148 __ bind(&copy_routine);
3151 // r5: underlying subject string
3152 // r1: instance type of underlying subject string
3154 // r3: adjusted start index (untagged)
3155 Label two_byte_sequential, sequential_string, allocate_result;
3156 STATIC_ASSERT(kExternalStringTag != 0);
3157 STATIC_ASSERT(kSeqStringTag == 0);
3158 __ tst(r1, Operand(kExternalStringTag));
3159 __ b(eq, &sequential_string);
3161 // Handle external string.
3162 // Rule out short external strings.
3163 STATIC_ASSERT(kShortExternalStringTag != 0);
3164 __ tst(r1, Operand(kShortExternalStringTag));
3166 __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
3167 // r5 already points to the first character of underlying string.
3168 __ jmp(&allocate_result);
3170 __ bind(&sequential_string);
3171 // Locate first character of underlying subject string.
3172 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3173 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3175 __ bind(&allocate_result);
3176 // Sequential string. Allocate the result.
3177 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3178 __ tst(r1, Operand(kStringEncodingMask));
3179 __ b(eq, &two_byte_sequential);
3181 // Allocate and copy the resulting one-byte string.
3182 __ AllocateOneByteString(r0, r2, r4, r6, r1, &runtime);
3184 // Locate first character of substring to copy.
3186 // Locate first character of result.
3187 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3189 // r0: result string
3190 // r1: first character of result string
3191 // r2: result string length
3192 // r5: first character of substring to copy
3193 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3194 StringHelper::GenerateCopyCharacters(
3195 masm, r1, r5, r2, r3, String::ONE_BYTE_ENCODING);
3198 // Allocate and copy the resulting two-byte string.
3199 __ bind(&two_byte_sequential);
3200 __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
3202 // Locate first character of substring to copy.
3203 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3204 __ add(r5, r5, Operand(r3, LSL, 1));
3205 // Locate first character of result.
3206 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3208 // r0: result string.
3209 // r1: first character of result.
3210 // r2: result length.
3211 // r5: first character of substring to copy.
3212 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3213 StringHelper::GenerateCopyCharacters(
3214 masm, r1, r5, r2, r3, String::TWO_BYTE_ENCODING);
3216 __ bind(&return_r0);
3217 Counters* counters = isolate()->counters();
3218 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
3222 // Just jump to runtime to create the substring.
3224 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3226 __ bind(&single_char);
3227 // r0: original string
3228 // r1: instance type
3230 // r3: from index (untagged)
3232 StringCharAtGenerator generator(r0, r3, r2, r0, &runtime, &runtime, &runtime,
3233 STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
3234 generator.GenerateFast(masm);
3237 generator.SkipSlow(masm, &runtime);
3241 void ToNumberStub::Generate(MacroAssembler* masm) {
3242 // The ToNumber stub takes one argument in r0.
3244 __ JumpIfNotSmi(r0, &not_smi);
3248 Label not_heap_number;
3249 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
3250 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
3252 // r1: instance type.
3253 __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
3254 __ b(ne, &not_heap_number);
3255 __ Ret();
3256 __ bind(&not_heap_number);
3258 Label not_string, slow_string;
3259 __ cmp(r1, Operand(FIRST_NONSTRING_TYPE));
3260 __ b(hs, &not_string);
3261 // Check if string has a cached array index.
3262 __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
3263 __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
3264 __ b(ne, &slow_string);
3265 __ IndexFromHash(r2, r0);
3267 __ bind(&slow_string);
3268 __ push(r0); // Push argument.
3269 __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
3270 __ bind(¬_string);
3273 __ cmp(r1, Operand(ODDBALL_TYPE));
3274 __ b(ne, &not_oddball);
3275 __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
3276 __ Ret();
3277 __ bind(&not_oddball);
3279 __ push(r0); // Push argument.
3280 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
3284 void StringHelper::GenerateFlatOneByteStringEquals(
3285 MacroAssembler* masm, Register left, Register right, Register scratch1,
3286 Register scratch2, Register scratch3) {
3287 Register length = scratch1;
3290 Label strings_not_equal, check_zero_length;
3291 __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
3292 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3293 __ cmp(length, scratch2);
3294 __ b(eq, &check_zero_length);
3295 __ bind(&strings_not_equal);
3296 __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
3299 // Check if the length is zero.
3300 Label compare_chars;
3301 __ bind(&check_zero_length);
3302 STATIC_ASSERT(kSmiTag == 0);
3303 __ cmp(length, Operand::Zero());
3304 __ b(ne, &compare_chars);
3305 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3308 // Compare characters.
3309 __ bind(&compare_chars);
3310 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
3311 &strings_not_equal);
3313 // Characters are equal.
3314 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3319 void StringHelper::GenerateCompareFlatOneByteStrings(
3320 MacroAssembler* masm, Register left, Register right, Register scratch1,
3321 Register scratch2, Register scratch3, Register scratch4) {
3322 Label result_not_equal, compare_lengths;
3323 // Find minimum length and length difference.
3324 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
3325 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3326 __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
3327 Register length_delta = scratch3;
3328 __ mov(scratch1, scratch2, LeaveCC, gt);
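// min_length = min(left length, right length); the gt condition uses the
// flags set by the subtraction above.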
3329 Register min_length = scratch1;
3330 STATIC_ASSERT(kSmiTag == 0);
3331 __ cmp(min_length, Operand::Zero());
3332 __ b(eq, &compare_lengths);
3335 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3336 scratch4, &result_not_equal);
3338 // Compare lengths - strings up to min-length are equal.
3339 __ bind(&compare_lengths);
3340 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3341 // Use length_delta as result if it's zero.
3342 __ mov(r0, Operand(length_delta), SetCC);
3343 __ bind(&result_not_equal);
3344 // Conditionally update the result based either on length_delta or
3345 // the last comparison performed in the loop above.
3346 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
3347 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
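// Illustrative sketch only: the code above implements the usual flat-string
// comparison, roughly
//
//   min_length = min(left->length, right->length);
//   for (i = 0; i < min_length; i++) {
//     if (left[i] != right[i]) return left[i] < right[i] ? LESS : GREATER;
//   }
//   // All compared characters match; the shorter string sorts first.
//   delta = left->length - right->length;
//   return delta == 0 ? EQUAL : (delta < 0 ? LESS : GREATER);
//
// The final selection is folded into the conditional moves above, which read
// the flags left either by the length subtraction or by the last character
// compare in the loop.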
3352 void StringHelper::GenerateOneByteCharsCompareLoop(
3353 MacroAssembler* masm, Register left, Register right, Register length,
3354 Register scratch1, Register scratch2, Label* chars_not_equal) {
3355 // Change index to run from -length to -1 by adding length to string
3356 // start. This means that the loop ends when index reaches zero, which
3357 // doesn't need an additional compare.
3358 __ SmiUntag(length);
3359 __ add(scratch1, length,
3360 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3361 __ add(left, left, Operand(scratch1));
3362 __ add(right, right, Operand(scratch1));
3363 __ rsb(length, length, Operand::Zero());
3364 Register index = length; // index = -length;
3369 __ ldrb(scratch1, MemOperand(left, index));
3370 __ ldrb(scratch2, MemOperand(right, index));
3371 __ cmp(scratch1, scratch2);
3372 __ b(ne, chars_not_equal);
3373 __ add(index, index, Operand(1), SetCC);
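// Illustrative sketch only: with both string pointers advanced past their
// last character, the loop above is equivalent to
//
//   const uint8_t* l = left_chars + length;   // one past the last character
//   const uint8_t* r = right_chars + length;
//   for (int index = -length; index != 0; index++) {
//     if (l[index] != r[index]) goto chars_not_equal;
//   }
//
// (left_chars/right_chars stand for the start of each string's character
// data.) Incrementing the negative index with SetCC makes the termination
// test free: the loop's back-branch can use the flags of the add directly.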
3378 void StringCompareStub::Generate(MacroAssembler* masm) {
3381 Counters* counters = isolate()->counters();
3383 // Stack frame on entry.
3384 // sp[0]: right string
3385 // sp[4]: left string
3386 __ Ldrd(r0, r1, MemOperand(sp)); // Load right in r0, left in r1.
3390 __ b(ne, ¬_same);
3391 STATIC_ASSERT(EQUAL == 0);
3392 STATIC_ASSERT(kSmiTag == 0);
3393 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3394 __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
3395 __ add(sp, sp, Operand(2 * kPointerSize));
3400 // Check that both objects are sequential one-byte strings.
3401 __ JumpIfNotBothSequentialOneByteStrings(r1, r0, r2, r3, &runtime);
3403 // Compare flat one-byte strings natively. Remove arguments from stack first.
3404 __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
3405 __ add(sp, sp, Operand(2 * kPointerSize));
3406 StringHelper::GenerateCompareFlatOneByteStrings(masm, r1, r0, r2, r3, r4, r5);
3408 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
3409 // tagged as a small integer.
3411 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3415 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3416 // ----------- S t a t e -------------
3419 // -- lr : return address
3420 // -----------------------------------
3422 // Load r2 with the allocation site. We stick an undefined dummy value here
3423 // and replace it with the real allocation site later when we instantiate this
3424 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3425 __ Move(r2, handle(isolate()->heap()->undefined_value()));
3427 // Make sure that we actually patched the allocation site.
3428 if (FLAG_debug_code) {
3429 __ tst(r2, Operand(kSmiTagMask));
3430 __ Assert(ne, kExpectedAllocationSite);
3432 __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
3433 __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
3436 __ Assert(eq, kExpectedAllocationSite);
3439 // Tail call into the stub that handles binary operations with allocation
3441 BinaryOpWithAllocationSiteStub stub(isolate(), state());
3442 __ TailCallStub(&stub);
3446 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3447 DCHECK(state() == CompareICState::SMI);
3450 __ JumpIfNotSmi(r2, &miss);
3452 if (GetCondition() == eq) {
3453 // For equality we do not care about the sign of the result.
3454 __ sub(r0, r0, r1, SetCC);
3456 // Untag before subtracting to avoid handling overflow.
3458 __ sub(r0, r1, Operand::SmiUntag(r0));
3467 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3468 DCHECK(state() == CompareICState::NUMBER);
3471 Label unordered, maybe_undefined1, maybe_undefined2;
3474 if (left() == CompareICState::SMI) {
3475 __ JumpIfNotSmi(r1, &miss);
3477 if (right() == CompareICState::SMI) {
3478 __ JumpIfNotSmi(r0, &miss);
3481 // Inlining the double comparison and falling back to the general compare
3482 // stub if NaN is involved.
3483 // Load left and right operand.
3484 Label done, left, left_smi, right_smi;
3485 __ JumpIfSmi(r0, &right_smi);
3486 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3488 __ sub(r2, r0, Operand(kHeapObjectTag));
3489 __ vldr(d1, r2, HeapNumber::kValueOffset);
3491 __ bind(&right_smi);
3492 __ SmiToDouble(d1, r0);
3495 __ JumpIfSmi(r1, &left_smi);
3496 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3498 __ sub(r2, r1, Operand(kHeapObjectTag));
3499 __ vldr(d0, r2, HeapNumber::kValueOffset);
3502 __ SmiToDouble(d0, r1);
3505 // Compare operands.
3506 __ VFPCompareAndSetFlags(d0, d1);
3508 // Don't base result on status bits when a NaN is involved.
3509 __ b(vs, &unordered);
3511 // Return a result of -1, 0, or 1, based on status bits.
3512 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
3513 __ mov(r0, Operand(LESS), LeaveCC, lt);
3514 __ mov(r0, Operand(GREATER), LeaveCC, gt);
3517 __ bind(&unordered);
3518 __ bind(&generic_stub);
3519 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3520 CompareICState::GENERIC, CompareICState::GENERIC);
3521 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3523 __ bind(&maybe_undefined1);
3524 if (Token::IsOrderedRelationalCompareOp(op())) {
3525 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
3527 __ JumpIfSmi(r1, &unordered);
3528 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
3529 __ b(ne, &maybe_undefined2);
3533 __ bind(&maybe_undefined2);
3534 if (Token::IsOrderedRelationalCompareOp(op())) {
3535 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
3536 __ b(eq, &unordered);
3544 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3545 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3548 // Registers containing left and right operands respectively.
3550 Register right = r0;
3554 // Check that both operands are heap objects.
3555 __ JumpIfEitherSmi(left, right, &miss);
3557 // Check that both operands are internalized strings.
3558 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3559 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3560 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3561 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3562 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3563 __ orr(tmp1, tmp1, Operand(tmp2));
3564 __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3567 // Internalized strings are compared by identity.
3568 __ cmp(left, right);
3569 // Make sure r0 is non-zero. At this point input operands are
3570 // guaranteed to be non-zero.
3571 DCHECK(right.is(r0));
3572 STATIC_ASSERT(EQUAL == 0);
3573 STATIC_ASSERT(kSmiTag == 0);
3574 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3582 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3583 DCHECK(state() == CompareICState::UNIQUE_NAME);
3584 DCHECK(GetCondition() == eq);
3587 // Registers containing left and right operands respectively.
3589 Register right = r0;
3593 // Check that both operands are heap objects.
3594 __ JumpIfEitherSmi(left, right, &miss);
3596 // Check that both operands are unique names. This leaves the instance
3597 // types loaded in tmp1 and tmp2.
3598 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3599 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3600 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3601 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3603 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
3604 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
3606 // Unique names are compared by identity.
3607 __ cmp(left, right);
3608 // Make sure r0 is non-zero. At this point input operands are
3609 // guaranteed to be non-zero.
3610 DCHECK(right.is(r0));
3611 STATIC_ASSERT(EQUAL == 0);
3612 STATIC_ASSERT(kSmiTag == 0);
3613 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3621 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3622 DCHECK(state() == CompareICState::STRING);
3625 bool equality = Token::IsEqualityOp(op());
3627 // Registers containing left and right operands respectively.
3629 Register right = r0;
3635 // Check that both operands are heap objects.
3636 __ JumpIfEitherSmi(left, right, &miss);
3638 // Check that both operands are strings. This leaves the instance
3639 // types loaded in tmp1 and tmp2.
3640 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3641 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3642 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3643 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3644 STATIC_ASSERT(kNotStringTag != 0);
3645 __ orr(tmp3, tmp1, tmp2);
3646 __ tst(tmp3, Operand(kIsNotStringMask));
3649 // Fast check for identical strings.
3650 __ cmp(left, right);
3651 STATIC_ASSERT(EQUAL == 0);
3652 STATIC_ASSERT(kSmiTag == 0);
3653 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3656 // Handle not identical strings.
3658 // Check that both strings are internalized strings. If they are, we're done
3659 // because we already know they are not identical. We know they are both strings.
3662 DCHECK(GetCondition() == eq);
3663 STATIC_ASSERT(kInternalizedTag == 0);
3664 __ orr(tmp3, tmp1, Operand(tmp2));
3665 __ tst(tmp3, Operand(kIsNotInternalizedMask));
3666 // Make sure r0 is non-zero. At this point input operands are
3667 // guaranteed to be non-zero.
3668 DCHECK(right.is(r0));
3672 // Check that both strings are sequential one-byte.
3674 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
3677 // Compare flat one-byte strings. Returns when done.
3679 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
3682 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
3686 // Handle more complex cases in runtime.
3688 __ Push(left, right);
3690 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3692 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3700 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3701 DCHECK(state() == CompareICState::OBJECT);
3703 __ and_(r2, r1, Operand(r0));
3704 __ JumpIfSmi(r2, &miss);
3706 __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
3708 __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
3711 DCHECK(GetCondition() == eq);
3712 __ sub(r0, r0, Operand(r1));
3720 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3722 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
3723 __ and_(r2, r1, Operand(r0));
3724 __ JumpIfSmi(r2, &miss);
3725 __ GetWeakValue(r4, cell);
3726 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
3727 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
3733 __ sub(r0, r0, Operand(r1));
3741 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3743 // Call the runtime system in a fresh internal frame.
3744 ExternalReference miss =
3745 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3747 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
3749 __ Push(lr, r1, r0);
3750 __ mov(ip, Operand(Smi::FromInt(op())));
3752 __ CallExternalReference(miss, 3);
3753 // Compute the entry point of the rewritten stub.
3754 __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
3755 // Restore registers.
3764 void DirectCEntryStub::Generate(MacroAssembler* masm) {
3765 // Place the return address on the stack, making the call
3766 // GC safe. The RegExp backend also relies on this.
3767 __ str(lr, MemOperand(sp, 0));
3768 __ blx(ip); // Call the C++ function.
3769 __ VFPEnsureFPSCRState(r2);
3770 __ ldr(pc, MemOperand(sp, 0));
3774 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
3777 reinterpret_cast<intptr_t>(GetCode().location());
3778 __ Move(ip, target);
3779 __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
3780 __ blx(lr); // Call the stub.
3784 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3788 Register properties,
3790 Register scratch0) {
3791 DCHECK(name->IsUniqueName());
3792 // If the names of the slots in the range from 1 to kProbes - 1 for the hash
3793 // value are not equal to the name, and the kProbes-th slot is not used (its
3794 // name is the undefined value), then the hash table is guaranteed not to
3795 // contain the property. This holds even if some slots hold deleted
3796 // properties (their names are the hole value).
3797 for (int i = 0; i < kInlinedProbes; i++) {
3798 // scratch0 points to properties hash.
3799 // Compute the masked index: (hash + i + i * i) & mask.
3800 Register index = scratch0;
3801 // Capacity is smi 2^n.
3802 __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
3803 __ sub(index, index, Operand(1));
3804 __ and_(index, index, Operand(
3805 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
3807 // Scale the index by multiplying by the entry size.
3808 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3809 __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
3811 Register entity_name = scratch0;
3812 // Having undefined at this place means the name is not contained.
3813 DCHECK_EQ(kSmiTagSize, 1);
3814 Register tmp = properties;
3815 __ add(tmp, properties, Operand(index, LSL, 1));
3816 __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
3818 DCHECK(!tmp.is(entity_name));
3819 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
3820 __ cmp(entity_name, tmp);
3823 // Load the hole ready for use below:
3824 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
3826 // Stop if the property is found.
3827 __ cmp(entity_name, Operand(Handle<Name>(name)));
3831 __ cmp(entity_name, tmp);
3834 // Check if the entry name is not a unique name.
3835 __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
3836 __ ldrb(entity_name,
3837 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
3838 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
3841 // Restore the properties.
3843 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3846 const int spill_mask =
3847 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
3848 r2.bit() | r1.bit() | r0.bit());
3850 __ stm(db_w, sp, spill_mask);
3851 __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3852 __ mov(r1, Operand(Handle<Name>(name)));
3853 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
3855 __ cmp(r0, Operand::Zero());
3856 __ ldm(ia_w, sp, spill_mask);
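// Illustrative sketch only: the inlined probes above perform quadratic
// probing over a power-of-two capacity, roughly
//
//   for (int i = 0; i < kInlinedProbes; i++) {
//     int index = (hash + i + i * i) & (capacity - 1);
//     Object* entry = KeyAt(properties, index);  // entries are 3 words wide
//     if (entry == undefined) return;  // free slot: the name cannot be present
//     if (entry == name) goto miss;    // the property does exist
//     if (entry != the_hole && !IsUniqueName(entry)) goto miss;
//   }
//   // Any remaining probes are handled by the NEGATIVE_LOOKUP stub call above.
//
// KeyAt and IsUniqueName are descriptive placeholders for the loads and the
// instance-type check emitted above.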
3863 // Probe the name dictionary in the |elements| register. Jump to the
3864 // |done| label if a property with the given name is found. Jump to
3865 // the |miss| label otherwise.
3866 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
3867 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
3873 Register scratch2) {
3874 DCHECK(!elements.is(scratch1));
3875 DCHECK(!elements.is(scratch2));
3876 DCHECK(!name.is(scratch1));
3877 DCHECK(!name.is(scratch2));
3879 __ AssertName(name);
3881 // Compute the capacity mask.
3882 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
3883 __ SmiUntag(scratch1);
3884 __ sub(scratch1, scratch1, Operand(1));
3886 // Generate an unrolled loop that performs a few probes before
3887 // giving up. Measurements done on Gmail indicate that 2 probes
3888 // cover ~93% of loads from dictionaries.
3889 for (int i = 0; i < kInlinedProbes; i++) {
3890 // Compute the masked index: (hash + i + i * i) & mask.
3891 __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
3893 // Add the probe offset (i + i * i) left shifted to avoid right shifting
3894 // the hash in a separate instruction. The value hash + i + i * i is right
3895 // shifted in the and_ instruction that follows.
3896 DCHECK(NameDictionary::GetProbeOffset(i) <
3897 1 << (32 - Name::kHashFieldOffset));
3898 __ add(scratch2, scratch2, Operand(
3899 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3901 __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
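// Illustrative sketch only: the add/and pair above computes
//
//   index = ((hash_field + (probe_offset << Name::kHashShift))
//            >> Name::kHashShift) & mask;
//
// i.e. the probe offset is pre-shifted up into hash-field position so that a
// single shifted 'and_' both extracts the hash and applies the mask, rather
// than shifting the hash down first and adding in a separate step.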
3903 // Scale the index by multiplying by the element size.
3904 DCHECK(NameDictionary::kEntrySize == 3);
3905 // scratch2 = scratch2 * 3.
3906 __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
3908 // Check if the key is identical to the name.
3909 __ add(scratch2, elements, Operand(scratch2, LSL, 2));
3910 __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
3911 __ cmp(name, Operand(ip));
3915 const int spill_mask =
3916 (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
3917 r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
3918 ~(scratch1.bit() | scratch2.bit());
3920 __ stm(db_w, sp, spill_mask);
3922 DCHECK(!elements.is(r1));
3924 __ Move(r0, elements);
3926 __ Move(r0, elements);
3929 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
3931 __ cmp(r0, Operand::Zero());
3932 __ mov(scratch2, Operand(r2));
3933 __ ldm(ia_w, sp, spill_mask);
3940 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
3941 // This stub overrides SometimesSetsUpAFrame() to return false. That means
3942 // we cannot call anything that could cause a GC from this stub.
3944 // result: NameDictionary to probe
3946 // dictionary: NameDictionary to probe.
3947 // index: will hold an index of entry if lookup is successful.
3948 // might alias with result_.
3950 // result_ is zero if lookup failed, non-zero otherwise.
3952 Register result = r0;
3953 Register dictionary = r0;
3955 Register index = r2;
3958 Register undefined = r5;
3959 Register entry_key = r6;
3961 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
3963 __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
3965 __ sub(mask, mask, Operand(1));
3967 __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
3969 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
3971 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
3972 // Compute the masked index: (hash + i + i * i) & mask.
3973 // Capacity is smi 2^n.
3975 // Add the probe offset (i + i * i) left shifted to avoid right shifting
3976 // the hash in a separate instruction. The value hash + i + i * i is right
3977 // shifted in the and_ instruction that follows.
3978 DCHECK(NameDictionary::GetProbeOffset(i) <
3979 1 << (32 - Name::kHashFieldOffset));
3980 __ add(index, hash, Operand(
3981 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3983 __ mov(index, Operand(hash));
3985 __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
3987 // Scale the index by multiplying by the entry size.
3988 DCHECK(NameDictionary::kEntrySize == 3);
3989 __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
3991 DCHECK_EQ(kSmiTagSize, 1);
3992 __ add(index, dictionary, Operand(index, LSL, 2));
3993 __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
3995 // Having undefined at this place means the name is not contained.
3996 __ cmp(entry_key, Operand(undefined));
3997 __ b(eq, &not_in_dictionary);
3999 // Stop if the property is found.
4000 __ cmp(entry_key, Operand(key));
4001 __ b(eq, &in_dictionary);
4003 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
4004 // Check if the entry name is not a unique name.
4005 __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4007 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4008 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
4012 __ bind(&maybe_in_dictionary);
4013 // If we are doing negative lookup then probing failure should be
4014 // treated as a lookup success. For positive lookup probing failure
4015 // should be treated as lookup failure.
4016 if (mode() == POSITIVE_LOOKUP) {
4017 __ mov(result, Operand::Zero());
4021 __ bind(&in_dictionary);
4022 __ mov(result, Operand(1));
4025 __ bind(&not_in_dictionary);
4026 __ mov(result, Operand::Zero());
4031 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4033 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4035 // Hydrogen code stubs need stub2 at snapshot time.
4036 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4041 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
4042 // the value has just been written into the object; now this stub makes sure
4043 // we keep the GC informed. The word in the object where the value has been
4044 // written is in the address register.
4045 void RecordWriteStub::Generate(MacroAssembler* masm) {
4046 Label skip_to_incremental_noncompacting;
4047 Label skip_to_incremental_compacting;
4049 // The first two instructions are generated with labels so as to get the
4050 // offset fixed up correctly by the bind(Label*) call. We patch them back and
4051 // forth between a compare instruction (a nop in this position) and the
4052 // real branch when we start and stop incremental heap marking.
4053 // See RecordWriteStub::Patch for details.
4055 // Block literal pool emission, as the position of these two instructions
4056 // is assumed by the patching code.
4057 Assembler::BlockConstPoolScope block_const_pool(masm);
4058 __ b(&skip_to_incremental_noncompacting);
4059 __ b(&skip_to_incremental_compacting);
4062 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4063 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4064 MacroAssembler::kReturnAtEnd);
4068 __ bind(&skip_to_incremental_noncompacting);
4069 GenerateIncremental(masm, INCREMENTAL);
4071 __ bind(&skip_to_incremental_compacting);
4072 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4074 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4075 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4076 DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
4077 DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
4078 PatchBranchIntoNop(masm, 0);
4079 PatchBranchIntoNop(masm, Assembler::kInstrSize);
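// Illustrative sketch only: RecordWriteStub::Patch flips the two instruction
// slots above between nops and branches, so at run time the stub behaves
// roughly like
//
//   switch (current marking mode) {
//     case STORE_BUFFER_ONLY:       // both slots are nops
//       fall through to the remembered-set update (or plain return) above;
//     case INCREMENTAL:             // slot 0 is a real branch
//       jump to skip_to_incremental_noncompacting;
//     case INCREMENTAL_COMPACTION:  // slot 1 is a real branch
//       jump to skip_to_incremental_compacting;
//   }
//
// This is why the two branches must sit at fixed offsets 0 and kInstrSize and
// why literal pool emission is blocked around them.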
4083 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4086 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4087 Label dont_need_remembered_set;
4089 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
4090 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4092 &dont_need_remembered_set);
4094 __ CheckPageFlag(regs_.object(),
4096 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4098 &dont_need_remembered_set);
4100 // First notify the incremental marker if necessary, then update the
4102 CheckNeedsToInformIncrementalMarker(
4103 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4104 InformIncrementalMarker(masm);
4105 regs_.Restore(masm);
4106 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4107 MacroAssembler::kReturnAtEnd);
4109 __ bind(&dont_need_remembered_set);
4112 CheckNeedsToInformIncrementalMarker(
4113 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4114 InformIncrementalMarker(masm);
4115 regs_.Restore(masm);
4120 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4121 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
4122 int argument_count = 3;
4123 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4125 r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4126 DCHECK(!address.is(regs_.object()));
4127 DCHECK(!address.is(r0));
4128 __ Move(address, regs_.address());
4129 __ Move(r0, regs_.object());
4130 __ Move(r1, address);
4131 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
4133 AllowExternalCallThatCantCauseGC scope(masm);
4135 ExternalReference::incremental_marking_record_write_function(isolate()),
4137 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
4141 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4142 MacroAssembler* masm,
4143 OnNoNeedToInformIncrementalMarker on_no_need,
4146 Label need_incremental;
4147 Label need_incremental_pop_scratch;
4149 __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4150 __ ldr(regs_.scratch1(),
4151 MemOperand(regs_.scratch0(),
4152 MemoryChunk::kWriteBarrierCounterOffset));
4153 __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
4154 __ str(regs_.scratch1(),
4155 MemOperand(regs_.scratch0(),
4156 MemoryChunk::kWriteBarrierCounterOffset));
4157 __ b(mi, &need_incremental);
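// Illustrative sketch only: the load/sub/store sequence above maintains a
// per-page counter (write_barrier_counter is a descriptive name for the slot
// at MemoryChunk::kWriteBarrierCounterOffset):
//
//   int32_t counter = chunk->write_barrier_counter - 1;
//   chunk->write_barrier_counter = counter;
//   if (counter < 0) goto need_incremental;
//
// so the need_incremental path is forced once the page's counter underflows.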
4159 // Let's look at the color of the object: If it is not black we don't have
4160 // to inform the incremental marker.
4161 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4163 regs_.Restore(masm);
4164 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4165 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4166 MacroAssembler::kReturnAtEnd);
4173 // Get the value from the slot.
4174 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
4176 if (mode == INCREMENTAL_COMPACTION) {
4177 Label ensure_not_white;
4179 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4180 regs_.scratch1(), // Scratch.
4181 MemoryChunk::kEvacuationCandidateMask,
4185 __ CheckPageFlag(regs_.object(),
4186 regs_.scratch1(), // Scratch.
4187 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4191 __ bind(&ensure_not_white);
4194 // We need extra registers for this, so we push the object and the address
4195 // register temporarily.
4196 __ Push(regs_.object(), regs_.address());
4197 __ EnsureNotWhite(regs_.scratch0(), // The value.
4198 regs_.scratch1(), // Scratch.
4199 regs_.object(), // Scratch.
4200 regs_.address(), // Scratch.
4201 &need_incremental_pop_scratch);
4202 __ Pop(regs_.object(), regs_.address());
4204 regs_.Restore(masm);
4205 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4206 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4207 MacroAssembler::kReturnAtEnd);
4212 __ bind(&need_incremental_pop_scratch);
4213 __ Pop(regs_.object(), regs_.address());
4215 __ bind(&need_incremental);
4217 // Fall through when we need to inform the incremental marker.
4221 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4222 // ----------- S t a t e -------------
4223 // -- r0 : element value to store
4224 // -- r3 : element index as smi
4225 // -- sp[0] : array literal index in function as smi
4226 // -- sp[4] : array literal
4227 // clobbers r1, r2, r4
4228 // -----------------------------------
4231 Label double_elements;
4233 Label slow_elements;
4234 Label fast_elements;
4236 // Get array literal index, array literal and its map.
4237 __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
4238 __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
4239 __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
4241 __ CheckFastElements(r2, r5, &double_elements);
4242 // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
4243 __ JumpIfSmi(r0, &smi_element);
4244 __ CheckFastSmiElements(r2, r5, &fast_elements);
4246 // Storing into the array literal requires an elements transition. Call into
4248 __ bind(&slow_elements);
4250 __ Push(r1, r3, r0);
4251 __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4252 __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
4254 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4256 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4257 __ bind(&fast_elements);
4258 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4259 __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
4260 __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4261 __ str(r0, MemOperand(r6, 0));
4262 // Update the write barrier for the array store.
4263 __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
4264 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4267 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4268 // and value is Smi.
4269 __ bind(&smi_element);
4270 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4271 __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
4272 __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
4275 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
4276 __ bind(&double_elements);
4277 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4278 __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
4283 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4284 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4285 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4286 int parameter_count_offset =
4287 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4288 __ ldr(r1, MemOperand(fp, parameter_count_offset));
4289 if (function_mode() == JS_FUNCTION_STUB_MODE) {
4290 __ add(r1, r1, Operand(1));
4292 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4293 __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
4299 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4300 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4301 VectorLoadStub stub(isolate(), state());
4302 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4306 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4307 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4308 VectorKeyedLoadStub stub(isolate());
4309 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4313 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
4314 EmitLoadTypeFeedbackVector(masm, r2);
4315 CallICStub stub(isolate(), state());
4316 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4320 void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
4321 EmitLoadTypeFeedbackVector(masm, r2);
4322 CallIC_ArrayStub stub(isolate(), state());
4323 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4327 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4328 if (masm->isolate()->function_entry_hook() != NULL) {
4329 ProfileEntryHookStub stub(masm->isolate());
4330 int code_size = masm->CallStubSize(&stub) + 2 * Assembler::kInstrSize;
4331 PredictableCodeSizeScope predictable(masm, code_size);
4339 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4340 // The entry hook is a "push lr" instruction, followed by a call.
4341 const int32_t kReturnAddressDistanceFromFunctionStart =
4342 3 * Assembler::kInstrSize;
4344 // This should contain all kCallerSaved registers.
4345 const RegList kSavedRegs =
4352 // We also save lr, so the count here is one higher than the mask indicates.
4353 const int32_t kNumSavedRegs = 7;
4355 DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);
4357 // Save all caller-save registers as this may be called from anywhere.
4358 __ stm(db_w, sp, kSavedRegs | lr.bit());
4360 // Compute the function's address for the first argument.
4361 __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
4363 // The caller's return address is above the saved temporaries.
4364 // Grab that for the second argument to the hook.
4365 __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
4367 // Align the stack if necessary.
4368 int frame_alignment = masm->ActivationFrameAlignment();
4369 if (frame_alignment > kPointerSize) {
4371 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4372 __ and_(sp, sp, Operand(-frame_alignment));
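// Illustrative sketch only: and-ing sp with -frame_alignment rounds the stack
// pointer down to the required power-of-two boundary, e.g. for an 8-byte
// alignment
//
//   sp = sp & ~uintptr_t{7};   // 0x...2c -> 0x...28
//
// The unaligned value is restored again below ("Restore the stack pointer if
// needed").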
4375 #if V8_HOST_ARCH_ARM
4376 int32_t entry_hook =
4377 reinterpret_cast<int32_t>(isolate()->function_entry_hook());
4378 __ mov(ip, Operand(entry_hook));
4380 // Under the simulator we need to indirect the entry hook through a
4381 // trampoline function at a known address.
4382 // It additionally takes an isolate as a third parameter
4383 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
4385 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4386 __ mov(ip, Operand(ExternalReference(&dispatcher,
4387 ExternalReference::BUILTIN_CALL,
4392 // Restore the stack pointer if needed.
4393 if (frame_alignment > kPointerSize) {
4397 // Also pop pc to get Ret(0).
4398 __ ldm(ia_w, sp, kSavedRegs | pc.bit());
4403 static void CreateArrayDispatch(MacroAssembler* masm,
4404 AllocationSiteOverrideMode mode) {
4405 if (mode == DISABLE_ALLOCATION_SITES) {
4406 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4407 __ TailCallStub(&stub);
4408 } else if (mode == DONT_OVERRIDE) {
4409 int last_index = GetSequenceIndexFromFastElementsKind(
4410 TERMINAL_FAST_ELEMENTS_KIND);
4411 for (int i = 0; i <= last_index; ++i) {
4412 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4413 __ cmp(r3, Operand(kind));
4414 T stub(masm->isolate(), kind);
4415 __ TailCallStub(&stub, eq);
4418 // If we reached this point there is a problem.
4419 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4426 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4427 AllocationSiteOverrideMode mode) {
4428 // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4429 // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4430 // r0 - number of arguments
4431 // r1 - constructor?
4432 // sp[0] - last argument
4433 Label normal_sequence;
4434 if (mode == DONT_OVERRIDE) {
4435 DCHECK(FAST_SMI_ELEMENTS == 0);
4436 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
4437 DCHECK(FAST_ELEMENTS == 2);
4438 DCHECK(FAST_HOLEY_ELEMENTS == 3);
4439 DCHECK(FAST_DOUBLE_ELEMENTS == 4);
4440 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
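// Illustrative sketch only: with the numbering asserted above, the packed and
// holey variants of each fast kind differ only in the low bit:
//
//   FAST_SMI_ELEMENTS    (0)  ->  FAST_HOLEY_SMI_ELEMENTS    (1)
//   FAST_ELEMENTS        (2)  ->  FAST_HOLEY_ELEMENTS        (3)
//   FAST_DOUBLE_ELEMENTS (4)  ->  FAST_HOLEY_DOUBLE_ELEMENTS (5)
//
// which is why the tst of bit 0 below is enough to detect an already-holey
// kind.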
4442 // Is the low bit set? If so, we are holey and that is good.
4443 __ tst(r3, Operand(1));
4444 __ b(ne, &normal_sequence);
4447 // look at the first argument
4448 __ ldr(r5, MemOperand(sp, 0));
4449 __ cmp(r5, Operand::Zero());
4450 __ b(eq, &normal_sequence);
4452 if (mode == DISABLE_ALLOCATION_SITES) {
4453 ElementsKind initial = GetInitialFastElementsKind();
4454 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4456 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4458 DISABLE_ALLOCATION_SITES);
4459 __ TailCallStub(&stub_holey);
4461 __ bind(&normal_sequence);
4462 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4464 DISABLE_ALLOCATION_SITES);
4465 __ TailCallStub(&stub);
4466 } else if (mode == DONT_OVERRIDE) {
4467 // We are going to create a holey array, but our kind is non-holey.
4468 // Fix kind and retry (only if we have an allocation site in the slot).
4469 __ add(r3, r3, Operand(1));
4471 if (FLAG_debug_code) {
4472 __ ldr(r5, FieldMemOperand(r2, 0));
4473 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
4474 __ Assert(eq, kExpectedAllocationSite);
4477 // Save the resulting elements kind in type info. We can't just store r3
4478 // in the AllocationSite::transition_info field because elements kind is
4479 // restricted to a portion of the field; the upper bits need to be left alone.
4480 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4481 __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
4482 __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
4483 __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
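// Illustrative sketch only: because ElementsKindBits sit at the bottom of the
// Smi-tagged transition_info (kShift == 0, asserted above), adding the
// Smi-tagged packed-to-holey distance updates only the kind field:
//
//   info = site->transition_info;                        // still a Smi
//   info = info + Smi(kFastElementsKindPackedToHoley);   // packed -> holey
//   site->transition_info = info;
//
// A plain store of r3 would clobber the upper, non-kind bits of the field,
// which the comment above warns must be left alone.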
4485 __ bind(&normal_sequence);
4486 int last_index = GetSequenceIndexFromFastElementsKind(
4487 TERMINAL_FAST_ELEMENTS_KIND);
4488 for (int i = 0; i <= last_index; ++i) {
4489 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4490 __ cmp(r3, Operand(kind));
4491 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4492 __ TailCallStub(&stub, eq);
4495 // If we reached this point there is a problem.
4496 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4504 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4505 int to_index = GetSequenceIndexFromFastElementsKind(
4506 TERMINAL_FAST_ELEMENTS_KIND);
4507 for (int i = 0; i <= to_index; ++i) {
4508 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4509 T stub(isolate, kind);
4511 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4512 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4519 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4520 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4522 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4524 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4529 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4531 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4532 for (int i = 0; i < 2; i++) {
4533 // For internal arrays we only need a few things
4534 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4536 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4538 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4544 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4545 MacroAssembler* masm,
4546 AllocationSiteOverrideMode mode) {
4547 if (argument_count() == ANY) {
4548 Label not_zero_case, not_one_case;
4550 __ b(ne, &not_zero_case);
4551 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4553 __ bind(&not_zero_case);
4554 __ cmp(r0, Operand(1));
4555 __ b(gt, &not_one_case);
4556 CreateArrayDispatchOneArgument(masm, mode);
4558 __ bind(&not_one_case);
4559 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4560 } else if (argument_count() == NONE) {
4561 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4562 } else if (argument_count() == ONE) {
4563 CreateArrayDispatchOneArgument(masm, mode);
4564 } else if (argument_count() == MORE_THAN_ONE) {
4565 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4572 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4573 // ----------- S t a t e -------------
4574 // -- r0 : argc (only if argument_count() == ANY)
4575 // -- r1 : constructor
4576 // -- r2 : AllocationSite or undefined
4577 // -- r3 : original constructor
4578 // -- sp[0] : return address
4579 // -- sp[4] : last argument
4580 // -----------------------------------
4582 if (FLAG_debug_code) {
4583 // The array construct code is only set for the global and natives
4584 // builtin Array functions, which always have maps.
4586 // Initial map for the builtin Array function should be a map.
4587 __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4588 // Will both indicate a NULL and a Smi.
4589 __ tst(r4, Operand(kSmiTagMask));
4590 __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
4591 __ CompareObjectType(r4, r4, r5, MAP_TYPE);
4592 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4594 // We should either have undefined in r2 or a valid AllocationSite
4595 __ AssertUndefinedOrAllocationSite(r2, r4);
4600 __ b(ne, &subclassing);
4603 // Get the elements kind and case on that.
4604 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
4607 __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
4609 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4610 __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
4611 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4614 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4616 __ bind(&subclassing);
4621 switch (argument_count()) {
4624 __ add(r0, r0, Operand(2));
4627 __ mov(r0, Operand(2));
4630 __ mov(r0, Operand(3));
4634 __ JumpToExternalReference(
4635 ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
4639 void InternalArrayConstructorStub::GenerateCase(
4640 MacroAssembler* masm, ElementsKind kind) {
4641 __ cmp(r0, Operand(1));
4643 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4644 __ TailCallStub(&stub0, lo);
4646 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4647 __ TailCallStub(&stubN, hi);
4649 if (IsFastPackedElementsKind(kind)) {
4650 // We might need to create a holey array
4651 // look at the first argument
4652 __ ldr(r3, MemOperand(sp, 0));
4653 __ cmp(r3, Operand::Zero());
4655 InternalArraySingleArgumentConstructorStub
4656 stub1_holey(isolate(), GetHoleyElementsKind(kind));
4657 __ TailCallStub(&stub1_holey, ne);
4660 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4661 __ TailCallStub(&stub1);
4665 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4666 // ----------- S t a t e -------------
4668 // -- r1 : constructor
4669 // -- sp[0] : return address
4670 // -- sp[4] : last argument
4671 // -----------------------------------
4673 if (FLAG_debug_code) {
4674 // The array construct code is only set for the global and natives
4675 // builtin Array functions, which always have maps.
4677 // Initial map for the builtin Array function should be a map.
4678 __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4679 // Will both indicate a NULL and a Smi.
4680 __ tst(r3, Operand(kSmiTagMask));
4681 __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
4682 __ CompareObjectType(r3, r3, r4, MAP_TYPE);
4683 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4686 // Figure out the right elements kind
4687 __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4688 // Load the map's "bit field 2" into r3. We only need the first byte,
4689 // but the following bit field extraction takes care of that anyway.
4690 __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
4691 // Retrieve elements_kind from bit field 2.
4692 __ DecodeField<Map::ElementsKindBits>(r3);
4694 if (FLAG_debug_code) {
4696 __ cmp(r3, Operand(FAST_ELEMENTS));
4698 __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
4700 kInvalidElementsKindForInternalArrayOrInternalPackedArray);
4704 Label fast_elements_case;
4705 __ cmp(r3, Operand(FAST_ELEMENTS));
4706 __ b(eq, &fast_elements_case);
4707 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4709 __ bind(&fast_elements_case);
4710 GenerateCase(masm, FAST_ELEMENTS);
4714 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
4715 return ref0.address() - ref1.address();
4719 // Calls an API function. Allocates HandleScope, extracts returned value
4720 // from handle and propagates exceptions. Restores context. stack_space
4721 // - space to be unwound on exit (includes the call's JS arguments space and
4722 // the additional space allocated for the fast call).
4723 static void CallApiFunctionAndReturn(MacroAssembler* masm,
4724 Register function_address,
4725 ExternalReference thunk_ref,
4727 MemOperand* stack_space_operand,
4728 MemOperand return_value_operand,
4729 MemOperand* context_restore_operand) {
4730 Isolate* isolate = masm->isolate();
4731 ExternalReference next_address =
4732 ExternalReference::handle_scope_next_address(isolate);
4733 const int kNextOffset = 0;
4734 const int kLimitOffset = AddressOffset(
4735 ExternalReference::handle_scope_limit_address(isolate), next_address);
4736 const int kLevelOffset = AddressOffset(
4737 ExternalReference::handle_scope_level_address(isolate), next_address);
4739 DCHECK(function_address.is(r1) || function_address.is(r2));
4741 Label profiler_disabled;
4742 Label end_profiler_check;
4743 __ mov(r9, Operand(ExternalReference::is_profiling_address(isolate)));
4744 __ ldrb(r9, MemOperand(r9, 0));
4745 __ cmp(r9, Operand(0));
4746 __ b(eq, &profiler_disabled);
4748 // Additional parameter is the address of the actual callback.
4749 __ mov(r3, Operand(thunk_ref));
4750 __ jmp(&end_profiler_check);
4752 __ bind(&profiler_disabled);
4753 __ Move(r3, function_address);
4754 __ bind(&end_profiler_check);
4756 // Allocate HandleScope in callee-save registers.
4757 __ mov(r9, Operand(next_address));
4758 __ ldr(r4, MemOperand(r9, kNextOffset));
4759 __ ldr(r5, MemOperand(r9, kLimitOffset));
4760 __ ldr(r6, MemOperand(r9, kLevelOffset));
4761 __ add(r6, r6, Operand(1));
4762 __ str(r6, MemOperand(r9, kLevelOffset));
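// Illustrative sketch only: the loads/stores above open a HandleScope by
// hand, and the epilogue below closes it again. In C-like terms (next/limit/
// level are descriptive names for the slots addressed via kNextOffset/
// kLimitOffset/kLevelOffset):
//
//   prev_next  = handle_scope.next;
//   prev_limit = handle_scope.limit;
//   handle_scope.level++;
//   ... call the API function ...
//   handle_scope.next = prev_next;
//   handle_scope.level--;
//   if (handle_scope.limit != prev_limit)
//     DeleteHandleScopeExtensions(isolate);   // see delete_allocated_handles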
4764 if (FLAG_log_timer_events) {
4765 FrameScope frame(masm, StackFrame::MANUAL);
4766 __ PushSafepointRegisters();
4767 __ PrepareCallCFunction(1, r0);
4768 __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
4769 __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
4771 __ PopSafepointRegisters();
4774 // Native call returns to the DirectCEntry stub which redirects to the
4775 // return address pushed on stack (could have moved after GC).
4776 // DirectCEntry stub itself is generated early and never moves.
4777 DirectCEntryStub stub(isolate);
4778 stub.GenerateCall(masm, r3);
4780 if (FLAG_log_timer_events) {
4781 FrameScope frame(masm, StackFrame::MANUAL);
4782 __ PushSafepointRegisters();
4783 __ PrepareCallCFunction(1, r0);
4784 __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
4785 __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
4787 __ PopSafepointRegisters();
4790 Label promote_scheduled_exception;
4791 Label exception_handled;
4792 Label delete_allocated_handles;
4793 Label leave_exit_frame;
4794 Label return_value_loaded;
4796 // load value from ReturnValue
4797 __ ldr(r0, return_value_operand);
4798 __ bind(&return_value_loaded);
4799 // No more valid handles (the result handle was the last one). Restore
4800 // previous handle scope.
4801 __ str(r4, MemOperand(r9, kNextOffset));
4802 if (__ emit_debug_code()) {
4803 __ ldr(r1, MemOperand(r9, kLevelOffset));
4805 __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
4807 __ sub(r6, r6, Operand(1));
4808 __ str(r6, MemOperand(r9, kLevelOffset));
4809 __ ldr(ip, MemOperand(r9, kLimitOffset));
4811 __ b(ne, &delete_allocated_handles);
4813 // Check if the function scheduled an exception.
4814 __ bind(&leave_exit_frame);
4815 __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
4816 __ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
4817 __ ldr(r5, MemOperand(ip));
4819 __ b(ne, &promote_scheduled_exception);
4820 __ bind(&exception_handled);
4822 bool restore_context = context_restore_operand != NULL;
4823 if (restore_context) {
4824 __ ldr(cp, *context_restore_operand);
4826 // LeaveExitFrame expects unwind space to be in a register.
4827 if (stack_space_operand != NULL) {
4828 __ ldr(r4, *stack_space_operand);
4830 __ mov(r4, Operand(stack_space));
4832 __ LeaveExitFrame(false, r4, !restore_context, stack_space_operand != NULL);
4835 __ bind(&promote_scheduled_exception);
4837 FrameScope frame(masm, StackFrame::INTERNAL);
4838 __ CallExternalReference(
4839 ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
4841 __ jmp(&exception_handled);
4843 // HandleScope limit has changed. Delete allocated extensions.
4844 __ bind(&delete_allocated_handles);
4845 __ str(r5, MemOperand(r9, kLimitOffset));
4847 __ PrepareCallCFunction(1, r5);
4848 __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
4849 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
4852 __ jmp(&leave_exit_frame);
4856 static void CallApiFunctionStubHelper(MacroAssembler* masm,
4857 const ParameterCount& argc,
4858 bool return_first_arg,
4859 bool call_data_undefined) {
4860 // ----------- S t a t e -------------
4862 // -- r4 : call_data
4864 // -- r1 : api_function_address
4865 // -- r3 : number of arguments if argc is a register
4868 // -- sp[0] : last argument
4870 // -- sp[(argc - 1)* 4] : first argument
4871 // -- sp[argc * 4] : receiver
4872 // -----------------------------------
4874 Register callee = r0;
4875 Register call_data = r4;
4876 Register holder = r2;
4877 Register api_function_address = r1;
4878 Register context = cp;
4880 typedef FunctionCallbackArguments FCA;
4882 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
4883 STATIC_ASSERT(FCA::kCalleeIndex == 5);
4884 STATIC_ASSERT(FCA::kDataIndex == 4);
4885 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
4886 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
4887 STATIC_ASSERT(FCA::kIsolateIndex == 1);
4888 STATIC_ASSERT(FCA::kHolderIndex == 0);
4889 STATIC_ASSERT(FCA::kArgsLength == 7);
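// Illustrative sketch only: assuming the pushes that follow leave sp pointing
// at the implicit argument block (it is captured in scratch under "Prepare
// arguments" below), the asserted indices describe this layout, low to high:
//
//   sp[0 * 4]  holder
//   sp[1 * 4]  isolate
//   sp[2 * 4]  return value default
//   sp[3 * 4]  return value
//   sp[4 * 4]  call data
//   sp[5 * 4]  callee
//   sp[6 * 4]  context save
//
// followed by the explicit JS arguments and finally the receiver, matching
// the state comment at the top of this helper.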
4891 DCHECK(argc.is_immediate() || r3.is(argc.reg()));
4895 // load context from callee
4896 __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
4904 Register scratch = call_data;
4905 if (!call_data_undefined) {
4906 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4910 // return value default
4913 __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
4918 // Prepare arguments.
4919 __ mov(scratch, sp);
4921 // Allocate the v8::Arguments structure in the arguments' space since
4922 // it's not controlled by GC.
4923 const int kApiStackSpace = 4;
4925 FrameScope frame_scope(masm, StackFrame::MANUAL);
4926 __ EnterExitFrame(false, kApiStackSpace);
4928 DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
4929 // r0 = FunctionCallbackInfo&
4930 // Arguments is after the return address.
4931 __ add(r0, sp, Operand(1 * kPointerSize));
4932 // FunctionCallbackInfo::implicit_args_
4933 __ str(scratch, MemOperand(r0, 0 * kPointerSize));
4934 if (argc.is_immediate()) {
4935 // FunctionCallbackInfo::values_
4937 Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
4938 __ str(ip, MemOperand(r0, 1 * kPointerSize));
4939 // FunctionCallbackInfo::length_ = argc
4940 __ mov(ip, Operand(argc.immediate()));
4941 __ str(ip, MemOperand(r0, 2 * kPointerSize));
4942 // FunctionCallbackInfo::is_construct_call_ = 0
4943 __ mov(ip, Operand::Zero());
4944 __ str(ip, MemOperand(r0, 3 * kPointerSize));
4946 // FunctionCallbackInfo::values_
4947 __ add(ip, scratch, Operand(argc.reg(), LSL, kPointerSizeLog2));
4948 __ add(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
4949 __ str(ip, MemOperand(r0, 1 * kPointerSize));
4950 // FunctionCallbackInfo::length_ = argc
4951 __ str(argc.reg(), MemOperand(r0, 2 * kPointerSize));
4952 // FunctionCallbackInfo::is_construct_call_
4953 __ add(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
4954 __ mov(ip, Operand(argc.reg(), LSL, kPointerSizeLog2));
4955 __ str(ip, MemOperand(r0, 3 * kPointerSize));
4958 ExternalReference thunk_ref =
4959 ExternalReference::invoke_function_callback(masm->isolate());
4961 AllowExternalCallThatCantCauseGC scope(masm);
4962 MemOperand context_restore_operand(
4963 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
4964 // Stores return the first JS argument.
4965 int return_value_offset = 0;
4966 if (return_first_arg) {
4967 return_value_offset = 2 + FCA::kArgsLength;
4969 return_value_offset = 2 + FCA::kReturnValueOffset;
4971 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
4972 int stack_space = 0;
4973 MemOperand is_construct_call_operand = MemOperand(sp, 4 * kPointerSize);
4974 MemOperand* stack_space_operand = &is_construct_call_operand;
4975 if (argc.is_immediate()) {
4976 stack_space = argc.immediate() + FCA::kArgsLength + 1;
4977 stack_space_operand = NULL;
4979 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
4980 stack_space_operand, return_value_operand,
4981 &context_restore_operand);
4985 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
4986 bool call_data_undefined = this->call_data_undefined();
4987 CallApiFunctionStubHelper(masm, ParameterCount(r3), false,
4988 call_data_undefined);
4992 void CallApiAccessorStub::Generate(MacroAssembler* masm) {
4993 bool is_store = this->is_store();
4994 int argc = this->argc();
4995 bool call_data_undefined = this->call_data_undefined();
4996 CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
4997 call_data_undefined);
5001 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5002 // ----------- S t a t e -------------
5004 // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
5006 // -- r2 : api_function_address
5007 // -----------------------------------
5009 Register api_function_address = ApiGetterDescriptor::function_address();
5010 DCHECK(api_function_address.is(r2));
5012 __ mov(r0, sp); // r0 = Handle<Name>
5013 __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
5015 const int kApiStackSpace = 1;
5016 FrameScope frame_scope(masm, StackFrame::MANUAL);
5017 __ EnterExitFrame(false, kApiStackSpace);
5019 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5020 // r1 (internal::Object** args_) as the data.
5021 __ str(r1, MemOperand(sp, 1 * kPointerSize));
5022 __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
5024 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5026 ExternalReference thunk_ref =
5027 ExternalReference::invoke_accessor_getter_callback(isolate());
5028 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
5029 kStackUnwindSpace, NULL,
5030 MemOperand(fp, 6 * kPointerSize), NULL);
5036 } } // namespace v8::internal
5038 #endif // V8_TARGET_ARCH_ARM