1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/base/bits.h"
8 #include "src/bootstrapper.h"
9 #include "src/code-stubs.h"
10 #include "src/codegen.h"
11 #include "src/ic/handler-compiler.h"
12 #include "src/ic/ic.h"
13 #include "src/ic/stub-cache.h"
14 #include "src/isolate.h"
15 #include "src/regexp/jsregexp.h"
16 #include "src/regexp/regexp-macro-assembler.h"
17 #include "src/runtime/runtime.h"
23 static void InitializeArrayConstructorDescriptor(
24 Isolate* isolate, CodeStubDescriptor* descriptor,
25 int constant_stack_parameter_count) {
26 Address deopt_handler = Runtime::FunctionForId(
27 Runtime::kArrayConstructor)->entry;
29 if (constant_stack_parameter_count == 0) {
30 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
31 JS_FUNCTION_STUB_MODE);
33 descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
34 JS_FUNCTION_STUB_MODE);
39 static void InitializeInternalArrayConstructorDescriptor(
40 Isolate* isolate, CodeStubDescriptor* descriptor,
41 int constant_stack_parameter_count) {
42 Address deopt_handler = Runtime::FunctionForId(
43 Runtime::kInternalArrayConstructor)->entry;
45 if (constant_stack_parameter_count == 0) {
46 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
47 JS_FUNCTION_STUB_MODE);
49 descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
50 JS_FUNCTION_STUB_MODE);
55 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
56 CodeStubDescriptor* descriptor) {
57 InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
61 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
62 CodeStubDescriptor* descriptor) {
63 InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
67 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
68 CodeStubDescriptor* descriptor) {
69 InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
73 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
74 CodeStubDescriptor* descriptor) {
75 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
79 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
80 CodeStubDescriptor* descriptor) {
81 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
85 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
86 CodeStubDescriptor* descriptor) {
87 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
91 #define __ ACCESS_MASM(masm)
94 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
95 Condition cond, Strength strength);
96 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
102 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
107 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
108 ExternalReference miss) {
109 // Update the static counter each time a new code stub is generated.
110 isolate()->counters()->code_stubs()->Increment();
112 CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
113 int param_count = descriptor.GetRegisterParameterCount();
115 // Call the runtime system in a fresh internal frame.
116 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
117 DCHECK(param_count == 0 ||
118 r0.is(descriptor.GetRegisterParameter(param_count - 1)));
120 for (int i = 0; i < param_count; ++i) {
121 __ push(descriptor.GetRegisterParameter(i));
123 __ CallExternalReference(miss, param_count);
130 void DoubleToIStub::Generate(MacroAssembler* masm) {
131 Label out_of_range, only_low, negate, done;
132 Register input_reg = source();
133 Register result_reg = destination();
134 DCHECK(is_truncating());
136 int double_offset = offset();
137 // Account for saved regs if input is sp.
138 if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
140 Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
141 Register scratch_low =
142 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
143 Register scratch_high =
144 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
145 LowDwVfpRegister double_scratch = kScratchDoubleReg;
147 __ Push(scratch_high, scratch_low, scratch);
149 if (!skip_fastpath()) {
150 // Load double input.
151 __ vldr(double_scratch, MemOperand(input_reg, double_offset));
152 __ vmov(scratch_low, scratch_high, double_scratch);
154 // Do fast-path convert from double to int.
155 __ vcvt_s32_f64(double_scratch.low(), double_scratch);
156 __ vmov(result_reg, double_scratch.low());
158 // If result is not saturated (0x7fffffff or 0x80000000), we are done.
159 __ sub(scratch, result_reg, Operand(1));
160 __ cmp(scratch, Operand(0x7ffffffe));
163 // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
164 // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
165 if (double_offset == 0) {
166 __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
168 __ ldr(scratch_low, MemOperand(input_reg, double_offset));
169 __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
173 __ Ubfx(scratch, scratch_high,
174 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
175 // Load scratch with exponent - 1. This is faster than loading
176 // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
177 STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
178 __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
179 // If exponent is greater than or equal to 84, the 32 less significant
180 // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
182 // Compare exponent with 84 (compare exponent - 1 with 83).
183 __ cmp(scratch, Operand(83));
184 __ b(ge, &out_of_range);
186 // If we reach this code, 31 <= exponent <= 83.
187 // So, we don't have to handle cases where 0 <= exponent <= 20 for
188 // which we would need to shift right the high part of the mantissa.
189 // Scratch contains exponent - 1.
190 // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
191 __ rsb(scratch, scratch, Operand(51), SetCC);
193 // 21 <= exponent <= 51, shift scratch_low and scratch_high
194 // to generate the result.
195 __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
196 // Scratch contains: 52 - exponent.
197 // We needs: exponent - 20.
198 // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
199 __ rsb(scratch, scratch, Operand(32));
200 __ Ubfx(result_reg, scratch_high,
201 0, HeapNumber::kMantissaBitsInTopWord);
202 // Set the implicit 1 before the mantissa part in scratch_high.
203 __ orr(result_reg, result_reg,
204 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
205 __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
208 __ bind(&out_of_range);
209 __ mov(result_reg, Operand::Zero());
213 // 52 <= exponent <= 83, shift only scratch_low.
214 // On entry, scratch contains: 52 - exponent.
215 __ rsb(scratch, scratch, Operand::Zero());
216 __ mov(result_reg, Operand(scratch_low, LSL, scratch));
219 // If input was positive, scratch_high ASR 31 equals 0 and
220 // scratch_high LSR 31 equals zero.
221 // New result = (result eor 0) + 0 = result.
222 // If the input was negative, we have to negate the result.
223 // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
224 // New result = (result eor 0xffffffff) + 1 = 0 - result.
225 __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
226 __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
230 __ Pop(scratch_high, scratch_low, scratch);
235 // Handle the case where the lhs and rhs are the same object.
236 // Equality is almost reflexive (everything but NaN), so this is a test
237 // for "identity and not NaN".
238 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
239 Condition cond, Strength strength) {
241 Label heap_number, return_equal;
243 __ b(ne, ¬_identical);
245 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
246 // so we do the second best thing - test it ourselves.
247 // They are both equal and they are not both Smis so both of them are not
248 // Smis. If it's not a heap number, then return equal.
249 if (cond == lt || cond == gt) {
250 // Call runtime on identical JSObjects.
251 __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
253 // Call runtime on identical symbols since we need to throw a TypeError.
254 __ cmp(r4, Operand(SYMBOL_TYPE));
256 // Call runtime on identical SIMD values since we must throw a TypeError.
257 __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
259 if (is_strong(strength)) {
260 // Call the runtime on anything that is converted in the semantics, since
261 // we need to throw a TypeError. Smis have already been ruled out.
262 __ cmp(r4, Operand(HEAP_NUMBER_TYPE));
263 __ b(eq, &return_equal);
264 __ tst(r4, Operand(kIsNotStringMask));
268 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
269 __ b(eq, &heap_number);
270 // Comparing JS objects with <=, >= is complicated.
272 __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
274 // Call runtime on identical symbols since we need to throw a TypeError.
275 __ cmp(r4, Operand(SYMBOL_TYPE));
277 // Call runtime on identical SIMD values since we must throw a TypeError.
278 __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
280 if (is_strong(strength)) {
281 // Call the runtime on anything that is converted in the semantics,
282 // since we need to throw a TypeError. Smis and heap numbers have
283 // already been ruled out.
284 __ tst(r4, Operand(kIsNotStringMask));
287 // Normally here we fall through to return_equal, but undefined is
288 // special: (undefined == undefined) == true, but
289 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
290 if (cond == le || cond == ge) {
291 __ cmp(r4, Operand(ODDBALL_TYPE));
292 __ b(ne, &return_equal);
293 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
295 __ b(ne, &return_equal);
297 // undefined <= undefined should fail.
298 __ mov(r0, Operand(GREATER));
300 // undefined >= undefined should fail.
301 __ mov(r0, Operand(LESS));
308 __ bind(&return_equal);
310 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
311 } else if (cond == gt) {
312 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
314 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
318 // For less and greater we don't have to check for NaN since the result of
319 // x < x is false regardless. For the others here is some code to check
321 if (cond != lt && cond != gt) {
322 __ bind(&heap_number);
323 // It is a heap number, so return non-equal if it's NaN and equal if it's
326 // The representation of NaN values has all exponent bits (52..62) set,
327 // and not all mantissa bits (0..51) clear.
328 // Read top bits of double representation (second word of value).
329 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
330 // Test that exponent bits are all set.
331 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
332 // NaNs have all-one exponents so they sign extend to -1.
333 __ cmp(r3, Operand(-1));
334 __ b(ne, &return_equal);
336 // Shift out flag and all exponent bits, retaining only mantissa.
337 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
338 // Or with all low-bits of mantissa.
339 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
340 __ orr(r0, r3, Operand(r2), SetCC);
341 // For equal we already have the right value in r0: Return zero (equal)
342 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
343 // not (it's a NaN). For <= and >= we need to load r0 with the failing
344 // value if it's a NaN.
346 // All-zero means Infinity means equal.
349 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
351 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
356 // No fall through here.
358 __ bind(¬_identical);
362 // See comment at call site.
363 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
369 DCHECK((lhs.is(r0) && rhs.is(r1)) ||
370 (lhs.is(r1) && rhs.is(r0)));
373 __ JumpIfSmi(rhs, &rhs_is_smi);
375 // Lhs is a Smi. Check whether the rhs is a heap number.
376 __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
378 // If rhs is not a number and lhs is a Smi then strict equality cannot
379 // succeed. Return non-equal
380 // If rhs is r0 then there is already a non zero value in it.
382 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
386 // Smi compared non-strictly with a non-Smi non-heap-number. Call
391 // Lhs is a smi, rhs is a number.
392 // Convert lhs to a double in d7.
393 __ SmiToDouble(d7, lhs);
394 // Load the double from rhs, tagged HeapNumber r0, to d6.
395 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
397 // We now have both loaded as doubles but we can skip the lhs nan check
401 __ bind(&rhs_is_smi);
402 // Rhs is a smi. Check whether the non-smi lhs is a heap number.
403 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
405 // If lhs is not a number and rhs is a smi then strict equality cannot
406 // succeed. Return non-equal.
407 // If lhs is r0 then there is already a non zero value in it.
409 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
413 // Smi compared non-strictly with a non-smi non-heap-number. Call
418 // Rhs is a smi, lhs is a heap number.
419 // Load the double from lhs, tagged HeapNumber r1, to d7.
420 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
421 // Convert rhs to a double in d6 .
422 __ SmiToDouble(d6, rhs);
423 // Fall through to both_loaded_as_doubles.
427 // See comment at call site.
428 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
431 DCHECK((lhs.is(r0) && rhs.is(r1)) ||
432 (lhs.is(r1) && rhs.is(r0)));
434 // If either operand is a JS object or an oddball value, then they are
435 // not equal since their pointers are different.
436 // There is no test for undetectability in strict equality.
437 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
438 Label first_non_object;
439 // Get the type of the first operand into r2 and compare it with
440 // FIRST_SPEC_OBJECT_TYPE.
441 __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
442 __ b(lt, &first_non_object);
444 // Return non-zero (r0 is not zero)
445 Label return_not_equal;
446 __ bind(&return_not_equal);
449 __ bind(&first_non_object);
450 // Check for oddballs: true, false, null, undefined.
451 __ cmp(r2, Operand(ODDBALL_TYPE));
452 __ b(eq, &return_not_equal);
454 __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
455 __ b(ge, &return_not_equal);
457 // Check for oddballs: true, false, null, undefined.
458 __ cmp(r3, Operand(ODDBALL_TYPE));
459 __ b(eq, &return_not_equal);
461 // Now that we have the types we might as well check for
462 // internalized-internalized.
463 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
464 __ orr(r2, r2, Operand(r3));
465 __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
466 __ b(eq, &return_not_equal);
470 // See comment at call site.
471 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
474 Label* both_loaded_as_doubles,
475 Label* not_heap_numbers,
477 DCHECK((lhs.is(r0) && rhs.is(r1)) ||
478 (lhs.is(r1) && rhs.is(r0)));
480 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
481 __ b(ne, not_heap_numbers);
482 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
484 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
486 // Both are heap numbers. Load them up then jump to the code we have
488 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
489 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
490 __ jmp(both_loaded_as_doubles);
494 // Fast negative check for internalized-to-internalized equality.
495 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
498 Label* possible_strings,
499 Label* not_both_strings) {
500 DCHECK((lhs.is(r0) && rhs.is(r1)) ||
501 (lhs.is(r1) && rhs.is(r0)));
503 // r2 is object type of rhs.
505 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
506 __ tst(r2, Operand(kIsNotStringMask));
507 __ b(ne, &object_test);
508 __ tst(r2, Operand(kIsNotInternalizedMask));
509 __ b(ne, possible_strings);
510 __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
511 __ b(ge, not_both_strings);
512 __ tst(r3, Operand(kIsNotInternalizedMask));
513 __ b(ne, possible_strings);
515 // Both are internalized. We already checked they weren't the same pointer
516 // so they are not equal.
517 __ mov(r0, Operand(NOT_EQUAL));
520 __ bind(&object_test);
521 __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
522 __ b(lt, not_both_strings);
523 __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
524 __ b(lt, not_both_strings);
525 // If both objects are undetectable, they are equal. Otherwise, they
526 // are not equal, since they are different objects and an object is not
527 // equal to undefined.
528 __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
529 __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
530 __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
531 __ and_(r0, r2, Operand(r3));
532 __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
533 __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
538 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
540 CompareICState::State expected,
543 if (expected == CompareICState::SMI) {
544 __ JumpIfNotSmi(input, fail);
545 } else if (expected == CompareICState::NUMBER) {
546 __ JumpIfSmi(input, &ok);
547 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
550 // We could be strict about internalized/non-internalized here, but as long as
551 // hydrogen doesn't care, the stub doesn't have to care either.
556 // On entry r1 and r2 are the values to be compared.
557 // On exit r0 is 0, positive or negative to indicate the result of
559 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
562 Condition cc = GetCondition();
565 CompareICStub_CheckInputType(masm, lhs, r2, left(), &miss);
566 CompareICStub_CheckInputType(masm, rhs, r3, right(), &miss);
568 Label slow; // Call builtin.
569 Label not_smis, both_loaded_as_doubles, lhs_not_nan;
571 Label not_two_smis, smi_done;
573 __ JumpIfNotSmi(r2, ¬_two_smis);
574 __ mov(r1, Operand(r1, ASR, 1));
575 __ sub(r0, r1, Operand(r0, ASR, 1));
577 __ bind(¬_two_smis);
579 // NOTICE! This code is only reached after a smi-fast-case check, so
580 // it is certain that at least one operand isn't a smi.
582 // Handle the case where the objects are identical. Either returns the answer
583 // or goes to slow. Only falls through if the objects were not identical.
584 EmitIdenticalObjectComparison(masm, &slow, cc, strength());
586 // If either is a Smi (we know that not both are), then they can only
587 // be strictly equal if the other is a HeapNumber.
588 STATIC_ASSERT(kSmiTag == 0);
589 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
590 __ and_(r2, lhs, Operand(rhs));
591 __ JumpIfNotSmi(r2, ¬_smis);
592 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
593 // 1) Return the answer.
595 // 3) Fall through to both_loaded_as_doubles.
596 // 4) Jump to lhs_not_nan.
597 // In cases 3 and 4 we have found out we were dealing with a number-number
598 // comparison. If VFP3 is supported the double values of the numbers have
599 // been loaded into d7 and d6. Otherwise, the double values have been loaded
600 // into r0, r1, r2, and r3.
601 EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
603 __ bind(&both_loaded_as_doubles);
604 // The arguments have been converted to doubles and stored in d6 and d7, if
605 // VFP3 is supported, or in r0, r1, r2, and r3.
606 __ bind(&lhs_not_nan);
608 // ARMv7 VFP3 instructions to implement double precision comparison.
609 __ VFPCompareAndSetFlags(d7, d6);
612 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
613 __ mov(r0, Operand(LESS), LeaveCC, lt);
614 __ mov(r0, Operand(GREATER), LeaveCC, gt);
618 // If one of the sides was a NaN then the v flag is set. Load r0 with
619 // whatever it takes to make the comparison fail, since comparisons with NaN
621 if (cc == lt || cc == le) {
622 __ mov(r0, Operand(GREATER));
624 __ mov(r0, Operand(LESS));
629 // At this point we know we are dealing with two different objects,
630 // and neither of them is a Smi. The objects are in rhs_ and lhs_.
632 // This returns non-equal for some object types, or falls through if it
634 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
637 Label check_for_internalized_strings;
638 Label flat_string_check;
639 // Check for heap-number-heap-number comparison. Can jump to slow case,
640 // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
641 // that case. If the inputs are not doubles then jumps to
642 // check_for_internalized_strings.
643 // In this case r2 will contain the type of rhs_. Never falls through.
644 EmitCheckForTwoHeapNumbers(masm,
647 &both_loaded_as_doubles,
648 &check_for_internalized_strings,
651 __ bind(&check_for_internalized_strings);
652 // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
653 // internalized strings.
654 if (cc == eq && !strict()) {
655 // Returns an answer for two internalized strings or two detectable objects.
656 // Otherwise jumps to string case or not both strings case.
657 // Assumes that r2 is the type of rhs_ on entry.
658 EmitCheckForInternalizedStringsOrObjects(
659 masm, lhs, rhs, &flat_string_check, &slow);
662 // Check for both being sequential one-byte strings,
663 // and inline if that is the case.
664 __ bind(&flat_string_check);
666 __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r2, r3, &slow);
668 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
671 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r2, r3, r4);
673 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r2, r3, r4,
676 // Never falls through to here.
681 // Figure out which native to call and setup the arguments.
682 if (cc == eq && strict()) {
683 __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
687 context_index = Context::EQUALS_BUILTIN_INDEX;
689 context_index = is_strong(strength())
690 ? Context::COMPARE_STRONG_BUILTIN_INDEX
691 : Context::COMPARE_BUILTIN_INDEX;
692 int ncr; // NaN compare result
693 if (cc == lt || cc == le) {
696 DCHECK(cc == gt || cc == ge); // remaining cases
699 __ mov(r0, Operand(Smi::FromInt(ncr)));
703 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
704 // tagged as a small integer.
705 __ InvokeBuiltin(context_index, JUMP_FUNCTION);
713 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
714 // We don't allow a GC during a store buffer overflow so there is no need to
715 // store the registers in any particular way, but we do have to store and
717 __ stm(db_w, sp, kCallerSaved | lr.bit());
719 const Register scratch = r1;
721 if (save_doubles()) {
722 __ SaveFPRegs(sp, scratch);
724 const int argument_count = 1;
725 const int fp_argument_count = 0;
727 AllowExternalCallThatCantCauseGC scope(masm);
728 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
729 __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
731 ExternalReference::store_buffer_overflow_function(isolate()),
733 if (save_doubles()) {
734 __ RestoreFPRegs(sp, scratch);
736 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
740 void MathPowStub::Generate(MacroAssembler* masm) {
741 const Register base = r1;
742 const Register exponent = MathPowTaggedDescriptor::exponent();
743 DCHECK(exponent.is(r2));
744 const Register heapnumbermap = r5;
745 const Register heapnumber = r0;
746 const DwVfpRegister double_base = d0;
747 const DwVfpRegister double_exponent = d1;
748 const DwVfpRegister double_result = d2;
749 const DwVfpRegister double_scratch = d3;
750 const SwVfpRegister single_scratch = s6;
751 const Register scratch = r9;
752 const Register scratch2 = r4;
754 Label call_runtime, done, int_exponent;
755 if (exponent_type() == ON_STACK) {
756 Label base_is_smi, unpack_exponent;
757 // The exponent and base are supplied as arguments on the stack.
758 // This can only happen if the stub is called from non-optimized code.
759 // Load input parameters from stack to double registers.
760 __ ldr(base, MemOperand(sp, 1 * kPointerSize));
761 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
763 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
765 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
766 __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
767 __ cmp(scratch, heapnumbermap);
768 __ b(ne, &call_runtime);
770 __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
771 __ jmp(&unpack_exponent);
773 __ bind(&base_is_smi);
774 __ vmov(single_scratch, scratch);
775 __ vcvt_f64_s32(double_base, single_scratch);
776 __ bind(&unpack_exponent);
778 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
780 __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
781 __ cmp(scratch, heapnumbermap);
782 __ b(ne, &call_runtime);
783 __ vldr(double_exponent,
784 FieldMemOperand(exponent, HeapNumber::kValueOffset));
785 } else if (exponent_type() == TAGGED) {
786 // Base is already in double_base.
787 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
789 __ vldr(double_exponent,
790 FieldMemOperand(exponent, HeapNumber::kValueOffset));
793 if (exponent_type() != INTEGER) {
794 Label int_exponent_convert;
795 // Detect integer exponents stored as double.
796 __ vcvt_u32_f64(single_scratch, double_exponent);
797 // We do not check for NaN or Infinity here because comparing numbers on
798 // ARM correctly distinguishes NaNs. We end up calling the built-in.
799 __ vcvt_f64_u32(double_scratch, single_scratch);
800 __ VFPCompareAndSetFlags(double_scratch, double_exponent);
801 __ b(eq, &int_exponent_convert);
803 if (exponent_type() == ON_STACK) {
804 // Detect square root case. Crankshaft detects constant +/-0.5 at
805 // compile time and uses DoMathPowHalf instead. We then skip this check
806 // for non-constant cases of +/-0.5 as these hardly occur.
810 __ vmov(double_scratch, 0.5, scratch);
811 __ VFPCompareAndSetFlags(double_exponent, double_scratch);
812 __ b(ne, ¬_plus_half);
814 // Calculates square root of base. Check for the special case of
815 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
816 __ vmov(double_scratch, -V8_INFINITY, scratch);
817 __ VFPCompareAndSetFlags(double_base, double_scratch);
818 __ vneg(double_result, double_scratch, eq);
821 // Add +0 to convert -0 to +0.
822 __ vadd(double_scratch, double_base, kDoubleRegZero);
823 __ vsqrt(double_result, double_scratch);
826 __ bind(¬_plus_half);
827 __ vmov(double_scratch, -0.5, scratch);
828 __ VFPCompareAndSetFlags(double_exponent, double_scratch);
829 __ b(ne, &call_runtime);
831 // Calculates square root of base. Check for the special case of
832 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
833 __ vmov(double_scratch, -V8_INFINITY, scratch);
834 __ VFPCompareAndSetFlags(double_base, double_scratch);
835 __ vmov(double_result, kDoubleRegZero, eq);
838 // Add +0 to convert -0 to +0.
839 __ vadd(double_scratch, double_base, kDoubleRegZero);
840 __ vmov(double_result, 1.0, scratch);
841 __ vsqrt(double_scratch, double_scratch);
842 __ vdiv(double_result, double_result, double_scratch);
848 AllowExternalCallThatCantCauseGC scope(masm);
849 __ PrepareCallCFunction(0, 2, scratch);
850 __ MovToFloatParameters(double_base, double_exponent);
852 ExternalReference::power_double_double_function(isolate()),
856 __ MovFromFloatResult(double_result);
859 __ bind(&int_exponent_convert);
860 __ vcvt_u32_f64(single_scratch, double_exponent);
861 __ vmov(scratch, single_scratch);
864 // Calculate power with integer exponent.
865 __ bind(&int_exponent);
867 // Get two copies of exponent in the registers scratch and exponent.
868 if (exponent_type() == INTEGER) {
869 __ mov(scratch, exponent);
871 // Exponent has previously been stored into scratch as untagged integer.
872 __ mov(exponent, scratch);
874 __ vmov(double_scratch, double_base); // Back up base.
875 __ vmov(double_result, 1.0, scratch2);
877 // Get absolute value of exponent.
878 __ cmp(scratch, Operand::Zero());
879 __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
880 __ sub(scratch, scratch2, scratch, LeaveCC, mi);
883 __ bind(&while_true);
884 __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
885 __ vmul(double_result, double_result, double_scratch, cs);
886 __ vmul(double_scratch, double_scratch, double_scratch, ne);
887 __ b(ne, &while_true);
889 __ cmp(exponent, Operand::Zero());
891 __ vmov(double_scratch, 1.0, scratch);
892 __ vdiv(double_result, double_scratch, double_result);
893 // Test whether result is zero. Bail out to check for subnormal result.
894 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
895 __ VFPCompareAndSetFlags(double_result, 0.0);
897 // double_exponent may not containe the exponent value if the input was a
898 // smi. We set it with exponent value before bailing out.
899 __ vmov(single_scratch, exponent);
900 __ vcvt_f64_s32(double_exponent, single_scratch);
902 // Returning or bailing out.
903 Counters* counters = isolate()->counters();
904 if (exponent_type() == ON_STACK) {
905 // The arguments are still on the stack.
906 __ bind(&call_runtime);
907 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
909 // The stub is called from non-optimized code, which expects the result
910 // as heap number in exponent.
912 __ AllocateHeapNumber(
913 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
914 __ vstr(double_result,
915 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
916 DCHECK(heapnumber.is(r0));
917 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
922 AllowExternalCallThatCantCauseGC scope(masm);
923 __ PrepareCallCFunction(0, 2, scratch);
924 __ MovToFloatParameters(double_base, double_exponent);
926 ExternalReference::power_double_double_function(isolate()),
930 __ MovFromFloatResult(double_result);
933 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
939 bool CEntryStub::NeedsImmovableCode() {
944 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
945 CEntryStub::GenerateAheadOfTime(isolate);
946 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
947 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
948 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
949 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
950 CreateWeakCellStub::GenerateAheadOfTime(isolate);
951 BinaryOpICStub::GenerateAheadOfTime(isolate);
952 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
953 StoreFastElementStub::GenerateAheadOfTime(isolate);
954 TypeofStub::GenerateAheadOfTime(isolate);
958 void CodeStub::GenerateFPStubs(Isolate* isolate) {
959 // Generate if not already in cache.
960 SaveFPRegsMode mode = kSaveFPRegs;
961 CEntryStub(isolate, 1, mode).GetCode();
962 StoreBufferOverflowStub(isolate, mode).GetCode();
963 isolate->set_fp_stubs_generated(true);
967 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
968 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
973 void CEntryStub::Generate(MacroAssembler* masm) {
974 // Called from JavaScript; parameters are on stack as if calling JS function.
975 // r0: number of arguments including receiver
976 // r1: pointer to builtin function
977 // fp: frame pointer (restored after C call)
978 // sp: stack pointer (restored as callee's sp after C call)
979 // cp: current context (C callee-saved)
981 ProfileEntryHookStub::MaybeCallEntryHook(masm);
983 __ mov(r5, Operand(r1));
985 // Compute the argv pointer in a callee-saved register.
986 __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
987 __ sub(r1, r1, Operand(kPointerSize));
989 // Enter the exit frame that transitions from JavaScript to C++.
990 FrameScope scope(masm, StackFrame::MANUAL);
991 __ EnterExitFrame(save_doubles());
993 // Store a copy of argc in callee-saved registers for later.
994 __ mov(r4, Operand(r0));
996 // r0, r4: number of arguments including receiver (C callee-saved)
997 // r1: pointer to the first argument (C callee-saved)
998 // r5: pointer to builtin function (C callee-saved)
1000 // Result returned in r0 or r0+r1 by default.
1002 #if V8_HOST_ARCH_ARM
1003 int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1004 int frame_alignment_mask = frame_alignment - 1;
1005 if (FLAG_debug_code) {
1006 if (frame_alignment > kPointerSize) {
1007 Label alignment_as_expected;
1008 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
1009 __ tst(sp, Operand(frame_alignment_mask));
1010 __ b(eq, &alignment_as_expected);
1011 // Don't use Check here, as it will call Runtime_Abort re-entering here.
1012 __ stop("Unexpected alignment");
1013 __ bind(&alignment_as_expected);
1019 // r0 = argc, r1 = argv
1020 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
1022 // To let the GC traverse the return address of the exit frames, we need to
1023 // know where the return address is. The CEntryStub is unmovable, so
1024 // we can store the address on the stack to be able to find it again and
1025 // we never have to restore it, because it will not change.
1026 // Compute the return address in lr to return to after the jump below. Pc is
1027 // already at '+ 8' from the current instruction but return is after three
1028 // instructions so add another 4 to pc to get the return address.
1030 // Prevent literal pool emission before return address.
1031 Assembler::BlockConstPoolScope block_const_pool(masm);
1032 __ add(lr, pc, Operand(4));
1033 __ str(lr, MemOperand(sp, 0));
1037 __ VFPEnsureFPSCRState(r2);
1039 // Check result for exception sentinel.
1040 Label exception_returned;
1041 __ CompareRoot(r0, Heap::kExceptionRootIndex);
1042 __ b(eq, &exception_returned);
1044 // Check that there is no pending exception, otherwise we
1045 // should have returned the exception sentinel.
1046 if (FLAG_debug_code) {
1048 ExternalReference pending_exception_address(
1049 Isolate::kPendingExceptionAddress, isolate());
1050 __ mov(r2, Operand(pending_exception_address));
1051 __ ldr(r2, MemOperand(r2));
1052 __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
1053 // Cannot use check here as it attempts to generate call into runtime.
1055 __ stop("Unexpected pending exception");
1059 // Exit C frame and return.
1061 // sp: stack pointer
1062 // fp: frame pointer
1063 // Callee-saved register r4 still holds argc.
1064 __ LeaveExitFrame(save_doubles(), r4, true);
1067 // Handling of exception.
1068 __ bind(&exception_returned);
1070 ExternalReference pending_handler_context_address(
1071 Isolate::kPendingHandlerContextAddress, isolate());
1072 ExternalReference pending_handler_code_address(
1073 Isolate::kPendingHandlerCodeAddress, isolate());
1074 ExternalReference pending_handler_offset_address(
1075 Isolate::kPendingHandlerOffsetAddress, isolate());
1076 ExternalReference pending_handler_fp_address(
1077 Isolate::kPendingHandlerFPAddress, isolate());
1078 ExternalReference pending_handler_sp_address(
1079 Isolate::kPendingHandlerSPAddress, isolate());
1081 // Ask the runtime for help to determine the handler. This will set r0 to
1082 // contain the current pending exception, don't clobber it.
1083 ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
1086 FrameScope scope(masm, StackFrame::MANUAL);
1087 __ PrepareCallCFunction(3, 0, r0);
1088 __ mov(r0, Operand(0));
1089 __ mov(r1, Operand(0));
1090 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
1091 __ CallCFunction(find_handler, 3);
1094 // Retrieve the handler context, SP and FP.
1095 __ mov(cp, Operand(pending_handler_context_address));
1096 __ ldr(cp, MemOperand(cp));
1097 __ mov(sp, Operand(pending_handler_sp_address));
1098 __ ldr(sp, MemOperand(sp));
1099 __ mov(fp, Operand(pending_handler_fp_address));
1100 __ ldr(fp, MemOperand(fp));
1102 // If the handler is a JS frame, restore the context to the frame. Note that
1103 // the context will be set to (cp == 0) for non-JS frames.
1104 __ cmp(cp, Operand(0));
1105 __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1107 // Compute the handler entry address and jump to it.
1108 ConstantPoolUnavailableScope constant_pool_unavailable(masm);
1109 __ mov(r1, Operand(pending_handler_code_address));
1110 __ ldr(r1, MemOperand(r1));
1111 __ mov(r2, Operand(pending_handler_offset_address));
1112 __ ldr(r2, MemOperand(r2));
1113 __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
1114 if (FLAG_enable_embedded_constant_pool) {
1115 __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r1);
1121 void JSEntryStub::Generate(MacroAssembler* masm) {
1128 Label invoke, handler_entry, exit;
1130 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1132 // Called from C, so do not pop argc and args on exit (preserve sp)
1133 // No need to save register-passed args
1134 // Save callee-saved registers (incl. cp and fp), sp, and lr
1135 __ stm(db_w, sp, kCalleeSaved | lr.bit());
1137 // Save callee-saved vfp registers.
1138 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
1139 // Set up the reserved register for 0.0.
1140 __ vmov(kDoubleRegZero, 0.0);
1141 __ VFPEnsureFPSCRState(r4);
1143 // Get address of argv, see stm above.
1149 // Set up argv in r4.
1150 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1151 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
1152 __ ldr(r4, MemOperand(sp, offset_to_argv));
1154 // Push a frame with special values setup to mark it as an entry frame.
1160 int marker = type();
1161 if (FLAG_enable_embedded_constant_pool) {
1162 __ mov(r8, Operand::Zero());
1164 __ mov(r7, Operand(Smi::FromInt(marker)));
1165 __ mov(r6, Operand(Smi::FromInt(marker)));
1167 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1168 __ ldr(r5, MemOperand(r5));
1169 __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1170 __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
1171 (FLAG_enable_embedded_constant_pool ? r8.bit() : 0) |
1174 // Set up frame pointer for the frame to be pushed.
1175 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1177 // If this is the outermost JS call, set js_entry_sp value.
1178 Label non_outermost_js;
1179 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1180 __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1181 __ ldr(r6, MemOperand(r5));
1182 __ cmp(r6, Operand::Zero());
1183 __ b(ne, &non_outermost_js);
1184 __ str(fp, MemOperand(r5));
1185 __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1188 __ bind(&non_outermost_js);
1189 __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1193 // Jump to a faked try block that does the invoke, with a faked catch
1194 // block that sets the pending exception.
1197 // Block literal pool emission whilst taking the position of the handler
1198 // entry. This avoids making the assumption that literal pools are always
1199 // emitted after an instruction is emitted, rather than before.
1201 Assembler::BlockConstPoolScope block_const_pool(masm);
1202 __ bind(&handler_entry);
1203 handler_offset_ = handler_entry.pos();
1204 // Caught exception: Store result (exception) in the pending exception
1205 // field in the JSEnv and return a failure sentinel. Coming in here the
1206 // fp will be invalid because the PushStackHandler below sets it to 0 to
1207 // signal the existence of the JSEntry frame.
1208 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1211 __ str(r0, MemOperand(ip));
1212 __ LoadRoot(r0, Heap::kExceptionRootIndex);
1215 // Invoke: Link this frame into the handler chain.
1217 // Must preserve r0-r4, r5-r6 are available.
1218 __ PushStackHandler();
1219 // If an exception not caught by another handler occurs, this handler
1220 // returns control to the code after the bl(&invoke) above, which
1221 // restores all kCalleeSaved registers (including cp and fp) to their
1222 // saved values before returning a failure to C.
1224 // Clear any pending exceptions.
1225 __ mov(r5, Operand(isolate()->factory()->the_hole_value()));
1226 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1228 __ str(r5, MemOperand(ip));
1230 // Invoke the function by calling through JS entry trampoline builtin.
1231 // Notice that we cannot store a reference to the trampoline code directly in
1232 // this stub, because runtime stubs are not traversed when doing GC.
1234 // Expected registers by Builtins::JSEntryTrampoline
1240 if (type() == StackFrame::ENTRY_CONSTRUCT) {
1241 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1243 __ mov(ip, Operand(construct_entry));
1245 ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
1246 __ mov(ip, Operand(entry));
1248 __ ldr(ip, MemOperand(ip)); // deref address
1249 __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
1251 // Branch and link to JSEntryTrampoline.
1254 // Unlink this frame from the handler chain.
1255 __ PopStackHandler();
1257 __ bind(&exit); // r0 holds result
1258 // Check if the current stack frame is marked as the outermost JS frame.
1259 Label non_outermost_js_2;
1261 __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1262 __ b(ne, &non_outermost_js_2);
1263 __ mov(r6, Operand::Zero());
1264 __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1265 __ str(r6, MemOperand(r5));
1266 __ bind(&non_outermost_js_2);
1268 // Restore the top frame descriptors from the stack.
1271 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1272 __ str(r3, MemOperand(ip));
1274 // Reset the stack to the callee saved registers.
1275 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1277 // Restore callee-saved registers and return.
1279 if (FLAG_debug_code) {
1280 __ mov(lr, Operand(pc));
1284 // Restore callee-saved vfp registers.
1285 __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
1287 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
1291 void InstanceOfStub::Generate(MacroAssembler* masm) {
1292 Register const object = r1; // Object (lhs).
1293 Register const function = r0; // Function (rhs).
1294 Register const object_map = r2; // Map of {object}.
1295 Register const function_map = r3; // Map of {function}.
1296 Register const function_prototype = r4; // Prototype of {function}.
1297 Register const scratch = r5;
1299 DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
1300 DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
1302 // Check if {object} is a smi.
1303 Label object_is_smi;
1304 __ JumpIfSmi(object, &object_is_smi);
1306 // Lookup the {function} and the {object} map in the global instanceof cache.
1307 // Note: This is safe because we clear the global instanceof cache whenever
1308 // we change the prototype of any object.
1309 Label fast_case, slow_case;
1310 __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
1311 __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1312 __ b(ne, &fast_case);
1313 __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
1314 __ b(ne, &fast_case);
1315 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1318 // If {object} is a smi we can safely return false if {function} is a JS
1319 // function, otherwise we have to miss to the runtime and throw an exception.
1320 __ bind(&object_is_smi);
1321 __ JumpIfSmi(function, &slow_case);
1322 __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
1323 __ b(ne, &slow_case);
1324 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
1327 // Fast-case: The {function} must be a valid JSFunction.
1328 __ bind(&fast_case);
1329 __ JumpIfSmi(function, &slow_case);
1330 __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
1331 __ b(ne, &slow_case);
1333 // Ensure that {function} has an instance prototype.
1334 __ ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
1335 __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
1336 __ b(ne, &slow_case);
1338 // Ensure that {function} is not bound.
1339 Register const shared_info = scratch;
1341 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
1342 __ ldr(scratch, FieldMemOperand(shared_info,
1343 SharedFunctionInfo::kCompilerHintsOffset));
1345 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
1346 __ b(ne, &slow_case);
1348 // Get the "prototype" (or initial map) of the {function}.
1349 __ ldr(function_prototype,
1350 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1351 __ AssertNotSmi(function_prototype);
1353 // Resolve the prototype if the {function} has an initial map. Afterwards the
1354 // {function_prototype} will be either the JSReceiver prototype object or the
1355 // hole value, which means that no instances of the {function} were created so
1356 // far and hence we should return false.
1357 Label function_prototype_valid;
1358 __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
1359 __ b(ne, &function_prototype_valid);
1360 __ ldr(function_prototype,
1361 FieldMemOperand(function_prototype, Map::kPrototypeOffset));
1362 __ bind(&function_prototype_valid);
1363 __ AssertNotSmi(function_prototype);
1365 // Update the global instanceof cache with the current {object} map and
1366 // {function}. The cached answer will be set when it is known below.
1367 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1368 __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
1370 // Loop through the prototype chain looking for the {function} prototype.
1371 // Assume true, and change to false if not found.
1372 Register const object_prototype = object_map;
1373 Register const null = scratch;
1375 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
1376 __ LoadRoot(null, Heap::kNullValueRootIndex);
1378 __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
1379 __ cmp(object_prototype, function_prototype);
1381 __ cmp(object_prototype, null);
1382 __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
1384 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
1386 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1389 // Slow-case: Call the runtime function.
1390 __ bind(&slow_case);
1391 __ Push(object, function);
1392 __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
1396 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1398 Register receiver = LoadDescriptor::ReceiverRegister();
1399 // Ensure that the vector and slot registers won't be clobbered before
1400 // calling the miss handler.
1401 DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::VectorRegister(),
1402 LoadWithVectorDescriptor::SlotRegister()));
1404 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
1407 PropertyAccessCompiler::TailCallBuiltin(
1408 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1412 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
1413 // Return address is in lr.
1416 Register receiver = LoadDescriptor::ReceiverRegister();
1417 Register index = LoadDescriptor::NameRegister();
1418 Register scratch = r5;
1419 Register result = r0;
1420 DCHECK(!scratch.is(receiver) && !scratch.is(index));
1421 DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
1422 result.is(LoadWithVectorDescriptor::SlotRegister()));
1424 // StringCharAtGenerator doesn't use the result register until it's passed
1425 // the different miss possibilities. If it did, we would have a conflict
1426 // when FLAG_vector_ics is true.
1427 StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
1428 &miss, // When not a string.
1429 &miss, // When not a number.
1430 &miss, // When index out of range.
1431 STRING_INDEX_IS_ARRAY_INDEX,
1432 RECEIVER_IS_STRING);
1433 char_at_generator.GenerateFast(masm);
1436 StubRuntimeCallHelper call_helper;
1437 char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
1440 PropertyAccessCompiler::TailCallBuiltin(
1441 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1445 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1446 // The displacement is the offset of the last parameter (if any)
1447 // relative to the frame pointer.
1448 const int kDisplacement =
1449 StandardFrameConstants::kCallerSPOffset - kPointerSize;
1450 DCHECK(r1.is(ArgumentsAccessReadDescriptor::index()));
1451 DCHECK(r0.is(ArgumentsAccessReadDescriptor::parameter_count()));
1453 // Check that the key is a smi.
1455 __ JumpIfNotSmi(r1, &slow);
1457 // Check if the calling frame is an arguments adaptor frame.
1459 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1460 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1461 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1464 // Check index against formal parameters count limit passed in
1465 // through register r0. Use unsigned comparison to get negative
1470 // Read the argument from the stack and return it.
1472 __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
1473 __ ldr(r0, MemOperand(r3, kDisplacement));
1476 // Arguments adaptor case: Check index against actual arguments
1477 // limit found in the arguments adaptor frame. Use unsigned
1478 // comparison to get negative check for free.
1480 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1484 // Read the argument from the adaptor frame and return it.
1486 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
1487 __ ldr(r0, MemOperand(r3, kDisplacement));
1490 // Slow-case: Handle non-smi or out-of-bounds access to arguments
1491 // by calling the runtime system.
1494 __ TailCallRuntime(Runtime::kArguments, 1, 1);
1498 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1499 // sp[0] : number of parameters
1500 // sp[4] : receiver displacement
1503 // Check if the calling frame is an arguments adaptor frame.
1505 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1506 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
1507 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1510 // Patch the arguments.length and the parameters pointer in the current frame.
1511 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1512 __ str(r2, MemOperand(sp, 0 * kPointerSize));
1513 __ add(r3, r3, Operand(r2, LSL, 1));
1514 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
1515 __ str(r3, MemOperand(sp, 1 * kPointerSize));
1518 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1522 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1524 // sp[0] : number of parameters (tagged)
1525 // sp[4] : address of receiver argument
1527 // Registers used over whole function:
1528 // r6 : allocated object (tagged)
1529 // r9 : mapped parameter count (tagged)
1531 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
1532 // r1 = parameter count (tagged)
1534 // Check if the calling frame is an arguments adaptor frame.
1536 Label adaptor_frame, try_allocate;
1537 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1538 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
1539 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1540 __ b(eq, &adaptor_frame);
1542 // No adaptor, parameter count = argument count.
1544 __ b(&try_allocate);
1546 // We have an adaptor frame. Patch the parameters pointer.
1547 __ bind(&adaptor_frame);
1548 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1549 __ add(r3, r3, Operand(r2, LSL, 1));
1550 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
1551 __ str(r3, MemOperand(sp, 1 * kPointerSize));
1553 // r1 = parameter count (tagged)
1554 // r2 = argument count (tagged)
1555 // Compute the mapped parameter count = min(r1, r2) in r1.
1556 __ cmp(r1, Operand(r2));
1557 __ mov(r1, Operand(r2), LeaveCC, gt);
1559 __ bind(&try_allocate);
1561 // Compute the sizes of backing store, parameter map, and arguments object.
1562 // 1. Parameter map, has 2 extra words containing context and backing store.
1563 const int kParameterMapHeaderSize =
1564 FixedArray::kHeaderSize + 2 * kPointerSize;
1565 // If there are no mapped parameters, we do not need the parameter_map.
1566 __ cmp(r1, Operand(Smi::FromInt(0)));
1567 __ mov(r9, Operand::Zero(), LeaveCC, eq);
1568 __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
1569 __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
1571 // 2. Backing store.
1572 __ add(r9, r9, Operand(r2, LSL, 1));
1573 __ add(r9, r9, Operand(FixedArray::kHeaderSize));
1575 // 3. Arguments object.
1576 __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
1578 // Do the allocation of all three objects in one go.
1579 __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
1581 // r0 = address of new object(s) (tagged)
1582 // r2 = argument count (smi-tagged)
1583 // Get the arguments boilerplate from the current native context into r4.
1584 const int kNormalOffset =
1585 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
1586 const int kAliasedOffset =
1587 Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
1589 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1590 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
1591 __ cmp(r1, Operand::Zero());
1592 __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
1593 __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
1595 // r0 = address of new object (tagged)
1596 // r1 = mapped parameter count (tagged)
1597 // r2 = argument count (smi-tagged)
1598 // r4 = address of arguments map (tagged)
1599 __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
1600 __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
1601 __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
1602 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
1604 // Set up the callee in-object property.
1605 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1606 __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
1607 __ AssertNotSmi(r3);
1608 const int kCalleeOffset = JSObject::kHeaderSize +
1609 Heap::kArgumentsCalleeIndex * kPointerSize;
1610 __ str(r3, FieldMemOperand(r0, kCalleeOffset));
1612 // Use the length (smi tagged) and set that as an in-object property too.
1614 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1615 const int kLengthOffset = JSObject::kHeaderSize +
1616 Heap::kArgumentsLengthIndex * kPointerSize;
1617 __ str(r2, FieldMemOperand(r0, kLengthOffset));
1619 // Set up the elements pointer in the allocated arguments object.
1620 // If we allocated a parameter map, r4 will point there, otherwise
1621 // it will point to the backing store.
1622 __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
1623 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
1625 // r0 = address of new object (tagged)
1626 // r1 = mapped parameter count (tagged)
1627 // r2 = argument count (tagged)
1628 // r4 = address of parameter map or backing store (tagged)
1629 // Initialize parameter map. If there are no mapped arguments, we're done.
1630 Label skip_parameter_map;
1631 __ cmp(r1, Operand(Smi::FromInt(0)));
1632 // Move backing store address to r3, because it is
1633 // expected there when filling in the unmapped arguments.
1634 __ mov(r3, r4, LeaveCC, eq);
1635 __ b(eq, &skip_parameter_map);
1637 __ LoadRoot(r6, Heap::kSloppyArgumentsElementsMapRootIndex);
1638 __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
1639 __ add(r6, r1, Operand(Smi::FromInt(2)));
1640 __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
1641 __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
1642 __ add(r6, r4, Operand(r1, LSL, 1));
1643 __ add(r6, r6, Operand(kParameterMapHeaderSize));
1644 __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
1646 // Copy the parameter slots and the holes in the arguments.
1647 // We need to fill in mapped_parameter_count slots. They index the context,
1648 // where parameters are stored in reverse order, at
1649 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
1650 // The mapped parameter thus need to get indices
1651 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
1652 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
1653 // We loop from right to left.
1654 Label parameters_loop, parameters_test;
1656 __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
1657 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
1658 __ sub(r9, r9, Operand(r1));
1659 __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
1660 __ add(r3, r4, Operand(r6, LSL, 1));
1661 __ add(r3, r3, Operand(kParameterMapHeaderSize));
1663 // r6 = loop variable (tagged)
1664 // r1 = mapping index (tagged)
1665 // r3 = address of backing store (tagged)
1666 // r4 = address of parameter map (tagged), which is also the address of new
1667 // object + Heap::kSloppyArgumentsObjectSize (tagged)
1668 // r0 = temporary scratch (a.o., for address calculation)
1669 // r5 = the hole value
1670 __ jmp(¶meters_test);
1672 __ bind(¶meters_loop);
1673 __ sub(r6, r6, Operand(Smi::FromInt(1)));
1674 __ mov(r0, Operand(r6, LSL, 1));
1675 __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
1676 __ str(r9, MemOperand(r4, r0));
1677 __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
1678 __ str(r5, MemOperand(r3, r0));
1679 __ add(r9, r9, Operand(Smi::FromInt(1)));
1680 __ bind(¶meters_test);
1681 __ cmp(r6, Operand(Smi::FromInt(0)));
1682 __ b(ne, ¶meters_loop);
1684 // Restore r0 = new object (tagged)
1685 __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
1687 __ bind(&skip_parameter_map);
1688 // r0 = address of new object (tagged)
1689 // r2 = argument count (tagged)
1690 // r3 = address of backing store (tagged)
1692 // Copy arguments header and remaining slots (if there are any).
1693 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
1694 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
1695 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
1697 Label arguments_loop, arguments_test;
1699 __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
1700 __ sub(r4, r4, Operand(r9, LSL, 1));
1701 __ jmp(&arguments_test);
1703 __ bind(&arguments_loop);
1704 __ sub(r4, r4, Operand(kPointerSize));
1705 __ ldr(r6, MemOperand(r4, 0));
1706 __ add(r5, r3, Operand(r9, LSL, 1));
1707 __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
1708 __ add(r9, r9, Operand(Smi::FromInt(1)));
1710 __ bind(&arguments_test);
1711 __ cmp(r9, Operand(r2));
1712 __ b(lt, &arguments_loop);
1714 // Return and remove the on-stack parameters.
1715 __ add(sp, sp, Operand(3 * kPointerSize));
1718 // Do the runtime call to allocate the arguments object.
1719 // r0 = address of new object (tagged)
1720 // r2 = argument count (tagged)
1722 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
1723 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1727 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1728 // Return address is in lr.
1731 Register receiver = LoadDescriptor::ReceiverRegister();
1732 Register key = LoadDescriptor::NameRegister();
1734 // Check that the key is an array index, that is Uint32.
1735 __ NonNegativeSmiTst(key);
1738 // Everything is fine, call runtime.
1739 __ Push(receiver, key); // Receiver, key.
1741 // Perform tail call to the entry.
1742 __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
1745 PropertyAccessCompiler::TailCallBuiltin(
1746 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1750 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1751 // sp[0] : number of parameters
1752 // sp[4] : receiver displacement
1754 // Check if the calling frame is an arguments adaptor frame.
1755 Label adaptor_frame, try_allocate, runtime;
1756 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1757 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1758 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1759 __ b(eq, &adaptor_frame);
1761 // Get the length from the frame.
1762 __ ldr(r1, MemOperand(sp, 0));
1763 __ b(&try_allocate);
1765 // Patch the arguments.length and the parameters pointer.
1766 __ bind(&adaptor_frame);
1767 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1768 __ str(r1, MemOperand(sp, 0));
1769 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
1770 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
1771 __ str(r3, MemOperand(sp, 1 * kPointerSize));
1773 // Try the new space allocation. Start out with computing the size
1774 // of the arguments object and the elements array in words.
1775 Label add_arguments_object;
1776 __ bind(&try_allocate);
1777 __ SmiUntag(r1, SetCC);
1778 __ b(eq, &add_arguments_object);
1779 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
1780 __ bind(&add_arguments_object);
1781 __ add(r1, r1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
1783 // Do the allocation of both objects in one go.
1784 __ Allocate(r1, r0, r2, r3, &runtime,
1785 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
1787 // Get the arguments boilerplate from the current native context.
1788 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1789 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
1790 __ ldr(r4, MemOperand(
1791 r4, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
1793 __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
1794 __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
1795 __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
1796 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
1798 // Get the length (smi tagged) and set that as an in-object property too.
1799 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1800 __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
1802 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
1803 Heap::kArgumentsLengthIndex * kPointerSize));
1805 // If there are no actual arguments, we're done.
1807 __ cmp(r1, Operand::Zero());
1810 // Get the parameters pointer from the stack.
1811 __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
1813 // Set up the elements pointer in the allocated arguments object and
1814 // initialize the header in the elements fixed array.
1815 __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
1816 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
1817 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
1818 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
1819 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
1822 // Copy the fixed array slots.
1824 // Set up r4 to point to the first array slot.
1825 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1827 // Pre-decrement r2 with kPointerSize on each iteration.
1828 // Pre-decrement in order to skip receiver.
1829 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
1830 // Post-increment r4 with kPointerSize on each iteration.
1831 __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
1832 __ sub(r1, r1, Operand(1));
1833 __ cmp(r1, Operand::Zero());
1836 // Return and remove the on-stack parameters.
1838 __ add(sp, sp, Operand(3 * kPointerSize));
1841 // Do the runtime call to allocate the arguments object.
1843 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
1847 void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
1848 // Stack layout on entry.
1849 // sp[0] : language mode
1850 // sp[4] : index of rest parameter
1851 // sp[8] : number of parameters
1852 // sp[12] : receiver displacement
1855 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1856 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1857 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1860 // Patch the arguments.length and the parameters pointer.
1861 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1862 __ str(r1, MemOperand(sp, 2 * kPointerSize));
1863 __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
1864 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
1865 __ str(r3, MemOperand(sp, 3 * kPointerSize));
1868 __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
1872 void RegExpExecStub::Generate(MacroAssembler* masm) {
1873 // Just jump directly to runtime if native RegExp is not selected at compile
1874 // time or if regexp entry in generated code is turned off runtime switch or
1876 #ifdef V8_INTERPRETED_REGEXP
1877 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
1878 #else // V8_INTERPRETED_REGEXP
1880 // Stack frame on entry.
1881 // sp[0]: last_match_info (expected JSArray)
1882 // sp[4]: previous index
1883 // sp[8]: subject string
1884 // sp[12]: JSRegExp object
1886 const int kLastMatchInfoOffset = 0 * kPointerSize;
1887 const int kPreviousIndexOffset = 1 * kPointerSize;
1888 const int kSubjectOffset = 2 * kPointerSize;
1889 const int kJSRegExpOffset = 3 * kPointerSize;
1892 // Allocation of registers for this function. These are in callee save
1893 // registers and will be preserved by the call to the native RegExp code, as
1894 // this code is called using the normal C calling convention. When calling
1895 // directly from generated code the native RegExp code will not do a GC and
1896 // therefore the content of these registers are safe to use after the call.
1897 Register subject = r4;
1898 Register regexp_data = r5;
1899 Register last_match_info_elements = no_reg; // will be r6;
1901 // Ensure that a RegExp stack is allocated.
1902 ExternalReference address_of_regexp_stack_memory_address =
1903 ExternalReference::address_of_regexp_stack_memory_address(isolate());
1904 ExternalReference address_of_regexp_stack_memory_size =
1905 ExternalReference::address_of_regexp_stack_memory_size(isolate());
1906 __ mov(r0, Operand(address_of_regexp_stack_memory_size));
1907 __ ldr(r0, MemOperand(r0, 0));
1908 __ cmp(r0, Operand::Zero());
1911 // Check that the first argument is a JSRegExp object.
1912 __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
1913 __ JumpIfSmi(r0, &runtime);
1914 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
1917 // Check that the RegExp has been compiled (data contains a fixed array).
1918 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
1919 if (FLAG_debug_code) {
1920 __ SmiTst(regexp_data);
1921 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1922 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
1923 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1926 // regexp_data: RegExp data (FixedArray)
1927 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1928 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
1929 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
1932 // regexp_data: RegExp data (FixedArray)
1933 // Check that the number of captures fit in the static offsets vector buffer.
1935 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1936 // Check (number_of_captures + 1) * 2 <= offsets vector size
1937 // Or number_of_captures * 2 <= offsets vector size - 2
1938 // Multiplying by 2 comes for free since r2 is smi-tagged.
1939 STATIC_ASSERT(kSmiTag == 0);
1940 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
1941 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
1942 __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
1945 // Reset offset for possibly sliced string.
1946 __ mov(r9, Operand::Zero());
1947 __ ldr(subject, MemOperand(sp, kSubjectOffset));
1948 __ JumpIfSmi(subject, &runtime);
1949 __ mov(r3, subject); // Make a copy of the original subject string.
1950 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
1951 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
1952 // subject: subject string
1953 // r3: subject string
1954 // r0: subject string instance type
1955 // regexp_data: RegExp data (FixedArray)
1956 // Handle subject string according to its encoding and representation:
1957 // (1) Sequential string? If yes, go to (5).
1958 // (2) Anything but sequential or cons? If yes, go to (6).
1959 // (3) Cons string. If the string is flat, replace subject with first string.
1960 // Otherwise bailout.
1961 // (4) Is subject external? If yes, go to (7).
1962 // (5) Sequential string. Load regexp code according to encoding.
1966 // Deferred code at the end of the stub:
1967 // (6) Not a long external string? If yes, go to (8).
1968 // (7) External string. Make it, offset-wise, look like a sequential string.
1970 // (8) Short external string or not a string? If yes, bail out to runtime.
1971 // (9) Sliced string. Replace subject with parent. Go to (4).
1973 Label seq_string /* 5 */, external_string /* 7 */,
1974 check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
1975 not_long_external /* 8 */;
1977 // (1) Sequential string? If yes, go to (5).
1980 Operand(kIsNotStringMask |
1981 kStringRepresentationMask |
1982 kShortExternalStringMask),
1984 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
1985 __ b(eq, &seq_string); // Go to (5).
1987 // (2) Anything but sequential or cons? If yes, go to (6).
1988 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
1989 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
1990 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
1991 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
1992 __ cmp(r1, Operand(kExternalStringTag));
1993 __ b(ge, ¬_seq_nor_cons); // Go to (6).
1995 // (3) Cons string. Check that it's flat.
1996 // Replace subject with first string and reload instance type.
1997 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
1998 __ CompareRoot(r0, Heap::kempty_stringRootIndex);
2000 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2002 // (4) Is subject external? If yes, go to (7).
2003 __ bind(&check_underlying);
2004 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2005 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2006 STATIC_ASSERT(kSeqStringTag == 0);
2007 __ tst(r0, Operand(kStringRepresentationMask));
2008 // The underlying external string is never a short external string.
2009 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2010 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2011 __ b(ne, &external_string); // Go to (7).
2013 // (5) Sequential string. Load regexp code according to encoding.
2014 __ bind(&seq_string);
2015 // subject: sequential subject string (or look-alike, external string)
2016 // r3: original subject string
2017 // Load previous index and check range before r3 is overwritten. We have to
2018 // use r3 instead of subject here because subject might have been only made
2019 // to look like a sequential string when it actually is an external string.
2020 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
2021 __ JumpIfNotSmi(r1, &runtime);
2022 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
2023 __ cmp(r3, Operand(r1));
2027 STATIC_ASSERT(4 == kOneByteStringTag);
2028 STATIC_ASSERT(kTwoByteStringTag == 0);
2029 __ and_(r0, r0, Operand(kStringEncodingMask));
2030 __ mov(r3, Operand(r0, ASR, 2), SetCC);
2031 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
2033 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
2035 // (E) Carry on. String handling is done.
2036 // r6: irregexp code
2037 // Check that the irregexp code has been generated for the actual string
2038 // encoding. If it has, the field contains a code object otherwise it contains
2039 // a smi (code flushing support).
2040 __ JumpIfSmi(r6, &runtime);
2042 // r1: previous index
2043 // r3: encoding of subject string (1 if one_byte, 0 if two_byte);
2045 // subject: Subject string
2046 // regexp_data: RegExp data (FixedArray)
2047 // All checks done. Now push arguments for native regexp code.
2048 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);
2050 // Isolates: note we add an additional parameter here (isolate pointer).
2051 const int kRegExpExecuteArguments = 9;
2052 const int kParameterRegisters = 4;
2053 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2055 // Stack pointer now points to cell where return address is to be written.
2056 // Arguments are before that on the stack or in registers.
2058 // Argument 9 (sp[20]): Pass current isolate address.
2059 __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2060 __ str(r0, MemOperand(sp, 5 * kPointerSize));
2062 // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
2063 __ mov(r0, Operand(1));
2064 __ str(r0, MemOperand(sp, 4 * kPointerSize));
2066 // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
2067 __ mov(r0, Operand(address_of_regexp_stack_memory_address));
2068 __ ldr(r0, MemOperand(r0, 0));
2069 __ mov(r2, Operand(address_of_regexp_stack_memory_size));
2070 __ ldr(r2, MemOperand(r2, 0));
2071 __ add(r0, r0, Operand(r2));
2072 __ str(r0, MemOperand(sp, 3 * kPointerSize));
2074 // Argument 6: Set the number of capture registers to zero to force global
2075 // regexps to behave as non-global. This does not affect non-global regexps.
2076 __ mov(r0, Operand::Zero());
2077 __ str(r0, MemOperand(sp, 2 * kPointerSize));
2079 // Argument 5 (sp[4]): static offsets vector buffer.
2081 Operand(ExternalReference::address_of_static_offsets_vector(
2083 __ str(r0, MemOperand(sp, 1 * kPointerSize));
2085 // For arguments 4 and 3 get string length, calculate start of string data and
2086 // calculate the shift of the index (0 for one-byte and 1 for two-byte).
2087 __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2088 __ eor(r3, r3, Operand(1));
2089 // Load the length from the original subject string from the previous stack
2090 // frame. Therefore we have to use fp, which points exactly to two pointer
2091 // sizes below the previous sp. (Because creating a new stack frame pushes
2092 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2093 __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2094 // If slice offset is not 0, load the length from the original sliced string.
2095 // Argument 4, r3: End of string data
2096 // Argument 3, r2: Start of string data
2097 // Prepare start and end index of the input.
2098 __ add(r9, r7, Operand(r9, LSL, r3));
2099 __ add(r2, r9, Operand(r1, LSL, r3));
2101 __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
2103 __ add(r3, r9, Operand(r7, LSL, r3));
2105 // Argument 2 (r1): Previous index.
2108 // Argument 1 (r0): Subject string.
2109 __ mov(r0, subject);
2111 // Locate the code entry and call it.
2112 __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
2113 DirectCEntryStub stub(isolate());
2114 stub.GenerateCall(masm, r6);
2116 __ LeaveExitFrame(false, no_reg, true);
2118 last_match_info_elements = r6;
2121 // subject: subject string (callee saved)
2122 // regexp_data: RegExp data (callee saved)
2123 // last_match_info_elements: Last match info elements (callee saved)
2124 // Check the result.
2126 __ cmp(r0, Operand(1));
2127 // We expect exactly one result since we force the called regexp to behave
2131 __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
2133 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2134 // If not exception it can only be retry. Handle that in the runtime system.
2136 // Result must now be exception. If there is no pending exception already a
2137 // stack overflow (on the backtrack stack) was detected in RegExp code but
2138 // haven't created the exception yet. Handle that in the runtime system.
2139 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2140 __ mov(r1, Operand(isolate()->factory()->the_hole_value()));
2141 __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2143 __ ldr(r0, MemOperand(r2, 0));
2147 // For exception, throw the exception again.
2148 __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
2151 // For failure and exception return null.
2152 __ mov(r0, Operand(isolate()->factory()->null_value()));
2153 __ add(sp, sp, Operand(4 * kPointerSize));
2156 // Process the result from the native regexp code.
2159 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2160 // Calculate number of capture registers (number_of_captures + 1) * 2.
2161 // Multiplying by 2 comes for free since r1 is smi-tagged.
2162 STATIC_ASSERT(kSmiTag == 0);
2163 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2164 __ add(r1, r1, Operand(2)); // r1 was a smi.
2166 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2167 __ JumpIfSmi(r0, &runtime);
2168 __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
2170 // Check that the JSArray is in fast case.
2171 __ ldr(last_match_info_elements,
2172 FieldMemOperand(r0, JSArray::kElementsOffset));
2173 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2174 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
2176 // Check that the last match info has space for the capture registers and the
2177 // additional information.
2179 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2180 __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
2181 __ cmp(r2, Operand::SmiUntag(r0));
2184 // r1: number of capture registers
2185 // r4: subject string
2186 // Store the capture count.
2188 __ str(r2, FieldMemOperand(last_match_info_elements,
2189 RegExpImpl::kLastCaptureCountOffset));
2190 // Store last subject and last input.
2192 FieldMemOperand(last_match_info_elements,
2193 RegExpImpl::kLastSubjectOffset));
2194 __ mov(r2, subject);
2195 __ RecordWriteField(last_match_info_elements,
2196 RegExpImpl::kLastSubjectOffset,
2201 __ mov(subject, r2);
2203 FieldMemOperand(last_match_info_elements,
2204 RegExpImpl::kLastInputOffset));
2205 __ RecordWriteField(last_match_info_elements,
2206 RegExpImpl::kLastInputOffset,
2212 // Get the static offsets vector filled by the native regexp code.
2213 ExternalReference address_of_static_offsets_vector =
2214 ExternalReference::address_of_static_offsets_vector(isolate());
2215 __ mov(r2, Operand(address_of_static_offsets_vector));
2217 // r1: number of capture registers
2218 // r2: offsets vector
2219 Label next_capture, done;
2220 // Capture register counter starts from number of capture registers and
2221 // counts down until wraping after zero.
2223 last_match_info_elements,
2224 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
2225 __ bind(&next_capture);
2226 __ sub(r1, r1, Operand(1), SetCC);
2228 // Read the value from the static offsets vector buffer.
2229 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
2230 // Store the smi value in the last match info.
2232 __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
2233 __ jmp(&next_capture);
2236 // Return last match info.
2237 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2238 __ add(sp, sp, Operand(4 * kPointerSize));
2241 // Do the runtime call to execute the regexp.
2243 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2245 // Deferred code for string handling.
2246 // (6) Not a long external string? If yes, go to (8).
2247 __ bind(¬_seq_nor_cons);
2248 // Compare flags are still set.
2249 __ b(gt, ¬_long_external); // Go to (8).
2251 // (7) External string. Make it, offset-wise, look like a sequential string.
2252 __ bind(&external_string);
2253 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2254 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2255 if (FLAG_debug_code) {
2256 // Assert that we do not have a cons or slice (indirect strings) here.
2257 // Sequential strings have already been ruled out.
2258 __ tst(r0, Operand(kIsIndirectStringMask));
2259 __ Assert(eq, kExternalStringExpectedButNotFound);
2262 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2263 // Move the pointer so that offset-wise, it looks like a sequential string.
2264 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2267 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
2268 __ jmp(&seq_string); // Go to (5).
2270 // (8) Short external string or not a string? If yes, bail out to runtime.
2271 __ bind(¬_long_external);
2272 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
2273 __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
2276 // (9) Sliced string. Replace subject with parent. Go to (4).
2277 // Load offset into r9 and replace subject string with parent.
2278 __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2280 __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2281 __ jmp(&check_underlying); // Go to (4).
2282 #endif // V8_INTERPRETED_REGEXP
2286 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
2288 // r0 : number of arguments to the construct function
2289 // r1 : the function to call
2290 // r2 : feedback vector
2291 // r3 : slot in feedback vector (Smi)
2292 // r4 : original constructor (for IsSuperConstructorCall)
2293 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2295 // Number-of-arguments register must be smi-tagged to call out.
2297 __ Push(r3, r2, r1, r0);
2307 __ Pop(r3, r2, r1, r0);
2312 static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
2313 // Cache the called function in a feedback vector slot. Cache states
2314 // are uninitialized, monomorphic (indicated by a JSFunction), and
2316 // r0 : number of arguments to the construct function
2317 // r1 : the function to call
2318 // r2 : feedback vector
2319 // r3 : slot in feedback vector (Smi)
2320 // r4 : original constructor (for IsSuperConstructorCall)
2321 Label initialize, done, miss, megamorphic, not_array_function;
2323 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2324 masm->isolate()->heap()->megamorphic_symbol());
2325 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2326 masm->isolate()->heap()->uninitialized_symbol());
2328 // Load the cache state into r5.
2329 __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
2330 __ ldr(r5, FieldMemOperand(r5, FixedArray::kHeaderSize));
2332 // A monomorphic cache hit or an already megamorphic state: invoke the
2333 // function without changing the state.
2334 // We don't know if r5 is a WeakCell or a Symbol, but it's harmless to read at
2335 // this position in a symbol (see static asserts in type-feedback-vector.h).
2336 Label check_allocation_site;
2337 Register feedback_map = r6;
2338 Register weak_value = r9;
2339 __ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
2340 __ cmp(r1, weak_value);
2342 __ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
2344 __ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
2345 __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
2346 __ b(ne, FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
2348 // If the weak cell is cleared, we have a new chance to become monomorphic.
2349 __ JumpIfSmi(weak_value, &initialize);
2350 __ jmp(&megamorphic);
2352 if (!FLAG_pretenuring_call_new) {
2353 __ bind(&check_allocation_site);
2354 // If we came here, we need to see if we are the array function.
2355 // If we didn't have a matching function, and we didn't find the megamorph
2356 // sentinel, then we have in the slot either some other function or an
2358 __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
2361 // Make sure the function is the Array() function
2362 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
2364 __ b(ne, &megamorphic);
2370 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
2372 __ CompareRoot(r5, Heap::kuninitialized_symbolRootIndex);
2373 __ b(eq, &initialize);
2374 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2375 // write-barrier is needed.
2376 __ bind(&megamorphic);
2377 __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
2378 __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
2379 __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
2382 // An uninitialized cache is patched with the function
2383 __ bind(&initialize);
2385 if (!FLAG_pretenuring_call_new) {
2386 // Make sure the function is the Array() function
2387 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
2389 __ b(ne, ¬_array_function);
2391 // The target function is the Array constructor,
2392 // Create an AllocationSite if we don't already have it, store it in the
2394 CreateAllocationSiteStub create_stub(masm->isolate());
2395 CallStubInRecordCallTarget(masm, &create_stub, is_super);
2398 __ bind(¬_array_function);
2401 CreateWeakCellStub create_stub(masm->isolate());
2402 CallStubInRecordCallTarget(masm, &create_stub, is_super);
2407 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2408 // Do not transform the receiver for strict mode functions.
2409 __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2410 __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset));
2411 __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
2415 // Do not transform the receiver for native (Compilerhints already in r3).
2416 __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
2421 static void EmitSlowCase(MacroAssembler* masm,
2423 Label* non_function) {
2424 // Check for function proxy.
2425 __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
2426 __ b(ne, non_function);
2427 __ push(r1); // put proxy as additional argument
2428 __ mov(r0, Operand(argc + 1, RelocInfo::NONE32));
2429 __ mov(r2, Operand::Zero());
2430 __ GetBuiltinFunction(r1, Context::CALL_FUNCTION_PROXY_BUILTIN_INDEX);
2432 Handle<Code> adaptor =
2433 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2434 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2437 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2438 // of the original receiver from the call site).
2439 __ bind(non_function);
2440 __ str(r1, MemOperand(sp, argc * kPointerSize));
2441 __ mov(r0, Operand(argc)); // Set up the number of arguments.
2442 __ mov(r2, Operand::Zero());
2443 __ GetBuiltinFunction(r1, Context::CALL_NON_FUNCTION_BUILTIN_INDEX);
2444 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2445 RelocInfo::CODE_TARGET);
2449 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2450 // Wrap the receiver and patch it back onto the stack.
2451 { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
2454 ToObjectStub stub(masm->isolate());
2458 __ str(r0, MemOperand(sp, argc * kPointerSize));
2463 static void CallFunctionNoFeedback(MacroAssembler* masm,
2464 int argc, bool needs_checks,
2465 bool call_as_method) {
2466 // r1 : the function to call
2467 Label slow, non_function, wrap, cont;
2470 // Check that the function is really a JavaScript function.
2471 // r1: pushed function (to be verified)
2472 __ JumpIfSmi(r1, &non_function);
2474 // Goto slow case if we do not have a function.
2475 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2479 // Fast-case: Invoke the function now.
2480 // r1: pushed function
2481 ParameterCount actual(argc);
2483 if (call_as_method) {
2485 EmitContinueIfStrictOrNative(masm, &cont);
2488 // Compute the receiver in sloppy mode.
2489 __ ldr(r3, MemOperand(sp, argc * kPointerSize));
2492 __ JumpIfSmi(r3, &wrap);
2493 __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
2502 __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
2505 // Slow-case: Non-function called.
2507 EmitSlowCase(masm, argc, &non_function);
2510 if (call_as_method) {
2512 EmitWrapCase(masm, argc, &cont);
2517 void CallFunctionStub::Generate(MacroAssembler* masm) {
2518 CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2522 void CallConstructStub::Generate(MacroAssembler* masm) {
2523 // r0 : number of arguments
2524 // r1 : the function to call
2525 // r2 : feedback vector
2526 // r3 : slot in feedback vector (Smi, for RecordCallTarget)
2527 // r4 : original constructor (for IsSuperConstructorCall)
2528 Label slow, non_function_call;
2530 // Check that the function is not a smi.
2531 __ JumpIfSmi(r1, &non_function_call);
2532 // Check that the function is a JSFunction.
2533 __ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
2536 if (RecordCallTarget()) {
2537 GenerateRecordCallTarget(masm, IsSuperConstructorCall());
2539 __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
2540 if (FLAG_pretenuring_call_new) {
2541 // Put the AllocationSite from the feedback vector into r2.
2542 // By adding kPointerSize we encode that we know the AllocationSite
2543 // entry is at the feedback vector slot given by r3 + 1.
2544 __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize + kPointerSize));
2546 Label feedback_register_initialized;
2547 // Put the AllocationSite from the feedback vector into r2, or undefined.
2548 __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
2549 __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
2550 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2551 __ b(eq, &feedback_register_initialized);
2552 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
2553 __ bind(&feedback_register_initialized);
2556 __ AssertUndefinedOrAllocationSite(r2, r5);
2559 // Pass function as original constructor.
2560 if (IsSuperConstructorCall()) {
2566 // Jump to the function-specific construct stub.
2567 Register jmp_reg = r4;
2568 __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2569 __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
2570 SharedFunctionInfo::kConstructStubOffset));
2571 __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
2573 // r0: number of arguments
2574 // r1: called object
2578 __ cmp(r5, Operand(JS_FUNCTION_PROXY_TYPE));
2579 __ b(ne, &non_function_call);
2580 __ GetBuiltinFunction(
2581 r1, Context::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR_BUILTIN_INDEX);
2584 __ bind(&non_function_call);
2585 __ GetBuiltinFunction(
2586 r1, Context::CALL_NON_FUNCTION_AS_CONSTRUCTOR_BUILTIN_INDEX);
2588 // Set expected number of arguments to zero (not changing r0).
2589 __ mov(r2, Operand::Zero());
2590 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2591 RelocInfo::CODE_TARGET);
2595 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2596 __ ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2597 __ ldr(vector, FieldMemOperand(vector,
2598 JSFunction::kSharedFunctionInfoOffset));
2599 __ ldr(vector, FieldMemOperand(vector,
2600 SharedFunctionInfo::kFeedbackVectorOffset));
2604 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2609 int argc = arg_count();
2610 ParameterCount actual(argc);
2612 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2616 __ mov(r0, Operand(arg_count()));
2617 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2618 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
2620 // Verify that r4 contains an AllocationSite
2621 __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
2622 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2625 // Increment the call count for monomorphic function calls.
2626 __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
2627 __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
2628 __ ldr(r3, FieldMemOperand(r2, 0));
2629 __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
2630 __ str(r3, FieldMemOperand(r2, 0));
2634 ArrayConstructorStub stub(masm->isolate(), arg_count());
2635 __ TailCallStub(&stub);
2640 // The slow case, we need this no matter what to complete a call after a miss.
2641 CallFunctionNoFeedback(masm,
2647 __ stop("Unexpected code address");
2651 void CallICStub::Generate(MacroAssembler* masm) {
2653 // r3 - slot id (Smi)
2655 const int with_types_offset =
2656 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
2657 const int generic_offset =
2658 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
2659 Label extra_checks_or_miss, slow_start;
2660 Label slow, non_function, wrap, cont;
2661 Label have_js_function;
2662 int argc = arg_count();
2663 ParameterCount actual(argc);
2665 // The checks. First, does r1 match the recorded monomorphic target?
2666 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2667 __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
2669 // We don't know that we have a weak cell. We might have a private symbol
2670 // or an AllocationSite, but the memory is safe to examine.
2671 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
2673 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
2674 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
2675 // computed, meaning that it can't appear to be a pointer. If the low bit is
2676 // 0, then hash is computed, but the 0 bit prevents the field from appearing
2678 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
2679 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
2680 WeakCell::kValueOffset &&
2681 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
2683 __ ldr(r5, FieldMemOperand(r4, WeakCell::kValueOffset));
2685 __ b(ne, &extra_checks_or_miss);
2687 // The compare above could have been a SMI/SMI comparison. Guard against this
2688 // convincing us that we have a monomorphic JSFunction.
2689 __ JumpIfSmi(r1, &extra_checks_or_miss);
2691 // Increment the call count for monomorphic function calls.
2692 __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
2693 __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
2694 __ ldr(r3, FieldMemOperand(r2, 0));
2695 __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
2696 __ str(r3, FieldMemOperand(r2, 0));
2698 __ bind(&have_js_function);
2699 if (CallAsMethod()) {
2700 EmitContinueIfStrictOrNative(masm, &cont);
2701 // Compute the receiver in sloppy mode.
2702 __ ldr(r3, MemOperand(sp, argc * kPointerSize));
2704 __ JumpIfSmi(r3, &wrap);
2705 __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
2711 __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
2714 EmitSlowCase(masm, argc, &non_function);
2716 if (CallAsMethod()) {
2718 EmitWrapCase(masm, argc, &cont);
2721 __ bind(&extra_checks_or_miss);
2722 Label uninitialized, miss;
2724 __ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
2725 __ b(eq, &slow_start);
2727 // The following cases attempt to handle MISS cases without going to the
2729 if (FLAG_trace_ic) {
2733 __ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
2734 __ b(eq, &uninitialized);
2736 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2737 // to handle it here. More complex cases are dealt with in the runtime.
2738 __ AssertNotSmi(r4);
2739 __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
2741 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2742 __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
2743 __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
2744 // We have to update statistics for runtime profiling.
2745 __ ldr(r4, FieldMemOperand(r2, with_types_offset));
2746 __ sub(r4, r4, Operand(Smi::FromInt(1)));
2747 __ str(r4, FieldMemOperand(r2, with_types_offset));
2748 __ ldr(r4, FieldMemOperand(r2, generic_offset));
2749 __ add(r4, r4, Operand(Smi::FromInt(1)));
2750 __ str(r4, FieldMemOperand(r2, generic_offset));
2751 __ jmp(&slow_start);
2753 __ bind(&uninitialized);
2755 // We are going monomorphic, provided we actually have a JSFunction.
2756 __ JumpIfSmi(r1, &miss);
2758 // Goto miss case if we do not have a function.
2759 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2762 // Make sure the function is not the Array() function, which requires special
2763 // behavior on MISS.
2764 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2769 __ ldr(r4, FieldMemOperand(r2, with_types_offset));
2770 __ add(r4, r4, Operand(Smi::FromInt(1)));
2771 __ str(r4, FieldMemOperand(r2, with_types_offset));
2773 // Initialize the call counter.
2774 __ Move(r5, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
2775 __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2776 __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
2778 // Store the function. Use a stub since we need a frame for allocation.
2783 FrameScope scope(masm, StackFrame::INTERNAL);
2784 CreateWeakCellStub create_stub(masm->isolate());
2786 __ CallStub(&create_stub);
2790 __ jmp(&have_js_function);
2792 // We are here because tracing is on or we encountered a MISS case we can't
2798 __ bind(&slow_start);
2799 // Check that the function is really a JavaScript function.
2800 // r1: pushed function (to be verified)
2801 __ JumpIfSmi(r1, &non_function);
2803 // Goto slow case if we do not have a function.
2804 __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2806 __ jmp(&have_js_function);
2810 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2811 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2813 // Push the receiver and the function and feedback info.
2814 __ Push(r1, r2, r3);
2817 Runtime::FunctionId id = GetICState() == DEFAULT
2818 ? Runtime::kCallIC_Miss
2819 : Runtime::kCallIC_Customization_Miss;
2820 __ CallRuntime(id, 3);
2822 // Move result to edi and exit the internal frame.
2827 // StringCharCodeAtGenerator
2828 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2829 // If the receiver is a smi trigger the non-string case.
2830 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2831 __ JumpIfSmi(object_, receiver_not_string_);
2833 // Fetch the instance type of the receiver into result register.
2834 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2835 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2836 // If the receiver is not a string trigger the non-string case.
2837 __ tst(result_, Operand(kIsNotStringMask));
2838 __ b(ne, receiver_not_string_);
2841 // If the index is non-smi trigger the non-smi case.
2842 __ JumpIfNotSmi(index_, &index_not_smi_);
2843 __ bind(&got_smi_index_);
2845 // Check for index out of range.
2846 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
2847 __ cmp(ip, Operand(index_));
2848 __ b(ls, index_out_of_range_);
2850 __ SmiUntag(index_);
2852 StringCharLoadGenerator::Generate(masm,
2863 void StringCharCodeAtGenerator::GenerateSlow(
2864 MacroAssembler* masm, EmbedMode embed_mode,
2865 const RuntimeCallHelper& call_helper) {
2866 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2868 // Index is not a smi.
2869 __ bind(&index_not_smi_);
2870 // If index is a heap number, try converting it to an integer.
2873 Heap::kHeapNumberMapRootIndex,
2876 call_helper.BeforeCall(masm);
2877 if (embed_mode == PART_OF_IC_HANDLER) {
2878 __ Push(LoadWithVectorDescriptor::VectorRegister(),
2879 LoadWithVectorDescriptor::SlotRegister(), object_, index_);
2881 // index_ is consumed by runtime conversion function.
2882 __ Push(object_, index_);
2884 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
2885 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
2887 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
2888 // NumberToSmi discards numbers that are not exact integers.
2889 __ CallRuntime(Runtime::kNumberToSmi, 1);
2891 // Save the conversion result before the pop instructions below
2892 // have a chance to overwrite it.
2893 __ Move(index_, r0);
2894 if (embed_mode == PART_OF_IC_HANDLER) {
2895 __ Pop(LoadWithVectorDescriptor::VectorRegister(),
2896 LoadWithVectorDescriptor::SlotRegister(), object_);
2900 // Reload the instance type.
2901 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2902 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2903 call_helper.AfterCall(masm);
2904 // If index is still not a smi, it must be out of range.
2905 __ JumpIfNotSmi(index_, index_out_of_range_);
2906 // Otherwise, return to the fast path.
2907 __ jmp(&got_smi_index_);
2909 // Call runtime. We get here when the receiver is a string and the
2910 // index is a number, but the code of getting the actual character
2911 // is too complex (e.g., when the string needs to be flattened).
2912 __ bind(&call_runtime_);
2913 call_helper.BeforeCall(masm);
2915 __ Push(object_, index_);
2916 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
2917 __ Move(result_, r0);
2918 call_helper.AfterCall(masm);
2921 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2925 // -------------------------------------------------------------------------
2926 // StringCharFromCodeGenerator
2928 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2929 // Fast case of Heap::LookupSingleCharacterStringFromCode.
2930 STATIC_ASSERT(kSmiTag == 0);
2931 STATIC_ASSERT(kSmiShiftSize == 0);
2932 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
2933 __ tst(code_, Operand(kSmiTagMask |
2934 ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
2935 __ b(ne, &slow_case_);
2937 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2938 // At this point code register contains smi tagged one-byte char code.
2939 __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
2940 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
2941 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
2942 __ b(eq, &slow_case_);
2947 void StringCharFromCodeGenerator::GenerateSlow(
2948 MacroAssembler* masm,
2949 const RuntimeCallHelper& call_helper) {
2950 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2952 __ bind(&slow_case_);
2953 call_helper.BeforeCall(masm);
2955 __ CallRuntime(Runtime::kCharFromCode, 1);
2956 __ Move(result_, r0);
2957 call_helper.AfterCall(masm);
2960 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2964 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
2967 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
2972 String::Encoding encoding) {
2973 if (FLAG_debug_code) {
2974 // Check that destination is word aligned.
2975 __ tst(dest, Operand(kPointerAlignmentMask));
2976 __ Check(eq, kDestinationOfCopyNotAligned);
2979 // Assumes word reads and writes are little endian.
2980 // Nothing to do for zero characters.
2982 if (encoding == String::TWO_BYTE_ENCODING) {
2983 __ add(count, count, Operand(count), SetCC);
2986 Register limit = count; // Read until dest equals this.
2987 __ add(limit, dest, Operand(count));
2989 Label loop_entry, loop;
2990 // Copy bytes from src to dest until dest hits limit.
2993 __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
2994 __ strb(scratch, MemOperand(dest, 1, PostIndex));
2995 __ bind(&loop_entry);
2996 __ cmp(dest, Operand(limit));
3003 void SubStringStub::Generate(MacroAssembler* masm) {
3006 // Stack frame on entry.
3007 // lr: return address
3012 // This stub is called from the native-call %_SubString(...), so
3013 // nothing can be assumed about the arguments. It is tested that:
3014 // "string" is a sequential string,
3015 // both "from" and "to" are smis, and
3016 // 0 <= from <= to <= string.length.
3017 // If any of these assumptions fail, we call the runtime system.
3019 const int kToOffset = 0 * kPointerSize;
3020 const int kFromOffset = 1 * kPointerSize;
3021 const int kStringOffset = 2 * kPointerSize;
3023 __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
3024 STATIC_ASSERT(kFromOffset == kToOffset + 4);
3025 STATIC_ASSERT(kSmiTag == 0);
3026 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3028 // Arithmetic shift right by one un-smi-tags. In this case we rotate right
3029 // instead because we bail out on non-smi values: ROR and ASR are equivalent
3030 // for smis but they set the flags in a way that's easier to optimize.
3031 __ mov(r2, Operand(r2, ROR, 1), SetCC);
3032 __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
3033 // If either to or from had the smi tag bit set, then C is set now, and N
3034 // has the same value: we rotated by 1, so the bottom bit is now the top bit.
3035 // We want to bailout to runtime here if From is negative. In that case, the
3036 // next instruction is not executed and we fall through to bailing out to
3038 // Executed if both r2 and r3 are untagged integers.
3039 __ sub(r2, r2, Operand(r3), SetCC, cc);
3040 // One of the above un-smis or the above SUB could have set N==1.
3041 __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to.
3043 // Make sure first argument is a string.
3044 __ ldr(r0, MemOperand(sp, kStringOffset));
3045 __ JumpIfSmi(r0, &runtime);
3046 Condition is_string = masm->IsObjectStringType(r0, r1);
3047 __ b(NegateCondition(is_string), &runtime);
3050 __ cmp(r2, Operand(1));
3051 __ b(eq, &single_char);
3053 // Short-cut for the case of trivial substring.
3055 // r0: original string
3056 // r2: result string length
3057 __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
3058 __ cmp(r2, Operand(r4, ASR, 1));
3059 // Return original string.
3060 __ b(eq, &return_r0);
3061 // Longer than original string's length or negative: unsafe arguments.
3063 // Shorter than original string's length: an actual substring.
3065 // Deal with different string types: update the index if necessary
3066 // and put the underlying string into r5.
3067 // r0: original string
3068 // r1: instance type
3070 // r3: from index (untagged)
3071 Label underlying_unpacked, sliced_string, seq_or_external_string;
3072 // If the string is not indirect, it can only be sequential or external.
3073 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3074 STATIC_ASSERT(kIsIndirectStringMask != 0);
3075 __ tst(r1, Operand(kIsIndirectStringMask));
3076 __ b(eq, &seq_or_external_string);
3078 __ tst(r1, Operand(kSlicedNotConsMask));
3079 __ b(ne, &sliced_string);
3080 // Cons string. Check whether it is flat, then fetch first part.
3081 __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
3082 __ CompareRoot(r5, Heap::kempty_stringRootIndex);
3084 __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
3085 // Update instance type.
3086 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
3087 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
3088 __ jmp(&underlying_unpacked);
3090 __ bind(&sliced_string);
3091 // Sliced string. Fetch parent and correct start index by offset.
3092 __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
3093 __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
3094 __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
3095 // Update instance type.
3096 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
3097 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
3098 __ jmp(&underlying_unpacked);
3100 __ bind(&seq_or_external_string);
3101 // Sequential or external string. Just move string to the expected register.
3104 __ bind(&underlying_unpacked);
3106 if (FLAG_string_slices) {
3108 // r5: underlying subject string
3109 // r1: instance type of underlying subject string
3111 // r3: adjusted start index (untagged)
3112 __ cmp(r2, Operand(SlicedString::kMinLength));
3113 // Short slice. Copy instead of slicing.
3114 __ b(lt, ©_routine);
3115 // Allocate new sliced string. At this point we do not reload the instance
3116 // type including the string encoding because we simply rely on the info
3117 // provided by the original string. It does not matter if the original
3118 // string's encoding is wrong because we always have to recheck encoding of
3119 // the newly created string's parent anyways due to externalized strings.
3120 Label two_byte_slice, set_slice_header;
3121 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3122 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3123 __ tst(r1, Operand(kStringEncodingMask));
3124 __ b(eq, &two_byte_slice);
3125 __ AllocateOneByteSlicedString(r0, r2, r6, r4, &runtime);
3126 __ jmp(&set_slice_header);
3127 __ bind(&two_byte_slice);
3128 __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
3129 __ bind(&set_slice_header);
3130 __ mov(r3, Operand(r3, LSL, 1));
3131 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
3132 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
3135 __ bind(©_routine);
3138 // r5: underlying subject string
3139 // r1: instance type of underlying subject string
3141 // r3: adjusted start index (untagged)
3142 Label two_byte_sequential, sequential_string, allocate_result;
3143 STATIC_ASSERT(kExternalStringTag != 0);
3144 STATIC_ASSERT(kSeqStringTag == 0);
3145 __ tst(r1, Operand(kExternalStringTag));
3146 __ b(eq, &sequential_string);
3148 // Handle external string.
3149 // Rule out short external strings.
3150 STATIC_ASSERT(kShortExternalStringTag != 0);
3151 __ tst(r1, Operand(kShortExternalStringTag));
3153 __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
3154 // r5 already points to the first character of underlying string.
3155 __ jmp(&allocate_result);
3157 __ bind(&sequential_string);
3158 // Locate first character of underlying subject string.
3159 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3160 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3162 __ bind(&allocate_result);
3163 // Sequential acii string. Allocate the result.
3164 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3165 __ tst(r1, Operand(kStringEncodingMask));
3166 __ b(eq, &two_byte_sequential);
3168 // Allocate and copy the resulting one-byte string.
3169 __ AllocateOneByteString(r0, r2, r4, r6, r1, &runtime);
3171 // Locate first character of substring to copy.
3173 // Locate first character of result.
3174 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3176 // r0: result string
3177 // r1: first character of result string
3178 // r2: result string length
3179 // r5: first character of substring to copy
3180 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3181 StringHelper::GenerateCopyCharacters(
3182 masm, r1, r5, r2, r3, String::ONE_BYTE_ENCODING);
3185 // Allocate and copy the resulting two-byte string.
3186 __ bind(&two_byte_sequential);
3187 __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
3189 // Locate first character of substring to copy.
3190 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3191 __ add(r5, r5, Operand(r3, LSL, 1));
3192 // Locate first character of result.
3193 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3195 // r0: result string.
3196 // r1: first character of result.
3197 // r2: result length.
3198 // r5: first character of substring to copy.
3199 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3200 StringHelper::GenerateCopyCharacters(
3201 masm, r1, r5, r2, r3, String::TWO_BYTE_ENCODING);
3203 __ bind(&return_r0);
3204 Counters* counters = isolate()->counters();
3205 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
3209 // Just jump to runtime to create the sub string.
3211 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3213 __ bind(&single_char);
3214 // r0: original string
3215 // r1: instance type
3217 // r3: from index (untagged)
3219 StringCharAtGenerator generator(r0, r3, r2, r0, &runtime, &runtime, &runtime,
3220 STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
3221 generator.GenerateFast(masm);
3224 generator.SkipSlow(masm, &runtime);
3228 void ToNumberStub::Generate(MacroAssembler* masm) {
3229 // The ToNumber stub takes one argument in r0.
3231 __ JumpIfNotSmi(r0, ¬_smi);
3235 Label not_heap_number;
3236 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
3237 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
3239 // r1: instance type.
3240 __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
3241 __ b(ne, ¬_heap_number);
3243 __ bind(¬_heap_number);
3245 Label not_string, slow_string;
3246 __ cmp(r1, Operand(FIRST_NONSTRING_TYPE));
3247 __ b(hs, ¬_string);
3248 // Check if string has a cached array index.
3249 __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
3250 __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
3251 __ b(ne, &slow_string);
3252 __ IndexFromHash(r2, r0);
3254 __ bind(&slow_string);
3255 __ push(r0); // Push argument.
3256 __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
3257 __ bind(¬_string);
3260 __ cmp(r1, Operand(ODDBALL_TYPE));
3261 __ b(ne, ¬_oddball);
3262 __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
3264 __ bind(¬_oddball);
3266 __ push(r0); // Push argument.
3267 __ TailCallRuntime(Runtime::kToNumber, 1, 1);
3271 void StringHelper::GenerateFlatOneByteStringEquals(
3272 MacroAssembler* masm, Register left, Register right, Register scratch1,
3273 Register scratch2, Register scratch3) {
3274 Register length = scratch1;
3277 Label strings_not_equal, check_zero_length;
3278 __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
3279 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3280 __ cmp(length, scratch2);
3281 __ b(eq, &check_zero_length);
3282 __ bind(&strings_not_equal);
3283 __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
3286 // Check if the length is zero.
3287 Label compare_chars;
3288 __ bind(&check_zero_length);
3289 STATIC_ASSERT(kSmiTag == 0);
3290 __ cmp(length, Operand::Zero());
3291 __ b(ne, &compare_chars);
3292 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3295 // Compare characters.
3296 __ bind(&compare_chars);
3297 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
3298 &strings_not_equal);
3300 // Characters are equal.
3301 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3306 void StringHelper::GenerateCompareFlatOneByteStrings(
3307 MacroAssembler* masm, Register left, Register right, Register scratch1,
3308 Register scratch2, Register scratch3, Register scratch4) {
3309 Label result_not_equal, compare_lengths;
3310 // Find minimum length and length difference.
3311 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
3312 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3313 __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
3314 Register length_delta = scratch3;
3315 __ mov(scratch1, scratch2, LeaveCC, gt);
3316 Register min_length = scratch1;
3317 STATIC_ASSERT(kSmiTag == 0);
3318 __ cmp(min_length, Operand::Zero());
3319 __ b(eq, &compare_lengths);
3322 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3323 scratch4, &result_not_equal);
3325 // Compare lengths - strings up to min-length are equal.
3326 __ bind(&compare_lengths);
3327 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3328 // Use length_delta as result if it's zero.
3329 __ mov(r0, Operand(length_delta), SetCC);
3330 __ bind(&result_not_equal);
3331 // Conditionally update the result based either on length_delta or
3332 // the last comparion performed in the loop above.
3333 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
3334 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
3339 void StringHelper::GenerateOneByteCharsCompareLoop(
3340 MacroAssembler* masm, Register left, Register right, Register length,
3341 Register scratch1, Register scratch2, Label* chars_not_equal) {
3342 // Change index to run from -length to -1 by adding length to string
3343 // start. This means that loop ends when index reaches zero, which
3344 // doesn't need an additional compare.
3345 __ SmiUntag(length);
3346 __ add(scratch1, length,
3347 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3348 __ add(left, left, Operand(scratch1));
3349 __ add(right, right, Operand(scratch1));
3350 __ rsb(length, length, Operand::Zero());
3351 Register index = length; // index = -length;
3356 __ ldrb(scratch1, MemOperand(left, index));
3357 __ ldrb(scratch2, MemOperand(right, index));
3358 __ cmp(scratch1, scratch2);
3359 __ b(ne, chars_not_equal);
3360 __ add(index, index, Operand(1), SetCC);
3365 void StringCompareStub::Generate(MacroAssembler* masm) {
3368 Counters* counters = isolate()->counters();
3370 // Stack frame on entry.
3371 // sp[0]: right string
3372 // sp[4]: left string
3373 __ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1.
3377 __ b(ne, ¬_same);
3378 STATIC_ASSERT(EQUAL == 0);
3379 STATIC_ASSERT(kSmiTag == 0);
3380 __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3381 __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
3382 __ add(sp, sp, Operand(2 * kPointerSize));
3387 // Check that both objects are sequential one-byte strings.
3388 __ JumpIfNotBothSequentialOneByteStrings(r1, r0, r2, r3, &runtime);
3390 // Compare flat one-byte strings natively. Remove arguments from stack first.
3391 __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
3392 __ add(sp, sp, Operand(2 * kPointerSize));
3393 StringHelper::GenerateCompareFlatOneByteStrings(masm, r1, r0, r2, r3, r4, r5);
3395 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
3396 // tagged as a small integer.
3398 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3402 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3403 // ----------- S t a t e -------------
3406 // -- lr : return address
3407 // -----------------------------------
3409 // Load r2 with the allocation site. We stick an undefined dummy value here
3410 // and replace it with the real allocation site later when we instantiate this
3411 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3412 __ Move(r2, handle(isolate()->heap()->undefined_value()));
3414 // Make sure that we actually patched the allocation site.
3415 if (FLAG_debug_code) {
3416 __ tst(r2, Operand(kSmiTagMask));
3417 __ Assert(ne, kExpectedAllocationSite);
3419 __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
3420 __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
3423 __ Assert(eq, kExpectedAllocationSite);
3426 // Tail call into the stub that handles binary operations with allocation
3428 BinaryOpWithAllocationSiteStub stub(isolate(), state());
3429 __ TailCallStub(&stub);
3433 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3434 DCHECK(state() == CompareICState::SMI);
3437 __ JumpIfNotSmi(r2, &miss);
3439 if (GetCondition() == eq) {
3440 // For equality we do not care about the sign of the result.
3441 __ sub(r0, r0, r1, SetCC);
3443 // Untag before subtracting to avoid handling overflow.
3445 __ sub(r0, r1, Operand::SmiUntag(r0));
3454 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3455 DCHECK(state() == CompareICState::NUMBER);
3458 Label unordered, maybe_undefined1, maybe_undefined2;
3461 if (left() == CompareICState::SMI) {
3462 __ JumpIfNotSmi(r1, &miss);
3464 if (right() == CompareICState::SMI) {
3465 __ JumpIfNotSmi(r0, &miss);
3468 // Inlining the double comparison and falling back to the general compare
3469 // stub if NaN is involved.
3470 // Load left and right operand.
3471 Label done, left, left_smi, right_smi;
3472 __ JumpIfSmi(r0, &right_smi);
3473 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3475 __ sub(r2, r0, Operand(kHeapObjectTag));
3476 __ vldr(d1, r2, HeapNumber::kValueOffset);
3478 __ bind(&right_smi);
3479 __ SmiToDouble(d1, r0);
3482 __ JumpIfSmi(r1, &left_smi);
3483 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3485 __ sub(r2, r1, Operand(kHeapObjectTag));
3486 __ vldr(d0, r2, HeapNumber::kValueOffset);
3489 __ SmiToDouble(d0, r1);
3492 // Compare operands.
3493 __ VFPCompareAndSetFlags(d0, d1);
3495 // Don't base result on status bits when a NaN is involved.
3496 __ b(vs, &unordered);
3498 // Return a result of -1, 0, or 1, based on status bits.
3499 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
3500 __ mov(r0, Operand(LESS), LeaveCC, lt);
3501 __ mov(r0, Operand(GREATER), LeaveCC, gt);
3504 __ bind(&unordered);
3505 __ bind(&generic_stub);
3506 CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
3507 CompareICState::GENERIC, CompareICState::GENERIC);
3508 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3510 __ bind(&maybe_undefined1);
3511 if (Token::IsOrderedRelationalCompareOp(op())) {
3512 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
3514 __ JumpIfSmi(r1, &unordered);
3515 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
3516 __ b(ne, &maybe_undefined2);
3520 __ bind(&maybe_undefined2);
3521 if (Token::IsOrderedRelationalCompareOp(op())) {
3522 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
3523 __ b(eq, &unordered);
3531 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3532 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3535 // Registers containing left and right operands respectively.
3537 Register right = r0;
3541 // Check that both operands are heap objects.
3542 __ JumpIfEitherSmi(left, right, &miss);
3544 // Check that both operands are internalized strings.
3545 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3546 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3547 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3548 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3549 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3550 __ orr(tmp1, tmp1, Operand(tmp2));
3551 __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3554 // Internalized strings are compared by identity.
3555 __ cmp(left, right);
3556 // Make sure r0 is non-zero. At this point input operands are
3557 // guaranteed to be non-zero.
3558 DCHECK(right.is(r0));
3559 STATIC_ASSERT(EQUAL == 0);
3560 STATIC_ASSERT(kSmiTag == 0);
3561 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3569 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3570 DCHECK(state() == CompareICState::UNIQUE_NAME);
3571 DCHECK(GetCondition() == eq);
3574 // Registers containing left and right operands respectively.
3576 Register right = r0;
3580 // Check that both operands are heap objects.
3581 __ JumpIfEitherSmi(left, right, &miss);
3583 // Check that both operands are unique names. This leaves the instance
3584 // types loaded in tmp1 and tmp2.
3585 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3586 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3587 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3588 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3590 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
3591 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
3593 // Unique names are compared by identity.
3594 __ cmp(left, right);
3595 // Make sure r0 is non-zero. At this point input operands are
3596 // guaranteed to be non-zero.
3597 DCHECK(right.is(r0));
3598 STATIC_ASSERT(EQUAL == 0);
3599 STATIC_ASSERT(kSmiTag == 0);
3600 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3608 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3609 DCHECK(state() == CompareICState::STRING);
3612 bool equality = Token::IsEqualityOp(op());
3614 // Registers containing left and right operands respectively.
3616 Register right = r0;
3622 // Check that both operands are heap objects.
3623 __ JumpIfEitherSmi(left, right, &miss);
3625 // Check that both operands are strings. This leaves the instance
3626 // types loaded in tmp1 and tmp2.
3627 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3628 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3629 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3630 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3631 STATIC_ASSERT(kNotStringTag != 0);
3632 __ orr(tmp3, tmp1, tmp2);
3633 __ tst(tmp3, Operand(kIsNotStringMask));
3636 // Fast check for identical strings.
3637 __ cmp(left, right);
3638 STATIC_ASSERT(EQUAL == 0);
3639 STATIC_ASSERT(kSmiTag == 0);
3640 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
3643 // Handle not identical strings.
3645 // Check that both strings are internalized strings. If they are, we're done
3646 // because we already know they are not identical. We know they are both
3649 DCHECK(GetCondition() == eq);
3650 STATIC_ASSERT(kInternalizedTag == 0);
3651 __ orr(tmp3, tmp1, Operand(tmp2));
3652 __ tst(tmp3, Operand(kIsNotInternalizedMask));
3653 // Make sure r0 is non-zero. At this point input operands are
3654 // guaranteed to be non-zero.
3655 DCHECK(right.is(r0));
3659 // Check that both strings are sequential one-byte.
3661 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
3664 // Compare flat one-byte strings. Returns when done.
3666 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
3669 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
3673 // Handle more complex cases in runtime.
3675 __ Push(left, right);
3677 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3679 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3687 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3688 DCHECK(state() == CompareICState::OBJECT);
3690 __ and_(r2, r1, Operand(r0));
3691 __ JumpIfSmi(r2, &miss);
3693 __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
3695 __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
3698 DCHECK(GetCondition() == eq);
3699 __ sub(r0, r0, Operand(r1));
3707 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3709 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
3710 __ and_(r2, r1, Operand(r0));
3711 __ JumpIfSmi(r2, &miss);
3712 __ GetWeakValue(r4, cell);
3713 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
3714 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
3720 __ sub(r0, r0, Operand(r1));
3728 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3730 // Call the runtime system in a fresh internal frame.
3731 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
3733 __ Push(lr, r1, r0);
3734 __ mov(ip, Operand(Smi::FromInt(op())));
3736 __ CallRuntime(Runtime::kCompareIC_Miss, 3);
3737 // Compute the entry point of the rewritten stub.
3738 __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
3739 // Restore registers.
3748 void DirectCEntryStub::Generate(MacroAssembler* masm) {
3749 // Place the return address on the stack, making the call
3750 // GC safe. The RegExp backend also relies on this.
3751 __ str(lr, MemOperand(sp, 0));
3752 __ blx(ip); // Call the C++ function.
3753 __ VFPEnsureFPSCRState(r2);
3754 __ ldr(pc, MemOperand(sp, 0));
3758 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
3761 reinterpret_cast<intptr_t>(GetCode().location());
3762 __ Move(ip, target);
3763 __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
3764 __ blx(lr); // Call the stub.
3768 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3772 Register properties,
3774 Register scratch0) {
3775 DCHECK(name->IsUniqueName());
3776 // If names of slots in range from 1 to kProbes - 1 for the hash value are
3777 // not equal to the name and kProbes-th slot is not used (its name is the
3778 // undefined value), it guarantees the hash table doesn't contain the
3779 // property. It's true even if some slots represent deleted properties
3780 // (their names are the hole value).
3781 for (int i = 0; i < kInlinedProbes; i++) {
3782 // scratch0 points to properties hash.
3783 // Compute the masked index: (hash + i + i * i) & mask.
3784 Register index = scratch0;
3785 // Capacity is smi 2^n.
3786 __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
3787 __ sub(index, index, Operand(1));
3788 __ and_(index, index, Operand(
3789 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
3791 // Scale the index by multiplying by the entry size.
3792 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3793 __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
3795 Register entity_name = scratch0;
3796 // Having undefined at this place means the name is not contained.
3797 STATIC_ASSERT(kSmiTagSize == 1);
3798 Register tmp = properties;
3799 __ add(tmp, properties, Operand(index, LSL, 1));
3800 __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
3802 DCHECK(!tmp.is(entity_name));
3803 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
3804 __ cmp(entity_name, tmp);
3807 // Load the hole ready for use below:
3808 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
3810 // Stop if found the property.
3811 __ cmp(entity_name, Operand(Handle<Name>(name)));
3815 __ cmp(entity_name, tmp);
3818 // Check if the entry name is not a unique name.
3819 __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
3820 __ ldrb(entity_name,
3821 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
3822 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
3825 // Restore the properties.
3827 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3830 const int spill_mask =
3831 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
3832 r2.bit() | r1.bit() | r0.bit());
3834 __ stm(db_w, sp, spill_mask);
3835 __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3836 __ mov(r1, Operand(Handle<Name>(name)));
3837 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
3839 __ cmp(r0, Operand::Zero());
3840 __ ldm(ia_w, sp, spill_mask);
3847 // Probe the name dictionary in the |elements| register. Jump to the
3848 // |done| label if a property with the given name is found. Jump to
3849 // the |miss| label otherwise.
3850 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
3851 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
3857 Register scratch2) {
3858 DCHECK(!elements.is(scratch1));
3859 DCHECK(!elements.is(scratch2));
3860 DCHECK(!name.is(scratch1));
3861 DCHECK(!name.is(scratch2));
3863 __ AssertName(name);
3865 // Compute the capacity mask.
3866 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
3867 __ SmiUntag(scratch1);
3868 __ sub(scratch1, scratch1, Operand(1));
3870 // Generate an unrolled loop that performs a few probes before
3871 // giving up. Measurements done on Gmail indicate that 2 probes
3872 // cover ~93% of loads from dictionaries.
3873 for (int i = 0; i < kInlinedProbes; i++) {
3874 // Compute the masked index: (hash + i + i * i) & mask.
3875 __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
3877 // Add the probe offset (i + i * i) left shifted to avoid right shifting
3878 // the hash in a separate instruction. The value hash + i + i * i is right
3879 // shifted in the following and instruction.
3880 DCHECK(NameDictionary::GetProbeOffset(i) <
3881 1 << (32 - Name::kHashFieldOffset));
3882 __ add(scratch2, scratch2, Operand(
3883 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3885 __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
3887 // Scale the index by multiplying by the entry size.
3888 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3889 // scratch2 = scratch2 * 3.
3890 __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
3892 // Check if the key is identical to the name.
3893 __ add(scratch2, elements, Operand(scratch2, LSL, 2));
3894 __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
3895 __ cmp(name, Operand(ip));
3899 const int spill_mask =
3900 (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
3901 r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
3902 ~(scratch1.bit() | scratch2.bit());
3904 __ stm(db_w, sp, spill_mask);
3906 DCHECK(!elements.is(r1));
3908 __ Move(r0, elements);
3910 __ Move(r0, elements);
3913 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
3915 __ cmp(r0, Operand::Zero());
3916 __ mov(scratch2, Operand(r2));
3917 __ ldm(ia_w, sp, spill_mask);
3924 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
3925 // This stub overrides SometimesSetsUpAFrame() to return false. That means
3926 // we cannot call anything that could cause a GC from this stub.
3928 // result: NameDictionary to probe
3930 // dictionary: NameDictionary to probe.
3931 // index: will hold an index of entry if lookup is successful.
3932 // might alias with result_.
3934 // result_ is zero if lookup failed, non zero otherwise.
3936 Register result = r0;
3937 Register dictionary = r0;
3939 Register index = r2;
3942 Register undefined = r5;
3943 Register entry_key = r6;
3945 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
3947 __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
3949 __ sub(mask, mask, Operand(1));
3951 __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
3953 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
3955 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
3956 // Compute the masked index: (hash + i + i * i) & mask.
3957 // Capacity is smi 2^n.
3959 // Add the probe offset (i + i * i) left shifted to avoid right shifting
3960 // the hash in a separate instruction. The value hash + i + i * i is right
3961 // shifted in the following and instruction.
3962 DCHECK(NameDictionary::GetProbeOffset(i) <
3963 1 << (32 - Name::kHashFieldOffset));
3964 __ add(index, hash, Operand(
3965 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3967 __ mov(index, Operand(hash));
3969 __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
3971 // Scale the index by multiplying by the entry size.
3972 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3973 __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
3975 STATIC_ASSERT(kSmiTagSize == 1);
3976 __ add(index, dictionary, Operand(index, LSL, 2));
3977 __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
3979 // Having undefined at this place means the name is not contained.
3980 __ cmp(entry_key, Operand(undefined));
3981 __ b(eq, ¬_in_dictionary);
3983 // Stop if found the property.
3984 __ cmp(entry_key, Operand(key));
3985 __ b(eq, &in_dictionary);
3987 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
3988 // Check if the entry name is not a unique name.
3989 __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
3991 FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
3992 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
3996 __ bind(&maybe_in_dictionary);
3997 // If we are doing negative lookup then probing failure should be
3998 // treated as a lookup success. For positive lookup probing failure
3999 // should be treated as lookup failure.
4000 if (mode() == POSITIVE_LOOKUP) {
4001 __ mov(result, Operand::Zero());
4005 __ bind(&in_dictionary);
4006 __ mov(result, Operand(1));
4009 __ bind(¬_in_dictionary);
4010 __ mov(result, Operand::Zero());
4015 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4017 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4019 // Hydrogen code stubs need stub2 at snapshot time.
4020 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4025 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
4026 // the value has just been written into the object, now this stub makes sure
4027 // we keep the GC informed. The word in the object where the value has been
4028 // written is in the address register.
4029 void RecordWriteStub::Generate(MacroAssembler* masm) {
4030 Label skip_to_incremental_noncompacting;
4031 Label skip_to_incremental_compacting;
4033 // The first two instructions are generated with labels so as to get the
4034 // offset fixed up correctly by the bind(Label*) call. We patch it back and
4035 // forth between a compare instructions (a nop in this position) and the
4036 // real branch when we start and stop incremental heap marking.
4037 // See RecordWriteStub::Patch for details.
4039 // Block literal pool emission, as the position of these two instructions
4040 // is assumed by the patching code.
4041 Assembler::BlockConstPoolScope block_const_pool(masm);
4042 __ b(&skip_to_incremental_noncompacting);
4043 __ b(&skip_to_incremental_compacting);
4046 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4047 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4048 MacroAssembler::kReturnAtEnd);
4052 __ bind(&skip_to_incremental_noncompacting);
4053 GenerateIncremental(masm, INCREMENTAL);
4055 __ bind(&skip_to_incremental_compacting);
4056 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4058 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4059 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4060 DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
4061 DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
4062 PatchBranchIntoNop(masm, 0);
4063 PatchBranchIntoNop(masm, Assembler::kInstrSize);
4067 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4070 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4071 Label dont_need_remembered_set;
4073 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
4074 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4076 &dont_need_remembered_set);
4078 __ CheckPageFlag(regs_.object(),
4080 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4082 &dont_need_remembered_set);
4084 // First notify the incremental marker if necessary, then update the
4086 CheckNeedsToInformIncrementalMarker(
4087 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4088 InformIncrementalMarker(masm);
4089 regs_.Restore(masm);
4090 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4091 MacroAssembler::kReturnAtEnd);
4093 __ bind(&dont_need_remembered_set);
4096 CheckNeedsToInformIncrementalMarker(
4097 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4098 InformIncrementalMarker(masm);
4099 regs_.Restore(masm);
4104 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4105 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
4106 int argument_count = 3;
4107 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4109 r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4110 DCHECK(!address.is(regs_.object()));
4111 DCHECK(!address.is(r0));
4112 __ Move(address, regs_.address());
4113 __ Move(r0, regs_.object());
4114 __ Move(r1, address);
4115 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
4117 AllowExternalCallThatCantCauseGC scope(masm);
4119 ExternalReference::incremental_marking_record_write_function(isolate()),
4121 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
4125 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4126 MacroAssembler* masm,
4127 OnNoNeedToInformIncrementalMarker on_no_need,
4130 Label need_incremental;
4131 Label need_incremental_pop_scratch;
4133 __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4134 __ ldr(regs_.scratch1(),
4135 MemOperand(regs_.scratch0(),
4136 MemoryChunk::kWriteBarrierCounterOffset));
4137 __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
4138 __ str(regs_.scratch1(),
4139 MemOperand(regs_.scratch0(),
4140 MemoryChunk::kWriteBarrierCounterOffset));
4141 __ b(mi, &need_incremental);
4143 // Let's look at the color of the object: If it is not black we don't have
4144 // to inform the incremental marker.
4145 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4147 regs_.Restore(masm);
4148 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4149 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4150 MacroAssembler::kReturnAtEnd);
4157 // Get the value from the slot.
4158 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
4160 if (mode == INCREMENTAL_COMPACTION) {
4161 Label ensure_not_white;
4163 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4164 regs_.scratch1(), // Scratch.
4165 MemoryChunk::kEvacuationCandidateMask,
4169 __ CheckPageFlag(regs_.object(),
4170 regs_.scratch1(), // Scratch.
4171 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4175 __ bind(&ensure_not_white);
4178 // We need extra registers for this, so we push the object and the address
4179 // register temporarily.
4180 __ Push(regs_.object(), regs_.address());
4181 __ EnsureNotWhite(regs_.scratch0(), // The value.
4182 regs_.scratch1(), // Scratch.
4183 regs_.object(), // Scratch.
4184 regs_.address(), // Scratch.
4185 &need_incremental_pop_scratch);
4186 __ Pop(regs_.object(), regs_.address());
4188 regs_.Restore(masm);
4189 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4190 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4191 MacroAssembler::kReturnAtEnd);
4196 __ bind(&need_incremental_pop_scratch);
4197 __ Pop(regs_.object(), regs_.address());
4199 __ bind(&need_incremental);
4201 // Fall through when we need to inform the incremental marker.
4205 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4206 // ----------- S t a t e -------------
4207 // -- r0 : element value to store
4208 // -- r3 : element index as smi
4209 // -- sp[0] : array literal index in function as smi
4210 // -- sp[4] : array literal
4211 // clobbers r1, r2, r4
4212 // -----------------------------------
4215 Label double_elements;
4217 Label slow_elements;
4218 Label fast_elements;
4220 // Get array literal index, array literal and its map.
4221 __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
4222 __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
4223 __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
4225 __ CheckFastElements(r2, r5, &double_elements);
4226 // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
4227 __ JumpIfSmi(r0, &smi_element);
4228 __ CheckFastSmiElements(r2, r5, &fast_elements);
4230 // Store into the array literal requires a elements transition. Call into
4232 __ bind(&slow_elements);
4234 __ Push(r1, r3, r0);
4235 __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4236 __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
4238 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4240 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4241 __ bind(&fast_elements);
4242 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4243 __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
4244 __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4245 __ str(r0, MemOperand(r6, 0));
4246 // Update the write barrier for the array store.
4247 __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
4248 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4251 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4252 // and value is Smi.
4253 __ bind(&smi_element);
4254 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4255 __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
4256 __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
4259 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
4260 __ bind(&double_elements);
4261 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4262 __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
4267 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4268 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4269 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4270 int parameter_count_offset =
4271 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4272 __ ldr(r1, MemOperand(fp, parameter_count_offset));
4273 if (function_mode() == JS_FUNCTION_STUB_MODE) {
4274 __ add(r1, r1, Operand(1));
4276 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4277 __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
4283 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4284 EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
4285 LoadICStub stub(isolate(), state());
4286 stub.GenerateForTrampoline(masm);
4290 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4291 EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
4292 KeyedLoadICStub stub(isolate(), state());
4293 stub.GenerateForTrampoline(masm);
4297 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
4298 EmitLoadTypeFeedbackVector(masm, r2);
4299 CallICStub stub(isolate(), state());
4300 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4304 void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
4305 EmitLoadTypeFeedbackVector(masm, r2);
4306 CallIC_ArrayStub stub(isolate(), state());
4307 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4311 void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
4314 void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
4315 GenerateImpl(masm, true);
4319 static void HandleArrayCases(MacroAssembler* masm, Register receiver,
4320 Register key, Register vector, Register slot,
4321 Register feedback, Register receiver_map,
4322 Register scratch1, Register scratch2,
4323 bool is_polymorphic, Label* miss) {
4324 // feedback initially contains the feedback array
4325 Label next_loop, prepare_next;
4326 Label start_polymorphic;
4328 Register cached_map = scratch1;
4331 FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
4332 __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
4333 __ cmp(receiver_map, cached_map);
4334 __ b(ne, &start_polymorphic);
4335 // found, now call handler.
4336 Register handler = feedback;
4337 __ ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
4338 __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
4341 Register length = scratch2;
4342 __ bind(&start_polymorphic);
4343 __ ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
4344 if (!is_polymorphic) {
4345 // If the IC could be monomorphic we have to make sure we don't go past the
4346 // end of the feedback array.
4347 __ cmp(length, Operand(Smi::FromInt(2)));
4351 Register too_far = length;
4352 Register pointer_reg = feedback;
4354 // +-----+------+------+-----+-----+ ... ----+
4355 // | map | len | wm0 | h0 | wm1 | hN |
4356 // +-----+------+------+-----+-----+ ... ----+
4360 // pointer_reg too_far
4361 // aka feedback scratch2
4362 // also need receiver_map
4363 // use cached_map (scratch1) to look in the weak map values.
4364 __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(length));
4365 __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4366 __ add(pointer_reg, feedback,
4367 Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
4369 __ bind(&next_loop);
4370 __ ldr(cached_map, MemOperand(pointer_reg));
4371 __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
4372 __ cmp(receiver_map, cached_map);
4373 __ b(ne, &prepare_next);
4374 __ ldr(handler, MemOperand(pointer_reg, kPointerSize));
4375 __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
4377 __ bind(&prepare_next);
4378 __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
4379 __ cmp(pointer_reg, too_far);
4380 __ b(lt, &next_loop);
4382 // We exhausted our array of map handler pairs.
4387 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
4388 Register receiver_map, Register feedback,
4389 Register vector, Register slot,
4390 Register scratch, Label* compare_map,
4391 Label* load_smi_map, Label* try_array) {
4392 __ JumpIfSmi(receiver, load_smi_map);
4393 __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
4394 __ bind(compare_map);
4395 Register cached_map = scratch;
4396 // Move the weak map into the weak_cell register.
4397 __ ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
4398 __ cmp(cached_map, receiver_map);
4399 __ b(ne, try_array);
4400 Register handler = feedback;
4401 __ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
4403 FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
4404 __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
4408 void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4409 Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r1
4410 Register name = LoadWithVectorDescriptor::NameRegister(); // r2
4411 Register vector = LoadWithVectorDescriptor::VectorRegister(); // r3
4412 Register slot = LoadWithVectorDescriptor::SlotRegister(); // r0
4413 Register feedback = r4;
4414 Register receiver_map = r5;
4415 Register scratch1 = r6;
4417 __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
4418 __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
4420 // Try to quickly handle the monomorphic case without knowing for sure
4421 // if we have a weak cell in feedback. We do know it's safe to look
4422 // at WeakCell::kValueOffset.
4423 Label try_array, load_smi_map, compare_map;
4424 Label not_array, miss;
4425 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
4426 scratch1, &compare_map, &load_smi_map, &try_array);
4428 // Is it a fixed array?
4429 __ bind(&try_array);
4430 __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
4431 __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
4432 __ b(ne, ¬_array);
4433 HandleArrayCases(masm, receiver, name, vector, slot, feedback, receiver_map,
4434 scratch1, r9, true, &miss);
4436 __ bind(¬_array);
4437 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
4439 Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
4440 Code::ComputeHandlerFlags(Code::LOAD_IC));
4441 masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
4442 receiver, name, feedback,
4443 receiver_map, scratch1, r9);
4446 LoadIC::GenerateMiss(masm);
4449 __ bind(&load_smi_map);
4450 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
4451 __ jmp(&compare_map);
4455 void KeyedLoadICStub::Generate(MacroAssembler* masm) {
4456 GenerateImpl(masm, false);
4460 void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
4461 GenerateImpl(masm, true);
4465 void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4466 Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r1
4467 Register key = LoadWithVectorDescriptor::NameRegister(); // r2
4468 Register vector = LoadWithVectorDescriptor::VectorRegister(); // r3
4469 Register slot = LoadWithVectorDescriptor::SlotRegister(); // r0
4470 Register feedback = r4;
4471 Register receiver_map = r5;
4472 Register scratch1 = r6;
4474 __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
4475 __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
4477 // Try to quickly handle the monomorphic case without knowing for sure
4478 // if we have a weak cell in feedback. We do know it's safe to look
4479 // at WeakCell::kValueOffset.
4480 Label try_array, load_smi_map, compare_map;
4481 Label not_array, miss;
4482 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
4483 scratch1, &compare_map, &load_smi_map, &try_array);
4485 __ bind(&try_array);
4486 // Is it a fixed array?
4487 __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
4488 __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
4489 __ b(ne, ¬_array);
4491 // We have a polymorphic element handler.
4492 Label polymorphic, try_poly_name;
4493 __ bind(&polymorphic);
4494 HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
4495 scratch1, r9, true, &miss);
4497 __ bind(¬_array);
4499 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
4500 __ b(ne, &try_poly_name);
4501 Handle<Code> megamorphic_stub =
4502 KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
4503 __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
4505 __ bind(&try_poly_name);
4506 // We might have a name in feedback, and a fixed array in the next slot.
4507 __ cmp(key, feedback);
4509 // If the name comparison succeeded, we know we have a fixed array with
4510 // at least one map/handler pair.
4511 __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
4513 FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
4514 HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
4515 scratch1, r9, false, &miss);
4518 KeyedLoadIC::GenerateMiss(masm);
4520 __ bind(&load_smi_map);
4521 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
4522 __ jmp(&compare_map);
4526 void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
4527 EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
4528 VectorStoreICStub stub(isolate(), state());
4529 stub.GenerateForTrampoline(masm);
4533 void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
4534 EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
4535 VectorKeyedStoreICStub stub(isolate(), state());
4536 stub.GenerateForTrampoline(masm);
4540 void VectorStoreICStub::Generate(MacroAssembler* masm) {
4541 GenerateImpl(masm, false);
4545 void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
4546 GenerateImpl(masm, true);
4550 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4553 // TODO(mvstanton): Implement.
4555 StoreIC::GenerateMiss(masm);
4559 void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
4560 GenerateImpl(masm, false);
4564 void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
4565 GenerateImpl(masm, true);
4569 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4572 // TODO(mvstanton): Implement.
4574 KeyedStoreIC::GenerateMiss(masm);
4578 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4579 if (masm->isolate()->function_entry_hook() != NULL) {
4580 ProfileEntryHookStub stub(masm->isolate());
4581 PredictableCodeSizeScope predictable(masm);
4582 predictable.ExpectSize(masm->CallStubSize(&stub) +
4583 2 * Assembler::kInstrSize);
4591 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4592 // The entry hook is a "push lr" instruction, followed by a call.
4593 const int32_t kReturnAddressDistanceFromFunctionStart =
4594 3 * Assembler::kInstrSize;
4596 // This should contain all kCallerSaved registers.
4597 const RegList kSavedRegs =
4604 // We also save lr, so the count here is one higher than the mask indicates.
4605 const int32_t kNumSavedRegs = 7;
4607 DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);
4609 // Save all caller-save registers as this may be called from anywhere.
4610 __ stm(db_w, sp, kSavedRegs | lr.bit());
4612 // Compute the function's address for the first argument.
4613 __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
4615 // The caller's return address is above the saved temporaries.
4616 // Grab that for the second argument to the hook.
4617 __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
4619 // Align the stack if necessary.
4620 int frame_alignment = masm->ActivationFrameAlignment();
4621 if (frame_alignment > kPointerSize) {
4623 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4624 __ and_(sp, sp, Operand(-frame_alignment));
4627 #if V8_HOST_ARCH_ARM
4628 int32_t entry_hook =
4629 reinterpret_cast<int32_t>(isolate()->function_entry_hook());
4630 __ mov(ip, Operand(entry_hook));
4632 // Under the simulator we need to indirect the entry hook through a
4633 // trampoline function at a known address.
4634 // It additionally takes an isolate as a third parameter
4635 __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
4637 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4638 __ mov(ip, Operand(ExternalReference(&dispatcher,
4639 ExternalReference::BUILTIN_CALL,
4644 // Restore the stack pointer if needed.
4645 if (frame_alignment > kPointerSize) {
4649 // Also pop pc to get Ret(0).
4650 __ ldm(ia_w, sp, kSavedRegs | pc.bit());
4655 static void CreateArrayDispatch(MacroAssembler* masm,
4656 AllocationSiteOverrideMode mode) {
4657 if (mode == DISABLE_ALLOCATION_SITES) {
4658 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4659 __ TailCallStub(&stub);
4660 } else if (mode == DONT_OVERRIDE) {
4661 int last_index = GetSequenceIndexFromFastElementsKind(
4662 TERMINAL_FAST_ELEMENTS_KIND);
4663 for (int i = 0; i <= last_index; ++i) {
4664 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4665 __ cmp(r3, Operand(kind));
4666 T stub(masm->isolate(), kind);
4667 __ TailCallStub(&stub, eq);
4670 // If we reached this point there is a problem.
4671 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4678 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4679 AllocationSiteOverrideMode mode) {
4680 // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4681 // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4682 // r0 - number of arguments
4683 // r1 - constructor?
4684 // sp[0] - last argument
4685 Label normal_sequence;
4686 if (mode == DONT_OVERRIDE) {
4687 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4688 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4689 STATIC_ASSERT(FAST_ELEMENTS == 2);
4690 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4691 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
4692 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4694 // is the low bit set? If so, we are holey and that is good.
4695 __ tst(r3, Operand(1));
4696 __ b(ne, &normal_sequence);
4699 // look at the first argument
4700 __ ldr(r5, MemOperand(sp, 0));
4701 __ cmp(r5, Operand::Zero());
4702 __ b(eq, &normal_sequence);
4704 if (mode == DISABLE_ALLOCATION_SITES) {
4705 ElementsKind initial = GetInitialFastElementsKind();
4706 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4708 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4710 DISABLE_ALLOCATION_SITES);
4711 __ TailCallStub(&stub_holey);
4713 __ bind(&normal_sequence);
4714 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4716 DISABLE_ALLOCATION_SITES);
4717 __ TailCallStub(&stub);
4718 } else if (mode == DONT_OVERRIDE) {
4719 // We are going to create a holey array, but our kind is non-holey.
4720 // Fix kind and retry (only if we have an allocation site in the slot).
4721 __ add(r3, r3, Operand(1));
4723 if (FLAG_debug_code) {
4724 __ ldr(r5, FieldMemOperand(r2, 0));
4725 __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
4726 __ Assert(eq, kExpectedAllocationSite);
4729 // Save the resulting elements kind in type info. We can't just store r3
4730 // in the AllocationSite::transition_info field because elements kind is
4731 // restricted to a portion of the field...upper bits need to be left alone.
4732 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4733 __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
4734 __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
4735 __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
4737 __ bind(&normal_sequence);
4738 int last_index = GetSequenceIndexFromFastElementsKind(
4739 TERMINAL_FAST_ELEMENTS_KIND);
4740 for (int i = 0; i <= last_index; ++i) {
4741 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4742 __ cmp(r3, Operand(kind));
4743 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4744 __ TailCallStub(&stub, eq);
4747 // If we reached this point there is a problem.
4748 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4756 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4757 int to_index = GetSequenceIndexFromFastElementsKind(
4758 TERMINAL_FAST_ELEMENTS_KIND);
4759 for (int i = 0; i <= to_index; ++i) {
4760 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4761 T stub(isolate, kind);
4763 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4764 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4771 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4772 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4774 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4776 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4781 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4783 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4784 for (int i = 0; i < 2; i++) {
4785 // For internal arrays we only need a few things
4786 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4788 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4790 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4796 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4797 MacroAssembler* masm,
4798 AllocationSiteOverrideMode mode) {
4799 if (argument_count() == ANY) {
4800 Label not_zero_case, not_one_case;
4802 __ b(ne, ¬_zero_case);
4803 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4805 __ bind(¬_zero_case);
4806 __ cmp(r0, Operand(1));
4807 __ b(gt, ¬_one_case);
4808 CreateArrayDispatchOneArgument(masm, mode);
4810 __ bind(¬_one_case);
4811 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4812 } else if (argument_count() == NONE) {
4813 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4814 } else if (argument_count() == ONE) {
4815 CreateArrayDispatchOneArgument(masm, mode);
4816 } else if (argument_count() == MORE_THAN_ONE) {
4817 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4824 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4825 // ----------- S t a t e -------------
4826 // -- r0 : argc (only if argument_count() == ANY)
4827 // -- r1 : constructor
4828 // -- r2 : AllocationSite or undefined
4829 // -- r3 : original constructor
4830 // -- sp[0] : return address
4831 // -- sp[4] : last argument
4832 // -----------------------------------
4834 if (FLAG_debug_code) {
4835 // The array construct code is only set for the global and natives
4836 // builtin Array functions which always have maps.
4838 // Initial map for the builtin Array function should be a map.
4839 __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4840 // Will both indicate a NULL and a Smi.
4841 __ tst(r4, Operand(kSmiTagMask));
4842 __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
4843 __ CompareObjectType(r4, r4, r5, MAP_TYPE);
4844 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4846 // We should either have undefined in r2 or a valid AllocationSite
4847 __ AssertUndefinedOrAllocationSite(r2, r4);
4852 __ b(ne, &subclassing);
4855 // Get the elements kind and case on that.
4856 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
4859 __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
4861 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4862 __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
4863 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4866 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4868 __ bind(&subclassing);
4873 switch (argument_count()) {
4876 __ add(r0, r0, Operand(2));
4879 __ mov(r0, Operand(2));
4882 __ mov(r0, Operand(3));
4886 __ JumpToExternalReference(
4887 ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
4891 void InternalArrayConstructorStub::GenerateCase(
4892 MacroAssembler* masm, ElementsKind kind) {
4893 __ cmp(r0, Operand(1));
4895 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4896 __ TailCallStub(&stub0, lo);
4898 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4899 __ TailCallStub(&stubN, hi);
4901 if (IsFastPackedElementsKind(kind)) {
4902 // We might need to create a holey array
4903 // look at the first argument
4904 __ ldr(r3, MemOperand(sp, 0));
4905 __ cmp(r3, Operand::Zero());
4907 InternalArraySingleArgumentConstructorStub
4908 stub1_holey(isolate(), GetHoleyElementsKind(kind));
4909 __ TailCallStub(&stub1_holey, ne);
4912 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4913 __ TailCallStub(&stub1);
4917 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4918 // ----------- S t a t e -------------
4920 // -- r1 : constructor
4921 // -- sp[0] : return address
4922 // -- sp[4] : last argument
4923 // -----------------------------------
4925 if (FLAG_debug_code) {
4926 // The array construct code is only set for the global and natives
4927 // builtin Array functions which always have maps.
4929 // Initial map for the builtin Array function should be a map.
4930 __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4931 // Will both indicate a NULL and a Smi.
4932 __ tst(r3, Operand(kSmiTagMask));
4933 __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
4934 __ CompareObjectType(r3, r3, r4, MAP_TYPE);
4935 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4938 // Figure out the right elements kind
4939 __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
4940 // Load the map's "bit field 2" into |result|. We only need the first byte,
4941 // but the following bit field extraction takes care of that anyway.
4942 __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
4943 // Retrieve elements_kind from bit field 2.
4944 __ DecodeField<Map::ElementsKindBits>(r3);
4946 if (FLAG_debug_code) {
4948 __ cmp(r3, Operand(FAST_ELEMENTS));
4950 __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
4952 kInvalidElementsKindForInternalArrayOrInternalPackedArray);
4956 Label fast_elements_case;
4957 __ cmp(r3, Operand(FAST_ELEMENTS));
4958 __ b(eq, &fast_elements_case);
4959 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4961 __ bind(&fast_elements_case);
4962 GenerateCase(masm, FAST_ELEMENTS);
4966 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
4967 Register context = cp;
4968 Register result = r0;
4971 // Go up the context chain to the script context.
4972 for (int i = 0; i < depth(); ++i) {
4973 __ ldr(result, ContextOperand(context, Context::PREVIOUS_INDEX));
4977 // Load the PropertyCell value at the specified slot.
4978 __ add(result, context, Operand(slot, LSL, kPointerSizeLog2));
4979 __ ldr(result, ContextOperand(result));
4980 __ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
4982 // If the result is not the_hole, return. Otherwise, handle in the runtime.
4983 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
4986 // Fallback to runtime.
4989 __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
4993 void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
4994 Register value = r0;
4998 Register cell_details = r4;
4999 Register cell_value = r5;
5000 Register cell_value_map = r6;
5001 Register scratch = r9;
5003 Register context = cp;
5004 Register context_temp = cell;
5006 Label fast_heapobject_case, fast_smi_case, slow_case;
5008 if (FLAG_debug_code) {
5009 __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
5010 __ Check(ne, kUnexpectedValue);
5013 // Go up the context chain to the script context.
5014 for (int i = 0; i < depth(); i++) {
5015 __ ldr(context_temp, ContextOperand(context, Context::PREVIOUS_INDEX));
5016 context = context_temp;
5019 // Load the PropertyCell at the specified slot.
5020 __ add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
5021 __ ldr(cell, ContextOperand(cell));
5023 // Load PropertyDetails for the cell (actually only the cell_type and kind).
5024 __ ldr(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
5025 __ SmiUntag(cell_details);
5026 __ and_(cell_details, cell_details,
5027 Operand(PropertyDetails::PropertyCellTypeField::kMask |
5028 PropertyDetails::KindField::kMask |
5029 PropertyDetails::kAttributesReadOnlyMask));
5031 // Check if PropertyCell holds mutable data.
5032 Label not_mutable_data;
5033 __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
5034 PropertyCellType::kMutable) |
5035 PropertyDetails::KindField::encode(kData)));
5036 __ b(ne, ¬_mutable_data);
5037 __ JumpIfSmi(value, &fast_smi_case);
5039 __ bind(&fast_heapobject_case);
5040 __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
5041 // RecordWriteField clobbers the value register, so we copy it before the
5043 __ mov(r4, Operand(value));
5044 __ RecordWriteField(cell, PropertyCell::kValueOffset, r4, scratch,
5045 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
5049 __ bind(¬_mutable_data);
5050 // Check if PropertyCell value matches the new value (relevant for Constant,
5051 // ConstantType and Undefined cells).
5052 Label not_same_value;
5053 __ ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
5054 __ cmp(cell_value, value);
5055 __ b(ne, ¬_same_value);
5057 // Make sure the PropertyCell is not marked READ_ONLY.
5058 __ tst(cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
5059 __ b(ne, &slow_case);
5061 if (FLAG_debug_code) {
5063 // This can only be true for Constant, ConstantType and Undefined cells,
5064 // because we never store the_hole via this stub.
5065 __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
5066 PropertyCellType::kConstant) |
5067 PropertyDetails::KindField::encode(kData)));
5069 __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
5070 PropertyCellType::kConstantType) |
5071 PropertyDetails::KindField::encode(kData)));
5073 __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
5074 PropertyCellType::kUndefined) |
5075 PropertyDetails::KindField::encode(kData)));
5076 __ Check(eq, kUnexpectedValue);
5080 __ bind(¬_same_value);
5082 // Check if PropertyCell contains data with constant type (and is not
5084 __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
5085 PropertyCellType::kConstantType) |
5086 PropertyDetails::KindField::encode(kData)));
5087 __ b(ne, &slow_case);
5089 // Now either both old and new values must be smis or both must be heap
5090 // objects with same map.
5091 Label value_is_heap_object;
5092 __ JumpIfNotSmi(value, &value_is_heap_object);
5093 __ JumpIfNotSmi(cell_value, &slow_case);
5094 // Old and new values are smis, no need for a write barrier here.
5095 __ bind(&fast_smi_case);
5096 __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
5099 __ bind(&value_is_heap_object);
5100 __ JumpIfSmi(cell_value, &slow_case);
5102 __ ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
5103 __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5104 __ cmp(cell_value_map, scratch);
5105 __ b(eq, &fast_heapobject_case);
5107 // Fallback to runtime.
5108 __ bind(&slow_case);
5110 __ Push(slot, value);
5111 __ TailCallRuntime(is_strict(language_mode())
5112 ? Runtime::kStoreGlobalViaContext_Strict
5113 : Runtime::kStoreGlobalViaContext_Sloppy,
5118 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
5119 return ref0.address() - ref1.address();
5123 // Calls an API function. Allocates HandleScope, extracts returned value
5124 // from handle and propagates exceptions. Restores context. stack_space
5125 // - space to be unwound on exit (includes the call JS arguments space and
5126 // the additional space allocated for the fast call).
5127 static void CallApiFunctionAndReturn(MacroAssembler* masm,
5128 Register function_address,
5129 ExternalReference thunk_ref,
5131 MemOperand* stack_space_operand,
5132 MemOperand return_value_operand,
5133 MemOperand* context_restore_operand) {
5134 Isolate* isolate = masm->isolate();
5135 ExternalReference next_address =
5136 ExternalReference::handle_scope_next_address(isolate);
5137 const int kNextOffset = 0;
5138 const int kLimitOffset = AddressOffset(
5139 ExternalReference::handle_scope_limit_address(isolate), next_address);
5140 const int kLevelOffset = AddressOffset(
5141 ExternalReference::handle_scope_level_address(isolate), next_address);
5143 DCHECK(function_address.is(r1) || function_address.is(r2));
5145 Label profiler_disabled;
5146 Label end_profiler_check;
5147 __ mov(r9, Operand(ExternalReference::is_profiling_address(isolate)));
5148 __ ldrb(r9, MemOperand(r9, 0));
5149 __ cmp(r9, Operand(0));
5150 __ b(eq, &profiler_disabled);
5152 // Additional parameter is the address of the actual callback.
5153 __ mov(r3, Operand(thunk_ref));
5154 __ jmp(&end_profiler_check);
5156 __ bind(&profiler_disabled);
5157 __ Move(r3, function_address);
5158 __ bind(&end_profiler_check);
5160 // Allocate HandleScope in callee-save registers.
5161 __ mov(r9, Operand(next_address));
5162 __ ldr(r4, MemOperand(r9, kNextOffset));
5163 __ ldr(r5, MemOperand(r9, kLimitOffset));
5164 __ ldr(r6, MemOperand(r9, kLevelOffset));
5165 __ add(r6, r6, Operand(1));
5166 __ str(r6, MemOperand(r9, kLevelOffset));
5168 if (FLAG_log_timer_events) {
5169 FrameScope frame(masm, StackFrame::MANUAL);
5170 __ PushSafepointRegisters();
5171 __ PrepareCallCFunction(1, r0);
5172 __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
5173 __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
5175 __ PopSafepointRegisters();
5178 // Native call returns to the DirectCEntry stub which redirects to the
5179 // return address pushed on stack (could have moved after GC).
5180 // DirectCEntry stub itself is generated early and never moves.
5181 DirectCEntryStub stub(isolate);
5182 stub.GenerateCall(masm, r3);
5184 if (FLAG_log_timer_events) {
5185 FrameScope frame(masm, StackFrame::MANUAL);
5186 __ PushSafepointRegisters();
5187 __ PrepareCallCFunction(1, r0);
5188 __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
5189 __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
5191 __ PopSafepointRegisters();
5194 Label promote_scheduled_exception;
5195 Label delete_allocated_handles;
5196 Label leave_exit_frame;
5197 Label return_value_loaded;
5199 // load value from ReturnValue
5200 __ ldr(r0, return_value_operand);
5201 __ bind(&return_value_loaded);
5202 // No more valid handles (the result handle was the last one). Restore
5203 // previous handle scope.
5204 __ str(r4, MemOperand(r9, kNextOffset));
5205 if (__ emit_debug_code()) {
5206 __ ldr(r1, MemOperand(r9, kLevelOffset));
5208 __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
5210 __ sub(r6, r6, Operand(1));
5211 __ str(r6, MemOperand(r9, kLevelOffset));
5212 __ ldr(ip, MemOperand(r9, kLimitOffset));
5214 __ b(ne, &delete_allocated_handles);
5216 // Leave the API exit frame.
5217 __ bind(&leave_exit_frame);
5218 bool restore_context = context_restore_operand != NULL;
5219 if (restore_context) {
5220 __ ldr(cp, *context_restore_operand);
5222 // LeaveExitFrame expects unwind space to be in a register.
5223 if (stack_space_operand != NULL) {
5224 __ ldr(r4, *stack_space_operand);
5226 __ mov(r4, Operand(stack_space));
5228 __ LeaveExitFrame(false, r4, !restore_context, stack_space_operand != NULL);
5230 // Check if the function scheduled an exception.
5231 __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
5232 __ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
5233 __ ldr(r5, MemOperand(ip));
5235 __ b(ne, &promote_scheduled_exception);
5239 // Re-throw by promoting a scheduled exception.
5240 __ bind(&promote_scheduled_exception);
5241 __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
5243 // HandleScope limit has changed. Delete allocated extensions.
5244 __ bind(&delete_allocated_handles);
5245 __ str(r5, MemOperand(r9, kLimitOffset));
5247 __ PrepareCallCFunction(1, r5);
5248 __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
5249 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
5252 __ jmp(&leave_exit_frame);
5256 static void CallApiFunctionStubHelper(MacroAssembler* masm,
5257 const ParameterCount& argc,
5258 bool return_first_arg,
5259 bool call_data_undefined) {
5260 // ----------- S t a t e -------------
5262 // -- r4 : call_data
5264 // -- r1 : api_function_address
5265 // -- r3 : number of arguments if argc is a register
5268 // -- sp[0] : last argument
5270 // -- sp[(argc - 1)* 4] : first argument
5271 // -- sp[argc * 4] : receiver
5272 // -----------------------------------
5274 Register callee = r0;
5275 Register call_data = r4;
5276 Register holder = r2;
5277 Register api_function_address = r1;
5278 Register context = cp;
5280 typedef FunctionCallbackArguments FCA;
5282 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5283 STATIC_ASSERT(FCA::kCalleeIndex == 5);
5284 STATIC_ASSERT(FCA::kDataIndex == 4);
5285 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5286 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5287 STATIC_ASSERT(FCA::kIsolateIndex == 1);
5288 STATIC_ASSERT(FCA::kHolderIndex == 0);
5289 STATIC_ASSERT(FCA::kArgsLength == 7);
5291 DCHECK(argc.is_immediate() || r3.is(argc.reg()));
5295 // load context from callee
5296 __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5304 Register scratch = call_data;
5305 if (!call_data_undefined) {
5306 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5310 // return value default
5313 __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
5318 // Prepare arguments.
5319 __ mov(scratch, sp);
5321 // Allocate the v8::Arguments structure in the arguments' space since
5322 // it's not controlled by GC.
5323 const int kApiStackSpace = 4;
5325 FrameScope frame_scope(masm, StackFrame::MANUAL);
5326 __ EnterExitFrame(false, kApiStackSpace);
5328 DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
5329 // r0 = FunctionCallbackInfo&
5330 // Arguments is after the return address.
5331 __ add(r0, sp, Operand(1 * kPointerSize));
5332 // FunctionCallbackInfo::implicit_args_
5333 __ str(scratch, MemOperand(r0, 0 * kPointerSize));
5334 if (argc.is_immediate()) {
5335 // FunctionCallbackInfo::values_
5337 Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
5338 __ str(ip, MemOperand(r0, 1 * kPointerSize));
5339 // FunctionCallbackInfo::length_ = argc
5340 __ mov(ip, Operand(argc.immediate()));
5341 __ str(ip, MemOperand(r0, 2 * kPointerSize));
5342 // FunctionCallbackInfo::is_construct_call_ = 0
5343 __ mov(ip, Operand::Zero());
5344 __ str(ip, MemOperand(r0, 3 * kPointerSize));
5346 // FunctionCallbackInfo::values_
5347 __ add(ip, scratch, Operand(argc.reg(), LSL, kPointerSizeLog2));
5348 __ add(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
5349 __ str(ip, MemOperand(r0, 1 * kPointerSize));
5350 // FunctionCallbackInfo::length_ = argc
5351 __ str(argc.reg(), MemOperand(r0, 2 * kPointerSize));
5352 // FunctionCallbackInfo::is_construct_call_
5353 __ add(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
5354 __ mov(ip, Operand(argc.reg(), LSL, kPointerSizeLog2));
5355 __ str(ip, MemOperand(r0, 3 * kPointerSize));
5358 ExternalReference thunk_ref =
5359 ExternalReference::invoke_function_callback(masm->isolate());
5361 AllowExternalCallThatCantCauseGC scope(masm);
5362 MemOperand context_restore_operand(
5363 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5364 // Stores return the first js argument
5365 int return_value_offset = 0;
5366 if (return_first_arg) {
5367 return_value_offset = 2 + FCA::kArgsLength;
5369 return_value_offset = 2 + FCA::kReturnValueOffset;
5371 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5372 int stack_space = 0;
5373 MemOperand is_construct_call_operand = MemOperand(sp, 4 * kPointerSize);
5374 MemOperand* stack_space_operand = &is_construct_call_operand;
5375 if (argc.is_immediate()) {
5376 stack_space = argc.immediate() + FCA::kArgsLength + 1;
5377 stack_space_operand = NULL;
5379 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
5380 stack_space_operand, return_value_operand,
5381 &context_restore_operand);
5385 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5386 bool call_data_undefined = this->call_data_undefined();
5387 CallApiFunctionStubHelper(masm, ParameterCount(r3), false,
5388 call_data_undefined);
5392 void CallApiAccessorStub::Generate(MacroAssembler* masm) {
5393 bool is_store = this->is_store();
5394 int argc = this->argc();
5395 bool call_data_undefined = this->call_data_undefined();
5396 CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
5397 call_data_undefined);
5401 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5402 // ----------- S t a t e -------------
5404 // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
5406 // -- r2 : api_function_address
5407 // -----------------------------------
5409 Register api_function_address = ApiGetterDescriptor::function_address();
5410 DCHECK(api_function_address.is(r2));
5412 __ mov(r0, sp); // r0 = Handle<Name>
5413 __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
5415 const int kApiStackSpace = 1;
5416 FrameScope frame_scope(masm, StackFrame::MANUAL);
5417 __ EnterExitFrame(false, kApiStackSpace);
5419 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5420 // r1 (internal::Object** args_) as the data.
5421 __ str(r1, MemOperand(sp, 1 * kPointerSize));
5422 __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
5424 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5426 ExternalReference thunk_ref =
5427 ExternalReference::invoke_accessor_getter_callback(isolate());
5428 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
5429 kStackUnwindSpace, NULL,
5430 MemOperand(fp, 6 * kPointerSize), NULL);
5436 } // namespace internal
5439 #endif // V8_TARGET_ARCH_ARM