1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #if V8_TARGET_ARCH_ARM64
9 #include "src/bootstrapper.h"
10 #include "src/code-stubs.h"
11 #include "src/codegen.h"
12 #include "src/ic/handler-compiler.h"
13 #include "src/ic/ic.h"
14 #include "src/isolate.h"
15 #include "src/jsregexp.h"
16 #include "src/regexp-macro-assembler.h"
17 #include "src/runtime/runtime.h"
23 static void InitializeArrayConstructorDescriptor(
24 Isolate* isolate, CodeStubDescriptor* descriptor,
25 int constant_stack_parameter_count) {
28 // x2: allocation site with elements kind
29 // x0: number of arguments to the constructor function
30 Address deopt_handler = Runtime::FunctionForId(
31 Runtime::kArrayConstructor)->entry;
33 if (constant_stack_parameter_count == 0) {
34 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
35 JS_FUNCTION_STUB_MODE);
37 descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
38 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
43 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
44 CodeStubDescriptor* descriptor) {
45 InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
49 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
50 CodeStubDescriptor* descriptor) {
51 InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
55 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
56 CodeStubDescriptor* descriptor) {
57 InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
61 static void InitializeInternalArrayConstructorDescriptor(
62 Isolate* isolate, CodeStubDescriptor* descriptor,
63 int constant_stack_parameter_count) {
64 Address deopt_handler = Runtime::FunctionForId(
65 Runtime::kInternalArrayConstructor)->entry;
67 if (constant_stack_parameter_count == 0) {
68 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
69 JS_FUNCTION_STUB_MODE);
71 descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
72 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
77 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
78 CodeStubDescriptor* descriptor) {
79 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
83 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
84 CodeStubDescriptor* descriptor) {
85 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
89 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
90 CodeStubDescriptor* descriptor) {
91 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
95 #define __ ACCESS_MASM(masm)
98 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
99 ExternalReference miss) {
100 // Update the static counter each time a new code stub is generated.
101 isolate()->counters()->code_stubs()->Increment();
103 CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
104 int param_count = descriptor.GetEnvironmentParameterCount();
106 // Call the runtime system in a fresh internal frame.
107 FrameScope scope(masm, StackFrame::INTERNAL);
108 DCHECK((param_count == 0) ||
109 x0.Is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
112 MacroAssembler::PushPopQueue queue(masm);
113 for (int i = 0; i < param_count; ++i) {
114 queue.Queue(descriptor.GetEnvironmentParameterRegister(i));
118 __ CallExternalReference(miss, param_count);
125 void DoubleToIStub::Generate(MacroAssembler* masm) {
127 Register input = source();
128 Register result = destination();
129 DCHECK(is_truncating());
131 DCHECK(result.Is64Bits());
132 DCHECK(jssp.Is(masm->StackPointer()));
134 int double_offset = offset();
136 DoubleRegister double_scratch = d0; // only used if !skip_fastpath()
137 Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
139 Register scratch2 = GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
141 __ Push(scratch1, scratch2);
142 // Account for saved regs if input is jssp.
143 if (input.is(jssp)) double_offset += 2 * kPointerSize;
145 if (!skip_fastpath()) {
146 __ Push(double_scratch);
147 if (input.is(jssp)) double_offset += 1 * kDoubleSize;
148 __ Ldr(double_scratch, MemOperand(input, double_offset));
149 // Try to convert with a FPU convert instruction. This handles all
150 // non-saturating cases.
151 __ TryConvertDoubleToInt64(result, double_scratch, &done);
152 __ Fmov(result, double_scratch);
154 __ Ldr(result, MemOperand(input, double_offset));
157 // If we reach here we need to manually convert the input to an int32.
159 // Extract the exponent.
160 Register exponent = scratch1;
161 __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
162 HeapNumber::kExponentBits);
164 // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
165 // the mantissa gets shifted completely out of the int32_t result.
166 __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
167 __ CzeroX(result, ge);
170 // The Fcvtzs sequence handles all cases except where the conversion causes
171 // signed overflow in the int64_t target. Since we've already handled
172 // exponents >= 84, we can guarantee that 63 <= exponent < 84.
174 if (masm->emit_debug_code()) {
175 __ Cmp(exponent, HeapNumber::kExponentBias + 63);
176 // Exponents less than this should have been handled by the Fcvt case.
177 __ Check(ge, kUnexpectedValue);
180 // Isolate the mantissa bits, and set the implicit '1'.
181 Register mantissa = scratch2;
182 __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
183 __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
185 // Negate the mantissa if necessary.
186 __ Tst(result, kXSignMask);
187 __ Cneg(mantissa, mantissa, ne);
189 // Shift the mantissa bits into the correct place. We know that we have to shift
190 // it left here, because exponent >= 63 >= kMantissaBits.
191 __ Sub(exponent, exponent,
192 HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
193 __ Lsl(result, mantissa, exponent);
196 if (!skip_fastpath()) {
197 __ Pop(double_scratch);
199 __ Pop(scratch2, scratch1);
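// Illustrative sketch (not part of the stub): the manual truncation path
// above in plain C++, assuming IEEE-754 doubles and the constants used here
// (kMantissaBits = 52, kExponentBits = 11, kExponentBias = 1023). The stub
// only reaches that path when the Fcvtzs fast path overflowed, i.e. when the
// unbiased exponent is at least 63; the function name and shape are
// hypothetical.
#include <cstdint>
#include <cstring>

static int32_t SketchTruncateLargeDoubleToInt32(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  const int kMantissaBits = 52;
  const int kExponentBits = 11;
  const int kExponentBias = 1023;
  int exponent =
      static_cast<int>((bits >> kMantissaBits) & ((1 << kExponentBits) - 1));
  // An exponent of 84 (kMantissaBits + 32) or more shifts the mantissa
  // completely out of the low 32 bits, so the result is 0.
  if (exponent >= kExponentBias + kMantissaBits + 32) return 0;
  // Isolate the mantissa and set the implicit '1'.
  uint64_t mantissa = (bits & ((uint64_t{1} << kMantissaBits) - 1)) |
                      (uint64_t{1} << kMantissaBits);
  // Negate the mantissa if the sign bit is set (as Cneg does above).
  int64_t signed_mantissa = (bits >> 63) ? -static_cast<int64_t>(mantissa)
                                         : static_cast<int64_t>(mantissa);
  // The exponent is known to be >= 63 here, so the shift is always to the
  // left, by (unbiased exponent - kMantissaBits) bits.
  int shift = (exponent - kExponentBias) - kMantissaBits;
  uint64_t shifted = static_cast<uint64_t>(signed_mantissa) << shift;
  // The truncated int32 result is the low 32 bits of the shifted value.
  return static_cast<int32_t>(static_cast<uint32_t>(shifted));
}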
204 // See call site for description.
205 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
209 FPRegister double_scratch,
212 DCHECK(!AreAliased(left, right, scratch));
213 Label not_identical, return_equal, heap_number;
214 Register result = x0;
217 __ B(ne, ¬_identical);
219 // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
220 // so we do the second best thing - test it ourselves.
221 // They are both equal and they are not both Smis, so neither of them is a
222 // Smi. If it's not a heap number, then return equal.
223 if ((cond == lt) || (cond == gt)) {
224 __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
226 } else if (cond == eq) {
227 __ JumpIfHeapNumber(right, &heap_number);
229 Register right_type = scratch;
230 __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
232 // Comparing JS objects with <=, >= is complicated.
233 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
235 // Normally here we fall through to return_equal, but undefined is
236 // special: (undefined == undefined) == true, but
237 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
238 if ((cond == le) || (cond == ge)) {
239 __ Cmp(right_type, ODDBALL_TYPE);
240 __ B(ne, &return_equal);
241 __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
243 // undefined <= undefined should fail.
244 __ Mov(result, GREATER);
246 // undefined >= undefined should fail.
247 __ Mov(result, LESS);
253 __ Bind(&return_equal);
255 __ Mov(result, GREATER); // Things aren't less than themselves.
256 } else if (cond == gt) {
257 __ Mov(result, LESS); // Things aren't greater than themselves.
259 __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves.
263 // Cases lt and gt have been handled earlier, and case ne is never seen, as
264 // it is handled in the parser (see Parser::ParseBinaryExpression). We are
265 // only concerned with cases ge, le and eq here.
266 if ((cond != lt) && (cond != gt)) {
267 DCHECK((cond == ge) || (cond == le) || (cond == eq));
268 __ Bind(&heap_number);
269 // Left and right are identical pointers to a heap number object. Return
270 // non-equal if the heap number is a NaN, and equal otherwise. Comparing
271 // the number to itself will set the overflow flag iff the number is NaN.
272 __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
273 __ Fcmp(double_scratch, double_scratch);
274 __ B(vc, &return_equal); // Not NaN, so treat as normal heap number.
277 __ Mov(result, GREATER);
279 __ Mov(result, LESS);
284 // No fall through here.
285 if (FLAG_debug_code) {
289 __ Bind(¬_identical);
293 // See call site for description.
294 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
300 DCHECK(!AreAliased(left, right, left_type, right_type, scratch));
302 if (masm->emit_debug_code()) {
303 // We assume that the arguments are not identical.
305 __ Assert(ne, kExpectedNonIdenticalObjects);
308 // If either operand is a JS object or an oddball value, then they are not
309 // equal since their pointers are different.
310 // There is no test for undetectability in strict equality.
311 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
312 Label right_non_object;
314 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
315 __ B(lt, &right_non_object);
317 // Return non-zero - x0 already contains a non-zero pointer.
318 DCHECK(left.is(x0) || right.is(x0));
319 Label return_not_equal;
320 __ Bind(&return_not_equal);
323 __ Bind(&right_non_object);
325 // Check for oddballs: true, false, null, undefined.
326 __ Cmp(right_type, ODDBALL_TYPE);
328 // If right is not ODDBALL, test left. Otherwise, set eq condition.
329 __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
331 // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
332 // Otherwise, right or left is ODDBALL, so set a ge condition.
333 __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
335 __ B(ge, &return_not_equal);
337 // Internalized strings are unique, so they can only be equal if they are the
338 // same object. We have already tested that case, so if left and right are
339 // both internalized strings, they cannot be equal.
340 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
341 __ Orr(scratch, left_type, right_type);
342 __ TestAndBranchIfAllClear(
343 scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
347 // See call site for description.
348 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
355 DCHECK(!AreAliased(left_d, right_d));
356 DCHECK((left.is(x0) && right.is(x1)) ||
357 (right.is(x0) && left.is(x1)));
358 Register result = x0;
360 Label right_is_smi, done;
361 __ JumpIfSmi(right, &right_is_smi);
363 // Left is the smi. Check whether right is a heap number.
365 // If right is not a number and left is a smi, then strict equality cannot
366 // succeed. Return non-equal.
367 Label is_heap_number;
368 __ JumpIfHeapNumber(right, &is_heap_number);
369 // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
370 if (!right.is(result)) {
371 __ Mov(result, NOT_EQUAL);
374 __ Bind(&is_heap_number);
376 // Smi compared non-strictly with a non-smi, non-heap-number. Call the runtime.
378 __ JumpIfNotHeapNumber(right, slow);
381 // Left is the smi. Right is a heap number. Load right value into right_d, and
382 // convert left smi into double in left_d.
383 __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
384 __ SmiUntagToDouble(left_d, left);
387 __ Bind(&right_is_smi);
388 // Right is a smi. Check whether the non-smi left is a heap number.
390 // If left is not a number and right is a smi then strict equality cannot
391 // succeed. Return non-equal.
392 Label is_heap_number;
393 __ JumpIfHeapNumber(left, &is_heap_number);
394 // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
395 if (!left.is(result)) {
396 __ Mov(result, NOT_EQUAL);
399 __ Bind(&is_heap_number);
401 // Smi compared non-strictly with a non-smi, non-heap-number. Call the runtime.
403 __ JumpIfNotHeapNumber(left, slow);
406 // Right is the smi. Left is a heap number. Load left value into left_d, and
407 // convert right smi into double in right_d.
408 __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
409 __ SmiUntagToDouble(right_d, right);
411 // Fall through to both_loaded_as_doubles.
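// Illustrative sketch (not part of the stub): the smi/non-smi decision tree
// above in plain C++, assuming a tagged value is either a small integer (smi)
// or a heap object that may or may not be a heap number. The struct and
// function are hypothetical stand-ins; only the control flow mirrors the
// stub. Returning false corresponds to the stub either producing NOT_EQUAL
// (strict) or branching to the slow path (non-strict).
struct SketchTagged {
  bool is_smi;
  int smi_value;             // Valid if is_smi.
  bool is_heap_number;       // Valid if !is_smi.
  double heap_number_value;  // Valid if is_heap_number.
};

static bool SketchLoadSmiAndHeapNumberAsDoubles(const SketchTagged& left,
                                                const SketchTagged& right,
                                                double* left_d,
                                                double* right_d) {
  // The caller guarantees exactly one operand is a smi; the other must be a
  // heap number for the doubles fast path to apply.
  const SketchTagged& non_smi = left.is_smi ? right : left;
  if (!non_smi.is_heap_number) return false;
  *left_d = left.is_smi ? static_cast<double>(left.smi_value)
                        : left.heap_number_value;
  *right_d = right.is_smi ? static_cast<double>(right.smi_value)
                          : right.heap_number_value;
  return true;
}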
416 // Fast negative check for internalized-to-internalized equality.
417 // See call site for description.
418 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
425 Label* possible_strings,
426 Label* not_both_strings) {
427 DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type));
428 Register result = x0;
431 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
432 // TODO(all): reexamine this branch sequence for optimisation wrt branch
434 __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
435 __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
436 __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
437 __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
439 // Both are internalized. We already checked that they weren't the same
440 // pointer, so they are not equal.
441 __ Mov(result, NOT_EQUAL);
444 __ Bind(&object_test);
446 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
448 // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
449 // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
450 __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
452 __ B(lt, not_both_strings);
454 // If both objects are undetectable, they are equal. Otherwise, they are not
455 // equal, since they are different objects and an object is not equal to
458 // Returning here, so we can corrupt right_type and left_type.
459 Register right_bitfield = right_type;
460 Register left_bitfield = left_type;
461 __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
462 __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
463 __ And(result, right_bitfield, left_bitfield);
464 __ And(result, result, 1 << Map::kIsUndetectable);
465 __ Eor(result, result, 1 << Map::kIsUndetectable);
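// Illustrative sketch (not part of the stub): the AND/AND/EOR trick above in
// plain C++. The mask parameter stands in for (1 << Map::kIsUndetectable);
// the returned value is 0 (EQUAL) exactly when both bit fields have the
// undetectable bit set, and non-zero (NOT_EQUAL) otherwise. Names are
// hypothetical.
static int SketchUndetectableEquality(unsigned left_bitfield,
                                      unsigned right_bitfield,
                                      unsigned undetectable_mask) {
  // Both undetectable: AND keeps the bit, EOR then clears it, giving 0.
  // Otherwise: AND clears the bit, EOR sets it, giving a non-zero result.
  unsigned result = left_bitfield & right_bitfield;
  result &= undetectable_mask;
  result ^= undetectable_mask;
  return static_cast<int>(result);
}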
470 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
471 CompareICState::State expected,
474 if (expected == CompareICState::SMI) {
475 __ JumpIfNotSmi(input, fail);
476 } else if (expected == CompareICState::NUMBER) {
477 __ JumpIfSmi(input, &ok);
478 __ JumpIfNotHeapNumber(input, fail);
480 // We could be strict about internalized/non-internalized here, but as long as
481 // hydrogen doesn't care, the stub doesn't have to care either.
486 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
489 Register result = x0;
490 Condition cond = GetCondition();
493 CompareICStub_CheckInputType(masm, lhs, left(), &miss);
494 CompareICStub_CheckInputType(masm, rhs, right(), &miss);
496 Label slow; // Call builtin.
497 Label not_smis, both_loaded_as_doubles;
498 Label not_two_smis, smi_done;
499 __ JumpIfEitherNotSmi(lhs, rhs, ¬_two_smis);
501 __ Sub(result, lhs, Operand::UntagSmi(rhs));
504 __ Bind(¬_two_smis);
506 // NOTICE! This code is only reached after a smi-fast-case check, so it is
507 // certain that at least one operand isn't a smi.
509 // Handle the case where the objects are identical. Either returns the answer
510 // or goes to slow. Only falls through if the objects were not identical.
511 EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
513 // If either is a smi (we know that at least one is not a smi), then they can
514 // only be strictly equal if the other is a HeapNumber.
515 __ JumpIfBothNotSmi(lhs, rhs, ¬_smis);
517 // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
519 // 1) Return the answer.
520 // 2) Branch to the slow case.
521 // 3) Fall through to both_loaded_as_doubles.
522 // In case 3, we have found out that we were dealing with a number-number
523 // comparison. The double values of the numbers have been loaded, right into
524 // rhs_d, left into lhs_d.
525 FPRegister rhs_d = d0;
526 FPRegister lhs_d = d1;
527 EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
529 __ Bind(&both_loaded_as_doubles);
530 // The arguments have been converted to doubles and stored in rhs_d and lhs_d.
533 __ Fcmp(lhs_d, rhs_d);
534 __ B(vs, &nan); // Overflow flag set if either is NaN.
535 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
536 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
537 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
541 // Left and/or right is a NaN. Load the result register with whatever makes
542 // the comparison fail, since comparisons with NaN always fail (except ne,
543 // which is filtered out at a higher level.)
545 if ((cond == lt) || (cond == le)) {
546 __ Mov(result, GREATER);
548 __ Mov(result, LESS);
553 // At this point we know we are dealing with two different objects, and
554 // neither of them is a smi. The objects are in rhs_ and lhs_.
556 // Load the maps and types of the objects.
557 Register rhs_map = x10;
558 Register rhs_type = x11;
559 Register lhs_map = x12;
560 Register lhs_type = x13;
561 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
562 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
563 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
564 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
567 // This emits a non-equal return sequence for some object types, or falls
568 // through if it was not lucky.
569 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
572 Label check_for_internalized_strings;
573 Label flat_string_check;
574 // Check for heap number comparison. Branch to earlier double comparison code
575 // if they are heap numbers; otherwise, branch to the internalized string check.
576 __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
577 __ B(ne, &check_for_internalized_strings);
578 __ Cmp(lhs_map, rhs_map);
580 // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to the flat string check.
582 __ B(ne, &flat_string_check);
584 // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double comparison code.
586 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
587 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
588 __ B(&both_loaded_as_doubles);
590 __ Bind(&check_for_internalized_strings);
591 // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
592 // of internalized strings.
593 if ((cond == eq) && !strict()) {
594 // Returns an answer for two internalized strings or two detectable objects.
595 // Otherwise branches to the string case or not both strings case.
596 EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
598 &flat_string_check, &slow);
601 // Check for both being sequential one-byte strings,
602 // and inline if that is the case.
603 __ Bind(&flat_string_check);
604 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x14,
607 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
610 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
613 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
617 // Never fall through to here.
618 if (FLAG_debug_code) {
625 // Figure out which native to call and set up the arguments.
626 Builtins::JavaScript native;
628 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
630 native = Builtins::COMPARE;
631 int ncr; // NaN compare result
632 if ((cond == lt) || (cond == le)) {
635 DCHECK((cond == gt) || (cond == ge)); // remaining cases
638 __ Mov(x10, Smi::FromInt(ncr));
642 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
643 // tagged as a small integer.
644 __ InvokeBuiltin(native, JUMP_FUNCTION);
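// Illustrative sketch (not part of the stub): how the -1/0/1 result returned
// by the native, together with the "NaN compare result" (ncr) chosen above,
// makes every ordering comparison on NaN evaluate to false. Plain C++ with
// hypothetical names; LESS/EQUAL/GREATER mirror the stub's constants.
static bool SketchOrderingFromCompareResult(double lhs, double rhs,
                                            bool cond_is_lt_or_le,
                                            bool include_equal) {
  const int kLess = -1, kEqual = 0, kGreater = 1;
  // ncr is picked so a NaN operand produces a failing result for the
  // requested condition: GREATER for < and <=, LESS for > and >=.
  const int ncr = cond_is_lt_or_le ? kGreater : kLess;
  int result;
  if (lhs != lhs || rhs != rhs) {
    result = ncr;  // What the comparison returns for NaN operands.
  } else if (lhs < rhs) {
    result = kLess;
  } else if (lhs > rhs) {
    result = kGreater;
  } else {
    result = kEqual;
  }
  if (cond_is_lt_or_le) {
    return include_equal ? (result <= 0) : (result < 0);
  }
  return include_equal ? (result >= 0) : (result > 0);
}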
651 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
652 CPURegList saved_regs = kCallerSaved;
653 CPURegList saved_fp_regs = kCallerSavedFP;
655 // We don't allow a GC during a store buffer overflow, so there is no need to
656 // store the registers in any particular way, but we do have to store and restore them.
659 // We don't care if MacroAssembler scratch registers are corrupted.
660 saved_regs.Remove(*(masm->TmpList()));
661 saved_fp_regs.Remove(*(masm->FPTmpList()));
663 __ PushCPURegList(saved_regs);
664 if (save_doubles()) {
665 __ PushCPURegList(saved_fp_regs);
668 AllowExternalCallThatCantCauseGC scope(masm);
669 __ Mov(x0, ExternalReference::isolate_address(isolate()));
671 __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);
673 if (save_doubles()) {
674 __ PopCPURegList(saved_fp_regs);
676 __ PopCPURegList(saved_regs);
681 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
683 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
685 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
690 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
691 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
692 UseScratchRegisterScope temps(masm);
693 Register saved_lr = temps.UnsafeAcquire(to_be_pushed_lr());
694 Register return_address = temps.AcquireX();
695 __ Mov(return_address, lr);
696 // Restore lr with the value it had before the call to this stub (the value
697 // which must be pushed).
698 __ Mov(lr, saved_lr);
699 __ PushSafepointRegisters();
700 __ Ret(return_address);
704 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
705 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
706 UseScratchRegisterScope temps(masm);
707 Register return_address = temps.AcquireX();
708 // Preserve the return address (lr will be clobbered by the pop).
709 __ Mov(return_address, lr);
710 __ PopSafepointRegisters();
711 __ Ret(return_address);
715 void MathPowStub::Generate(MacroAssembler* masm) {
717 // jssp[0]: Exponent (as a tagged value).
718 // jssp[1]: Base (as a tagged value).
720 // The (tagged) result will be returned in x0, as a heap number.
722 Register result_tagged = x0;
723 Register base_tagged = x10;
724 Register exponent_tagged = MathPowTaggedDescriptor::exponent();
725 DCHECK(exponent_tagged.is(x11));
726 Register exponent_integer = MathPowIntegerDescriptor::exponent();
727 DCHECK(exponent_integer.is(x12));
728 Register scratch1 = x14;
729 Register scratch0 = x15;
730 Register saved_lr = x19;
731 FPRegister result_double = d0;
732 FPRegister base_double = d0;
733 FPRegister exponent_double = d1;
734 FPRegister base_double_copy = d2;
735 FPRegister scratch1_double = d6;
736 FPRegister scratch0_double = d7;
738 // A fast-path for integer exponents.
739 Label exponent_is_smi, exponent_is_integer;
740 // Bail out to runtime.
Label call_runtime;
742 // Allocate a heap number for the result, and return it.
Label done;
745 // Unpack the inputs.
746 if (exponent_type() == ON_STACK) {
748 Label unpack_exponent;
750 __ Pop(exponent_tagged, base_tagged);
752 __ JumpIfSmi(base_tagged, &base_is_smi);
753 __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
754 // base_tagged is a heap number, so load its double value.
755 __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
756 __ B(&unpack_exponent);
757 __ Bind(&base_is_smi);
758 // base_tagged is a SMI, so untag it and convert it to a double.
759 __ SmiUntagToDouble(base_double, base_tagged);
761 __ Bind(&unpack_exponent);
762 // x10 base_tagged The tagged base (input).
763 // x11 exponent_tagged The tagged exponent (input).
764 // d0 base_double The base as a double.
765 __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
766 __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
767 // exponent_tagged is a heap number, so load its double value.
768 __ Ldr(exponent_double,
769 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
770 } else if (exponent_type() == TAGGED) {
771 __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
772 __ Ldr(exponent_double,
773 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
776 // Handle double (heap number) exponents.
777 if (exponent_type() != INTEGER) {
778 // Detect integer exponents stored as doubles and handle those in the
779 // integer fast-path.
780 __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
781 scratch0_double, &exponent_is_integer);
783 if (exponent_type() == ON_STACK) {
784 FPRegister half_double = d3;
785 FPRegister minus_half_double = d4;
786 // Detect square root case. Crankshaft detects constant +/-0.5 at compile
787 // time and uses DoMathPowHalf instead. We then skip this check for
788 // non-constant cases of +/-0.5 as these hardly occur.
790 __ Fmov(minus_half_double, -0.5);
791 __ Fmov(half_double, 0.5);
792 __ Fcmp(minus_half_double, exponent_double);
793 __ Fccmp(half_double, exponent_double, NZFlag, ne);
794 // Condition flags at this point:
795 // 0.5: nZCv // Identified by eq && pl
796 // -0.5: NZcv // Identified by eq && mi
797 // other: ?z?? // Identified by ne
798 __ B(ne, &call_runtime);
800 // The exponent is 0.5 or -0.5.
802 // Given that exponent is known to be either 0.5 or -0.5, the following
803 // special cases could apply (according to ECMA-262 15.8.2.13):
805 // base.isNaN(): The result is NaN.
806 // (base == +INFINITY) || (base == -INFINITY)
807 // exponent == 0.5: The result is +INFINITY.
808 // exponent == -0.5: The result is +0.
809 // (base == +0) || (base == -0)
810 // exponent == 0.5: The result is +0.
811 // exponent == -0.5: The result is +INFINITY.
812 // (base < 0) && base.isFinite(): The result is NaN.
814 // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
815 // where base is -INFINITY or -0.
817 // Add +0 to base. This has no effect other than turning -0 into +0.
818 __ Fadd(base_double, base_double, fp_zero);
819 // The operation -0+0 results in +0 in all cases except where the
820 // FPCR rounding mode is 'round towards minus infinity' (RM). The
821 // ARM64 simulator does not currently simulate FPCR (where the rounding
822 // mode is set), so test the operation with some debug code.
823 if (masm->emit_debug_code()) {
824 UseScratchRegisterScope temps(masm);
825 Register temp = temps.AcquireX();
826 __ Fneg(scratch0_double, fp_zero);
827 // Verify that we correctly generated +0.0 and -0.0.
828 // bits(+0.0) = 0x0000000000000000
829 // bits(-0.0) = 0x8000000000000000
830 __ Fmov(temp, fp_zero);
831 __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
832 __ Fmov(temp, scratch0_double);
833 __ Eor(temp, temp, kDSignMask);
834 __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
835 // Check that -0.0 + 0.0 == +0.0.
836 __ Fadd(scratch0_double, scratch0_double, fp_zero);
837 __ Fmov(temp, scratch0_double);
838 __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
841 // If base is -INFINITY, make it +INFINITY.
842 // * Calculate base - base: All infinities will become NaNs since both
843 // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
844 // * If the result is NaN, calculate abs(base).
845 __ Fsub(scratch0_double, base_double, base_double);
846 __ Fcmp(scratch0_double, 0.0);
847 __ Fabs(scratch1_double, base_double);
848 __ Fcsel(base_double, scratch1_double, base_double, vs);
850 // Calculate the square root of base.
851 __ Fsqrt(result_double, base_double);
852 __ Fcmp(exponent_double, 0.0);
853 __ B(ge, &done); // Finish now for exponents of 0.5.
854 // Find the inverse for exponents of -0.5.
855 __ Fmov(scratch0_double, 1.0);
856 __ Fdiv(result_double, scratch0_double, result_double);
861 AllowExternalCallThatCantCauseGC scope(masm);
862 __ Mov(saved_lr, lr);
864 ExternalReference::power_double_double_function(isolate()),
866 __ Mov(lr, saved_lr);
870 // Handle SMI exponents.
871 __ Bind(&exponent_is_smi);
872 // x10 base_tagged The tagged base (input).
873 // x11 exponent_tagged The tagged exponent (input).
874 // d0 base_double The base as a double.
875 __ SmiUntag(exponent_integer, exponent_tagged);
878 __ Bind(&exponent_is_integer);
879 // x10 base_tagged The tagged base (input).
880 // x11 exponent_tagged The tagged exponent (input).
881 // x12 exponent_integer The exponent as an integer.
882 // d0 base_double The base as a double.
884 // Find abs(exponent). For negative exponents, we can find the inverse later.
885 Register exponent_abs = x13;
886 __ Cmp(exponent_integer, 0);
887 __ Cneg(exponent_abs, exponent_integer, mi);
888 // x13 exponent_abs The value of abs(exponent_integer).
890 // Repeatedly multiply to calculate the power:
892 //   result = 1.0; multiplier = base;
893 //   for each bit n of exponent_abs, low to high: if set, result *= multiplier;
897 //   then multiplier *= multiplier; stop once the remaining bits are all zero.
901 Label power_loop, power_loop_entry, power_loop_exit;
902 __ Fmov(scratch1_double, base_double);
903 __ Fmov(base_double_copy, base_double);
904 __ Fmov(result_double, 1.0);
905 __ B(&power_loop_entry);
907 __ Bind(&power_loop);
908 __ Fmul(scratch1_double, scratch1_double, scratch1_double);
909 __ Lsr(exponent_abs, exponent_abs, 1);
910 __ Cbz(exponent_abs, &power_loop_exit);
912 __ Bind(&power_loop_entry);
913 __ Tbz(exponent_abs, 0, &power_loop);
914 __ Fmul(result_double, result_double, scratch1_double);
917 __ Bind(&power_loop_exit);
919 // If the exponent was positive, result_double holds the result.
920 __ Tbz(exponent_integer, kXSignBit, &done);
922 // The exponent was negative, so find the inverse.
923 __ Fmov(scratch0_double, 1.0);
924 __ Fdiv(result_double, scratch0_double, result_double);
925 // ECMA-262 only requires Math.pow to return an 'implementation-dependent
926 // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
927 // to calculate the subnormal value 2^-1074. This method of calculating
928 // negative powers doesn't work because 2^1074 overflows to infinity. To
929 // catch this corner-case, we bail out if the result was 0. (This can only
930 // occur if the divisor is infinity or the base is zero.)
931 __ Fcmp(result_double, 0.0);
934 if (exponent_type() == ON_STACK) {
935 // Bail out to runtime code.
936 __ Bind(&call_runtime);
937 // Put the arguments back on the stack.
938 __ Push(base_tagged, exponent_tagged);
939 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
943 __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
945 DCHECK(result_tagged.is(x0));
947 __ IncrementCounter(isolate()->counters()->math_pow(), 1, scratch0, scratch1);
950 AllowExternalCallThatCantCauseGC scope(masm);
951 __ Mov(saved_lr, lr);
952 __ Fmov(base_double, base_double_copy);
953 __ Scvtf(exponent_double, exponent_integer);
955 ExternalReference::power_double_double_function(isolate()),
957 __ Mov(lr, saved_lr);
960 __ IncrementCounter(isolate()->counters()->math_pow(), 1, scratch0, scratch1);
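// Illustrative sketch (not part of the stub): the integer fast path above
// (square-and-multiply over abs(exponent), then invert for a negative
// exponent) in plain C++. As the comments above note, inverting can underflow
// to 0 for subnormal results such as 2^-1074, which is why the stub falls
// back to power_double_double in that case; the sketch just reports it. Names
// are hypothetical.
static double SketchPowIntegerExponent(double base, int exponent,
                                       bool* underflowed_to_zero) {
  double result = 1.0;
  double multiplier = base;
  long long abs_exponent = exponent;
  if (abs_exponent < 0) abs_exponent = -abs_exponent;
  while (abs_exponent != 0) {
    if (abs_exponent & 1) result *= multiplier;  // Bit set: multiply in.
    multiplier *= multiplier;                    // Square for the next bit.
    abs_exponent >>= 1;
  }
  *underflowed_to_zero = false;
  if (exponent < 0) {
    result = 1.0 / result;
    // A zero here means the true result is subnormal (or the base was 0 or
    // infinite); the stub bails out to the C library in this case.
    *underflowed_to_zero = (result == 0.0);
  }
  return result;
}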
966 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
967 // It is important that the following stubs are generated in this order
968 // because pregenerated stubs can only call other pregenerated stubs.
969 // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses CEntryStub.
971 CEntryStub::GenerateAheadOfTime(isolate);
972 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
973 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
974 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
975 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
976 BinaryOpICStub::GenerateAheadOfTime(isolate);
977 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
978 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
979 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
983 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
984 StoreRegistersStateStub stub(isolate);
989 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
990 RestoreRegistersStateStub stub(isolate);
995 void CodeStub::GenerateFPStubs(Isolate* isolate) {
996 // Floating-point code doesn't get special handling in ARM64, so there's
997 // nothing to do here.
1002 bool CEntryStub::NeedsImmovableCode() {
1003 // CEntryStub stores the return address on the stack before calling into
1004 // C++ code. In some cases, the VM accesses this address, but it is not used
1005 // when the C++ code returns to the stub because LR holds the return address
1006 // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
1007 // returning to dead code.
1008 // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
1009 // find any comment to confirm this, and I don't hit any crashes whatever
1010 // this function returns. The analysis should be properly confirmed.
1015 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1016 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1018 CEntryStub stub_fp(isolate, 1, kSaveFPRegs);
1023 void CEntryStub::Generate(MacroAssembler* masm) {
1024 // The Abort mechanism relies on CallRuntime, which in turn relies on
1025 // CEntryStub, so until this stub has been generated, we have to use a
1026 // fall-back Abort mechanism.
1028 // Note that this stub must be generated before any use of Abort.
1029 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
1031 ASM_LOCATION("CEntryStub::Generate entry");
1032 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1034 // Register parameters:
1035 // x0: argc (including receiver, untagged)
1038 // The stack on entry holds the arguments and the receiver, with the receiver
1039 // at the highest address:
1041 // jssp[argc-1]: receiver
1042 // jssp[argc-2]: arg[argc-2]
1047 // The arguments are in reverse order, so that arg[argc-2] is actually the
1048 // first argument to the target function and arg[0] is the last.
1049 DCHECK(jssp.Is(__ StackPointer()));
1050 const Register& argc_input = x0;
1051 const Register& target_input = x1;
1053 // Calculate argv, argc and the target address, and store them in
1054 // callee-saved registers so we can retry the call without having to reload them.
1056 // TODO(jbramley): If the first call attempt succeeds in the common case (as
1057 // it should), then we might be better off putting these parameters directly
1058 // into their argument registers, rather than using callee-saved registers and
1059 // preserving them on the stack.
1060 const Register& argv = x21;
1061 const Register& argc = x22;
1062 const Register& target = x23;
1064 // Derive argv from the stack pointer so that it points to the first argument
1065 // (arg[argc-2]), or just below the receiver in case there are no arguments.
1066 // - Adjust for the arg[] array.
1067 Register temp_argv = x11;
1068 __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
1069 // - Adjust for the receiver.
1070 __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
1072 // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved registers.
1074 FrameScope scope(masm, StackFrame::MANUAL);
1075 __ EnterExitFrame(save_doubles(), x10, 3);
1076 DCHECK(csp.Is(__ StackPointer()));
1078 // Poke callee-saved registers into reserved space.
1079 __ Poke(argv, 1 * kPointerSize);
1080 __ Poke(argc, 2 * kPointerSize);
1081 __ Poke(target, 3 * kPointerSize);
1083 // We normally only keep tagged values in callee-saved registers, as they
1084 // could be pushed onto the stack by called stubs and functions, and on the
1085 // stack they can confuse the GC. However, we're only calling C functions
1086 // which can push arbitrary data onto the stack anyway, and so the GC won't
1087 // examine that part of the stack.
1088 __ Mov(argc, argc_input);
1089 __ Mov(target, target_input);
1090 __ Mov(argv, temp_argv);
1094 // x23 : call target
1096 // The stack (on entry) holds the arguments and the receiver, with the
1097 // receiver at the highest address:
1099 // argv[8]: receiver
1100 // argv -> argv[0]: arg[argc-2]
1102 // argv[...]: arg[1]
1103 // argv[...]: arg[0]
1105 // Immediately below (after) this is the exit frame, as constructed by EnterExitFrame:
1107 // fp[8]: CallerPC (lr)
1108 // fp -> fp[0]: CallerFP (old fp)
1109 // fp[-8]: Space reserved for SPOffset.
1110 // fp[-16]: CodeObject()
1111 // csp[...]: Saved doubles, if saved_doubles is true.
1112 // csp[32]: Alignment padding, if necessary.
1113 // csp[24]: Preserved x23 (used for target).
1114 // csp[16]: Preserved x22 (used for argc).
1115 // csp[8]: Preserved x21 (used for argv).
1116 // csp -> csp[0]: Space reserved for the return address.
1118 // After a successful call, the exit frame, preserved registers (x21-x23) and
1119 // the arguments (including the receiver) are dropped or popped as
1120 // appropriate. The stub then returns.
1122 // After an unsuccessful call, the exit frame and suchlike are left
1123 // untouched, and the stub throws an exception by jumping to the
1124 // exception_returned label.
1126 DCHECK(csp.Is(__ StackPointer()));
1128 // Prepare AAPCS64 arguments to pass to the builtin.
1131 __ Mov(x2, ExternalReference::isolate_address(isolate()));
1133 Label return_location;
1134 __ Adr(x12, &return_location);
1137 if (__ emit_debug_code()) {
1138 // Verify that the slot below fp[kSPOffset]-8 points to the return location
1139 // (currently in x12).
1140 UseScratchRegisterScope temps(masm);
1141 Register temp = temps.AcquireX();
1142 __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
1143 __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
1145 __ Check(eq, kReturnAddressNotFoundInFrame);
1148 // Call the builtin.
1150 __ Bind(&return_location);
1152 // x0 result The return code from the call.
1156 const Register& result = x0;
1158 // Check result for exception sentinel.
1159 Label exception_returned;
1160 __ CompareRoot(result, Heap::kExceptionRootIndex);
1161 __ B(eq, &exception_returned);
1163 // The call succeeded, so unwind the stack and return.
1165 // Restore callee-saved registers x21-x23.
1168 __ Peek(argv, 1 * kPointerSize);
1169 __ Peek(argc, 2 * kPointerSize);
1170 __ Peek(target, 3 * kPointerSize);
1172 __ LeaveExitFrame(save_doubles(), x10, true);
1173 DCHECK(jssp.Is(__ StackPointer()));
1174 // Pop or drop the remaining stack slots and return from the stub.
1175 // jssp[24]: Arguments array (of size argc), including receiver.
1176 // jssp[16]: Preserved x23 (used for target).
1177 // jssp[8]: Preserved x22 (used for argc).
1178 // jssp[0]: Preserved x21 (used for argv).
1180 __ AssertFPCRState();
1183 // The stack pointer is still csp if we aren't returning, and the frame
1184 // hasn't changed (except for the return address).
1185 __ SetStackPointer(csp);
1187 // Handling of exception.
1188 __ Bind(&exception_returned);
1190 // Retrieve the pending exception.
1191 ExternalReference pending_exception_address(
1192 Isolate::kPendingExceptionAddress, isolate());
1193 const Register& exception = result;
1194 const Register& exception_address = x11;
1195 __ Mov(exception_address, Operand(pending_exception_address));
1196 __ Ldr(exception, MemOperand(exception_address));
1198 // Clear the pending exception.
1199 __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
1200 __ Str(x10, MemOperand(exception_address));
1202 // x0 exception The exception descriptor.
1207 // Special handling of termination exceptions, which are uncatchable by JavaScript code.
1209 Label throw_termination_exception;
1210 __ Cmp(exception, Operand(isolate()->factory()->termination_exception()));
1211 __ B(eq, &throw_termination_exception);
1213 // We didn't execute a return case, so the stack frame hasn't been updated
1214 // (except for the return address slot). However, we don't need to initialize
1215 // jssp because the throw method will immediately overwrite it when it
1216 // unwinds the stack.
1217 __ SetStackPointer(jssp);
1219 ASM_LOCATION("Throw normal");
1223 __ Throw(x0, x10, x11, x12, x13);
1225 __ Bind(&throw_termination_exception);
1226 ASM_LOCATION("Throw termination");
1230 __ ThrowUncatchable(x0, x10, x11, x12, x13);
1234 // This is the entry point from C++. 5 arguments are provided in x0-x4.
1235 // See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
1244 void JSEntryStub::Generate(MacroAssembler* masm) {
1245 DCHECK(jssp.Is(__ StackPointer()));
1246 Register code_entry = x0;
1248 // Enable instruction instrumentation. This only works on the simulator, and
1249 // will have no effect on the model or real hardware.
1250 __ EnableInstrumentation();
1252 Label invoke, handler_entry, exit;
1254 // Push callee-saved registers and synchronize the system stack pointer (csp)
1255 // and the JavaScript stack pointer (jssp).
1257 // We must not write to jssp until after the PushCalleeSavedRegisters()
1258 // call, since jssp is itself a callee-saved register.
1259 __ SetStackPointer(csp);
1260 __ PushCalleeSavedRegisters();
1262 __ SetStackPointer(jssp);
1264 // Configure the FPCR. We don't restore it, so this is technically not allowed
1265 // according to AAPCS64. However, we only set default-NaN mode and this will
1266 // be harmless for most C code. Also, it works for ARM.
1269 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1271 // Set up the reserved register for 0.0.
1272 __ Fmov(fp_zero, 0.0);
1274 // Build an entry frame (see layout below).
1275 int marker = type();
1276 int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
1277 __ Mov(x13, bad_frame_pointer);
1278 __ Mov(x12, Smi::FromInt(marker));
1279 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1280 __ Ldr(x10, MemOperand(x11));
1282 __ Push(x13, xzr, x12, x10);
1284 __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
1286 // Push the JS entry frame marker. Also set js_entry_sp if this is the
1287 // outermost JS call.
1288 Label non_outermost_js, done;
1289 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1290 __ Mov(x10, ExternalReference(js_entry_sp));
1291 __ Ldr(x11, MemOperand(x10));
1292 __ Cbnz(x11, &non_outermost_js);
1293 __ Str(fp, MemOperand(x10));
1294 __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1297 __ Bind(&non_outermost_js);
1298 // We spare one instruction by pushing xzr since the marker is 0.
1299 DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
1303 // The frame set up looks like this:
1304 // jssp[0] : JS entry frame marker.
1305 // jssp[1] : C entry FP.
1306 // jssp[2] : stack frame marker.
1307 // jssp[3] : stack frame marker.
1308 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1311 // Jump to a faked try block that does the invoke, with a faked catch
1312 // block that sets the pending exception.
1315 // Prevent the constant pool from being emitted between the record of the
1316 // handler_entry position and the first instruction of the sequence here.
1317 // There is no risk because Assembler::Emit() emits the instruction before
1318 // checking for constant pool emission, but we do not want to depend on that.
1321 Assembler::BlockPoolsScope block_pools(masm);
1322 __ bind(&handler_entry);
1323 handler_offset_ = handler_entry.pos();
1324 // Caught exception: Store result (exception) in the pending exception
1325 // field in the JSEnv and return a failure sentinel. Coming in here the
1326 // fp will be invalid because the PushTryHandler below sets it to 0 to
1327 // signal the existence of the JSEntry frame.
1328 __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1331 __ Str(code_entry, MemOperand(x10));
1332 __ LoadRoot(x0, Heap::kExceptionRootIndex);
1335 // Invoke: Link this frame into the handler chain. There's only one
1336 // handler block in this code object, so its index is 0.
1338 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1339 // If an exception not caught by another handler occurs, this handler
1340 // returns control to the code after the B(&invoke) above, which
1341 // restores all callee-saved registers (including cp and fp) to their
1342 // saved values before returning a failure to C.
1344 // Clear any pending exceptions.
1345 __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
1346 __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1348 __ Str(x10, MemOperand(x11));
1350 // Invoke the function by calling through the JS entry trampoline builtin.
1351 // Notice that we cannot store a reference to the trampoline code directly in
1352 // this stub, because runtime stubs are not traversed when doing GC.
1354 // Expected registers by Builtins::JSEntryTrampoline
1360 ExternalReference entry(type() == StackFrame::ENTRY_CONSTRUCT
1361 ? Builtins::kJSConstructEntryTrampoline
1362 : Builtins::kJSEntryTrampoline,
1366 // Call the JSEntryTrampoline.
1367 __ Ldr(x11, MemOperand(x10)); // Dereference the address.
1368 __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
1371 // Unlink this frame from the handler chain.
1376 // x0 holds the result.
1377 // The stack pointer points to the top of the entry frame pushed on entry from
1378 // C++ (at the beginning of this stub):
1379 // jssp[0] : JS entry frame marker.
1380 // jssp[1] : C entry FP.
1381 // jssp[2] : stack frame marker.
1382 // jssp[3] : stack frame marker.
1383 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1385 // Check if the current stack frame is marked as the outermost JS frame.
1386 Label non_outermost_js_2;
1388 __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1389 __ B(ne, &non_outermost_js_2);
1390 __ Mov(x11, ExternalReference(js_entry_sp));
1391 __ Str(xzr, MemOperand(x11));
1392 __ Bind(&non_outermost_js_2);
1394 // Restore the top frame descriptors from the stack.
1396 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1397 __ Str(x10, MemOperand(x11));
1399 // Reset the stack to the callee saved registers.
1400 __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
1401 // Restore the callee-saved registers and return.
1402 DCHECK(jssp.Is(__ StackPointer()));
1404 __ SetStackPointer(csp);
1405 __ PopCalleeSavedRegisters();
1406 // After this point, we must not modify jssp because it is a callee-saved
1407 // register which we have just restored.
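// Illustrative sketch (not part of the stub): the js_entry_sp bookkeeping in
// JSEntryStub::Generate, in plain C++. The stub records fp in js_entry_sp
// only when it is currently zero, pushes a marker saying whether this entry
// was the outermost one, and clears js_entry_sp on the way out only if that
// marker says so. The function names are hypothetical stand-ins; the markers
// correspond to StackFrame::OUTERMOST_JSENTRY_FRAME and
// StackFrame::INNER_JSENTRY_FRAME (the latter is 0, so the stub can push xzr).
static bool SketchEnterJS(void** js_entry_sp, void* current_fp) {
  // Returns true for the outermost entry, false for a nested entry.
  if (*js_entry_sp == 0) {
    *js_entry_sp = current_fp;
    return true;
  }
  return false;
}

static void SketchLeaveJS(void** js_entry_sp, bool was_outermost) {
  // Only the outermost entry clears js_entry_sp again.
  if (was_outermost) *js_entry_sp = 0;
}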
1412 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1414 Register receiver = LoadDescriptor::ReceiverRegister();
1416 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
1420 PropertyAccessCompiler::TailCallBuiltin(
1421 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1425 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
1426 // Return address is in lr.
1429 Register receiver = LoadDescriptor::ReceiverRegister();
1430 Register index = LoadDescriptor::NameRegister();
1431 Register result = x0;
1432 Register scratch = x3;
1433 DCHECK(!scratch.is(receiver) && !scratch.is(index));
1435 StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
1436 &miss, // When not a string.
1437 &miss, // When not a number.
1438 &miss, // When index out of range.
1439 STRING_INDEX_IS_ARRAY_INDEX,
1440 RECEIVER_IS_STRING);
1441 char_at_generator.GenerateFast(masm);
1444 StubRuntimeCallHelper call_helper;
1445 char_at_generator.GenerateSlow(masm, call_helper);
1448 PropertyAccessCompiler::TailCallBuiltin(
1449 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1453 void InstanceofStub::Generate(MacroAssembler* masm) {
1455 // jssp[0]: function.
1458 // Returns result in x0. Zero indicates instanceof, smi 1 indicates not instanceof.
1461 Register result = x0;
1462 Register function = right();
1463 Register object = left();
1464 Register scratch1 = x6;
1465 Register scratch2 = x7;
1466 Register res_true = x8;
1467 Register res_false = x9;
1468 // Only used if there was an inline map check site. (See
1469 // LCodeGen::DoInstanceOfKnownGlobal().)
1470 Register map_check_site = x4;
1471 // Delta for the instructions generated between the inline map check and the
1472 // instruction setting the result.
1473 const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
1475 Label not_js_object, slow;
1477 if (!HasArgsInRegisters()) {
1478 __ Pop(function, object);
1481 if (ReturnTrueFalseObject()) {
1482 __ LoadTrueFalseRoots(res_true, res_false);
1484 // This is counter-intuitive, but correct.
1485 __ Mov(res_true, Smi::FromInt(0));
1486 __ Mov(res_false, Smi::FromInt(1));
1489 // Check that the left hand side is a JS object and load its map as a side effect.
1492 __ JumpIfSmi(object, ¬_js_object);
1493 __ IsObjectJSObjectType(object, map, scratch2, ¬_js_object);
1495 // If there is a call site cache, don't look in the global cache, but do the
1496 // real lookup and update the call site cache.
1497 if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
1499 __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
1500 __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
1501 __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
1506 // Get the prototype of the function.
1507 Register prototype = x13;
1508 __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
1509 MacroAssembler::kMissOnBoundFunction);
1511 // Check that the function prototype is a JS object.
1512 __ JumpIfSmi(prototype, &slow);
1513 __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
1515 // Update the global instanceof or call site inlined cache with the current
1516 // map and function. The cached answer will be set when it is known below.
1517 if (HasCallSiteInlineCheck()) {
1518 // Patch the (relocated) inlined map check.
1519 __ GetRelocatedValueLocation(map_check_site, scratch1);
1520 // We have a cell, so need another level of dereferencing.
1521 __ Ldr(scratch1, MemOperand(scratch1));
1522 __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
1524 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1525 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1528 Label return_true, return_result;
1529 Register smi_value = scratch1;
1531 // Loop through the prototype chain looking for the function prototype.
1532 Register chain_map = x1;
1533 Register chain_prototype = x14;
1534 Register null_value = x15;
1536 __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
1537 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
1538 // Speculatively set a result.
1539 __ Mov(result, res_false);
1540 if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
1541 // Value to store in the cache cannot be an object.
1542 __ Mov(smi_value, Smi::FromInt(1));
1547 // If the chain prototype is the object prototype, return true.
1548 __ Cmp(chain_prototype, prototype);
1549 __ B(eq, &return_true);
1551 // If the chain prototype is null, we've reached the end of the chain, so return false.
1553 __ Cmp(chain_prototype, null_value);
1554 __ B(eq, &return_result);
1556 // Otherwise, load the next prototype in the chain, and loop.
1557 __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
1558 __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
1562 // Return sequence when no arguments are on the stack.
1563 // We cannot fall through to here.
1564 __ Bind(&return_true);
1565 __ Mov(result, res_true);
1566 if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
1567 // Value to store in the cache cannot be an object.
1568 __ Mov(smi_value, Smi::FromInt(0));
1570 __ Bind(&return_result);
1571 if (HasCallSiteInlineCheck()) {
1572 DCHECK(ReturnTrueFalseObject());
1573 __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
1574 __ GetRelocatedValueLocation(map_check_site, scratch2);
1575 __ Str(result, MemOperand(scratch2));
1577 Register cached_value = ReturnTrueFalseObject() ? smi_value : result;
1578 __ StoreRoot(cached_value, Heap::kInstanceofCacheAnswerRootIndex);
1582 Label object_not_null, object_not_null_or_smi;
1584 __ Bind(¬_js_object);
1585 Register object_type = x14;
1586 // x0 result result return register (uninit)
1587 // x10 function pointer to function
1588 // x11 object pointer to object
1589 // x14 object_type type of object (uninit)
1591 // Before null, smi and string checks, check that the rhs is a function.
1592 // For a non-function rhs, an exception must be thrown.
1593 __ JumpIfSmi(function, &slow);
1594 __ JumpIfNotObjectType(
1595 function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
1597 __ Mov(result, res_false);
1599 // Null is not instance of anything.
1600 __ Cmp(object, Operand(isolate()->factory()->null_value()));
1601 __ B(ne, &object_not_null);
1604 __ Bind(&object_not_null);
1605 // Smi values are not instances of anything.
1606 __ JumpIfNotSmi(object, &object_not_null_or_smi);
1609 __ Bind(&object_not_null_or_smi);
1610 // String values are not instances of anything.
1611 __ IsObjectJSStringType(object, scratch2, &slow);
1614 // Slow-case. Tail call builtin.
1617 FrameScope scope(masm, StackFrame::INTERNAL);
1618 // Arguments have either been passed into registers or have been previously
1619 // popped. We need to push them before calling builtin.
1620 __ Push(object, function);
1621 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1623 if (ReturnTrueFalseObject()) {
1624 // Reload true/false because they were clobbered in the builtin call.
1625 __ LoadTrueFalseRoots(res_true, res_false);
1627 __ Csel(result, res_true, res_false, eq);
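// Illustrative sketch (not part of the stub): the prototype-chain walk above
// in plain C++. The stub loads the function's prototype, then repeatedly
// follows object -> map -> prototype, returning true if it reaches the
// function's prototype and false if it reaches null first. The struct is a
// hypothetical stand-in for the real heap-object layout.
struct SketchHeapObject {
  const SketchHeapObject* prototype;  // What this object's map points at.
};

static bool SketchIsInstanceOf(const SketchHeapObject* first_prototype,
                               const SketchHeapObject* function_prototype) {
  // first_prototype is the prototype loaded from the object's map, i.e. the
  // start of the chain to walk.
  for (const SketchHeapObject* current = first_prototype; current != 0;
       current = current->prototype) {
    if (current == function_prototype) return true;
  }
  return false;
}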
1633 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1634 Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
1635 Register key = ArgumentsAccessReadDescriptor::index();
1636 DCHECK(arg_count.is(x0));
1639 // The displacement is the offset of the last parameter (if any) relative
1640 // to the frame pointer.
1641 static const int kDisplacement =
1642 StandardFrameConstants::kCallerSPOffset - kPointerSize;
1644 // Check that the key is a smi.
1646 __ JumpIfNotSmi(key, &slow);
1648 // Check if the calling frame is an arguments adaptor frame.
1649 Register local_fp = x11;
1650 Register caller_fp = x11;
1651 Register caller_ctx = x12;
1653 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1654 __ Ldr(caller_ctx, MemOperand(caller_fp,
1655 StandardFrameConstants::kContextOffset));
1656 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1657 __ Csel(local_fp, fp, caller_fp, ne);
1658 __ B(ne, &skip_adaptor);
1660 // Load the actual arguments limit found in the arguments adaptor frame.
1661 __ Ldr(arg_count, MemOperand(caller_fp,
1662 ArgumentsAdaptorFrameConstants::kLengthOffset));
1663 __ Bind(&skip_adaptor);
1665 // Check index against formal parameters count limit. Use unsigned comparison
1666 // to get negative check for free: branch if key < 0 or key >= arg_count.
1667 __ Cmp(key, arg_count);
1670 // Read the argument from the stack and return it.
1671 __ Sub(x10, arg_count, key);
1672 __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
1673 __ Ldr(x0, MemOperand(x10, kDisplacement));
1676 // Slow case: handle non-smi or out-of-bounds access to arguments by calling
1677 // the runtime system.
1680 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
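// Illustrative sketch (not part of the stub): the bounds check and element
// load above in plain C++. A single unsigned comparison covers both
// "key < 0" and "key >= arg_count", and the element is then read relative to
// the (possibly adaptor) frame at (arg_count - key) slots plus the fixed
// displacement of the last parameter. displacement_slots and the function
// name are hypothetical; the stub works in bytes via kDisplacement.
static bool SketchReadArgument(void* const* frame_base, int arg_count, int key,
                               int displacement_slots, void** out) {
  // Treating the signed key as unsigned makes negative keys compare as huge
  // values, so one branch rejects both out-of-range cases.
  if (static_cast<unsigned>(key) >= static_cast<unsigned>(arg_count)) {
    return false;  // Slow path: hand the access to the runtime.
  }
  *out = frame_base[(arg_count - key) + displacement_slots];
  return true;
}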
1684 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1685 // Stack layout on entry.
1686 // jssp[0]: number of parameters (tagged)
1687 // jssp[8]: address of receiver argument
1688 // jssp[16]: function
1690 // Check if the calling frame is an arguments adaptor frame.
1692 Register caller_fp = x10;
1693 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1694 // Load and untag the context.
1695 __ Ldr(w11, UntagSmiMemOperand(caller_fp,
1696 StandardFrameConstants::kContextOffset));
1697 __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
1700 // Patch the arguments.length and parameters pointer in the current frame.
1701 __ Ldr(x11, MemOperand(caller_fp,
1702 ArgumentsAdaptorFrameConstants::kLengthOffset));
1703 __ Poke(x11, 0 * kXRegSize);
1704 __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
1705 __ Add(x10, x10, StandardFrameConstants::kCallerSPOffset);
1706 __ Poke(x10, 1 * kXRegSize);
1709 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1713 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1714 // Stack layout on entry.
1715 // jssp[0]: number of parameters (tagged)
1716 // jssp[8]: address of receiver argument
1717 // jssp[16]: function
1719 // Returns pointer to result object in x0.
1721 // Note: arg_count_smi is an alias of param_count_smi.
1722 Register arg_count_smi = x3;
1723 Register param_count_smi = x3;
1724 Register param_count = x7;
1725 Register recv_arg = x14;
1726 Register function = x4;
1727 __ Pop(param_count_smi, recv_arg, function);
1728 __ SmiUntag(param_count, param_count_smi);
1730 // Check if the calling frame is an arguments adaptor frame.
1731 Register caller_fp = x11;
1732 Register caller_ctx = x12;
1734 Label adaptor_frame, try_allocate;
1735 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1736 __ Ldr(caller_ctx, MemOperand(caller_fp,
1737 StandardFrameConstants::kContextOffset));
1738 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1739 __ B(eq, &adaptor_frame);
1741 // No adaptor, parameter count = argument count.
1743 // x1 mapped_params number of mapped params, min(params, args) (uninit)
1744 // x2 arg_count number of function arguments (uninit)
1745 // x3 arg_count_smi number of function arguments (smi)
1746 // x4 function function pointer
1747 // x7 param_count number of function parameters
1748 // x11 caller_fp caller's frame pointer
1749 // x14 recv_arg pointer to receiver arguments
1751 Register arg_count = x2;
1752 __ Mov(arg_count, param_count);
1753 __ B(&try_allocate);
1755 // We have an adaptor frame. Patch the parameters pointer.
1756 __ Bind(&adaptor_frame);
1757 __ Ldr(arg_count_smi,
1758 MemOperand(caller_fp,
1759 ArgumentsAdaptorFrameConstants::kLengthOffset));
1760 __ SmiUntag(arg_count, arg_count_smi);
1761 __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
1762 __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
1764 // Compute the mapped parameter count = min(param_count, arg_count)
1765 Register mapped_params = x1;
1766 __ Cmp(param_count, arg_count);
1767 __ Csel(mapped_params, param_count, arg_count, lt);
1769 __ Bind(&try_allocate);
1771 // x0 alloc_obj pointer to allocated objects: param map, backing
1772 // store, arguments (uninit)
1773 // x1 mapped_params number of mapped parameters, min(params, args)
1774 // x2 arg_count number of function arguments
1775 // x3 arg_count_smi number of function arguments (smi)
1776 // x4 function function pointer
1777 // x7 param_count number of function parameters
1778 // x10 size size of objects to allocate (uninit)
1779 // x14 recv_arg pointer to receiver arguments
1781 // Compute the size of backing store, parameter map, and arguments object.
1782 // 1. Parameter map, which has two extra words containing context and backing
1784 const int kParameterMapHeaderSize =
1785 FixedArray::kHeaderSize + 2 * kPointerSize;
1787 // Calculate the parameter map size, assuming it exists.
1788 Register size = x10;
1789 __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
1790 __ Add(size, size, kParameterMapHeaderSize);
1792 // If there are no mapped parameters, set the running size total to zero.
1793 // Otherwise, use the parameter map size calculated earlier.
1794 __ Cmp(mapped_params, 0);
1795 __ CzeroX(size, eq);
1797 // 2. Add the size of the backing store and arguments object.
1798 __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
1800 FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize);
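// For example, with two mapped parameters and three arguments this requests
//   (2 * kPointerSize + kParameterMapHeaderSize)
//     + (3 * kPointerSize + FixedArray::kHeaderSize)
//     + Heap::kSloppyArgumentsObjectSize
// bytes, so the parameter map, the backing store and the arguments object
// all come out of a single allocation.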
1802 // Do the allocation of all three objects in one go. Assign this to x0, as it
1803 // will be returned to the caller.
1804 Register alloc_obj = x0;
1805 __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
1807 // Get the arguments boilerplate from the current (global) context.
1809 // x0 alloc_obj pointer to allocated objects (param map, backing
1810 // store, arguments)
1811 // x1 mapped_params number of mapped parameters, min(params, args)
1812 // x2 arg_count number of function arguments
1813 // x3 arg_count_smi number of function arguments (smi)
1814 // x4 function function pointer
1815 // x7 param_count number of function parameters
1816 // x11 sloppy_args_map offset to args (or aliased args) map (uninit)
1817 // x14 recv_arg pointer to receiver arguments
1819 Register global_object = x10;
1820 Register global_ctx = x10;
1821 Register sloppy_args_map = x11;
1822 Register aliased_args_map = x10;
1823 __ Ldr(global_object, GlobalObjectMemOperand());
1824 __ Ldr(global_ctx, FieldMemOperand(global_object,
1825 GlobalObject::kNativeContextOffset));
1827 __ Ldr(sloppy_args_map,
1828 ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
1829 __ Ldr(aliased_args_map,
1830 ContextMemOperand(global_ctx, Context::ALIASED_ARGUMENTS_MAP_INDEX));
1831 __ Cmp(mapped_params, 0);
1832 __ CmovX(sloppy_args_map, aliased_args_map, ne);
1834 // Copy the JS object part.
1835 __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
1836 __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
1837 __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
1838 __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
1840 // Set up the callee in-object property.
1841 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1842 const int kCalleeOffset = JSObject::kHeaderSize +
1843 Heap::kArgumentsCalleeIndex * kPointerSize;
1844 __ AssertNotSmi(function);
1845 __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
1847 // Use the length and set that as an in-object property.
1848 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1849 const int kLengthOffset = JSObject::kHeaderSize +
1850 Heap::kArgumentsLengthIndex * kPointerSize;
1851 __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
1853 // Set up the elements pointer in the allocated arguments object.
1854 // If we allocated a parameter map, "elements" will point there, otherwise
1855 // it will point to the backing store.
1857 // x0 alloc_obj pointer to allocated objects (param map, backing
1858 // store, arguments)
1859 // x1 mapped_params number of mapped parameters, min(params, args)
1860 // x2 arg_count number of function arguments
1861 // x3 arg_count_smi number of function arguments (smi)
1862 // x4 function function pointer
1863 // x5 elements pointer to parameter map or backing store (uninit)
1864 // x6 backing_store pointer to backing store (uninit)
1865 // x7 param_count number of function parameters
1866 // x14 recv_arg pointer to receiver arguments
1868 Register elements = x5;
1869 __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
1870 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
1872 // Initialize parameter map. If there are no mapped arguments, we're done.
1873 Label skip_parameter_map;
1874 __ Cmp(mapped_params, 0);
1875 // Set up backing store address, because it is needed later for filling in
1876 // the unmapped arguments.
1877 Register backing_store = x6;
1878 __ CmovX(backing_store, elements, eq);
1879 __ B(eq, &skip_parameter_map);
1881 __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
1882 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
1883 __ Add(x10, mapped_params, 2);
1885 __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
1886 __ Str(cp, FieldMemOperand(elements,
1887 FixedArray::kHeaderSize + 0 * kPointerSize));
1888 __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
1889 __ Add(x10, x10, kParameterMapHeaderSize);
1890 __ Str(x10, FieldMemOperand(elements,
1891 FixedArray::kHeaderSize + 1 * kPointerSize));
1893 // Copy the parameter slots and the holes in the arguments.
1894 // We need to fill in mapped_parameter_count slots. Then index the context,
1895 // where parameters are stored in reverse order, at:
1897 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
1899 // The mapped parameter thus needs to get indices:
1901 // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
1902 // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
1904 // We loop from right to left.
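// For example, with param_count == 2 and arg_count == 3, mapped_params is 2:
// the two map slots receive the smi-tagged context indices MIN_CONTEXT_SLOTS
// and MIN_CONTEXT_SLOTS + 1, the matching backing store slots are filled
// with the hole, and the third (extra) argument is copied verbatim by the
// arguments loop further down.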
1906 // x0 alloc_obj pointer to allocated objects (param map, backing
1907 // store, arguments)
1908 // x1 mapped_params number of mapped parameters, min(params, args)
1909 // x2 arg_count number of function arguments
1910 // x3 arg_count_smi number of function arguments (smi)
1911 // x4 function function pointer
1912 // x5 elements pointer to parameter map or backing store (uninit)
1913 // x6 backing_store pointer to backing store (uninit)
1914 // x7 param_count number of function parameters
1915 // x11 loop_count parameter loop counter (uninit)
1916 // x12 index parameter index (smi, uninit)
1917 // x13 the_hole hole value (uninit)
1918 // x14 recv_arg pointer to receiver arguments
1920 Register loop_count = x11;
1921 Register index = x12;
1922 Register the_hole = x13;
1923 Label parameters_loop, parameters_test;
1924 __ Mov(loop_count, mapped_params);
1925 __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
1926 __ Sub(index, index, mapped_params);
1928 __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
1929 __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
1930 __ Add(backing_store, backing_store, kParameterMapHeaderSize);
1932 __ B(&parameters_test);
1934 __ Bind(&parameters_loop);
1935 __ Sub(loop_count, loop_count, 1);
1936 __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
1937 __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
1938 __ Str(index, MemOperand(elements, x10));
1939 __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
1940 __ Str(the_hole, MemOperand(backing_store, x10));
1941 __ Add(index, index, Smi::FromInt(1));
1942 __ Bind(&parameters_test);
1943 __ Cbnz(loop_count, &parameters_loop);
1945 __ Bind(&skip_parameter_map);
1946 // Copy arguments header and remaining slots (if there are any).
1947 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
1948 __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
1949 __ Str(arg_count_smi, FieldMemOperand(backing_store,
1950 FixedArray::kLengthOffset));
1952 // x0 alloc_obj pointer to allocated objects (param map, backing
1953 // store, arguments)
1954 // x1 mapped_params number of mapped parameters, min(params, args)
1955 // x2 arg_count number of function arguments
1956 // x4 function function pointer
1957 // x3 arg_count_smi number of function arguments (smi)
1958 // x6 backing_store pointer to backing store (uninit)
1959 // x14 recv_arg pointer to receiver arguments
1961 Label arguments_loop, arguments_test;
1962 __ Mov(x10, mapped_params);
1963 __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
1964 __ B(&arguments_test);
1966 __ Bind(&arguments_loop);
1967 __ Sub(recv_arg, recv_arg, kPointerSize);
1968 __ Ldr(x11, MemOperand(recv_arg));
1969 __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
1970 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
1971 __ Add(x10, x10, 1);
1973 __ Bind(&arguments_test);
1974 __ Cmp(x10, arg_count);
1975 __ B(lt, &arguments_loop);
1979 // Do the runtime call to allocate the arguments object.
1981 __ Push(function, recv_arg, arg_count_smi);
1982 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1986 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1987 // Return address is in lr.
1990 Register receiver = LoadDescriptor::ReceiverRegister();
1991 Register key = LoadDescriptor::NameRegister();
1993 // Check that the key is an array index, that is, a Uint32.
1994 __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
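// kSmiTagMask rejects non-smi keys (e.g. heap numbers) and kSmiSignMask
// rejects negative smis, so this single test-and-branch lets through only
// non-negative array indices.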
1996 // Everything is fine, call runtime.
1997 __ Push(receiver, key);
1998 __ TailCallExternalReference(
1999 ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
2004 PropertyAccessCompiler::TailCallBuiltin(
2005 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
2009 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2010 // Stack layout on entry.
2011 // jssp[0]: number of parameters (tagged)
2012 // jssp[8]: address of receiver argument
2013 // jssp[16]: function
2015 // Returns pointer to result object in x0.
2017 // Get the stub arguments from the frame, and make an untagged copy of the
2019 Register param_count_smi = x1;
2020 Register params = x2;
2021 Register function = x3;
2022 Register param_count = x13;
2023 __ Pop(param_count_smi, params, function);
2024 __ SmiUntag(param_count, param_count_smi);
2026 // Test if arguments adaptor needed.
2027 Register caller_fp = x11;
2028 Register caller_ctx = x12;
2029 Label try_allocate, runtime;
2030 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2031 __ Ldr(caller_ctx, MemOperand(caller_fp,
2032 StandardFrameConstants::kContextOffset));
2033 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2034 __ B(ne, &try_allocate);
2036 // x1 param_count_smi number of parameters passed to function (smi)
2037 // x2 params pointer to parameters
2038 // x3 function function pointer
2039 // x11 caller_fp caller's frame pointer
2040 // x13 param_count number of parameters passed to function
2042 // Patch the argument length and parameters pointer.
2043 __ Ldr(param_count_smi,
2044 MemOperand(caller_fp,
2045 ArgumentsAdaptorFrameConstants::kLengthOffset));
2046 __ SmiUntag(param_count, param_count_smi);
2047 __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
2048 __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
2050 // Try the new space allocation. Start out with computing the size of the
2051 // arguments object and the elements array in words.
2052 Register size = x10;
2053 __ Bind(&try_allocate);
2054 __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
2055 __ Cmp(param_count, 0);
2056 __ CzeroX(size, eq);
2057 __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize);
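// Sizes are computed in words here (note SIZE_IN_WORDS below): for example,
// three arguments need 3 + FixedArray::kHeaderSize / kPointerSize words for
// the elements array plus Heap::kStrictArgumentsObjectSize / kPointerSize
// words for the object itself, while zero arguments skip the elements array
// entirely.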
2059 // Do the allocation of both objects in one go. Assign this to x0, as it will
2060 // be returned to the caller.
2061 Register alloc_obj = x0;
2062 __ Allocate(size, alloc_obj, x11, x12, &runtime,
2063 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2065 // Get the arguments boilerplate from the current (native) context.
2066 Register global_object = x10;
2067 Register global_ctx = x10;
2068 Register strict_args_map = x4;
2069 __ Ldr(global_object, GlobalObjectMemOperand());
2070 __ Ldr(global_ctx, FieldMemOperand(global_object,
2071 GlobalObject::kNativeContextOffset));
2072 __ Ldr(strict_args_map,
2073 ContextMemOperand(global_ctx, Context::STRICT_ARGUMENTS_MAP_INDEX));
2075 // x0 alloc_obj pointer to allocated objects: parameter array and
2077 // x1 param_count_smi number of parameters passed to function (smi)
2078 // x2 params pointer to parameters
2079 // x3 function function pointer
2080 // x4 strict_args_map offset to arguments map
2081 // x13 param_count number of parameters passed to function
2082 __ Str(strict_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
2083 __ LoadRoot(x5, Heap::kEmptyFixedArrayRootIndex);
2084 __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
2085 __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2087 // Set the smi-tagged length as an in-object property.
2088 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2089 const int kLengthOffset = JSObject::kHeaderSize +
2090 Heap::kArgumentsLengthIndex * kPointerSize;
2091 __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
2093 // If there are no actual arguments, we're done.
2095 __ Cbz(param_count, &done);
2097 // Set up the elements pointer in the allocated arguments object and
2098 // initialize the header in the elements fixed array.
2099 Register elements = x5;
2100 __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize);
2101 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2102 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
2103 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
2104 __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
2106 // x0 alloc_obj pointer to allocated objects: parameter array and
2108 // x1 param_count_smi number of parameters passed to function (smi)
2109 // x2 params pointer to parameters
2110 // x3 function function pointer
2111 // x4 array pointer to array slot (uninit)
2112 // x5 elements pointer to elements array of alloc_obj
2113 // x13 param_count number of parameters passed to function
2115 // Copy the fixed array slots.
2117 Register array = x4;
2118 // Set up pointer to first array slot.
2119 __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
2122 // Pre-decrement the parameters pointer by kPointerSize on each iteration.
2123 // Pre-decrement in order to skip receiver.
2124 __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
2125 // Post-increment elements by kPointerSize on each iteration.
2126 __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
2127 __ Sub(param_count, param_count, 1);
2128 __ Cbnz(param_count, &loop);
2130 // Return from stub.
2134 // Do the runtime call to allocate the arguments object.
2136 __ Push(function, params, param_count_smi);
2137 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
2141 void RegExpExecStub::Generate(MacroAssembler* masm) {
2142 #ifdef V8_INTERPRETED_REGEXP
2143 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2144 #else // V8_INTERPRETED_REGEXP
2146 // Stack frame on entry.
2147 // jssp[0]: last_match_info (expected JSArray)
2148 // jssp[8]: previous index
2149 // jssp[16]: subject string
2150 // jssp[24]: JSRegExp object
2153 // Use of registers for this function.
2155 // Variable registers:
2156 // x10-x13 used as scratch registers
2157 // w0 string_type type of subject string
2158 // x2 jsstring_length subject string length
2159 // x3 jsregexp_object JSRegExp object
2160 // w4 string_encoding Latin1 or UC16
2161 // w5 sliced_string_offset if the string is a SlicedString
2162 // offset to the underlying string
2163 // w6 string_representation groups attributes of the string:
2165 // - type of the string
2166 // - is a short external string
2167 Register string_type = w0;
2168 Register jsstring_length = x2;
2169 Register jsregexp_object = x3;
2170 Register string_encoding = w4;
2171 Register sliced_string_offset = w5;
2172 Register string_representation = w6;
2174 // These are in callee save registers and will be preserved by the call
2175 // to the native RegExp code, as this code is called using the normal
2176 // C calling convention. When calling directly from generated code the
2177 // native RegExp code will not do a GC and therefore the contents of
2178 // these registers are safe to use after the call.
2180 // x19 subject subject string
2181 // x20 regexp_data RegExp data (FixedArray)
2182 // x21 last_match_info_elements info relative to the last match
2184 // x22 code_object generated regexp code
2185 Register subject = x19;
2186 Register regexp_data = x20;
2187 Register last_match_info_elements = x21;
2188 Register code_object = x22;
2190 // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
2191 CPURegList used_callee_saved_registers(subject,
2193 last_match_info_elements,
2195 __ PushCPURegList(used_callee_saved_registers);
2202 // jssp[32]: last_match_info (JSArray)
2203 // jssp[40]: previous index
2204 // jssp[48]: subject string
2205 // jssp[56]: JSRegExp object
2207 const int kLastMatchInfoOffset = 4 * kPointerSize;
2208 const int kPreviousIndexOffset = 5 * kPointerSize;
2209 const int kSubjectOffset = 6 * kPointerSize;
2210 const int kJSRegExpOffset = 7 * kPointerSize;
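// These offsets account for the four callee-saved registers pushed above
// (4 * kPointerSize == 32 bytes), which is why the stub arguments now start
// at jssp[32] instead of jssp[0].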
2212 // Ensure that a RegExp stack is allocated.
2213 ExternalReference address_of_regexp_stack_memory_address =
2214 ExternalReference::address_of_regexp_stack_memory_address(isolate());
2215 ExternalReference address_of_regexp_stack_memory_size =
2216 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2217 __ Mov(x10, address_of_regexp_stack_memory_size);
2218 __ Ldr(x10, MemOperand(x10));
2219 __ Cbz(x10, &runtime);
2221 // Check that the first argument is a JSRegExp object.
2222 DCHECK(jssp.Is(__ StackPointer()));
2223 __ Peek(jsregexp_object, kJSRegExpOffset);
2224 __ JumpIfSmi(jsregexp_object, &runtime);
2225 __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
2227 // Check that the RegExp has been compiled (data contains a fixed array).
2228 __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
2229 if (FLAG_debug_code) {
2230 STATIC_ASSERT(kSmiTag == 0);
2231 __ Tst(regexp_data, kSmiTagMask);
2232 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2233 __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
2234 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2237 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2238 __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2239 __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
2242 // Check that the number of captures fit in the static offsets vector buffer.
2243 // We always have at least one capture for the whole match, plus additional
2244 // ones due to capturing parentheses. A capture takes 2 registers.
2245 // The number of capture registers then is (number_of_captures + 1) * 2.
2247 UntagSmiFieldMemOperand(regexp_data,
2248 JSRegExp::kIrregexpCaptureCountOffset));
2249 // Check (number_of_captures + 1) * 2 <= offsets vector size
2250 // number_of_captures * 2 <= offsets vector size - 2
2251 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2252 __ Add(x10, x10, x10);
2253 __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
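// For example, a regexp with 3 capturing groups needs (3 + 1) * 2 == 8
// offset slots; the comparison above checks the equivalent condition
// 3 * 2 <= kJSRegexpStaticOffsetsVectorSize - 2 without materializing the +1.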
2256 // Initialize offset for possibly sliced string.
2257 __ Mov(sliced_string_offset, 0);
2259 DCHECK(jssp.Is(__ StackPointer()));
2260 __ Peek(subject, kSubjectOffset);
2261 __ JumpIfSmi(subject, &runtime);
2263 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2264 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2266 __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
2268 // Handle subject string according to its encoding and representation:
2269 // (1) Sequential string? If yes, go to (5).
2270 // (2) Anything but sequential or cons? If yes, go to (6).
2271 // (3) Cons string. If the string is flat, replace subject with first string.
2272 // Otherwise bailout.
2273 // (4) Is subject external? If yes, go to (7).
2274 // (5) Sequential string. Load regexp code according to encoding.
2278 // Deferred code at the end of the stub:
2279 // (6) Not a long external string? If yes, go to (8).
2280 // (7) External string. Make it, offset-wise, look like a sequential string.
2282 // (8) Short external string or not a string? If yes, bail out to runtime.
2283 // (9) Sliced string. Replace subject with parent. Go to (4).
2285 Label check_underlying; // (4)
2286 Label seq_string; // (5)
2287 Label not_seq_nor_cons; // (6)
2288 Label external_string; // (7)
2289 Label not_long_external; // (8)
2291 // (1) Sequential string? If yes, go to (5).
2292 __ And(string_representation,
2295 kStringRepresentationMask |
2296 kShortExternalStringMask);
2297 // We depend on the fact that Strings of type
2298 // SeqString and not ShortExternalString are defined
2299 // by the following pattern:
2300 // string_type: 0XX0 XX00
2303 // | | is a SeqString
2304 // | is not a short external String
2306 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2307 STATIC_ASSERT(kShortExternalStringTag != 0);
2308 __ Cbz(string_representation, &seq_string); // Go to (5).
2310 // (2) Anything but sequential or cons? If yes, go to (6).
2311 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2312 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2313 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2314 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2315 __ Cmp(string_representation, kExternalStringTag);
2316 __ B(ge, &not_seq_nor_cons); // Go to (6).
2318 // (3) Cons string. Check that it's flat.
2319 __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
2320 __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
2321 // Replace subject with first string.
2322 __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2324 // (4) Is subject external? If yes, go to (7).
2325 __ Bind(&check_underlying);
2326 // Reload the string type.
2327 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2328 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2329 STATIC_ASSERT(kSeqStringTag == 0);
2330 // The underlying external string is never a short external string.
2331 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2332 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2333 __ TestAndBranchIfAnySet(string_type.X(),
2334 kStringRepresentationMask,
2335 &external_string); // Go to (7).
2337 // (5) Sequential string. Load regexp code according to encoding.
2338 __ Bind(&seq_string);
2340 // Check that the third argument is a positive smi less than the subject
2341 // string length. A negative value will be greater (unsigned comparison).
2342 DCHECK(jssp.Is(__ StackPointer()));
2343 __ Peek(x10, kPreviousIndexOffset);
2344 __ JumpIfNotSmi(x10, &runtime);
2345 __ Cmp(jsstring_length, x10);
2348 // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
2349 // before entering the exit frame.
2350 __ SmiUntag(x1, x10);
2352 // The third bit determines the string encoding in string_type.
2353 STATIC_ASSERT(kOneByteStringTag == 0x04);
2354 STATIC_ASSERT(kTwoByteStringTag == 0x00);
2355 STATIC_ASSERT(kStringEncodingMask == 0x04);
2357 // Find the code object based on the assumptions above.
2358 // kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent; add an offset
2359 // of kPointerSize to reach the latter.
2360 DCHECK_EQ(JSRegExp::kDataOneByteCodeOffset + kPointerSize,
2361 JSRegExp::kDataUC16CodeOffset);
2362 __ Mov(x10, kPointerSize);
2363 // We will need the encoding later: Latin1 = 0x04
2365 __ Ands(string_encoding, string_type, kStringEncodingMask);
2367 __ Add(x10, regexp_data, x10);
2368 __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
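// The net effect of the encoding test above is that x10 ends up as zero for
// a Latin1 subject and kPointerSize for a UC16 subject, so this single load
// at kDataOneByteCodeOffset fetches whichever code object matches the
// subject's encoding.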
2370 // (E) Carry on. String handling is done.
2372 // Check that the irregexp code has been generated for the actual string
2373 // encoding. If it has, the field contains a code object; otherwise it contains
2374 // a smi (code flushing support).
2375 __ JumpIfSmi(code_object, &runtime);
2377 // All checks done. Now push arguments for native regexp code.
2378 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
2382 // Isolates: note we add an additional parameter here (isolate pointer).
2383 __ EnterExitFrame(false, x10, 1);
2384 DCHECK(csp.Is(__ StackPointer()));
2386 // We have 9 arguments to pass to the regexp code, therefore we have to pass
2387 // one on the stack and the rest as registers.
2389 // Note that the placement of the argument on the stack isn't standard
2391 // csp[0]: Space for the return address placed by DirectCEntryStub.
2392 // csp[8]: Argument 9, the current isolate address.
2394 __ Mov(x10, ExternalReference::isolate_address(isolate()));
2395 __ Poke(x10, kPointerSize);
2397 Register length = w11;
2398 Register previous_index_in_bytes = w12;
2399 Register start = x13;
2401 // Load start of the subject string.
2402 __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
2403 // Load the length from the original subject string from the previous stack
2404 // frame. Therefore we have to use fp, which points exactly to two pointer
2405 // sizes below the previous sp. (Because creating a new stack frame pushes
2406 // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
2407 __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2408 __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
2410 // Handle UC16 encoding, two bytes make one character.
2411 // string_encoding: if Latin1: 0x04
2413 STATIC_ASSERT(kStringEncodingMask == 0x04);
2414 __ Ubfx(string_encoding, string_encoding, 2, 1);
2415 __ Eor(string_encoding, string_encoding, 1);
2416 // string_encoding: if Latin1: 0
2419 // Convert string positions from characters to bytes.
2420 // Previous index is in x1.
2421 __ Lsl(previous_index_in_bytes, w1, string_encoding);
2422 __ Lsl(length, length, string_encoding);
2423 __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
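// After the Ubfx/Eor above, string_encoding is 0 for Latin1 and 1 for UC16,
// so these shifts scale character counts to byte counts: a previous index of
// 5, for example, becomes 5 bytes for a one-byte string and 10 bytes for a
// two-byte string.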
2425 // Argument 1 (x0): Subject string.
2426 __ Mov(x0, subject);
2428 // Argument 2 (x1): Previous index, already there.
2430 // Argument 3 (x2): Get the start of input.
2431 // Start of input = start of string + previous index + substring offset
2434 __ Add(w10, previous_index_in_bytes, sliced_string_offset);
2435 __ Add(x2, start, Operand(w10, UXTW));
2438 // End of input = start of input + (length of input - previous index)
2439 __ Sub(w10, length, previous_index_in_bytes);
2440 __ Add(x3, x2, Operand(w10, UXTW));
2442 // Argument 5 (x4): static offsets vector buffer.
2443 __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
2445 // Argument 6 (x5): Set the number of capture registers to zero to force
2446 // global regexps to behave as non-global. This stub is not used for global
2450 // Argument 7 (x6): Start (high end) of backtracking stack memory area.
2451 __ Mov(x10, address_of_regexp_stack_memory_address);
2452 __ Ldr(x10, MemOperand(x10));
2453 __ Mov(x11, address_of_regexp_stack_memory_size);
2454 __ Ldr(x11, MemOperand(x11));
2455 __ Add(x6, x10, x11);
2457 // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
2460 // Locate the code entry and call it.
2461 __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
2462 DirectCEntryStub stub(isolate());
2463 stub.GenerateCall(masm, code_object);
2465 __ LeaveExitFrame(false, x10, true);
2467 // The generated regexp code returns an int32 in w0.
2468 Label failure, exception;
2469 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
2470 __ CompareAndBranch(w0,
2471 NativeRegExpMacroAssembler::EXCEPTION,
2474 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
2476 // Success: process the result from the native regexp code.
2477 Register number_of_capture_registers = x12;
2479 // Calculate number of capture registers (number_of_captures + 1) * 2
2480 // and store it in the last match info.
2482 UntagSmiFieldMemOperand(regexp_data,
2483 JSRegExp::kIrregexpCaptureCountOffset));
2484 __ Add(x10, x10, x10);
2485 __ Add(number_of_capture_registers, x10, 2);
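// For example, /(a)(b)c/ has two capturing groups, so x10 becomes 2 * 2 == 4
// and number_of_capture_registers becomes 6: one start/end pair for the full
// match plus one pair per group.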
2487 // Check that the fourth object is a JSArray object.
2488 DCHECK(jssp.Is(__ StackPointer()));
2489 __ Peek(x10, kLastMatchInfoOffset);
2490 __ JumpIfSmi(x10, &runtime);
2491 __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
2493 // Check that the JSArray is the fast case.
2494 __ Ldr(last_match_info_elements,
2495 FieldMemOperand(x10, JSArray::kElementsOffset));
2497 FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2498 __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
2500 // Check that the last match info has space for the capture registers and the
2501 // additional information (overhead).
2502 // (number_of_captures + 1) * 2 + overhead <= last match info size
2503 // (number_of_captures * 2) + 2 + overhead <= last match info size
2504 // number_of_capture_registers + overhead <= last match info size
2506 UntagSmiFieldMemOperand(last_match_info_elements,
2507 FixedArray::kLengthOffset));
2508 __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
2512 // Store the capture count.
2513 __ SmiTag(x10, number_of_capture_registers);
2515 FieldMemOperand(last_match_info_elements,
2516 RegExpImpl::kLastCaptureCountOffset));
2517 // Store last subject and last input.
2519 FieldMemOperand(last_match_info_elements,
2520 RegExpImpl::kLastSubjectOffset));
2521 // Use x10 as the subject string in order to only need
2522 // one RecordWriteStub.
2523 __ Mov(x10, subject);
2524 __ RecordWriteField(last_match_info_elements,
2525 RegExpImpl::kLastSubjectOffset,
2531 FieldMemOperand(last_match_info_elements,
2532 RegExpImpl::kLastInputOffset));
2533 __ Mov(x10, subject);
2534 __ RecordWriteField(last_match_info_elements,
2535 RegExpImpl::kLastInputOffset,
2541 Register last_match_offsets = x13;
2542 Register offsets_vector_index = x14;
2543 Register current_offset = x15;
2545 // Get the static offsets vector filled by the native regexp code
2546 // and fill the last match info.
2547 ExternalReference address_of_static_offsets_vector =
2548 ExternalReference::address_of_static_offsets_vector(isolate());
2549 __ Mov(offsets_vector_index, address_of_static_offsets_vector);
2551 Label next_capture, done;
2552 // Capture register counter starts from number of capture registers and
2553 // iterates down to zero (inclusive).
2554 __ Add(last_match_offsets,
2555 last_match_info_elements,
2556 RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
2557 __ Bind(&next_capture);
2558 __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
2560 // Read two 32 bit values from the static offsets vector buffer into
2562 __ Ldr(current_offset,
2563 MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
2564 // Store the smi values in the last match info.
2565 __ SmiTag(x10, current_offset);
2566 // Clearing the 32 bottom bits gives us a Smi.
2567 STATIC_ASSERT(kSmiTag == 0);
2568 __ Bic(x11, current_offset, kSmiShiftMask);
2571 MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
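// Each 64-bit load above brings in two consecutive 32-bit capture offsets:
// SmiTag shifts the low word up into smi position, while Bic clears the
// bottom 32 bits so the high word is already a valid smi, and the pair is
// then written out to the last match info together.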
2572 __ B(&next_capture);
2575 // Return last match info.
2576 __ Peek(x0, kLastMatchInfoOffset);
2577 __ PopCPURegList(used_callee_saved_registers);
2578 // Drop the 4 arguments of the stub from the stack.
2582 __ Bind(&exception);
2583 Register exception_value = x0;
2584 // A stack overflow (on the backtrack stack) may have occurred
2585 // in the RegExp code but no exception has been created yet.
2586 // If there is no pending exception, handle that in the runtime system.
2587 __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
2589 Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2591 __ Ldr(exception_value, MemOperand(x11));
2592 __ Cmp(x10, exception_value);
2595 __ Str(x10, MemOperand(x11)); // Clear pending exception.
2597 // Check if the exception is a termination. If so, throw as uncatchable.
2598 Label termination_exception;
2599 __ JumpIfRoot(exception_value,
2600 Heap::kTerminationExceptionRootIndex,
2601 &termination_exception);
2603 __ Throw(exception_value, x10, x11, x12, x13);
2605 __ Bind(&termination_exception);
2606 __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
2609 __ Mov(x0, Operand(isolate()->factory()->null_value()));
2610 __ PopCPURegList(used_callee_saved_registers);
2611 // Drop the 4 arguments of the stub from the stack.
2616 __ PopCPURegList(used_callee_saved_registers);
2617 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2619 // Deferred code for string handling.
2620 // (6) Not a long external string? If yes, go to (8).
2621 __ Bind(&not_seq_nor_cons);
2622 // Compare flags are still set.
2623 __ B(ne, &not_long_external); // Go to (8).
2625 // (7) External string. Make it, offset-wise, look like a sequential string.
2626 __ Bind(&external_string);
2627 if (masm->emit_debug_code()) {
2628 // Assert that we do not have a cons or slice (indirect strings) here.
2629 // Sequential strings have already been ruled out.
2630 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2631 __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2632 __ Tst(x10, kIsIndirectStringMask);
2633 __ Check(eq, kExternalStringExpectedButNotFound);
2634 __ And(x10, x10, kStringRepresentationMask);
2636 __ Check(ne, kExternalStringExpectedButNotFound);
2639 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2640 // Move the pointer so that offset-wise, it looks like a sequential string.
2641 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2642 __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
2643 __ B(&seq_string); // Go to (5).
2645 // (8) If this is a short external string or not a string, bail out to
2647 __ Bind(&not_long_external);
2648 STATIC_ASSERT(kShortExternalStringTag != 0);
2649 __ TestAndBranchIfAnySet(string_representation,
2650 kShortExternalStringMask | kIsNotStringMask,
2653 // (9) Sliced string. Replace subject with parent.
2654 __ Ldr(sliced_string_offset,
2655 UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
2656 __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2657 __ B(&check_underlying); // Go to (4).
2662 static void GenerateRecordCallTarget(MacroAssembler* masm,
2665 Register feedback_vector,
2668 Register scratch2) {
2669 ASM_LOCATION("GenerateRecordCallTarget");
2670 DCHECK(!AreAliased(scratch1, scratch2,
2671 argc, function, feedback_vector, index));
2672 // Cache the called function in a feedback vector slot. Cache states are
2673 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
2674 // argc : number of arguments to the construct function
2675 // function : the function to call
2676 // feedback_vector : the feedback vector
2677 // index : slot in feedback vector (smi)
2678 Label initialize, done, miss, megamorphic, not_array_function;
2680 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2681 masm->isolate()->heap()->megamorphic_symbol());
2682 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2683 masm->isolate()->heap()->uninitialized_symbol());
2685 // Load the cache state.
2686 __ Add(scratch1, feedback_vector,
2687 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
2688 __ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
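// The slot index is a smi, so UntagSmiAndScale converts it directly into a
// byte offset: slot 2, for instance, is read from
// feedback_vector + 2 * kPointerSize + FixedArray::kHeaderSize - kHeapObjectTag.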
2690 // A monomorphic cache hit or an already megamorphic state: invoke the
2691 // function without changing the state.
2692 __ Cmp(scratch1, function);
2695 if (!FLAG_pretenuring_call_new) {
2696 // If we came here, we need to see if we are the array function.
2697 // If we didn't have a matching function, and we didn't find the megamorphic
2698 // sentinel, then we have in the slot either some other function or an
2699 // AllocationSite. Do a map check on the object in scratch1 register.
2700 __ Ldr(scratch2, FieldMemOperand(scratch1, AllocationSite::kMapOffset));
2701 __ JumpIfNotRoot(scratch2, Heap::kAllocationSiteMapRootIndex, &miss);
2703 // Make sure the function is the Array() function
2704 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
2705 __ Cmp(function, scratch1);
2706 __ B(ne, &megamorphic);
2712 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
2714 __ JumpIfRoot(scratch1, Heap::kuninitialized_symbolRootIndex, &initialize);
2715 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2716 // write-barrier is needed.
2717 __ Bind(&megamorphic);
2718 __ Add(scratch1, feedback_vector,
2719 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
2720 __ LoadRoot(scratch2, Heap::kmegamorphic_symbolRootIndex);
2721 __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
2724 // An uninitialized cache is patched with the function or sentinel to
2725 // indicate the ElementsKind if function is the Array constructor.
2726 __ Bind(&initialize);
2728 if (!FLAG_pretenuring_call_new) {
2729 // Make sure the function is the Array() function
2730 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
2731 __ Cmp(function, scratch1);
2732 __ B(ne, &not_array_function);
2734 // The target function is the Array constructor,
2735 // Create an AllocationSite if we don't already have it, store it in the
2738 FrameScope scope(masm, StackFrame::INTERNAL);
2739 CreateAllocationSiteStub create_stub(masm->isolate());
2741 // Arguments register must be smi-tagged to call out.
2743 __ Push(argc, function, feedback_vector, index);
2745 // CreateAllocationSiteStub expect the feedback vector in x2 and the slot
2747 DCHECK(feedback_vector.Is(x2) && index.Is(x3));
2748 __ CallStub(&create_stub);
2750 __ Pop(index, feedback_vector, function, argc);
2755 __ Bind(&not_array_function);
2758 // An uninitialized cache is patched with the function.
2760 __ Add(scratch1, feedback_vector,
2761 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
2762 __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
2763 __ Str(function, MemOperand(scratch1, 0));
2766 __ RecordWrite(feedback_vector, scratch1, function, kLRHasNotBeenSaved,
2767 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
2774 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2775 // Do not transform the receiver for strict mode functions.
2776 __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2777 __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
2778 __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, cont);
2780 // Do not transform the receiver for native (Compilerhints already in x3).
2781 __ Tbnz(w4, SharedFunctionInfo::kNative, cont);
2785 static void EmitSlowCase(MacroAssembler* masm,
2789 Label* non_function) {
2790 // Check for function proxy.
2791 // x10 : function type.
2792 __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, non_function);
2793 __ Push(function); // put proxy as additional argument
2794 __ Mov(x0, argc + 1);
2796 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
2798 Handle<Code> adaptor =
2799 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2800 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2803 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2804 // of the original receiver from the call site).
2805 __ Bind(non_function);
2806 __ Poke(function, argc * kXRegSize);
2807 __ Mov(x0, argc); // Set up the number of arguments.
2809 __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
2810 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2811 RelocInfo::CODE_TARGET);
2815 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2816 // Wrap the receiver and patch it back onto the stack.
2817 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
2819 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2822 __ Poke(x0, argc * kPointerSize);
2827 static void CallFunctionNoFeedback(MacroAssembler* masm,
2828 int argc, bool needs_checks,
2829 bool call_as_method) {
2830 // x1 function the function to call
2831 Register function = x1;
2833 Label slow, non_function, wrap, cont;
2835 // TODO(jbramley): This function has a lot of unnamed registers. Name them,
2836 // and tidy things up a bit.
2839 // Check that the function is really a JavaScript function.
2840 __ JumpIfSmi(function, &non_function);
2842 // Goto slow case if we do not have a function.
2843 __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
2846 // Fast-case: Invoke the function now.
2847 // x1 function pushed function
2848 ParameterCount actual(argc);
2850 if (call_as_method) {
2852 EmitContinueIfStrictOrNative(masm, &cont);
2855 // Compute the receiver in sloppy mode.
2856 __ Peek(x3, argc * kPointerSize);
2859 __ JumpIfSmi(x3, &wrap);
2860 __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
2868 __ InvokeFunction(function,
2873 // Slow-case: Non-function called.
2875 EmitSlowCase(masm, argc, function, type, &non_function);
2878 if (call_as_method) {
2880 EmitWrapCase(masm, argc, &cont);
2885 void CallFunctionStub::Generate(MacroAssembler* masm) {
2886 ASM_LOCATION("CallFunctionStub::Generate");
2887 CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2891 void CallConstructStub::Generate(MacroAssembler* masm) {
2892 ASM_LOCATION("CallConstructStub::Generate");
2893 // x0 : number of arguments
2894 // x1 : the function to call
2895 // x2 : feedback vector
2896 // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
2897 Register function = x1;
2898 Label slow, non_function_call;
2900 // Check that the function is not a smi.
2901 __ JumpIfSmi(function, &non_function_call);
2902 // Check that the function is a JSFunction.
2903 Register object_type = x10;
2904 __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
2907 if (RecordCallTarget()) {
2908 GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5);
2910 __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
2911 if (FLAG_pretenuring_call_new) {
2912 // Put the AllocationSite from the feedback vector into x2.
2913 // By adding kPointerSize we encode that we know the AllocationSite
2914 // entry is at the feedback vector slot given by x3 + 1.
2915 __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize + kPointerSize));
2917 Label feedback_register_initialized;
2918 // Put the AllocationSite from the feedback vector into x2, or undefined.
2919 __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
2920 __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
2921 __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
2922 &feedback_register_initialized);
2923 __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
2924 __ bind(&feedback_register_initialized);
2927 __ AssertUndefinedOrAllocationSite(x2, x5);
2930 // Jump to the function-specific construct stub.
2931 Register jump_reg = x4;
2932 Register shared_func_info = jump_reg;
2933 Register cons_stub = jump_reg;
2934 Register cons_stub_code = jump_reg;
2935 __ Ldr(shared_func_info,
2936 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2938 FieldMemOperand(shared_func_info,
2939 SharedFunctionInfo::kConstructStubOffset));
2940 __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
2941 __ Br(cons_stub_code);
2945 __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
2946 __ B(ne, &non_function_call);
2947 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2950 __ Bind(&non_function_call);
2951 __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2954 // Set expected number of arguments to zero (not changing x0).
2956 __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2957 RelocInfo::CODE_TARGET);
2961 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2962 __ Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2963 __ Ldr(vector, FieldMemOperand(vector,
2964 JSFunction::kSharedFunctionInfoOffset));
2965 __ Ldr(vector, FieldMemOperand(vector,
2966 SharedFunctionInfo::kFeedbackVectorOffset));
2970 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2974 Register function = x1;
2975 Register feedback_vector = x2;
2976 Register index = x3;
2977 Register scratch = x4;
2979 EmitLoadTypeFeedbackVector(masm, feedback_vector);
2981 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch);
2982 __ Cmp(function, scratch);
2985 __ Mov(x0, Operand(arg_count()));
2987 __ Add(scratch, feedback_vector,
2988 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
2989 __ Ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2991 // Verify that scratch contains an AllocationSite
2993 __ Ldr(map, FieldMemOperand(scratch, HeapObject::kMapOffset));
2994 __ JumpIfNotRoot(map, Heap::kAllocationSiteMapRootIndex, &miss);
2996 Register allocation_site = feedback_vector;
2997 __ Mov(allocation_site, scratch);
2998 ArrayConstructorStub stub(masm->isolate(), arg_count());
2999 __ TailCallStub(&stub);
3004 // The slow case, we need this no matter what to complete a call after a miss.
3005 CallFunctionNoFeedback(masm,
3014 void CallICStub::Generate(MacroAssembler* masm) {
3015 ASM_LOCATION("CallICStub");
3018 // x3 - slot id (Smi)
3019 Label extra_checks_or_miss, slow_start;
3020 Label slow, non_function, wrap, cont;
3021 Label have_js_function;
3022 int argc = arg_count();
3023 ParameterCount actual(argc);
3025 Register function = x1;
3026 Register feedback_vector = x2;
3027 Register index = x3;
3030 EmitLoadTypeFeedbackVector(masm, feedback_vector);
3032 // The checks. First, does x1 match the recorded monomorphic target?
3033 __ Add(x4, feedback_vector,
3034 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3035 __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
3037 __ Cmp(x4, function);
3038 __ B(ne, &extra_checks_or_miss);
3040 __ bind(&have_js_function);
3041 if (CallAsMethod()) {
3042 EmitContinueIfStrictOrNative(masm, &cont);
3044 // Compute the receiver in sloppy mode.
3045 __ Peek(x3, argc * kPointerSize);
3047 __ JumpIfSmi(x3, &wrap);
3048 __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
3053 __ InvokeFunction(function,
3059 EmitSlowCase(masm, argc, function, type, &non_function);
3061 if (CallAsMethod()) {
3063 EmitWrapCase(masm, argc, &cont);
3066 __ bind(&extra_checks_or_miss);
3069 __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &slow_start);
3070 __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
3072 if (!FLAG_trace_ic) {
3073 // We are going megamorphic. If the feedback is a JSFunction, it is fine
3074 // to handle it here. More complex cases are dealt with in the runtime.
3075 __ AssertNotSmi(x4);
3076 __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
3077 __ Add(x4, feedback_vector,
3078 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3079 __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
3080 __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
3081 // We have to update statistics for runtime profiling.
3082 const int with_types_offset =
3083 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
3084 __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
3085 __ Subs(x4, x4, Operand(Smi::FromInt(1)));
3086 __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
3087 const int generic_offset =
3088 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
3089 __ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
3090 __ Adds(x4, x4, Operand(Smi::FromInt(1)));
3091 __ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
3095 // We are here because tracing is on or we are going monomorphic.
3100 __ bind(&slow_start);
3102 // Check that the function is really a JavaScript function.
3103 __ JumpIfSmi(function, &non_function);
3105 // Goto slow case if we do not have a function.
3106 __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
3107 __ B(&have_js_function);
3111 void CallICStub::GenerateMiss(MacroAssembler* masm) {
3112 ASM_LOCATION("CallICStub[Miss]");
3114 // Get the receiver of the function from the stack; 1 ~ return address.
3115 __ Peek(x4, (arg_count() + 1) * kPointerSize);
3118 FrameScope scope(masm, StackFrame::INTERNAL);
3120 // Push the receiver and the function and feedback info.
3121 __ Push(x4, x1, x2, x3);
3124 IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
3125 : IC::kCallIC_Customization_Miss;
3127 ExternalReference miss = ExternalReference(IC_Utility(id),
3129 __ CallExternalReference(miss, 4);
3131 // Move the result to x1 and exit the internal frame.
3137 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3138 // If the receiver is a smi trigger the non-string case.
3139 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
3140 __ JumpIfSmi(object_, receiver_not_string_);
3142 // Fetch the instance type of the receiver into result register.
3143 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3144 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3146 // If the receiver is not a string trigger the non-string case.
3147 __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
3150 // If the index is non-smi trigger the non-smi case.
3151 __ JumpIfNotSmi(index_, &index_not_smi_);
3153 __ Bind(&got_smi_index_);
3154 // Check for index out of range.
3155 __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
3156 __ Cmp(result_, Operand::UntagSmi(index_));
3157 __ B(ls, index_out_of_range_);
3159 __ SmiUntag(index_);
3161 StringCharLoadGenerator::Generate(masm,
3171 void StringCharCodeAtGenerator::GenerateSlow(
3172 MacroAssembler* masm,
3173 const RuntimeCallHelper& call_helper) {
3174 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3176 __ Bind(&index_not_smi_);
3177 // If index is a heap number, try converting it to an integer.
3178 __ JumpIfNotHeapNumber(index_, index_not_number_);
3179 call_helper.BeforeCall(masm);
3180 // Save object_ on the stack and pass index_ as argument for runtime call.
3181 __ Push(object_, index_);
3182 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3183 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3185 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3186 // NumberToSmi discards numbers that are not exact integers.
3187 __ CallRuntime(Runtime::kNumberToSmi, 1);
3189 // Save the conversion result before the pop instructions below
3190 // have a chance to overwrite it.
3193 // Reload the instance type.
3194 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3195 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3196 call_helper.AfterCall(masm);
3198 // If index is still not a smi, it must be out of range.
3199 __ JumpIfNotSmi(index_, index_out_of_range_);
3200 // Otherwise, return to the fast path.
3201 __ B(&got_smi_index_);
3203 // Call runtime. We get here when the receiver is a string and the
3204 // index is a number, but the code for getting the actual character
3205 // is too complex (e.g., when the string needs to be flattened).
3206 __ Bind(&call_runtime_);
3207 call_helper.BeforeCall(masm);
3209 __ Push(object_, index_);
3210 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
3211 __ Mov(result_, x0);
3212 call_helper.AfterCall(masm);
3215 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3219 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3220 __ JumpIfNotSmi(code_, &slow_case_);
3221 __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
3222 __ B(hi, &slow_case_);
3224 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3225 // At this point code register contains smi tagged one-byte char code.
3226 __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
3227 __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3228 __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
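// For example, a smi code of 65 ('A') indexes element 65 of the single
// character string cache; an undefined entry means the one-character string
// has not been cached yet, so the slow case is taken.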
3233 void StringCharFromCodeGenerator::GenerateSlow(
3234 MacroAssembler* masm,
3235 const RuntimeCallHelper& call_helper) {
3236 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3238 __ Bind(&slow_case_);
3239 call_helper.BeforeCall(masm);
3241 __ CallRuntime(Runtime::kCharFromCode, 1);
3242 __ Mov(result_, x0);
3243 call_helper.AfterCall(masm);
3246 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3250 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3251 // Inputs are in x0 (lhs) and x1 (rhs).
3252 DCHECK(state() == CompareICState::SMI);
3253 ASM_LOCATION("CompareICStub[Smis]");
3255 // Bail out (to 'miss') unless both x0 and x1 are smis.
3256 __ JumpIfEitherNotSmi(x0, x1, &miss);
3258 if (GetCondition() == eq) {
3259 // For equality we do not care about the sign of the result.
3262 // Untag before subtracting to avoid handling overflow.
3264 __ Sub(x0, x1, Operand::UntagSmi(x0));
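// Subtracting two tagged smis could overflow into the sign bit (each smi
// keeps a 32-bit payload in the upper word), which is what the untagging
// here avoids; for equality only zero versus non-zero matters, so the sign
// of the result is irrelevant there.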
3273 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3274 DCHECK(state() == CompareICState::NUMBER);
3275 ASM_LOCATION("CompareICStub[HeapNumbers]");
3277 Label unordered, maybe_undefined1, maybe_undefined2;
3278 Label miss, handle_lhs, values_in_d_regs;
3279 Label untag_rhs, untag_lhs;
3281 Register result = x0;
3284 FPRegister rhs_d = d0;
3285 FPRegister lhs_d = d1;
3287 if (left() == CompareICState::SMI) {
3288 __ JumpIfNotSmi(lhs, &miss);
3290 if (right() == CompareICState::SMI) {
3291 __ JumpIfNotSmi(rhs, &miss);
3294 __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
3295 __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
3297 // Load rhs if it's a heap number.
3298 __ JumpIfSmi(rhs, &handle_lhs);
3299 __ JumpIfNotHeapNumber(rhs, &maybe_undefined1);
3300 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
3302 // Load lhs if it's a heap number.
3303 __ Bind(&handle_lhs);
3304 __ JumpIfSmi(lhs, &values_in_d_regs);
3305 __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
3306 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
3308 __ Bind(&values_in_d_regs);
3309 __ Fcmp(lhs_d, rhs_d);
3310 __ B(vs, &unordered); // Overflow flag set if either is NaN.
3311 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
3312 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
3313 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
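// Cset/Csinv translate the floating-point compare conditions into the JS
// comparison result:
//   lhs >  rhs : gt and ge both hold   -> result ==  1 (GREATER)
//   lhs == rhs : ge holds, gt does not -> result ==  0 (EQUAL)
//   lhs <  rhs : neither holds         -> result == -1 (LESS)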
3316 __ Bind(&unordered);
3317 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3318 CompareICState::GENERIC, CompareICState::GENERIC);
3319 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3321 __ Bind(&maybe_undefined1);
3322 if (Token::IsOrderedRelationalCompareOp(op())) {
3323 __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
3324 __ JumpIfSmi(lhs, &unordered);
3325 __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
3329 __ Bind(&maybe_undefined2);
3330 if (Token::IsOrderedRelationalCompareOp(op())) {
3331 __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
3339 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3340 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3341 ASM_LOCATION("CompareICStub[InternalizedStrings]");
3344 Register result = x0;
3348 // Check that both operands are heap objects.
3349 __ JumpIfEitherSmi(lhs, rhs, &miss);
3351 // Check that both operands are internalized strings.
3352 Register rhs_map = x10;
3353 Register lhs_map = x11;
3354 Register rhs_type = x10;
3355 Register lhs_type = x11;
3356 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3357 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3358 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
3359 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
3361 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
3362 __ Orr(x12, lhs_type, rhs_type);
3363 __ TestAndBranchIfAnySet(
3364 x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
3366 // Internalized strings are compared by identity.
3367 STATIC_ASSERT(EQUAL == 0);
3369 __ Cset(result, ne);
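// Note: internalized strings are deduplicated, so object identity is a
// sufficient equality check here. After the comparison, result is 0 (EQUAL)
// when both operands are the same string and 1 otherwise, which is all the
// equality IC needs.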
3377 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3378 DCHECK(state() == CompareICState::UNIQUE_NAME);
3379 ASM_LOCATION("CompareICStub[UniqueNames]");
3380 DCHECK(GetCondition() == eq);
3383 Register result = x0;
3387 Register lhs_instance_type = w2;
3388 Register rhs_instance_type = w3;
3390 // Check that both operands are heap objects.
3391 __ JumpIfEitherSmi(lhs, rhs, &miss);
3393 // Check that both operands are unique names. This leaves the instance
3394 // types loaded in lhs_instance_type and rhs_instance_type.
3395 __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
3396 __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
3397 __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
3398 __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
3400 // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
3401 // should have kInternalizedTag set.
3402 __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
3403 __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
3405 // Unique names are compared by identity.
3406 STATIC_ASSERT(EQUAL == 0);
3408 __ Cset(result, ne);
3416 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3417 DCHECK(state() == CompareICState::STRING);
3418 ASM_LOCATION("CompareICStub[Strings]");
3422 bool equality = Token::IsEqualityOp(op());
3424 Register result = x0;
3428 // Check that both operands are heap objects.
3429 __ JumpIfEitherSmi(rhs, lhs, &miss);
3431 // Check that both operands are strings.
3432 Register rhs_map = x10;
3433 Register lhs_map = x11;
3434 Register rhs_type = x10;
3435 Register lhs_type = x11;
3436 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3437 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3438 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
3439 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
3440 STATIC_ASSERT(kNotStringTag != 0);
3441 __ Orr(x12, lhs_type, rhs_type);
3442 __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
3444 // Fast check for identical strings.
3447 __ B(ne, &not_equal);
3448 __ Mov(result, EQUAL);
3451 __ Bind(&not_equal);
3452 // Handle not identical strings
3454 // Check that both strings are internalized strings. If they are, we're done
3455 // because we already know they are not identical. We know they are both
3458 DCHECK(GetCondition() == eq);
3459 STATIC_ASSERT(kInternalizedTag == 0);
3460 Label not_internalized_strings;
3461 __ Orr(x12, lhs_type, rhs_type);
3462 __ TestAndBranchIfAnySet(
3463 x12, kIsNotInternalizedMask, &not_internalized_strings);
3464 // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
3466 __ Bind(&not_internalized_strings);
3469 // Check that both strings are sequential one-byte.
3471 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x12,
3474 // Compare flat one-byte strings. Returns when done.
3476 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
3479 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
3483 // Handle more complex cases in runtime.
3487 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3489 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3497 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3498 DCHECK(state() == CompareICState::OBJECT);
3499 ASM_LOCATION("CompareICStub[Objects]");
3503 Register result = x0;
3507 __ JumpIfEitherSmi(rhs, lhs, &miss);
3509 __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
3510 __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
3512 DCHECK(GetCondition() == eq);
3513 __ Sub(result, rhs, lhs);
3521 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3522 ASM_LOCATION("CompareICStub[KnownObjects]");
3526 Register result = x0;
3530 __ JumpIfEitherSmi(rhs, lhs, &miss);
3532 Register rhs_map = x10;
3533 Register lhs_map = x11;
3534 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3535 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3536 __ Cmp(rhs_map, Operand(known_map_));
3538 __ Cmp(lhs_map, Operand(known_map_));
3541 __ Sub(result, rhs, lhs);
3549 // This method handles the case where a compare stub had the wrong
3550 // implementation. It calls a miss handler, which re-writes the stub. All other
3551 // CompareICStub::Generate* methods should fall back into this one if their
3552 // operands were not the expected types.
3553 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3554 ASM_LOCATION("CompareICStub[Miss]");
3556 Register stub_entry = x11;
3558 ExternalReference miss =
3559 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3561 FrameScope scope(masm, StackFrame::INTERNAL);
3564 Register right = x0;
3565 // Preserve some caller-saved registers.
3566 __ Push(x1, x0, lr);
3567 // Push the arguments.
3568 __ Mov(op, Smi::FromInt(this->op()));
3569 __ Push(left, right, op);
3571 // Call the miss handler. This also pops the arguments.
3572 __ CallExternalReference(miss, 3);
3574 // Compute the entry point of the rewritten stub.
3575 __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
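// Note: the miss handler returns the rewritten stub's Code object in x0. Its
// first instruction lives Code::kHeaderSize bytes past the start of the
// (tagged) object, hence the adjustment above before tail-calling into it.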
3576 // Restore caller-saved registers.
3580 // Tail-call to the new stub.
3581 __ Jump(stub_entry);
3585 void SubStringStub::Generate(MacroAssembler* masm) {
3586 ASM_LOCATION("SubStringStub::Generate");
3589 // Stack frame on entry.
3590 // lr: return address
3591 // jssp[0]: substring "to" offset
3592 // jssp[8]: substring "from" offset
3593 // jssp[16]: pointer to string object
3595 // This stub is called from the native-call %_SubString(...), so
3596 // nothing can be assumed about the arguments. It is tested that:
3597 // "string" is a sequential string,
3598 // both "from" and "to" are smis, and
3599 // 0 <= from <= to <= string.length (in debug mode.)
3600 // If any of these assumptions fail, we call the runtime system.
3602 static const int kToOffset = 0 * kPointerSize;
3603 static const int kFromOffset = 1 * kPointerSize;
3604 static const int kStringOffset = 2 * kPointerSize;
3607 Register from = x15;
3608 Register input_string = x10;
3609 Register input_length = x11;
3610 Register input_type = x12;
3611 Register result_string = x0;
3612 Register result_length = x1;
3615 __ Peek(to, kToOffset);
3616 __ Peek(from, kFromOffset);
3618 // Check that both from and to are smis. If not, jump to runtime.
3619 __ JumpIfEitherNotSmi(from, to, &runtime);
3623 // Calculate difference between from and to. If to < from, branch to runtime.
3624 __ Subs(result_length, to, from);
3627 // Check from is positive.
3628 __ Tbnz(from, kWSignBit, &runtime);
3630 // Make sure first argument is a string.
3631 __ Peek(input_string, kStringOffset);
3632 __ JumpIfSmi(input_string, &runtime);
3633 __ IsObjectJSStringType(input_string, input_type, &runtime);
3636 __ Cmp(result_length, 1);
3637 __ B(eq, &single_char);
3639 // Short-cut for the case of trivial substring.
3641 __ Ldrsw(input_length,
3642 UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
3644 __ Cmp(result_length, input_length);
3645 __ CmovX(x0, input_string, eq);
3646 // Return original string.
3647 __ B(eq, &return_x0);
3649 // Longer than original string's length or negative: unsafe arguments.
3652 // Shorter than original string's length: an actual substring.
3654 // x0 to substring end character offset
3655 // x1 result_length length of substring result
3656 // x10 input_string pointer to input string object
3657 // x10 unpacked_string pointer to unpacked string object
3658 // x11 input_length length of input string
3659 // x12 input_type instance type of input string
3660 // x15 from substring start character offset
3662 // Deal with different string types: update the index if necessary and put
3663 // the underlying string into register unpacked_string.
3664 Label underlying_unpacked, sliced_string, seq_or_external_string;
3665 Label update_instance_type;
3666 // If the string is not indirect, it can only be sequential or external.
3667 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3668 STATIC_ASSERT(kIsIndirectStringMask != 0);
3670 // Test for string types, and branch/fall through to appropriate unpacking
3672 __ Tst(input_type, kIsIndirectStringMask);
3673 __ B(eq, &seq_or_external_string);
3674 __ Tst(input_type, kSlicedNotConsMask);
3675 __ B(ne, &sliced_string);
3677 Register unpacked_string = input_string;
3679 // Cons string. Check whether it is flat, then fetch first part.
3680 __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
3681 __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
3682 __ Ldr(unpacked_string,
3683 FieldMemOperand(input_string, ConsString::kFirstOffset));
3684 __ B(&update_instance_type);
3686 __ Bind(&sliced_string);
3687 // Sliced string. Fetch parent and correct start index by offset.
3689 UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
3690 __ Add(from, from, temp);
3691 __ Ldr(unpacked_string,
3692 FieldMemOperand(input_string, SlicedString::kParentOffset));
3694 __ Bind(&update_instance_type);
3695 __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
3696 __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
3697 // Now control must go to &underlying_unpacked. Since no code is generated
3698 // before then, we fall through instead of generating a useless branch.
3700 __ Bind(&seq_or_external_string);
3701 // Sequential or external string. Registers unpacked_string and input_string
3702 // alias, so there's nothing to do here.
3703 // Note that if code is added here, the above code must be updated.
3705 // x0 result_string pointer to result string object (uninit)
3706 // x1 result_length length of substring result
3707 // x10 unpacked_string pointer to unpacked string object
3708 // x11 input_length length of input string
3709 // x12 input_type instance type of input string
3710 // x15 from substring start character offset
3711 __ Bind(&underlying_unpacked);
3713 if (FLAG_string_slices) {
3715 __ Cmp(result_length, SlicedString::kMinLength);
3716 // Short slice. Copy instead of slicing.
3717 __ B(lt, &copy_routine);
3718 // Allocate new sliced string. At this point we do not reload the instance
3719 // type including the string encoding because we simply rely on the info
3720 // provided by the original string. It does not matter if the original
3721 // string's encoding is wrong because we always have to recheck encoding of
3722 // the newly created string's parent anyway due to externalized strings.
3723 Label two_byte_slice, set_slice_header;
3724 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3725 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3726 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
3727 __ AllocateOneByteSlicedString(result_string, result_length, x3, x4,
3729 __ B(&set_slice_header);
3731 __ Bind(&two_byte_slice);
3732 __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
3735 __ Bind(&set_slice_header);
3737 __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
3738 __ Str(unpacked_string,
3739 FieldMemOperand(result_string, SlicedString::kParentOffset));
3742 __ Bind(&copy_routine);
3745 // x0 result_string pointer to result string object (uninit)
3746 // x1 result_length length of substring result
3747 // x10 unpacked_string pointer to unpacked string object
3748 // x11 input_length length of input string
3749 // x12 input_type instance type of input string
3750 // x13 unpacked_char0 pointer to first char of unpacked string (uninit)
3751 // x13 substring_char0 pointer to first char of substring (uninit)
3752 // x14 result_char0 pointer to first char of result (uninit)
3753 // x15 from substring start character offset
3754 Register unpacked_char0 = x13;
3755 Register substring_char0 = x13;
3756 Register result_char0 = x14;
3757 Label two_byte_sequential, sequential_string, allocate_result;
3758 STATIC_ASSERT(kExternalStringTag != 0);
3759 STATIC_ASSERT(kSeqStringTag == 0);
3761 __ Tst(input_type, kExternalStringTag);
3762 __ B(eq, &sequential_string);
3764 __ Tst(input_type, kShortExternalStringTag);
3766 __ Ldr(unpacked_char0,
3767 FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
3768 // unpacked_char0 points to the first character of the underlying string.
3769 __ B(&allocate_result);
3771 __ Bind(&sequential_string);
3772 // Locate first character of underlying subject string.
3773 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3774 __ Add(unpacked_char0, unpacked_string,
3775 SeqOneByteString::kHeaderSize - kHeapObjectTag);
3777 __ Bind(&allocate_result);
3778 // Sequential one-byte string. Allocate the result.
3779 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3780 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
3782 // Allocate and copy the resulting one-byte string.
3783 __ AllocateOneByteString(result_string, result_length, x3, x4, x5, &runtime);
3785 // Locate first character of substring to copy.
3786 __ Add(substring_char0, unpacked_char0, from);
3788 // Locate first character of result.
3789 __ Add(result_char0, result_string,
3790 SeqOneByteString::kHeaderSize - kHeapObjectTag);
3792 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3793 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
3796 // Allocate and copy the resulting two-byte string.
3797 __ Bind(&two_byte_sequential);
3798 __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
3800 // Locate first character of substring to copy.
3801 __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
3803 // Locate first character of result.
3804 __ Add(result_char0, result_string,
3805 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
3807 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3808 __ Add(result_length, result_length, result_length);
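// Note: CopyBytes expects a byte count, and each two-byte character occupies
// two bytes, so result_length is doubled here to turn the character count
// into a byte count.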
3809 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
3811 __ Bind(&return_x0);
3812 Counters* counters = isolate()->counters();
3813 __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
3818 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3820 __ bind(&single_char);
3821 // x1: result_length
3822 // x10: input_string
3824 // x15: from (untagged)
3826 StringCharAtGenerator generator(input_string, from, result_length, x0,
3827 &runtime, &runtime, &runtime,
3828 STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
3829 generator.GenerateFast(masm);
3832 generator.SkipSlow(masm, &runtime);
3836 void StringHelper::GenerateFlatOneByteStringEquals(
3837 MacroAssembler* masm, Register left, Register right, Register scratch1,
3838 Register scratch2, Register scratch3) {
3839 DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
3840 Register result = x0;
3841 Register left_length = scratch1;
3842 Register right_length = scratch2;
3844 // Compare lengths. If lengths differ, strings can't be equal. Lengths are
3845 // smis, and don't need to be untagged.
3846 Label strings_not_equal, check_zero_length;
3847 __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
3848 __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
3849 __ Cmp(left_length, right_length);
3850 __ B(eq, &check_zero_length);
3852 __ Bind(&strings_not_equal);
3853 __ Mov(result, Smi::FromInt(NOT_EQUAL));
3856 // Check if the length is zero. If so, the strings must be equal (and empty.)
3857 Label compare_chars;
3858 __ Bind(&check_zero_length);
3859 STATIC_ASSERT(kSmiTag == 0);
3860 __ Cbnz(left_length, &compare_chars);
3861 __ Mov(result, Smi::FromInt(EQUAL));
3864 // Compare characters. Falls through if all characters are equal.
3865 __ Bind(&compare_chars);
3866 GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
3867 scratch3, &strings_not_equal);
3869 // Characters in strings are equal.
3870 __ Mov(result, Smi::FromInt(EQUAL));
3875 void StringHelper::GenerateCompareFlatOneByteStrings(
3876 MacroAssembler* masm, Register left, Register right, Register scratch1,
3877 Register scratch2, Register scratch3, Register scratch4) {
3878 DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
3879 Label result_not_equal, compare_lengths;
3881 // Find minimum length and length difference.
3882 Register length_delta = scratch3;
3883 __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
3884 __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3885 __ Subs(length_delta, scratch1, scratch2);
3887 Register min_length = scratch1;
3888 __ Csel(min_length, scratch2, scratch1, gt);
3889 __ Cbz(min_length, &compare_lengths);
3892 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3893 scratch4, &result_not_equal);
3895 // Compare lengths - strings up to min-length are equal.
3896 __ Bind(&compare_lengths);
3898 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3900 // Use length_delta as result if it's zero.
3901 Register result = x0;
3902 __ Subs(result, length_delta, 0);
3904 __ Bind(&result_not_equal);
3905 Register greater = x10;
3906 Register less = x11;
3907 __ Mov(greater, Smi::FromInt(GREATER));
3908 __ Mov(less, Smi::FromInt(LESS));
3909 __ CmovX(result, greater, gt);
3910 __ CmovX(result, less, lt);
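// Note: control reaches these conditional moves either by falling through
// from the Subs above (flags reflect the length difference, result already 0)
// or by branching from the character loop (flags reflect the first differing
// character). The CmovX pair then selects Smi-tagged GREATER or LESS, and the
// 0 (EQUAL) result survives when neither condition holds.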
3915 void StringHelper::GenerateOneByteCharsCompareLoop(
3916 MacroAssembler* masm, Register left, Register right, Register length,
3917 Register scratch1, Register scratch2, Label* chars_not_equal) {
3918 DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
3920 // Change index to run from -length to -1 by adding length to string
3921 // start. This means that the loop ends when index reaches zero, which
3922 // doesn't need an additional compare.
3923 __ SmiUntag(length);
3924 __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
3925 __ Add(left, left, scratch1);
3926 __ Add(right, right, scratch1);
3928 Register index = length;
3929 __ Neg(index, length); // index = -length;
3934 __ Ldrb(scratch1, MemOperand(left, index));
3935 __ Ldrb(scratch2, MemOperand(right, index));
3936 __ Cmp(scratch1, scratch2);
3937 __ B(ne, chars_not_equal);
3938 __ Add(index, index, 1);
3939 __ Cbnz(index, &loop);
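// Roughly, the loop above is equivalent to (illustrative pseudocode in this
// comment only, assuming sequential one-byte strings):
//   for (int64_t i = -length; i != 0; i++) {
//     if (left_chars[i] != right_chars[i]) goto chars_not_equal;
//   }
// where left_chars/right_chars point one past the end of the character data,
// so the negative index walks both strings from their first character.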
3943 void StringCompareStub::Generate(MacroAssembler* masm) {
3946 Counters* counters = isolate()->counters();
3948 // Stack frame on entry.
3949 // sp[0]: right string
3950 // sp[8]: left string
3951 Register right = x10;
3952 Register left = x11;
3953 Register result = x0;
3954 __ Pop(right, left);
3957 __ Subs(result, right, left);
3958 __ B(ne, &not_same);
3959 STATIC_ASSERT(EQUAL == 0);
3960 __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
3965 // Check that both objects are sequential one-byte strings.
3966 __ JumpIfEitherIsNotSequentialOneByteStrings(left, right, x12, x13, &runtime);
3968 // Compare flat one-byte strings natively. Remove arguments from stack first,
3969 // as this function will generate a return.
3970 __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
3971 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, x12, x13,
3976 // Push arguments back on to the stack.
3977 // sp[0] = right string
3978 // sp[8] = left string.
3979 __ Push(left, right);
3981 // Call the runtime.
3982 // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
3983 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3987 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3988 // ----------- S t a t e -------------
3991 // -- lr : return address
3992 // -----------------------------------
3994 // Load x2 with the allocation site. We stick an undefined dummy value here
3995 // and replace it with the real allocation site later when we instantiate this
3996 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3997 __ LoadObject(x2, handle(isolate()->heap()->undefined_value()));
3999 // Make sure that we actually patched the allocation site.
4000 if (FLAG_debug_code) {
4001 __ AssertNotSmi(x2, kExpectedAllocationSite);
4002 __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
4003 __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
4004 kExpectedAllocationSite);
4007 // Tail call into the stub that handles binary operations with allocation
4009 BinaryOpWithAllocationSiteStub stub(isolate(), state());
4010 __ TailCallStub(&stub);
4014 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4015 // We need some extra registers for this stub. They have been allocated,
4016 // but we need to save them before using them.
4019 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4020 Label dont_need_remembered_set;
4022 Register val = regs_.scratch0();
4023 __ Ldr(val, MemOperand(regs_.address()));
4024 __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
4026 __ CheckPageFlagSet(regs_.object(), val, 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4027 &dont_need_remembered_set);
4029 // First notify the incremental marker if necessary, then update the
4031 CheckNeedsToInformIncrementalMarker(
4032 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4033 InformIncrementalMarker(masm);
4034 regs_.Restore(masm); // Restore the extra scratch registers we used.
4036 __ RememberedSetHelper(object(), address(),
4037 value(), // scratch1
4038 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
4040 __ Bind(&dont_need_remembered_set);
4043 CheckNeedsToInformIncrementalMarker(
4044 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4045 InformIncrementalMarker(masm);
4046 regs_.Restore(masm); // Restore the extra scratch registers we used.
4051 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4052 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
4054 x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
4055 DCHECK(!address.Is(regs_.object()));
4056 DCHECK(!address.Is(x0));
4057 __ Mov(address, regs_.address());
4058 __ Mov(x0, regs_.object());
4059 __ Mov(x1, address);
4060 __ Mov(x2, ExternalReference::isolate_address(isolate()));
4062 AllowExternalCallThatCantCauseGC scope(masm);
4063 ExternalReference function =
4064 ExternalReference::incremental_marking_record_write_function(
4066 __ CallCFunction(function, 3, 0);
4068 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
4072 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4073 MacroAssembler* masm,
4074 OnNoNeedToInformIncrementalMarker on_no_need,
4077 Label need_incremental;
4078 Label need_incremental_pop_scratch;
4080 Register mem_chunk = regs_.scratch0();
4081 Register counter = regs_.scratch1();
4082 __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
4084 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
4085 __ Subs(counter, counter, 1);
4087 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
4088 __ B(mi, &need_incremental);
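// Note: the per-page write barrier counter is decremented on each recorded
// write; once it goes negative the stub branches to need_incremental and ends
// up informing the incremental marker instead of taking the cheap path.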
4090 // If the object is not black we don't have to inform the incremental marker.
4091 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4093 regs_.Restore(masm); // Restore the extra scratch registers we used.
4094 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4095 __ RememberedSetHelper(object(), address(),
4096 value(), // scratch1
4097 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
4103 // Get the value from the slot.
4104 Register val = regs_.scratch0();
4105 __ Ldr(val, MemOperand(regs_.address()));
4107 if (mode == INCREMENTAL_COMPACTION) {
4108 Label ensure_not_white;
4110 __ CheckPageFlagClear(val, regs_.scratch1(),
4111 MemoryChunk::kEvacuationCandidateMask,
4114 __ CheckPageFlagClear(regs_.object(),
4116 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4119 __ Bind(&ensure_not_white);
4122 // We need extra registers for this, so we push the object and the address
4123 // register temporarily.
4124 __ Push(regs_.address(), regs_.object());
4125 __ EnsureNotWhite(val,
4126 regs_.scratch1(), // Scratch.
4127 regs_.object(), // Scratch.
4128 regs_.address(), // Scratch.
4129 regs_.scratch2(), // Scratch.
4130 &need_incremental_pop_scratch);
4131 __ Pop(regs_.object(), regs_.address());
4133 regs_.Restore(masm); // Restore the extra scratch registers we used.
4134 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4135 __ RememberedSetHelper(object(), address(),
4136 value(), // scratch1
4137 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
4142 __ Bind(&need_incremental_pop_scratch);
4143 __ Pop(regs_.object(), regs_.address());
4145 __ Bind(&need_incremental);
4146 // Fall through when we need to inform the incremental marker.
4150 void RecordWriteStub::Generate(MacroAssembler* masm) {
4151 Label skip_to_incremental_noncompacting;
4152 Label skip_to_incremental_compacting;
4154 // We patch the first two instructions back and forth between a nop and a
4155 // real branch when we start and stop incremental heap marking.
4156 // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so two nops
4158 // are generated. See RecordWriteStub::Patch for details.
4160 InstructionAccurateScope scope(masm, 2);
4161 __ adr(xzr, &skip_to_incremental_noncompacting);
4162 __ adr(xzr, &skip_to_incremental_compacting);
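// Note: "adr xzr, label" computes the label's address into the zero register,
// so it behaves as a nop while still encoding the branch target.
// RecordWriteStub::Patch later swaps these instructions for real branches
// when incremental marking starts, and back to nops when it stops.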
4165 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4166 __ RememberedSetHelper(object(), address(),
4167 value(), // scratch1
4168 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
4172 __ Bind(&skip_to_incremental_noncompacting);
4173 GenerateIncremental(masm, INCREMENTAL);
4175 __ Bind(&skip_to_incremental_compacting);
4176 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4180 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4181 // x0 value element value to store
4182 // x3 index_smi element index as smi
4183 // sp[0] array_index_smi array literal index in function as smi
4184 // sp[1] array array literal
4186 Register value = x0;
4187 Register index_smi = x3;
4189 Register array = x1;
4190 Register array_map = x2;
4191 Register array_index_smi = x4;
4192 __ PeekPair(array_index_smi, array, 0);
4193 __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
4195 Label double_elements, smi_element, fast_elements, slow_elements;
4196 Register bitfield2 = x10;
4197 __ Ldrb(bitfield2, FieldMemOperand(array_map, Map::kBitField2Offset));
4199 // Jump if array's ElementsKind is not FAST*_SMI_ELEMENTS, FAST_ELEMENTS or
4200 // FAST_HOLEY_ELEMENTS.
4201 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4202 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4203 STATIC_ASSERT(FAST_ELEMENTS == 2);
4204 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4205 __ Cmp(bitfield2, Map::kMaximumBitField2FastHoleyElementValue);
4206 __ B(hi, &double_elements);
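// Note (assuming ElementsKindBits occupy the high bits of bitfield2, which
// this comparison relies on): the FAST_* kinds asserted above are numbered
// 0..3, so a single unsigned compare against the maximum "fast holey" value
// separates the smi/object element kinds from the double element kinds.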
4208 __ JumpIfSmi(value, &smi_element);
4210 // Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
4211 __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::ElementsKindBits::kShift),
4214 // Store into the array literal requires an elements transition. Call into
4216 __ Bind(&slow_elements);
4217 __ Push(array, index_smi, value);
4218 __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4219 __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
4220 __ Push(x11, array_index_smi);
4221 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4223 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4224 __ Bind(&fast_elements);
4225 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4226 __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
4227 __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
4228 __ Str(value, MemOperand(x11));
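// Roughly, the address computed above is (illustrative pseudocode in this
// comment only):
//   slot = elements + untag(index_smi) * kPointerSize
//          + FixedArray::kHeaderSize - kHeapObjectTag;
// i.e. a pointer-sized slot inside the FixedArray backing store. The
// RecordWrite below then keeps the write barrier consistent for the stored
// heap object.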
4229 // Update the write barrier for the array store.
4230 __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
4231 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4234 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4235 // and value is Smi.
4236 __ Bind(&smi_element);
4237 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4238 __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
4239 __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
4242 __ Bind(&double_elements);
4243 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4244 __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0,
4250 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4251 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4252 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4253 int parameter_count_offset =
4254 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4255 __ Ldr(x1, MemOperand(fp, parameter_count_offset));
4256 if (function_mode() == JS_FUNCTION_STUB_MODE) {
4259 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4261 // Return to IC Miss stub, continuation still on stack.
4266 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4267 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4268 VectorLoadStub stub(isolate(), state());
4269 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4273 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4274 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4275 VectorKeyedLoadStub stub(isolate());
4276 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4280 static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
4281 // The entry hook is a "BumpSystemStackPointer" instruction (sub),
4282 // followed by a "Push lr" instruction, followed by a call.
4284 Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
4285 if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
4286 // If ALWAYS_ALIGN_CSP then there will be an extra bic instruction in
4287 // "BumpSystemStackPointer".
4288 size += kInstructionSize;
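// Note: the size returned here must match exactly what MaybeCallEntryHook
// emits, because ProfileEntryHookStub::Generate subtracts it from lr to
// recover the instrumented function's entry address.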
4294 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4295 if (masm->isolate()->function_entry_hook() != NULL) {
4296 ProfileEntryHookStub stub(masm->isolate());
4297 Assembler::BlockConstPoolScope no_const_pools(masm);
4298 DontEmitDebugCodeScope no_debug_code(masm);
4299 Label entry_hook_call_start;
4300 __ Bind(&entry_hook_call_start);
4303 DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
4304 GetProfileEntryHookCallSize(masm));
4311 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4312 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
4314 // Save all kCallerSaved registers (including lr), since this can be called
4316 // TODO(jbramley): What about FP registers?
4317 __ PushCPURegList(kCallerSaved);
4318 DCHECK(kCallerSaved.IncludesAliasOf(lr));
4319 const int kNumSavedRegs = kCallerSaved.Count();
4321 // Compute the function's address as the first argument.
4322 __ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
4324 #if V8_HOST_ARCH_ARM64
4325 uintptr_t entry_hook =
4326 reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
4327 __ Mov(x10, entry_hook);
4329 // Under the simulator we need to indirect the entry hook through a trampoline
4330 // function at a known address.
4331 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4332 __ Mov(x10, Operand(ExternalReference(&dispatcher,
4333 ExternalReference::BUILTIN_CALL,
4335 // It additionally takes an isolate as a third parameter
4336 __ Mov(x2, ExternalReference::isolate_address(isolate()));
4339 // The caller's return address is above the saved temporaries.
4340 // Grab its location for the second argument to the hook.
4341 __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
4344 // Create a dummy frame, as CallCFunction requires this.
4345 FrameScope frame(masm, StackFrame::MANUAL);
4346 __ CallCFunction(x10, 2, 0);
4349 __ PopCPURegList(kCallerSaved);
4354 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4355 // When calling into C++ code the stack pointer must be csp.
4356 // Therefore this code must use csp for peek/poke operations when the
4357 // stub is generated. When the stub is called
4358 // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame
4359 // and configure the stack pointer *before* doing the call.
4360 const Register old_stack_pointer = __ StackPointer();
4361 __ SetStackPointer(csp);
4363 // Put return address on the stack (accessible to GC through exit frame pc).
4365 // Call the C++ function.
4367 // Return to calling code.
4369 __ AssertFPCRState();
4372 __ SetStackPointer(old_stack_pointer);
4375 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4377 // Make sure the caller configured the stack pointer (see comment in
4378 // DirectCEntryStub::Generate).
4379 DCHECK(csp.Is(__ StackPointer()));
4382 reinterpret_cast<intptr_t>(GetCode().location());
4383 __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
4384 __ Mov(x10, target);
4385 // Branch to the stub.
4390 // Probe the name dictionary in the 'elements' register.
4391 // Jump to the 'done' label if a property with the given name is found.
4392 // Jump to the 'miss' label otherwise.
4394 // If lookup was successful 'scratch2' will be equal to elements + 4 * index.
4395 // 'elements' and 'name' registers are preserved on miss.
4396 void NameDictionaryLookupStub::GeneratePositiveLookup(
4397 MacroAssembler* masm,
4403 Register scratch2) {
4404 DCHECK(!AreAliased(elements, name, scratch1, scratch2));
4406 // Assert that name contains a string.
4407 __ AssertName(name);
4409 // Compute the capacity mask.
4410 __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
4411 __ Sub(scratch1, scratch1, 1);
4413 // Generate an unrolled loop that performs a few probes before giving up.
4414 for (int i = 0; i < kInlinedProbes; i++) {
4415 // Compute the masked index: (hash + i + i * i) & mask.
4416 __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4418 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4419 // the hash in a separate instruction. The value hash + i + i * i is right
4420 // shifted in the following And instruction.
4421 DCHECK(NameDictionary::GetProbeOffset(i) <
4422 1 << (32 - Name::kHashFieldOffset));
4423 __ Add(scratch2, scratch2, Operand(
4424 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4426 __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
4428 // Scale the index by multiplying by the element size.
4429 DCHECK(NameDictionary::kEntrySize == 3);
4430 __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
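// Roughly, each probe above computes (illustrative pseudocode in this comment
// only):
//   index = ((hash_field >> Name::kHashShift) + probe_offset(i)) & capacity_mask;
//   entry = elements + (index * NameDictionary::kEntrySize) * kPointerSize;
// The probe offset is added pre-shifted so that the single And below both
// masks and shifts the hash field down by Name::kHashShift; the entry address
// is then formed just below.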
4432 // Check if the key is identical to the name.
4433 UseScratchRegisterScope temps(masm);
4434 Register scratch3 = temps.AcquireX();
4435 __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
4436 __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
4437 __ Cmp(name, scratch3);
4441 // The inlined probes didn't find the entry.
4442 // Call the complete stub to scan the whole dictionary.
4444 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
4445 spill_list.Combine(lr);
4446 spill_list.Remove(scratch1);
4447 spill_list.Remove(scratch2);
4449 __ PushCPURegList(spill_list);
4452 DCHECK(!elements.is(x1));
4454 __ Mov(x0, elements);
4456 __ Mov(x0, elements);
4461 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4463 __ Cbz(x0, &not_found);
4464 __ Mov(scratch2, x2); // Move entry index into scratch2.
4465 __ PopCPURegList(spill_list);
4468 __ Bind(&not_found);
4469 __ PopCPURegList(spill_list);
4474 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4478 Register properties,
4480 Register scratch0) {
4481 DCHECK(!AreAliased(receiver, properties, scratch0));
4482 DCHECK(name->IsUniqueName());
4483 // If the names of slots 1 to kProbes - 1 for the hash value are not equal
4484 // to the name, and the kProbes-th slot is unused (its name is the undefined
4485 // value), then the hash table is guaranteed not to contain the property.
4486 // This holds even if some slots represent deleted properties (their names
4487 // are the hole value).
4488 for (int i = 0; i < kInlinedProbes; i++) {
4489 // scratch0 points to properties hash.
4490 // Compute the masked index: (hash + i + i * i) & mask.
4491 Register index = scratch0;
4492 // Capacity is smi 2^n.
4493 __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
4494 __ Sub(index, index, 1);
4495 __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
4497 // Scale the index by multiplying by the entry size.
4498 DCHECK(NameDictionary::kEntrySize == 3);
4499 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
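// Note: unlike the positive lookup, the name is known when this stub is
// generated, so name->Hash() + NameDictionary::GetProbeOffset(i) folds into a
// compile-time immediate and only the masking and the *3 scaling happen at
// run time.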
4501 Register entity_name = scratch0;
4502 // Having undefined at this place means the name is not contained.
4503 Register tmp = index;
4504 __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
4505 __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4507 __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
4509 // Stop if found the property.
4510 __ Cmp(entity_name, Operand(name));
4514 __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
4516 // Check if the entry name is not a unique name.
4517 __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4518 __ Ldrb(entity_name,
4519 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4520 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
4524 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
4525 spill_list.Combine(lr);
4526 spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
4528 __ PushCPURegList(spill_list);
4530 __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4531 __ Mov(x1, Operand(name));
4532 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
4534 // Move stub return value to scratch0. Note that scratch0 is not included in
4535 // spill_list and won't be clobbered by PopCPURegList.
4536 __ Mov(scratch0, x0);
4537 __ PopCPURegList(spill_list);
4539 __ Cbz(scratch0, done);
4544 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4545 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4546 // we cannot call anything that could cause a GC from this stub.
4548 // Arguments are in x0 and x1:
4549 // x0: property dictionary.
4550 // x1: the name of the property we are looking for.
4552 // Return value is in x0 and is zero if the lookup failed, non-zero otherwise.
4553 // If the lookup is successful, x2 will contain the index of the entry.
4555 Register result = x0;
4556 Register dictionary = x0;
4558 Register index = x2;
4561 Register undefined = x5;
4562 Register entry_key = x6;
4564 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4566 __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
4567 __ Sub(mask, mask, 1);
4569 __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4570 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4572 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4573 // Compute the masked index: (hash + i + i * i) & mask.
4574 // Capacity is smi 2^n.
4576 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4577 // the hash in a separate instruction. The value hash + i + i * i is right
4578 // shifted in the following And instruction.
4579 DCHECK(NameDictionary::GetProbeOffset(i) <
4580 1 << (32 - Name::kHashFieldOffset));
4582 NameDictionary::GetProbeOffset(i) << Name::kHashShift);
4584 __ Mov(index, hash);
4586 __ And(index, mask, Operand(index, LSR, Name::kHashShift));
4588 // Scale the index by multiplying by the entry size.
4589 DCHECK(NameDictionary::kEntrySize == 3);
4590 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
4592 __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
4593 __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
4595 // Having undefined at this place means the name is not contained.
4596 __ Cmp(entry_key, undefined);
4597 __ B(eq, &not_in_dictionary);
4599 // Stop if found the property.
4600 __ Cmp(entry_key, key);
4601 __ B(eq, &in_dictionary);
4603 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
4604 // Check if the entry name is not a unique name.
4605 __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4606 __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4607 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
4611 __ Bind(&maybe_in_dictionary);
4612 // If we are doing negative lookup then probing failure should be
4613 // treated as a lookup success. For positive lookup, probing failure
4614 // should be treated as lookup failure.
4615 if (mode() == POSITIVE_LOOKUP) {
4620 __ Bind(&in_dictionary);
4624 __ Bind(&not_in_dictionary);
4631 static void CreateArrayDispatch(MacroAssembler* masm,
4632 AllocationSiteOverrideMode mode) {
4633 ASM_LOCATION("CreateArrayDispatch");
4634 if (mode == DISABLE_ALLOCATION_SITES) {
4635 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4636 __ TailCallStub(&stub);
4638 } else if (mode == DONT_OVERRIDE) {
4641 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
4642 for (int i = 0; i <= last_index; ++i) {
4644 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
4645 // TODO(jbramley): Is this the best way to handle this? Can we make the
4646 // tail calls conditional, rather than hopping over each one?
4647 __ CompareAndBranch(kind, candidate_kind, ne, &next);
4648 T stub(masm->isolate(), candidate_kind);
4649 __ TailCallStub(&stub);
4653 // If we reached this point there is a problem.
4654 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4662 // TODO(jbramley): If this needs to be a special case, make it a proper template
4663 // specialization, and not a separate function.
4664 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4665 AllocationSiteOverrideMode mode) {
4666 ASM_LOCATION("CreateArrayDispatchOneArgument");
4668 // x1 - constructor?
4669 // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4670 // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4671 // sp[0] - last argument
4673 Register allocation_site = x2;
4676 Label normal_sequence;
4677 if (mode == DONT_OVERRIDE) {
4678 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4679 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4680 STATIC_ASSERT(FAST_ELEMENTS == 2);
4681 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4682 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
4683 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4685 // Is the low bit set? If so, the array is holey.
4686 __ Tbnz(kind, 0, &normal_sequence);
4689 // Look at the last argument.
4690 // TODO(jbramley): What does a 0 argument represent?
4692 __ Cbz(x10, &normal_sequence);
4694 if (mode == DISABLE_ALLOCATION_SITES) {
4695 ElementsKind initial = GetInitialFastElementsKind();
4696 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4698 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4700 DISABLE_ALLOCATION_SITES);
4701 __ TailCallStub(&stub_holey);
4703 __ Bind(&normal_sequence);
4704 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4706 DISABLE_ALLOCATION_SITES);
4707 __ TailCallStub(&stub);
4708 } else if (mode == DONT_OVERRIDE) {
4709 // We are going to create a holey array, but our kind is non-holey.
4710 // Fix kind and retry (only if we have an allocation site in the slot).
4711 __ Orr(kind, kind, 1);
4713 if (FLAG_debug_code) {
4714 __ Ldr(x10, FieldMemOperand(allocation_site, 0));
4715 __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
4717 __ Assert(eq, kExpectedAllocationSite);
4720 // Save the resulting elements kind in type info. We can't just store 'kind'
4721 // in the AllocationSite::transition_info field because elements kind is
4722 // restricted to a portion of the field; upper bits need to be left alone.
4723 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4724 __ Ldr(x11, FieldMemOperand(allocation_site,
4725 AllocationSite::kTransitionInfoOffset));
4726 __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
4727 __ Str(x11, FieldMemOperand(allocation_site,
4728 AllocationSite::kTransitionInfoOffset));
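// Note: packed and holey variants of a fast kind differ by
// kFastElementsKindPackedToHoley (see the STATIC_ASSERTs above), so adding the
// Smi-tagged delta to transition_info flips the stored kind to its holey
// counterpart while leaving the unrelated upper bits of the field alone.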
4730 __ Bind(&normal_sequence);
4732 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
4733 for (int i = 0; i <= last_index; ++i) {
4735 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
4736 __ CompareAndBranch(kind, candidate_kind, ne, &next);
4737 ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind);
4738 __ TailCallStub(&stub);
4742 // If we reached this point there is a problem.
4743 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4751 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4752 int to_index = GetSequenceIndexFromFastElementsKind(
4753 TERMINAL_FAST_ELEMENTS_KIND);
4754 for (int i = 0; i <= to_index; ++i) {
4755 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4756 T stub(isolate, kind);
4758 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4759 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4766 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4767 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4769 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4771 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4776 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4778 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4779 for (int i = 0; i < 2; i++) {
4780 // For internal arrays we only need a few things
4781 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4783 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4785 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4791 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4792 MacroAssembler* masm,
4793 AllocationSiteOverrideMode mode) {
4795 if (argument_count() == ANY) {
4796 Label zero_case, n_case;
4797 __ Cbz(argc, &zero_case);
4802 CreateArrayDispatchOneArgument(masm, mode);
4804 __ Bind(&zero_case);
4806 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4810 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4812 } else if (argument_count() == NONE) {
4813 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4814 } else if (argument_count() == ONE) {
4815 CreateArrayDispatchOneArgument(masm, mode);
4816 } else if (argument_count() == MORE_THAN_ONE) {
4817 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4824 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4825 ASM_LOCATION("ArrayConstructorStub::Generate");
4826 // ----------- S t a t e -------------
4827 // -- x0 : argc (only if argument_count() == ANY)
4828 // -- x1 : constructor
4829 // -- x2 : AllocationSite or undefined
4830 // -- sp[0] : return address
4831 // -- sp[4] : last argument
4832 // -----------------------------------
4833 Register constructor = x1;
4834 Register allocation_site = x2;
4836 if (FLAG_debug_code) {
4837 // The array construct code is only set for the global and natives
4838 // builtin Array functions which always have maps.
4840 Label unexpected_map, map_ok;
4841 // Initial map for the builtin Array function should be a map.
4842 __ Ldr(x10, FieldMemOperand(constructor,
4843 JSFunction::kPrototypeOrInitialMapOffset));
4844 // Will both indicate a NULL and a Smi.
4845 __ JumpIfSmi(x10, &unexpected_map);
4846 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
4847 __ Bind(&unexpected_map);
4848 __ Abort(kUnexpectedInitialMapForArrayFunction);
4851 // We should either have undefined in the allocation_site register or a
4852 // valid AllocationSite.
4853 __ AssertUndefinedOrAllocationSite(allocation_site, x10);
4858 // Get the elements kind and case on that.
4859 __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
4862 UntagSmiFieldMemOperand(allocation_site,
4863 AllocationSite::kTransitionInfoOffset));
4864 __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
4865 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4868 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4872 void InternalArrayConstructorStub::GenerateCase(
4873 MacroAssembler* masm, ElementsKind kind) {
4874 Label zero_case, n_case;
4877 __ Cbz(argc, &zero_case);
4878 __ CompareAndBranch(argc, 1, ne, &n_case);
4881 if (IsFastPackedElementsKind(kind)) {
4884 // We might need to create a holey array; look at the first argument.
4886 __ Cbz(x10, &packed_case);
4888 InternalArraySingleArgumentConstructorStub
4889 stub1_holey(isolate(), GetHoleyElementsKind(kind));
4890 __ TailCallStub(&stub1_holey);
4892 __ Bind(&packed_case);
4894 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4895 __ TailCallStub(&stub1);
4897 __ Bind(&zero_case);
4899 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4900 __ TailCallStub(&stub0);
4904 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4905 __ TailCallStub(&stubN);
4909 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4910 // ----------- S t a t e -------------
4912 // -- x1 : constructor
4913 // -- sp[0] : return address
4914 // -- sp[4] : last argument
4915 // -----------------------------------
4917 Register constructor = x1;
4919 if (FLAG_debug_code) {
4920 // The array construct code is only set for the global and natives
4921 // builtin Array functions which always have maps.
4923 Label unexpected_map, map_ok;
4924 // Initial map for the builtin Array function should be a map.
4925 __ Ldr(x10, FieldMemOperand(constructor,
4926 JSFunction::kPrototypeOrInitialMapOffset));
4927 // Will both indicate a NULL and a Smi.
4928 __ JumpIfSmi(x10, &unexpected_map);
4929 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
4930 __ Bind(&unexpected_map);
4931 __ Abort(kUnexpectedInitialMapForArrayFunction);
4936 // Figure out the right elements kind
4937 __ Ldr(x10, FieldMemOperand(constructor,
4938 JSFunction::kPrototypeOrInitialMapOffset));
4940 // Retrieve elements_kind from map.
4941 __ LoadElementsKindFromMap(kind, x10);
4943 if (FLAG_debug_code) {
4945 __ Cmp(x3, FAST_ELEMENTS);
4946 __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
4947 __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
4950 Label fast_elements_case;
4951 __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
4952 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4954 __ Bind(&fast_elements_case);
4955 GenerateCase(masm, FAST_ELEMENTS);
4959 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
4960 // ----------- S t a t e -------------
4962 // -- x4 : call_data
4964 // -- x1 : api_function_address
4967 // -- sp[0] : last argument
4969 // -- sp[(argc - 1) * 8] : first argument
4970 // -- sp[argc * 8] : receiver
4971 // -----------------------------------
4973 Register callee = x0;
4974 Register call_data = x4;
4975 Register holder = x2;
4976 Register api_function_address = x1;
4977 Register context = cp;
4979 int argc = this->argc();
4980 bool is_store = this->is_store();
4981 bool call_data_undefined = this->call_data_undefined();
4983 typedef FunctionCallbackArguments FCA;
4985 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
4986 STATIC_ASSERT(FCA::kCalleeIndex == 5);
4987 STATIC_ASSERT(FCA::kDataIndex == 4);
4988 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
4989 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
4990 STATIC_ASSERT(FCA::kIsolateIndex == 1);
4991 STATIC_ASSERT(FCA::kHolderIndex == 0);
4992 STATIC_ASSERT(FCA::kArgsLength == 7);
4994 // FunctionCallbackArguments: context, callee and call data.
4995 __ Push(context, callee, call_data);
4997 // Load context from callee
4998 __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5000 if (!call_data_undefined) {
5001 __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
5003 Register isolate_reg = x5;
5004 __ Mov(isolate_reg, ExternalReference::isolate_address(isolate()));
5006 // FunctionCallbackArguments:
5007 // return value, return value default, isolate, holder.
5008 __ Push(call_data, call_data, isolate_reg, holder);
5010 // Prepare arguments.
5012 __ Mov(args, masm->StackPointer());
5014 // Allocate the v8::Arguments structure in the arguments' space, since it's
5015 // not controlled by GC.
5016 const int kApiStackSpace = 4;
5018 // Allocate space so that CallApiFunctionAndReturn can store some scratch
5019 // registers on the stack.
5020 const int kCallApiFunctionSpillSpace = 4;
5022 FrameScope frame_scope(masm, StackFrame::MANUAL);
5023 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
5025 DCHECK(!AreAliased(x0, api_function_address));
5026 // x0 = FunctionCallbackInfo&
5027 // Arguments is after the return address.
5028 __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
5029 // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
5030 __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
5031 __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
5032 // FunctionCallbackInfo::length_ = argc and
5033 // FunctionCallbackInfo::is_construct_call = 0
5035 __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
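// Sketch of the FunctionCallbackInfo fields written above, assuming the
// conventional field order implicit_args_, values_, length_,
// is_construct_call_ (comments only):
//   [x0 + 0 * kPointerSize] implicit_args_     = args
//   [x0 + 1 * kPointerSize] values_            = args + (FCA::kArgsLength - 1 + argc) * kPointerSize
//   [x0 + 2 * kPointerSize] length_            = argc
//   [x0 + 3 * kPointerSize] is_construct_call_ = 0
// which is why kApiStackSpace reserves four pointers in the exit frame.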
5037 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
5038 ExternalReference thunk_ref =
5039 ExternalReference::invoke_function_callback(isolate());
5041 AllowExternalCallThatCantCauseGC scope(masm);
5042 MemOperand context_restore_operand(
5043 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5044 // Stores return the first js argument
5045 int return_value_offset = 0;
5047 return_value_offset = 2 + FCA::kArgsLength;
5049 return_value_offset = 2 + FCA::kReturnValueOffset;
5051 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5053 const int spill_offset = 1 + kApiStackSpace;
5054 __ CallApiFunctionAndReturn(api_function_address,
5058 return_value_operand,
5059 &context_restore_operand);
5063 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5064 // ----------- S t a t e -------------
5066 // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object
5068 // -- x2 : api_function_address
5069 // -----------------------------------
5071 Register api_function_address = ApiGetterDescriptor::function_address();
5072 DCHECK(api_function_address.is(x2));
5074 __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
5075 __ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
5077 const int kApiStackSpace = 1;
5079 // Allocate space so that CallApiFunctionAndReturn can store some scratch
5080 // registers on the stack.
5081 const int kCallApiFunctionSpillSpace = 4;
5083 FrameScope frame_scope(masm, StackFrame::MANUAL);
5084 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
5086 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5087 // x1 (internal::Object** args_) as the data.
5088 __ Poke(x1, 1 * kPointerSize);
5089 __ Add(x1, masm->StackPointer(), 1 * kPointerSize); // x1 = AccessorInfo&
5091 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5093 ExternalReference thunk_ref =
5094 ExternalReference::invoke_accessor_getter_callback(isolate());
5096 const int spill_offset = 1 + kApiStackSpace;
5097 __ CallApiFunctionAndReturn(api_function_address,
5101 MemOperand(fp, 6 * kPointerSize),
5108 } } // namespace v8::internal
5110 #endif // V8_TARGET_ARCH_ARM64