1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #if V8_TARGET_ARCH_ARM64
9 #include "src/bootstrapper.h"
10 #include "src/code-stubs.h"
11 #include "src/codegen.h"
12 #include "src/ic/handler-compiler.h"
13 #include "src/ic/ic.h"
14 #include "src/isolate.h"
15 #include "src/jsregexp.h"
16 #include "src/regexp-macro-assembler.h"
17 #include "src/runtime/runtime.h"
23 static void InitializeArrayConstructorDescriptor(
24 Isolate* isolate, CodeStubDescriptor* descriptor,
25 int constant_stack_parameter_count) {
28 // x2: allocation site with elements kind
29 // x0: number of arguments to the constructor function
30 Address deopt_handler = Runtime::FunctionForId(
31 Runtime::kArrayConstructor)->entry;
33 if (constant_stack_parameter_count == 0) {
34 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
35 JS_FUNCTION_STUB_MODE);
36 } else {
37 descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
38 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
43 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
44 CodeStubDescriptor* descriptor) {
45 InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
49 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
50 CodeStubDescriptor* descriptor) {
51 InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
55 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
56 CodeStubDescriptor* descriptor) {
57 InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
61 static void InitializeInternalArrayConstructorDescriptor(
62 Isolate* isolate, CodeStubDescriptor* descriptor,
63 int constant_stack_parameter_count) {
64 Address deopt_handler = Runtime::FunctionForId(
65 Runtime::kInternalArrayConstructor)->entry;
67 if (constant_stack_parameter_count == 0) {
68 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
69 JS_FUNCTION_STUB_MODE);
70 } else {
71 descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
72 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
77 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
78 CodeStubDescriptor* descriptor) {
79 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
83 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
84 CodeStubDescriptor* descriptor) {
85 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
89 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
90 CodeStubDescriptor* descriptor) {
91 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
95 #define __ ACCESS_MASM(masm)
98 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
99 ExternalReference miss) {
100 // Update the static counter each time a new code stub is generated.
101 isolate()->counters()->code_stubs()->Increment();
103 CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
104 int param_count = descriptor.GetEnvironmentParameterCount();
106 // Call the runtime system in a fresh internal frame.
107 FrameScope scope(masm, StackFrame::INTERNAL);
108 DCHECK((param_count == 0) ||
109 x0.Is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
112 MacroAssembler::PushPopQueue queue(masm);
113 for (int i = 0; i < param_count; ++i) {
114 queue.Queue(descriptor.GetEnvironmentParameterRegister(i));
115 }
116 queue.PushQueued();
118 __ CallExternalReference(miss, param_count);
125 void DoubleToIStub::Generate(MacroAssembler* masm) {
127 Register input = source();
128 Register result = destination();
129 DCHECK(is_truncating());
131 DCHECK(result.Is64Bits());
132 DCHECK(jssp.Is(masm->StackPointer()));
134 int double_offset = offset();
136 DoubleRegister double_scratch = d0; // only used if !skip_fastpath()
137 Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
138 Register scratch2 =
139 GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
141 __ Push(scratch1, scratch2);
142 // Account for saved regs if input is jssp.
143 if (input.is(jssp)) double_offset += 2 * kPointerSize;
145 if (!skip_fastpath()) {
146 __ Push(double_scratch);
147 if (input.is(jssp)) double_offset += 1 * kDoubleSize;
148 __ Ldr(double_scratch, MemOperand(input, double_offset));
149 // Try to convert with a FPU convert instruction. This handles all
150 // non-saturating cases.
151 __ TryConvertDoubleToInt64(result, double_scratch, &done);
152 __ Fmov(result, double_scratch);
154 __ Ldr(result, MemOperand(input, double_offset));
157 // If we reach here we need to manually convert the input to an int32.
159 // Extract the exponent.
160 Register exponent = scratch1;
161 __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
162 HeapNumber::kExponentBits);
164 // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
165 // the mantissa gets shifted completely out of the int32_t result.
166 __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
167 __ CzeroX(result, ge);
170 // The Fcvtzs sequence handles all cases except where the conversion causes
171 // signed overflow in the int64_t target. Since we've already handled
172 // exponents >= 84, we can guarantee that 63 <= exponent < 84.
174 if (masm->emit_debug_code()) {
175 __ Cmp(exponent, HeapNumber::kExponentBias + 63);
176 // Exponents less than this should have been handled by the Fcvt case.
177 __ Check(ge, kUnexpectedValue);
180 // Isolate the mantissa bits, and set the implicit '1'.
181 Register mantissa = scratch2;
182 __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
183 __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
185 // Negate the mantissa if necessary.
186 __ Tst(result, kXSignMask);
187 __ Cneg(mantissa, mantissa, ne);
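// Note: the Tst/Cneg pair above negates the mantissa when the sign bit of the
// original double (still held in the result register) is set.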
189 // Shift the mantissa bits in the correct place. We know that we have to shift
190 // it left here, because exponent >= 63 >= kMantissaBits.
191 __ Sub(exponent, exponent,
192 HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
193 __ Lsl(result, mantissa, exponent);
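// Only the low 32 bits of the result are significant after this shift: they
// equal the double's value modulo 2^32, which is exactly what a truncating
// ToInt32 conversion requires.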
196 if (!skip_fastpath()) {
197 __ Pop(double_scratch);
199 __ Pop(scratch2, scratch1);
204 // See call site for description.
205 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
209 FPRegister double_scratch,
212 DCHECK(!AreAliased(left, right, scratch));
213 Label not_identical, return_equal, heap_number;
214 Register result = x0;
217 __ B(ne, ¬_identical);
219 // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
220 // so we do the second best thing - test it ourselves.
221 // They are both equal and they are not both Smis so both of them are not
222 // Smis. If it's not a heap number, then return equal.
223 if ((cond == lt) || (cond == gt)) {
224 __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
226 } else if (cond == eq) {
227 __ JumpIfHeapNumber(right, &heap_number);
229 Register right_type = scratch;
230 __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
232 // Comparing JS objects with <=, >= is complicated.
233 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
235 // Normally here we fall through to return_equal, but undefined is
236 // special: (undefined == undefined) == true, but
237 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
238 if ((cond == le) || (cond == ge)) {
239 __ Cmp(right_type, ODDBALL_TYPE);
240 __ B(ne, &return_equal);
241 __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
243 // undefined <= undefined should fail.
244 __ Mov(result, GREATER);
246 // undefined >= undefined should fail.
247 __ Mov(result, LESS);
253 __ Bind(&return_equal);
255 __ Mov(result, GREATER); // Things aren't less than themselves.
256 } else if (cond == gt) {
257 __ Mov(result, LESS); // Things aren't greater than themselves.
259 __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves.
263 // Cases lt and gt have been handled earlier, and case ne is never seen, as
264 // it is handled in the parser (see Parser::ParseBinaryExpression). We are
265 // only concerned with cases ge, le and eq here.
266 if ((cond != lt) && (cond != gt)) {
267 DCHECK((cond == ge) || (cond == le) || (cond == eq));
268 __ Bind(&heap_number);
269 // Left and right are identical pointers to a heap number object. Return
270 // non-equal if the heap number is a NaN, and equal otherwise. Comparing
271 // the number to itself will set the overflow flag iff the number is NaN.
272 __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
273 __ Fcmp(double_scratch, double_scratch);
274 __ B(vc, &return_equal); // Not NaN, so treat as normal heap number.
277 __ Mov(result, GREATER);
279 __ Mov(result, LESS);
284 // No fall through here.
285 if (FLAG_debug_code) {
289 __ Bind(¬_identical);
293 // See call site for description.
294 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
300 DCHECK(!AreAliased(left, right, left_type, right_type, scratch));
302 if (masm->emit_debug_code()) {
303 // We assume that the arguments are not identical.
305 __ Assert(ne, kExpectedNonIdenticalObjects);
308 // If either operand is a JS object or an oddball value, then they are not
309 // equal since their pointers are different.
310 // There is no test for undetectability in strict equality.
311 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
312 Label right_non_object;
314 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
315 __ B(lt, &right_non_object);
317 // Return non-zero - x0 already contains a non-zero pointer.
318 DCHECK(left.is(x0) || right.is(x0));
319 Label return_not_equal;
320 __ Bind(&return_not_equal);
323 __ Bind(&right_non_object);
325 // Check for oddballs: true, false, null, undefined.
326 __ Cmp(right_type, ODDBALL_TYPE);
328 // If right is not ODDBALL, test left. Otherwise, set eq condition.
329 __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
331 // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
332 // Otherwise, right or left is ODDBALL, so set a ge condition.
333 __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
335 __ B(ge, &return_not_equal);
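// The Cmp/Ccmp chain above folds both checks into a single branch: if either
// operand is an oddball, or left is a JS object (right was handled earlier),
// the two distinct heap objects cannot be equal.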
337 // Internalized strings are unique, so they can only be equal if they are the
338 // same object. We have already tested that case, so if left and right are
339 // both internalized strings, they cannot be equal.
340 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
341 __ Orr(scratch, left_type, right_type);
342 __ TestAndBranchIfAllClear(
343 scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
347 // See call site for description.
348 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
355 DCHECK(!AreAliased(left_d, right_d));
356 DCHECK((left.is(x0) && right.is(x1)) ||
357 (right.is(x0) && left.is(x1)));
358 Register result = x0;
360 Label right_is_smi, done;
361 __ JumpIfSmi(right, &right_is_smi);
363 // Left is the smi. Check whether right is a heap number.
365 // If right is not a number and left is a smi, then strict equality cannot
366 // succeed. Return non-equal.
367 Label is_heap_number;
368 __ JumpIfHeapNumber(right, &is_heap_number);
369 // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
370 if (!right.is(result)) {
371 __ Mov(result, NOT_EQUAL);
374 __ Bind(&is_heap_number);
376 // Smi compared non-strictly with a non-smi, non-heap-number. Call the
377 // runtime.
378 __ JumpIfNotHeapNumber(right, slow);
381 // Left is the smi. Right is a heap number. Load right value into right_d, and
382 // convert left smi into double in left_d.
383 __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
384 __ SmiUntagToDouble(left_d, left);
387 __ Bind(&right_is_smi);
388 // Right is a smi. Check whether the non-smi left is a heap number.
390 // If left is not a number and right is a smi then strict equality cannot
391 // succeed. Return non-equal.
392 Label is_heap_number;
393 __ JumpIfHeapNumber(left, &is_heap_number);
394 // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
395 if (!left.is(result)) {
396 __ Mov(result, NOT_EQUAL);
399 __ Bind(&is_heap_number);
401 // Smi compared non-strictly with a non-smi, non-heap-number. Call the
402 // runtime.
403 __ JumpIfNotHeapNumber(left, slow);
406 // Right is the smi. Left is a heap number. Load left value into left_d, and
407 // convert right smi into double in right_d.
408 __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
409 __ SmiUntagToDouble(right_d, right);
411 // Fall through to both_loaded_as_doubles.
416 // Fast negative check for internalized-to-internalized equality.
417 // See call site for description.
418 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
425 Label* possible_strings,
426 Label* not_both_strings) {
427 DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type));
428 Register result = x0;
431 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
432 // TODO(all): reexamine this branch sequence for optimisation wrt branch
434 __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
435 __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
436 __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
437 __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
439 // Both are internalized. We already checked that they weren't the same
440 // pointer, so they are not equal.
441 __ Mov(result, NOT_EQUAL);
444 __ Bind(&object_test);
446 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
448 // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
449 // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
450 __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
452 __ B(lt, not_both_strings);
454 // If both objects are undetectable, they are equal. Otherwise, they are not
455 // equal, since they are different objects and an object is not equal to
458 // Returning here, so we can corrupt right_type and left_type.
459 Register right_bitfield = right_type;
460 Register left_bitfield = left_type;
461 __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
462 __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
463 __ And(result, right_bitfield, left_bitfield);
464 __ And(result, result, 1 << Map::kIsUndetectable);
465 __ Eor(result, result, 1 << Map::kIsUndetectable);
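// After the And/Eor sequence, result is 0 (EQUAL) only when both maps have
// the undetectable bit set; otherwise it is non-zero (not equal).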
470 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
471 CompareICState::State expected,
474 if (expected == CompareICState::SMI) {
475 __ JumpIfNotSmi(input, fail);
476 } else if (expected == CompareICState::NUMBER) {
477 __ JumpIfSmi(input, &ok);
478 __ JumpIfNotHeapNumber(input, fail);
480 // We could be strict about internalized/non-internalized here, but as long as
481 // hydrogen doesn't care, the stub doesn't have to care either.
486 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
489 Register result = x0;
490 Condition cond = GetCondition();
493 CompareICStub_CheckInputType(masm, lhs, left(), &miss);
494 CompareICStub_CheckInputType(masm, rhs, right(), &miss);
496 Label slow; // Call builtin.
497 Label not_smis, both_loaded_as_doubles;
498 Label not_two_smis, smi_done;
499 __ JumpIfEitherNotSmi(lhs, rhs, ¬_two_smis);
500 __ SmiUntag(lhs);
501 __ Sub(result, lhs, Operand::UntagSmi(rhs));
502 __ Ret();
504 __ Bind(¬_two_smis);
506 // NOTICE! This code is only reached after a smi-fast-case check, so it is
507 // certain that at least one operand isn't a smi.
509 // Handle the case where the objects are identical. Either returns the answer
510 // or goes to slow. Only falls through if the objects were not identical.
511 EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
513 // If either is a smi (we know that at least one is not a smi), then they can
514 // only be strictly equal if the other is a HeapNumber.
515 __ JumpIfBothNotSmi(lhs, rhs, ¬_smis);
517 // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
518 // can:
519 // 1) Return the answer.
520 // 2) Branch to the slow case.
521 // 3) Fall through to both_loaded_as_doubles.
522 // In case 3, we have found out that we were dealing with a number-number
523 // comparison. The double values of the numbers have been loaded, right into
524 // rhs_d, left into lhs_d.
525 FPRegister rhs_d = d0;
526 FPRegister lhs_d = d1;
527 EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
529 __ Bind(&both_loaded_as_doubles);
530 // The arguments have been converted to doubles and stored in rhs_d and
531 // lhs_d.
532 Label nan;
533 __ Fcmp(lhs_d, rhs_d);
534 __ B(vs, &nan); // Overflow flag set if either is NaN.
535 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
536 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
537 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
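// The Cset/Csinv pair maps the Fcmp flags directly onto the LESS (-1),
// EQUAL (0) and GREATER (1) constants without branching.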
541 // Left and/or right is a NaN. Load the result register with whatever makes
542 // the comparison fail, since comparisons with NaN always fail (except ne,
543 // which is filtered out at a higher level.)
545 if ((cond == lt) || (cond == le)) {
546 __ Mov(result, GREATER);
548 __ Mov(result, LESS);
553 // At this point we know we are dealing with two different objects, and
554 // neither of them is a smi. The objects are in rhs_ and lhs_.
556 // Load the maps and types of the objects.
557 Register rhs_map = x10;
558 Register rhs_type = x11;
559 Register lhs_map = x12;
560 Register lhs_type = x13;
561 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
562 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
563 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
564 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
567 // This emits a non-equal return sequence for some object types, or falls
568 // through if it was not lucky.
569 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
572 Label check_for_internalized_strings;
573 Label flat_string_check;
574 // Check for heap number comparison. Branch to earlier double comparison code
575 // if they are heap numbers, otherwise, branch to internalized string check.
576 __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
577 __ B(ne, &check_for_internalized_strings);
578 __ Cmp(lhs_map, rhs_map);
580 // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
581 // string check.
582 __ B(ne, &flat_string_check);
584 // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
585 // comparison code.
586 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
587 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
588 __ B(&both_loaded_as_doubles);
590 __ Bind(&check_for_internalized_strings);
591 // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
592 // of internalized strings.
593 if ((cond == eq) && !strict()) {
594 // Returns an answer for two internalized strings or two detectable objects.
595 // Otherwise branches to the string case or not both strings case.
596 EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
598 &flat_string_check, &slow);
601 // Check for both being sequential one-byte strings,
602 // and inline if that is the case.
603 __ Bind(&flat_string_check);
604 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x14,
607 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
610 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
613 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
617 // Never fall through to here.
618 if (FLAG_debug_code) {
625 // Figure out which native to call and setup the arguments.
626 Builtins::JavaScript native;
627 if (cond == eq) {
628 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
629 } else {
630 native = Builtins::COMPARE;
631 int ncr; // NaN compare result
632 if ((cond == lt) || (cond == le)) {
633 ncr = GREATER;
634 } else {
635 DCHECK((cond == gt) || (cond == ge)); // remaining cases
636 ncr = LESS;
637 }
638 __ Mov(x10, Smi::FromInt(ncr));
642 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
643 // tagged as a small integer.
644 __ InvokeBuiltin(native, JUMP_FUNCTION);
651 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
652 CPURegList saved_regs = kCallerSaved;
653 CPURegList saved_fp_regs = kCallerSavedFP;
655 // We don't allow a GC during a store buffer overflow so there is no need to
656 // store the registers in any particular way, but we do have to store and
657 // restore them.
659 // We don't care if MacroAssembler scratch registers are corrupted.
660 saved_regs.Remove(*(masm->TmpList()));
661 saved_fp_regs.Remove(*(masm->FPTmpList()));
663 __ PushCPURegList(saved_regs);
664 if (save_doubles()) {
665 __ PushCPURegList(saved_fp_regs);
668 AllowExternalCallThatCantCauseGC scope(masm);
669 __ Mov(x0, ExternalReference::isolate_address(isolate()));
670 __ CallCFunction(
671 ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);
673 if (save_doubles()) {
674 __ PopCPURegList(saved_fp_regs);
676 __ PopCPURegList(saved_regs);
681 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
683 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
685 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
690 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
691 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
692 UseScratchRegisterScope temps(masm);
693 Register saved_lr = temps.UnsafeAcquire(to_be_pushed_lr());
694 Register return_address = temps.AcquireX();
695 __ Mov(return_address, lr);
696 // Restore lr with the value it had before the call to this stub (the value
697 // which must be pushed).
698 __ Mov(lr, saved_lr);
699 __ PushSafepointRegisters();
700 __ Ret(return_address);
704 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
705 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
706 UseScratchRegisterScope temps(masm);
707 Register return_address = temps.AcquireX();
708 // Preserve the return address (lr will be clobbered by the pop).
709 __ Mov(return_address, lr);
710 __ PopSafepointRegisters();
711 __ Ret(return_address);
715 void MathPowStub::Generate(MacroAssembler* masm) {
717 // jssp[0]: Exponent (as a tagged value).
718 // jssp[1]: Base (as a tagged value).
720 // The (tagged) result will be returned in x0, as a heap number.
722 Register result_tagged = x0;
723 Register base_tagged = x10;
724 Register exponent_tagged = MathPowTaggedDescriptor::exponent();
725 DCHECK(exponent_tagged.is(x11));
726 Register exponent_integer = MathPowIntegerDescriptor::exponent();
727 DCHECK(exponent_integer.is(x12));
728 Register scratch1 = x14;
729 Register scratch0 = x15;
730 Register saved_lr = x19;
731 FPRegister result_double = d0;
732 FPRegister base_double = d0;
733 FPRegister exponent_double = d1;
734 FPRegister base_double_copy = d2;
735 FPRegister scratch1_double = d6;
736 FPRegister scratch0_double = d7;
738 // A fast-path for integer exponents.
739 Label exponent_is_smi, exponent_is_integer;
740 // Bail out to runtime.
741 Label call_runtime;
742 // Allocate a heap number for the result, and return it.
743 Label done;
745 // Unpack the inputs.
746 if (exponent_type() == ON_STACK) {
748 Label unpack_exponent;
750 __ Pop(exponent_tagged, base_tagged);
752 __ JumpIfSmi(base_tagged, &base_is_smi);
753 __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
754 // base_tagged is a heap number, so load its double value.
755 __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
756 __ B(&unpack_exponent);
757 __ Bind(&base_is_smi);
758 // base_tagged is a SMI, so untag it and convert it to a double.
759 __ SmiUntagToDouble(base_double, base_tagged);
761 __ Bind(&unpack_exponent);
762 // x10 base_tagged The tagged base (input).
763 // x11 exponent_tagged The tagged exponent (input).
764 // d1 base_double The base as a double.
765 __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
766 __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
767 // exponent_tagged is a heap number, so load its double value.
768 __ Ldr(exponent_double,
769 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
770 } else if (exponent_type() == TAGGED) {
771 __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
772 __ Ldr(exponent_double,
773 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
776 // Handle double (heap number) exponents.
777 if (exponent_type() != INTEGER) {
778 // Detect integer exponents stored as doubles and handle those in the
779 // integer fast-path.
780 __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
781 scratch0_double, &exponent_is_integer);
783 if (exponent_type() == ON_STACK) {
784 FPRegister half_double = d3;
785 FPRegister minus_half_double = d4;
786 // Detect square root case. Crankshaft detects constant +/-0.5 at compile
787 // time and uses DoMathPowHalf instead. We then skip this check for
788 // non-constant cases of +/-0.5 as these hardly occur.
790 __ Fmov(minus_half_double, -0.5);
791 __ Fmov(half_double, 0.5);
792 __ Fcmp(minus_half_double, exponent_double);
793 __ Fccmp(half_double, exponent_double, NZFlag, ne);
794 // Condition flags at this point:
795 // 0.5: nZCv // Identified by eq && pl
796 // -0.5: NZcv // Identified by eq && mi
797 // other: ?z?? // Identified by ne
798 __ B(ne, &call_runtime);
800 // The exponent is 0.5 or -0.5.
802 // Given that exponent is known to be either 0.5 or -0.5, the following
803 // special cases could apply (according to ECMA-262 15.8.2.13):
805 // base.isNaN(): The result is NaN.
806 // (base == +INFINITY) || (base == -INFINITY)
807 // exponent == 0.5: The result is +INFINITY.
808 // exponent == -0.5: The result is +0.
809 // (base == +0) || (base == -0)
810 // exponent == 0.5: The result is +0.
811 // exponent == -0.5: The result is +INFINITY.
812 // (base < 0) && base.isFinite(): The result is NaN.
814 // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
815 // where base is -INFINITY or -0.
817 // Add +0 to base. This has no effect other than turning -0 into +0.
818 __ Fadd(base_double, base_double, fp_zero);
819 // The operation -0+0 results in +0 in all cases except where the
820 // FPCR rounding mode is 'round towards minus infinity' (RM). The
821 // ARM64 simulator does not currently simulate FPCR (where the rounding
822 // mode is set), so test the operation with some debug code.
823 if (masm->emit_debug_code()) {
824 UseScratchRegisterScope temps(masm);
825 Register temp = temps.AcquireX();
826 __ Fneg(scratch0_double, fp_zero);
827 // Verify that we correctly generated +0.0 and -0.0.
828 // bits(+0.0) = 0x0000000000000000
829 // bits(-0.0) = 0x8000000000000000
830 __ Fmov(temp, fp_zero);
831 __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
832 __ Fmov(temp, scratch0_double);
833 __ Eor(temp, temp, kDSignMask);
834 __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
835 // Check that -0.0 + 0.0 == +0.0.
836 __ Fadd(scratch0_double, scratch0_double, fp_zero);
837 __ Fmov(temp, scratch0_double);
838 __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
841 // If base is -INFINITY, make it +INFINITY.
842 // * Calculate base - base: All infinities will become NaNs since both
843 // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
844 // * If the result is NaN, calculate abs(base).
845 __ Fsub(scratch0_double, base_double, base_double);
846 __ Fcmp(scratch0_double, 0.0);
847 __ Fabs(scratch1_double, base_double);
848 __ Fcsel(base_double, scratch1_double, base_double, vs);
850 // Calculate the square root of base.
851 __ Fsqrt(result_double, base_double);
852 __ Fcmp(exponent_double, 0.0);
853 __ B(ge, &done); // Finish now for exponents of 0.5.
854 // Find the inverse for exponents of -0.5.
855 __ Fmov(scratch0_double, 1.0);
856 __ Fdiv(result_double, scratch0_double, result_double);
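// result_double now holds 1/sqrt(base), i.e. base^-0.5.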
861 AllowExternalCallThatCantCauseGC scope(masm);
862 __ Mov(saved_lr, lr);
863 __ CallCFunction(
864 ExternalReference::power_double_double_function(isolate()),
865 0, 2);
866 __ Mov(lr, saved_lr);
870 // Handle SMI exponents.
871 __ Bind(&exponent_is_smi);
872 // x10 base_tagged The tagged base (input).
873 // x11 exponent_tagged The tagged exponent (input).
874 // d1 base_double The base as a double.
875 __ SmiUntag(exponent_integer, exponent_tagged);
878 __ Bind(&exponent_is_integer);
879 // x10 base_tagged The tagged base (input).
880 // x11 exponent_tagged The tagged exponent (input).
881 // x12 exponent_integer The exponent as an integer.
882 // d1 base_double The base as a double.
884 // Find abs(exponent). For negative exponents, we can find the inverse later.
885 Register exponent_abs = x13;
886 __ Cmp(exponent_integer, 0);
887 __ Cneg(exponent_abs, exponent_integer, mi);
888 // x13 exponent_abs The value of abs(exponent_integer).
890 // Repeatedly multiply to calculate the power.
892 // For each bit n (exponent_integer{n}) {
893 //   if (exponent_integer{n}) {
894 //     result *= multiplier;        // multiplier == base^(2^n)
895 //   }
896 //   multiplier *= multiplier;
897 //   if (remaining bits in exponent_integer are all zero) {
898 //     break;
899 //   }
900 // }
901 Label power_loop, power_loop_entry, power_loop_exit;
902 __ Fmov(scratch1_double, base_double);
903 __ Fmov(base_double_copy, base_double);
904 __ Fmov(result_double, 1.0);
905 __ B(&power_loop_entry);
907 __ Bind(&power_loop);
908 __ Fmul(scratch1_double, scratch1_double, scratch1_double);
909 __ Lsr(exponent_abs, exponent_abs, 1);
910 __ Cbz(exponent_abs, &power_loop_exit);
912 __ Bind(&power_loop_entry);
913 __ Tbz(exponent_abs, 0, &power_loop);
914 __ Fmul(result_double, result_double, scratch1_double);
915 __ B(&power_loop);
917 __ Bind(&power_loop_exit);
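// The square-and-multiply loop above leaves base^abs(exponent) in
// result_double.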
919 // If the exponent was positive, result_double holds the result.
920 __ Tbz(exponent_integer, kXSignBit, &done);
922 // The exponent was negative, so find the inverse.
923 __ Fmov(scratch0_double, 1.0);
924 __ Fdiv(result_double, scratch0_double, result_double);
925 // ECMA-262 only requires Math.pow to return an 'implementation-dependent
926 // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
927 // to calculate the subnormal value 2^-1074. This method of calculating
928 // negative powers doesn't work because 2^1074 overflows to infinity. To
929 // catch this corner-case, we bail out if the result was 0. (This can only
930 // occur if the divisor is infinity or the base is zero.)
931 __ Fcmp(result_double, 0.0);
934 if (exponent_type() == ON_STACK) {
935 // Bail out to runtime code.
936 __ Bind(&call_runtime);
937 // Put the arguments back on the stack.
938 __ Push(base_tagged, exponent_tagged);
939 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
943 __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
945 DCHECK(result_tagged.is(x0));
946 __ IncrementCounter(
947 isolate()->counters()->math_pow(), 1, scratch0, scratch1);
950 AllowExternalCallThatCantCauseGC scope(masm);
951 __ Mov(saved_lr, lr);
952 __ Fmov(base_double, base_double_copy);
953 __ Scvtf(exponent_double, exponent_integer);
954 __ CallCFunction(
955 ExternalReference::power_double_double_function(isolate()),
956 0, 2);
957 __ Mov(lr, saved_lr);
959 __ IncrementCounter(
960 isolate()->counters()->math_pow(), 1, scratch0, scratch1);
966 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
967 // It is important that the following stubs are generated in this order
968 // because pregenerated stubs can only call other pregenerated stubs.
969 // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
971 CEntryStub::GenerateAheadOfTime(isolate);
972 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
973 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
974 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
975 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
976 CreateWeakCellStub::GenerateAheadOfTime(isolate);
977 BinaryOpICStub::GenerateAheadOfTime(isolate);
978 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
979 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
980 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
984 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
985 StoreRegistersStateStub stub(isolate);
990 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
991 RestoreRegistersStateStub stub(isolate);
996 void CodeStub::GenerateFPStubs(Isolate* isolate) {
997 // Floating-point code doesn't get special handling in ARM64, so there's
998 // nothing to do here.
1003 bool CEntryStub::NeedsImmovableCode() {
1004 // CEntryStub stores the return address on the stack before calling into
1005 // C++ code. In some cases, the VM accesses this address, but it is not used
1006 // when the C++ code returns to the stub because LR holds the return address
1007 // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
1008 // returning to dead code.
1009 // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
1010 // find any comment to confirm this, and I don't hit any crashes whatever
1011 // this function returns. The analysis should be properly confirmed.
1016 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1017 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1019 CEntryStub stub_fp(isolate, 1, kSaveFPRegs);
1024 void CEntryStub::Generate(MacroAssembler* masm) {
1025 // The Abort mechanism relies on CallRuntime, which in turn relies on
1026 // CEntryStub, so until this stub has been generated, we have to use a
1027 // fall-back Abort mechanism.
1029 // Note that this stub must be generated before any use of Abort.
1030 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
1032 ASM_LOCATION("CEntryStub::Generate entry");
1033 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1035 // Register parameters:
1036 // x0: argc (including receiver, untagged)
1039 // The stack on entry holds the arguments and the receiver, with the receiver
1040 // at the highest address:
1042 // jssp[argc-1]: receiver
1043 // jssp[argc-2]: arg[argc-2]
1048 // The arguments are in reverse order, so that arg[argc-2] is actually the
1049 // first argument to the target function and arg[0] is the last.
1050 DCHECK(jssp.Is(__ StackPointer()));
1051 const Register& argc_input = x0;
1052 const Register& target_input = x1;
1054 // Calculate argv, argc and the target address, and store them in
1055 // callee-saved registers so we can retry the call without having to reload
1057 // TODO(jbramley): If the first call attempt succeeds in the common case (as
1058 // it should), then we might be better off putting these parameters directly
1059 // into their argument registers, rather than using callee-saved registers and
1060 // preserving them on the stack.
1061 const Register& argv = x21;
1062 const Register& argc = x22;
1063 const Register& target = x23;
1065 // Derive argv from the stack pointer so that it points to the first argument
1066 // (arg[argc-2]), or just below the receiver in case there are no arguments.
1067 // - Adjust for the arg[] array.
1068 Register temp_argv = x11;
1069 __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
1070 // - Adjust for the receiver.
1071 __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
1073 // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
1075 FrameScope scope(masm, StackFrame::MANUAL);
1076 __ EnterExitFrame(save_doubles(), x10, 3);
1077 DCHECK(csp.Is(__ StackPointer()));
1079 // Poke callee-saved registers into reserved space.
1080 __ Poke(argv, 1 * kPointerSize);
1081 __ Poke(argc, 2 * kPointerSize);
1082 __ Poke(target, 3 * kPointerSize);
1084 // We normally only keep tagged values in callee-saved registers, as they
1085 // could be pushed onto the stack by called stubs and functions, and on the
1086 // stack they can confuse the GC. However, we're only calling C functions
1087 // which can push arbitrary data onto the stack anyway, and so the GC won't
1088 // examine that part of the stack.
1089 __ Mov(argc, argc_input);
1090 __ Mov(target, target_input);
1091 __ Mov(argv, temp_argv);
1095 // x23 : call target
1097 // The stack (on entry) holds the arguments and the receiver, with the
1098 // receiver at the highest address:
1100 // argv[8]: receiver
1101 // argv -> argv[0]: arg[argc-2]
1103 // argv[...]: arg[1]
1104 // argv[...]: arg[0]
1106 // Immediately below (after) this is the exit frame, as constructed by
1107 // EnterExitFrame:
1108 // fp[8]: CallerPC (lr)
1109 // fp -> fp[0]: CallerFP (old fp)
1110 // fp[-8]: Space reserved for SPOffset.
1111 // fp[-16]: CodeObject()
1112 // csp[...]: Saved doubles, if saved_doubles is true.
1113 // csp[32]: Alignment padding, if necessary.
1114 // csp[24]: Preserved x23 (used for target).
1115 // csp[16]: Preserved x22 (used for argc).
1116 // csp[8]: Preserved x21 (used for argv).
1117 // csp -> csp[0]: Space reserved for the return address.
1119 // After a successful call, the exit frame, preserved registers (x21-x23) and
1120 // the arguments (including the receiver) are dropped or popped as
1121 // appropriate. The stub then returns.
1123 // After an unsuccessful call, the exit frame and suchlike are left
1124 // untouched, and the stub throws an exception by jumping to the
1125 // exception_returned label.
1127 DCHECK(csp.Is(__ StackPointer()));
1129 // Prepare AAPCS64 arguments to pass to the builtin.
1132 __ Mov(x2, ExternalReference::isolate_address(isolate()));
1134 Label return_location;
1135 __ Adr(x12, &return_location);
1136 __ Poke(x12, 0);
1138 if (__ emit_debug_code()) {
1139 // Verify that the slot below fp[kSPOffset]-8 points to the return location
1140 // (currently in x12).
1141 UseScratchRegisterScope temps(masm);
1142 Register temp = temps.AcquireX();
1143 __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
1144 __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
1146 __ Check(eq, kReturnAddressNotFoundInFrame);
1149 // Call the builtin.
1151 __ Bind(&return_location);
1153 // x0 result The return code from the call.
1157 const Register& result = x0;
1159 // Check result for exception sentinel.
1160 Label exception_returned;
1161 __ CompareRoot(result, Heap::kExceptionRootIndex);
1162 __ B(eq, &exception_returned);
1164 // The call succeeded, so unwind the stack and return.
1166 // Restore callee-saved registers x21-x23.
1169 __ Peek(argv, 1 * kPointerSize);
1170 __ Peek(argc, 2 * kPointerSize);
1171 __ Peek(target, 3 * kPointerSize);
1173 __ LeaveExitFrame(save_doubles(), x10, true);
1174 DCHECK(jssp.Is(__ StackPointer()));
1175 // Pop or drop the remaining stack slots and return from the stub.
1176 // jssp[24]: Arguments array (of size argc), including receiver.
1177 // jssp[16]: Preserved x23 (used for target).
1178 // jssp[8]: Preserved x22 (used for argc).
1179 // jssp[0]: Preserved x21 (used for argv).
1181 __ AssertFPCRState();
1184 // The stack pointer is still csp if we aren't returning, and the frame
1185 // hasn't changed (except for the return address).
1186 __ SetStackPointer(csp);
1188 // Handling of exception.
1189 __ Bind(&exception_returned);
1191 // Retrieve the pending exception.
1192 ExternalReference pending_exception_address(
1193 Isolate::kPendingExceptionAddress, isolate());
1194 const Register& exception = result;
1195 const Register& exception_address = x11;
1196 __ Mov(exception_address, Operand(pending_exception_address));
1197 __ Ldr(exception, MemOperand(exception_address));
1199 // Clear the pending exception.
1200 __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
1201 __ Str(x10, MemOperand(exception_address));
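// The pending-exception slot is reset to the hole value so that a later call
// does not observe a stale exception.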
1203 // x0 exception The exception descriptor.
1208 // Special handling of termination exceptions, which are uncatchable by
1210 Label throw_termination_exception;
1211 __ Cmp(exception, Operand(isolate()->factory()->termination_exception()));
1212 __ B(eq, &throw_termination_exception);
1214 // We didn't execute a return case, so the stack frame hasn't been updated
1215 // (except for the return address slot). However, we don't need to initialize
1216 // jssp because the throw method will immediately overwrite it when it
1217 // unwinds the stack.
1218 __ SetStackPointer(jssp);
1220 ASM_LOCATION("Throw normal");
1224 __ Throw(x0, x10, x11, x12, x13);
1226 __ Bind(&throw_termination_exception);
1227 ASM_LOCATION("Throw termination");
1231 __ ThrowUncatchable(x0, x10, x11, x12, x13);
1235 // This is the entry point from C++. 5 arguments are provided in x0-x4.
1236 // See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
1245 void JSEntryStub::Generate(MacroAssembler* masm) {
1246 DCHECK(jssp.Is(__ StackPointer()));
1247 Register code_entry = x0;
1249 // Enable instruction instrumentation. This only works on the simulator, and
1250 // will have no effect on the model or real hardware.
1251 __ EnableInstrumentation();
1253 Label invoke, handler_entry, exit;
1255 // Push callee-saved registers and synchronize the system stack pointer (csp)
1256 // and the JavaScript stack pointer (jssp).
1258 // We must not write to jssp until after the PushCalleeSavedRegisters()
1259 // call, since jssp is itself a callee-saved register.
1260 __ SetStackPointer(csp);
1261 __ PushCalleeSavedRegisters();
1263 __ SetStackPointer(jssp);
1265 // Configure the FPCR. We don't restore it, so this is technically not allowed
1266 // according to AAPCS64. However, we only set default-NaN mode and this will
1267 // be harmless for most C code. Also, it works for ARM.
1270 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1272 // Set up the reserved register for 0.0.
1273 __ Fmov(fp_zero, 0.0);
1275 // Build an entry frame (see layout below).
1276 int marker = type();
1277 int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
1278 __ Mov(x13, bad_frame_pointer);
1279 __ Mov(x12, Smi::FromInt(marker));
1280 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1281 __ Ldr(x10, MemOperand(x11));
1283 __ Push(x13, xzr, x12, x10);
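// Pushed from highest to lowest address: the bad frame pointer, a zero word,
// the frame-type marker (as a smi) and the saved C entry FP.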
1285 __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
1287 // Push the JS entry frame marker. Also set js_entry_sp if this is the
1288 // outermost JS call.
1289 Label non_outermost_js, done;
1290 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1291 __ Mov(x10, ExternalReference(js_entry_sp));
1292 __ Ldr(x11, MemOperand(x10));
1293 __ Cbnz(x11, &non_outermost_js);
1294 __ Str(fp, MemOperand(x10));
1295 __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1298 __ Bind(&non_outermost_js);
1299 // We spare one instruction by pushing xzr since the marker is 0.
1300 DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
1304 // The frame set up looks like this:
1305 // jssp[0] : JS entry frame marker.
1306 // jssp[1] : C entry FP.
1307 // jssp[2] : stack frame marker.
1308 // jssp[3] : stack frame marker.
1309 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1312 // Jump to a faked try block that does the invoke, with a faked catch
1313 // block that sets the pending exception.
1316 // Prevent the constant pool from being emitted between the record of the
1317 // handler_entry position and the first instruction of the sequence here.
1318 // There is no risk because Assembler::Emit() emits the instruction before
1319 // checking for constant pool emission, but we do not want to depend on
1322 Assembler::BlockPoolsScope block_pools(masm);
1323 __ bind(&handler_entry);
1324 handler_offset_ = handler_entry.pos();
1325 // Caught exception: Store result (exception) in the pending exception
1326 // field in the JSEnv and return a failure sentinel. Coming in here the
1327 // fp will be invalid because the PushTryHandler below sets it to 0 to
1328 // signal the existence of the JSEntry frame.
1329 __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1332 __ Str(code_entry, MemOperand(x10));
1333 __ LoadRoot(x0, Heap::kExceptionRootIndex);
1336 // Invoke: Link this frame into the handler chain. There's only one
1337 // handler block in this code object, so its index is 0.
1339 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1340 // If an exception not caught by another handler occurs, this handler
1341 // returns control to the code after the B(&invoke) above, which
1342 // restores all callee-saved registers (including cp and fp) to their
1343 // saved values before returning a failure to C.
1345 // Clear any pending exceptions.
1346 __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
1347 __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1349 __ Str(x10, MemOperand(x11));
1351 // Invoke the function by calling through the JS entry trampoline builtin.
1352 // Notice that we cannot store a reference to the trampoline code directly in
1353 // this stub, because runtime stubs are not traversed when doing GC.
1355 // Expected registers by Builtins::JSEntryTrampoline
1361 ExternalReference entry(type() == StackFrame::ENTRY_CONSTRUCT
1362 ? Builtins::kJSConstructEntryTrampoline
1363 : Builtins::kJSEntryTrampoline,
1367 // Call the JSEntryTrampoline.
1368 __ Ldr(x11, MemOperand(x10)); // Dereference the address.
1369 __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
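// x12 now holds the address of the trampoline's first instruction: the Code
// object's header is skipped and the heap-object tag removed.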
1372 // Unlink this frame from the handler chain.
1377 // x0 holds the result.
1378 // The stack pointer points to the top of the entry frame pushed on entry from
1379 // C++ (at the beginning of this stub):
1380 // jssp[0] : JS entry frame marker.
1381 // jssp[1] : C entry FP.
1382 // jssp[2] : stack frame marker.
1383 // jssp[3] : stack frame marker.
1384 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1386 // Check if the current stack frame is marked as the outermost JS frame.
1387 Label non_outermost_js_2;
1389 __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1390 __ B(ne, &non_outermost_js_2);
1391 __ Mov(x11, ExternalReference(js_entry_sp));
1392 __ Str(xzr, MemOperand(x11));
1393 __ Bind(&non_outermost_js_2);
1395 // Restore the top frame descriptors from the stack.
1397 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1398 __ Str(x10, MemOperand(x11));
1400 // Reset the stack to the callee saved registers.
1401 __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
1402 // Restore the callee-saved registers and return.
1403 DCHECK(jssp.Is(__ StackPointer()));
1405 __ SetStackPointer(csp);
1406 __ PopCalleeSavedRegisters();
1407 // After this point, we must not modify jssp because it is a callee-saved
1408 // register which we have just restored.
1413 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1415 Register receiver = LoadDescriptor::ReceiverRegister();
1416 // Ensure that the vector and slot registers won't be clobbered before
1417 // calling the miss handler.
1418 DCHECK(!FLAG_vector_ics ||
1419 !AreAliased(x10, x11, VectorLoadICDescriptor::VectorRegister(),
1420 VectorLoadICDescriptor::SlotRegister()));
1422 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
1426 PropertyAccessCompiler::TailCallBuiltin(
1427 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1431 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
1432 // Return address is in lr.
1435 Register receiver = LoadDescriptor::ReceiverRegister();
1436 Register index = LoadDescriptor::NameRegister();
1437 Register result = x0;
1438 Register scratch = x10;
1439 DCHECK(!scratch.is(receiver) && !scratch.is(index));
1440 DCHECK(!FLAG_vector_ics ||
1441 (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
1442 result.is(VectorLoadICDescriptor::SlotRegister())));
1444 // StringCharAtGenerator doesn't use the result register until it's passed
1445 // the different miss possibilities. If it did, we would have a conflict
1446 // when FLAG_vector_ics is true.
1447 StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
1448 &miss, // When not a string.
1449 &miss, // When not a number.
1450 &miss, // When index out of range.
1451 STRING_INDEX_IS_ARRAY_INDEX,
1452 RECEIVER_IS_STRING);
1453 char_at_generator.GenerateFast(masm);
1456 StubRuntimeCallHelper call_helper;
1457 char_at_generator.GenerateSlow(masm, call_helper);
1460 PropertyAccessCompiler::TailCallBuiltin(
1461 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1465 void InstanceofStub::Generate(MacroAssembler* masm) {
1467 // jssp[0]: function.
1470 // Returns result in x0. Zero indicates instanceof, smi 1 indicates not
1471 // instanceof.
1473 Register result = x0;
1474 Register function = right();
1475 Register object = left();
1476 Register scratch1 = x6;
1477 Register scratch2 = x7;
1478 Register res_true = x8;
1479 Register res_false = x9;
1480 // Only used if there was an inline map check site. (See
1481 // LCodeGen::DoInstanceOfKnownGlobal().)
1482 Register map_check_site = x4;
1483 // Delta for the instructions generated between the inline map check and the
1484 // instruction setting the result.
1485 const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
1487 Label not_js_object, slow;
1489 if (!HasArgsInRegisters()) {
1490 __ Pop(function, object);
1493 if (ReturnTrueFalseObject()) {
1494 __ LoadTrueFalseRoots(res_true, res_false);
1496 // This is counter-intuitive, but correct.
1497 __ Mov(res_true, Smi::FromInt(0));
1498 __ Mov(res_false, Smi::FromInt(1));
1501 // Check that the left hand side is a JS object and load its map as a side
1504 __ JumpIfSmi(object, ¬_js_object);
1505 __ IsObjectJSObjectType(object, map, scratch2, ¬_js_object);
1507 // If there is a call site cache, don't look in the global cache, but do the
1508 // real lookup and update the call site cache.
1509 if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
1511 __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
1512 __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
1513 __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
1518 // Get the prototype of the function.
1519 Register prototype = x13;
1520 __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
1521 MacroAssembler::kMissOnBoundFunction);
1523 // Check that the function prototype is a JS object.
1524 __ JumpIfSmi(prototype, &slow);
1525 __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
1527 // Update the global instanceof or call site inlined cache with the current
1528 // map and function. The cached answer will be set when it is known below.
1529 if (HasCallSiteInlineCheck()) {
1530 // Patch the (relocated) inlined map check.
1531 __ GetRelocatedValueLocation(map_check_site, scratch1);
1532 // We have a cell, so need another level of dereferencing.
1533 __ Ldr(scratch1, MemOperand(scratch1));
1534 __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
1536 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1537 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1540 Label return_true, return_result;
1541 Register smi_value = scratch1;
1543 // Loop through the prototype chain looking for the function prototype.
1544 Register chain_map = x1;
1545 Register chain_prototype = x14;
1546 Register null_value = x15;
1548 __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
1549 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
1550 // Speculatively set a result.
1551 __ Mov(result, res_false);
1552 if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
1553 // Value to store in the cache cannot be an object.
1554 __ Mov(smi_value, Smi::FromInt(1));
1559 // If the chain prototype is the object prototype, return true.
1560 __ Cmp(chain_prototype, prototype);
1561 __ B(eq, &return_true);
1563 // If the chain prototype is null, we've reached the end of the chain, so
1565 __ Cmp(chain_prototype, null_value);
1566 __ B(eq, &return_result);
1568 // Otherwise, load the next prototype in the chain, and loop.
1569 __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
1570 __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
1574 // Return sequence when no arguments are on the stack.
1575 // We cannot fall through to here.
1576 __ Bind(&return_true);
1577 __ Mov(result, res_true);
1578 if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
1579 // Value to store in the cache cannot be an object.
1580 __ Mov(smi_value, Smi::FromInt(0));
1582 __ Bind(&return_result);
1583 if (HasCallSiteInlineCheck()) {
1584 DCHECK(ReturnTrueFalseObject());
1585 __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
1586 __ GetRelocatedValueLocation(map_check_site, scratch2);
1587 __ Str(result, MemOperand(scratch2));
1589 Register cached_value = ReturnTrueFalseObject() ? smi_value : result;
1590 __ StoreRoot(cached_value, Heap::kInstanceofCacheAnswerRootIndex);
1594 Label object_not_null, object_not_null_or_smi;
1596 __ Bind(¬_js_object);
1597 Register object_type = x14;
1598 // x0 result result return register (uninit)
1599 // x10 function pointer to function
1600 // x11 object pointer to object
1601 // x14 object_type type of object (uninit)
1603 // Before null, smi and string checks, check that the rhs is a function.
1604 // For a non-function rhs, an exception must be thrown.
1605 __ JumpIfSmi(function, &slow);
1606 __ JumpIfNotObjectType(
1607 function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
1609 __ Mov(result, res_false);
1611 // Null is not instance of anything.
1612 __ Cmp(object, Operand(isolate()->factory()->null_value()));
1613 __ B(ne, &object_not_null);
1616 __ Bind(&object_not_null);
1617 // Smi values are not instances of anything.
1618 __ JumpIfNotSmi(object, &object_not_null_or_smi);
1621 __ Bind(&object_not_null_or_smi);
1622 // String values are not instances of anything.
1623 __ IsObjectJSStringType(object, scratch2, &slow);
1626 // Slow-case. Tail call builtin.
1629 FrameScope scope(masm, StackFrame::INTERNAL);
1630 // Arguments have either been passed into registers or have been previously
1631 // popped. We need to push them before calling builtin.
1632 __ Push(object, function);
1633 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1635 if (ReturnTrueFalseObject()) {
1636 // Reload true/false because they were clobbered in the builtin call.
1637 __ LoadTrueFalseRoots(res_true, res_false);
1639 __ Csel(result, res_true, res_false, eq);
1645 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1646 CHECK(!has_new_target());
1647 Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
1648 Register key = ArgumentsAccessReadDescriptor::index();
1649 DCHECK(arg_count.is(x0));
1652 // The displacement is the offset of the last parameter (if any) relative
1653 // to the frame pointer.
1654 static const int kDisplacement =
1655 StandardFrameConstants::kCallerSPOffset - kPointerSize;
1657 // Check that the key is a smi.
1659 __ JumpIfNotSmi(key, &slow);
1661 // Check if the calling frame is an arguments adaptor frame.
1662 Register local_fp = x11;
1663 Register caller_fp = x11;
1664 Register caller_ctx = x12;
1666 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1667 __ Ldr(caller_ctx, MemOperand(caller_fp,
1668 StandardFrameConstants::kContextOffset));
1669 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1670 __ Csel(local_fp, fp, caller_fp, ne);
1671 __ B(ne, &skip_adaptor);
1673 // Load the actual arguments limit found in the arguments adaptor frame.
1674 __ Ldr(arg_count, MemOperand(caller_fp,
1675 ArgumentsAdaptorFrameConstants::kLengthOffset));
1676 __ Bind(&skip_adaptor);
1678 // Check index against formal parameters count limit. Use unsigned comparison
1679 // to get negative check for free: branch if key < 0 or key >= arg_count.
1680 __ Cmp(key, arg_count);
1683 // Read the argument from the stack and return it.
1684 __ Sub(x10, arg_count, key);
1685 __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
1686 __ Ldr(x0, MemOperand(x10, kDisplacement));
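// The element is loaded from local_fp + (arg_count - key) * kPointerSize +
// kDisplacement (see the kDisplacement comment above).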
1689 // Slow case: handle non-smi or out-of-bounds access to arguments by calling
1690 // the runtime system.
1693 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1697 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1698 // Stack layout on entry.
1699 // jssp[0]: number of parameters (tagged)
1700 // jssp[8]: address of receiver argument
1701 // jssp[16]: function
1703 CHECK(!has_new_target());
1705 // Check if the calling frame is an arguments adaptor frame.
1707 Register caller_fp = x10;
1708 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1709 // Load and untag the context.
1710 __ Ldr(w11, UntagSmiMemOperand(caller_fp,
1711 StandardFrameConstants::kContextOffset));
1712 __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
1715 // Patch the arguments.length and parameters pointer in the current frame.
1716 __ Ldr(x11, MemOperand(caller_fp,
1717 ArgumentsAdaptorFrameConstants::kLengthOffset));
1718 __ Poke(x11, 0 * kXRegSize);
1719 __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
1720 __ Add(x10, x10, StandardFrameConstants::kCallerSPOffset);
1721 __ Poke(x10, 1 * kXRegSize);
1724 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1728 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1729 // Stack layout on entry.
1730 // jssp[0]: number of parameters (tagged)
1731 // jssp[8]: address of receiver argument
1732 // jssp[16]: function
1734 // Returns pointer to result object in x0.
1736 CHECK(!has_new_target());
1738 // Note: arg_count_smi is an alias of param_count_smi.
1739 Register arg_count_smi = x3;
1740 Register param_count_smi = x3;
1741 Register param_count = x7;
1742 Register recv_arg = x14;
1743 Register function = x4;
1744 __ Pop(param_count_smi, recv_arg, function);
1745 __ SmiUntag(param_count, param_count_smi);
1747 // Check if the calling frame is an arguments adaptor frame.
1748 Register caller_fp = x11;
1749 Register caller_ctx = x12;
1751 Label adaptor_frame, try_allocate;
1752 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1753 __ Ldr(caller_ctx, MemOperand(caller_fp,
1754 StandardFrameConstants::kContextOffset));
1755 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1756 __ B(eq, &adaptor_frame);
1758 // No adaptor, parameter count = argument count.
1760 // x1 mapped_params number of mapped params, min(params, args) (uninit)
1761 // x2 arg_count number of function arguments (uninit)
1762 // x3 arg_count_smi number of function arguments (smi)
1763 // x4 function function pointer
1764 // x7 param_count number of function parameters
1765 // x11 caller_fp caller's frame pointer
1766 // x14 recv_arg pointer to receiver arguments
1768 Register arg_count = x2;
1769 __ Mov(arg_count, param_count);
1770 __ B(&try_allocate);
1772 // We have an adaptor frame. Patch the parameters pointer.
1773 __ Bind(&adaptor_frame);
1774 __ Ldr(arg_count_smi,
1775 MemOperand(caller_fp,
1776 ArgumentsAdaptorFrameConstants::kLengthOffset));
1777 __ SmiUntag(arg_count, arg_count_smi);
1778 __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
1779 __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
1781 // Compute the mapped parameter count = min(param_count, arg_count)
1782 Register mapped_params = x1;
1783 __ Cmp(param_count, arg_count);
1784 __ Csel(mapped_params, param_count, arg_count, lt);
1786 __ Bind(&try_allocate);
1788 // x0 alloc_obj pointer to allocated objects: param map, backing
1789 // store, arguments (uninit)
1790 // x1 mapped_params number of mapped parameters, min(params, args)
1791 // x2 arg_count number of function arguments
1792 // x3 arg_count_smi number of function arguments (smi)
1793 // x4 function function pointer
1794 // x7 param_count number of function parameters
1795 // x10 size size of objects to allocate (uninit)
1796 // x14 recv_arg pointer to receiver arguments
1798 // Compute the size of backing store, parameter map, and arguments object.
1799 // 1. Parameter map, has two extra words containing context and backing
1801 const int kParameterMapHeaderSize =
1802 FixedArray::kHeaderSize + 2 * kPointerSize;
1804 // Calculate the parameter map size, assuming it exists.
1805 Register size = x10;
1806 __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
1807 __ Add(size, size, kParameterMapHeaderSize);
1809 // If there are no mapped parameters, set the running size total to zero.
1810 // Otherwise, use the parameter map size calculated earlier.
1811 __ Cmp(mapped_params, 0);
1812 __ CzeroX(size, eq);
1814 // 2. Add the size of the backing store and arguments object.
1815 __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
  __ Add(size, size,
         FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize);
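  // Illustrative size computation (example values): with mapped_params == 2
  // and arg_count == 3 the final size is
  //   (kParameterMapHeaderSize + 2 * kPointerSize)      // parameter map
  //   + (FixedArray::kHeaderSize + 3 * kPointerSize)    // backing store
  //   + Heap::kSloppyArgumentsObjectSize;               // arguments object
  // With mapped_params == 0 the parameter map term was zeroed above, leaving
  // only the backing store and the arguments object itself.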
1819 // Do the allocation of all three objects in one go. Assign this to x0, as it
1820 // will be returned to the caller.
1821 Register alloc_obj = x0;
1822 __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
1824 // Get the arguments boilerplate from the current (global) context.
1826 // x0 alloc_obj pointer to allocated objects (param map, backing
1827 // store, arguments)
1828 // x1 mapped_params number of mapped parameters, min(params, args)
1829 // x2 arg_count number of function arguments
1830 // x3 arg_count_smi number of function arguments (smi)
1831 // x4 function function pointer
1832 // x7 param_count number of function parameters
1833 // x11 sloppy_args_map offset to args (or aliased args) map (uninit)
1834 // x14 recv_arg pointer to receiver arguments
1836 Register global_object = x10;
1837 Register global_ctx = x10;
1838 Register sloppy_args_map = x11;
1839 Register aliased_args_map = x10;
1840 __ Ldr(global_object, GlobalObjectMemOperand());
1841 __ Ldr(global_ctx, FieldMemOperand(global_object,
1842 GlobalObject::kNativeContextOffset));
1844 __ Ldr(sloppy_args_map,
1845 ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
1846 __ Ldr(aliased_args_map,
1847 ContextMemOperand(global_ctx, Context::ALIASED_ARGUMENTS_MAP_INDEX));
1848 __ Cmp(mapped_params, 0);
1849 __ CmovX(sloppy_args_map, aliased_args_map, ne);
1851 // Copy the JS object part.
1852 __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
1853 __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
1854 __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
1855 __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
1857 // Set up the callee in-object property.
1858 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1859 const int kCalleeOffset = JSObject::kHeaderSize +
1860 Heap::kArgumentsCalleeIndex * kPointerSize;
1861 __ AssertNotSmi(function);
1862 __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
1864 // Use the length and set that as an in-object property.
1865 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1866 const int kLengthOffset = JSObject::kHeaderSize +
1867 Heap::kArgumentsLengthIndex * kPointerSize;
1868 __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
1870 // Set up the elements pointer in the allocated arguments object.
1871 // If we allocated a parameter map, "elements" will point there, otherwise
1872 // it will point to the backing store.
1874 // x0 alloc_obj pointer to allocated objects (param map, backing
1875 // store, arguments)
1876 // x1 mapped_params number of mapped parameters, min(params, args)
1877 // x2 arg_count number of function arguments
1878 // x3 arg_count_smi number of function arguments (smi)
1879 // x4 function function pointer
1880 // x5 elements pointer to parameter map or backing store (uninit)
1881 // x6 backing_store pointer to backing store (uninit)
1882 // x7 param_count number of function parameters
1883 // x14 recv_arg pointer to receiver arguments
1885 Register elements = x5;
1886 __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
1887 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
1889 // Initialize parameter map. If there are no mapped arguments, we're done.
1890 Label skip_parameter_map;
1891 __ Cmp(mapped_params, 0);
1892 // Set up backing store address, because it is needed later for filling in
1893 // the unmapped arguments.
1894 Register backing_store = x6;
1895 __ CmovX(backing_store, elements, eq);
1896 __ B(eq, &skip_parameter_map);
1898 __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
1899 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
1900 __ Add(x10, mapped_params, 2);
  __ SmiTag(x10);
  __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
1903 __ Str(cp, FieldMemOperand(elements,
1904 FixedArray::kHeaderSize + 0 * kPointerSize));
1905 __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
1906 __ Add(x10, x10, kParameterMapHeaderSize);
1907 __ Str(x10, FieldMemOperand(elements,
1908 FixedArray::kHeaderSize + 1 * kPointerSize));
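  // Layout note (explanatory): the two extra words of the parameter map hold
  // the current context (cp) and a tagged pointer to the backing store, which
  // is laid out immediately after the map at
  // elements + kParameterMapHeaderSize + mapped_params * kPointerSize; the
  // mapped-parameter entries themselves start at element slot 2.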
1910 // Copy the parameter slots and the holes in the arguments.
1911 // We need to fill in mapped_parameter_count slots. Then index the context,
1912 // where parameters are stored in reverse order, at:
1914 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
1916 // The mapped parameter thus needs to get indices:
1918 // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
1919 // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
1921 // We loop from right to left.
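  // Illustrative run (example values): with param_count == 4 and
  // mapped_params == 2, 'index' starts at Smi(MIN_CONTEXT_SLOTS + 2) and the
  // loop below executes twice, writing context index MIN_CONTEXT_SLOTS + 2
  // into parameter map entry 1 and MIN_CONTEXT_SLOTS + 3 into entry 0, while
  // storing the hole into the matching backing store slots.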
1923 // x0 alloc_obj pointer to allocated objects (param map, backing
1924 // store, arguments)
1925 // x1 mapped_params number of mapped parameters, min(params, args)
1926 // x2 arg_count number of function arguments
1927 // x3 arg_count_smi number of function arguments (smi)
1928 // x4 function function pointer
1929 // x5 elements pointer to parameter map or backing store (uninit)
1930 // x6 backing_store pointer to backing store (uninit)
1931 // x7 param_count number of function parameters
1932 // x11 loop_count parameter loop counter (uninit)
1933 // x12 index parameter index (smi, uninit)
1934 // x13 the_hole hole value (uninit)
1935 // x14 recv_arg pointer to receiver arguments
1937 Register loop_count = x11;
1938 Register index = x12;
1939 Register the_hole = x13;
1940 Label parameters_loop, parameters_test;
1941 __ Mov(loop_count, mapped_params);
1942 __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
  __ Sub(index, index, mapped_params);
  __ SmiTag(index);
1945 __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
1946 __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
1947 __ Add(backing_store, backing_store, kParameterMapHeaderSize);
  __ B(&parameters_test);

  __ Bind(&parameters_loop);
1952 __ Sub(loop_count, loop_count, 1);
1953 __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
1954 __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
1955 __ Str(index, MemOperand(elements, x10));
1956 __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
1957 __ Str(the_hole, MemOperand(backing_store, x10));
1958 __ Add(index, index, Smi::FromInt(1));
  __ Bind(&parameters_test);
  __ Cbnz(loop_count, &parameters_loop);
1962 __ Bind(&skip_parameter_map);
1963 // Copy arguments header and remaining slots (if there are any.)
1964 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
1965 __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
1966 __ Str(arg_count_smi, FieldMemOperand(backing_store,
1967 FixedArray::kLengthOffset));
1969 // x0 alloc_obj pointer to allocated objects (param map, backing
1970 // store, arguments)
1971 // x1 mapped_params number of mapped parameters, min(params, args)
1972 // x2 arg_count number of function arguments
1973 // x4 function function pointer
1974 // x3 arg_count_smi number of function arguments (smi)
1975 // x6 backing_store pointer to backing store (uninit)
1976 // x14 recv_arg pointer to receiver arguments
1978 Label arguments_loop, arguments_test;
1979 __ Mov(x10, mapped_params);
1980 __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
1981 __ B(&arguments_test);
1983 __ Bind(&arguments_loop);
1984 __ Sub(recv_arg, recv_arg, kPointerSize);
1985 __ Ldr(x11, MemOperand(recv_arg));
1986 __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
1987 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
1988 __ Add(x10, x10, 1);
1990 __ Bind(&arguments_test);
1991 __ Cmp(x10, arg_count);
1992 __ B(lt, &arguments_loop);
1996 // Do the runtime call to allocate the arguments object.
1998 __ Push(function, recv_arg, arg_count_smi);
1999 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
2003 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
2004 // Return address is in lr.
2007 Register receiver = LoadDescriptor::ReceiverRegister();
2008 Register key = LoadDescriptor::NameRegister();
2010 // Check that the key is an array index, that is Uint32.
2011 __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
2013 // Everything is fine, call runtime.
2014 __ Push(receiver, key);
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
                        masm->isolate()),
      2, 1);

  __ Bind(&slow);
2021 PropertyAccessCompiler::TailCallBuiltin(
2022 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
2026 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2027 // Stack layout on entry.
2028 // jssp[0]: number of parameters (tagged)
2029 // jssp[8]: address of receiver argument
2030 // jssp[16]: function
2032 // Returns pointer to result object in x0.
2034 // Get the stub arguments from the frame, and make an untagged copy of the
2036 Register param_count_smi = x1;
2037 Register params = x2;
2038 Register function = x3;
2039 Register param_count = x13;
2040 __ Pop(param_count_smi, params, function);
2041 __ SmiUntag(param_count, param_count_smi);
2043 // Test if arguments adaptor needed.
2044 Register caller_fp = x11;
2045 Register caller_ctx = x12;
2046 Label try_allocate, runtime;
2047 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2048 __ Ldr(caller_ctx, MemOperand(caller_fp,
2049 StandardFrameConstants::kContextOffset));
2050 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2051 __ B(ne, &try_allocate);
2053 // x1 param_count_smi number of parameters passed to function (smi)
2054 // x2 params pointer to parameters
2055 // x3 function function pointer
2056 // x11 caller_fp caller's frame pointer
2057 // x13 param_count number of parameters passed to function
2059 // Patch the argument length and parameters pointer.
2060 __ Ldr(param_count_smi,
2061 MemOperand(caller_fp,
2062 ArgumentsAdaptorFrameConstants::kLengthOffset));
2063 __ SmiUntag(param_count, param_count_smi);
2064 if (has_new_target()) {
2065 // Skip new.target: it is not a part of arguments.
2066 __ Sub(param_count, param_count, Operand(1));
2067 __ SmiTag(param_count_smi, param_count);
2069 __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
2070 __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
2072 // Try the new space allocation. Start out with computing the size of the
2073 // arguments object and the elements array in words.
2074 Register size = x10;
2075 __ Bind(&try_allocate);
2076 __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
2077 __ Cmp(param_count, 0);
2078 __ CzeroX(size, eq);
2079 __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize);
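  // Illustrative size (example values): with param_count == 2 the allocation,
  // measured in words because of SIZE_IN_WORDS below, is
  //   2 + FixedArray::kHeaderSize / kPointerSize
  //     + Heap::kStrictArgumentsObjectSize / kPointerSize;
  // with param_count == 0 the FixedArray part was zeroed above and only the
  // bare arguments object is allocated.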
2081 // Do the allocation of both objects in one go. Assign this to x0, as it will
2082 // be returned to the caller.
2083 Register alloc_obj = x0;
2084 __ Allocate(size, alloc_obj, x11, x12, &runtime,
2085 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2087 // Get the arguments boilerplate from the current (native) context.
2088 Register global_object = x10;
2089 Register global_ctx = x10;
2090 Register strict_args_map = x4;
2091 __ Ldr(global_object, GlobalObjectMemOperand());
2092 __ Ldr(global_ctx, FieldMemOperand(global_object,
2093 GlobalObject::kNativeContextOffset));
2094 __ Ldr(strict_args_map,
2095 ContextMemOperand(global_ctx, Context::STRICT_ARGUMENTS_MAP_INDEX));
2097 // x0 alloc_obj pointer to allocated objects: parameter array and
2099 // x1 param_count_smi number of parameters passed to function (smi)
2100 // x2 params pointer to parameters
2101 // x3 function function pointer
2102 // x4 strict_args_map offset to arguments map
2103 // x13 param_count number of parameters passed to function
2104 __ Str(strict_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
2105 __ LoadRoot(x5, Heap::kEmptyFixedArrayRootIndex);
2106 __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
2107 __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2109 // Set the smi-tagged length as an in-object property.
2110 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2111 const int kLengthOffset = JSObject::kHeaderSize +
2112 Heap::kArgumentsLengthIndex * kPointerSize;
2113 __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
2115 // If there are no actual arguments, we're done.
2117 __ Cbz(param_count, &done);
2119 // Set up the elements pointer in the allocated arguments object and
2120 // initialize the header in the elements fixed array.
2121 Register elements = x5;
2122 __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize);
2123 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2124 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
2125 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
2126 __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
2128 // x0 alloc_obj pointer to allocated objects: parameter array and
2130 // x1 param_count_smi number of parameters passed to function (smi)
2131 // x2 params pointer to parameters
2132 // x3 function function pointer
2133 // x4 array pointer to array slot (uninit)
2134 // x5 elements pointer to elements array of alloc_obj
2135 // x13 param_count number of parameters passed to function
2137 // Copy the fixed array slots.
2139 Register array = x4;
2140 // Set up pointer to first array slot.
2141 __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
2144 // Pre-decrement the parameters pointer by kPointerSize on each iteration.
2145 // Pre-decrement in order to skip receiver.
2146 __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
2147 // Post-increment elements by kPointerSize on each iteration.
2148 __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
2149 __ Sub(param_count, param_count, 1);
2150 __ Cbnz(param_count, &loop);
2152 // Return from stub.
2156 // Do the runtime call to allocate the arguments object.
2158 __ Push(function, params, param_count_smi);
2159 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
2163 void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
2164 // Stack layout on entry.
2165 // jssp[0]: index of rest parameter (tagged)
2166 // jssp[8]: number of parameters (tagged)
2167 // jssp[16]: address of receiver argument
2169 // Returns pointer to result object in x0.
2171 // Get the stub arguments from the frame, and make an untagged copy of the
2173 Register rest_index_smi = x1;
2174 Register param_count_smi = x2;
2175 Register params = x3;
2176 Register param_count = x13;
2177 __ Pop(rest_index_smi, param_count_smi, params);
2178 __ SmiUntag(param_count, param_count_smi);
2180 // Test if arguments adaptor needed.
2181 Register caller_fp = x11;
2182 Register caller_ctx = x12;
2184 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2185 __ Ldr(caller_ctx, MemOperand(caller_fp,
2186 StandardFrameConstants::kContextOffset));
2187 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2190 // x1 rest_index_smi index of rest parameter
2191 // x2 param_count_smi number of parameters passed to function (smi)
2192 // x3 params pointer to parameters
2193 // x11 caller_fp caller's frame pointer
2194 // x13 param_count number of parameters passed to function
2196 // Patch the argument length and parameters pointer.
2197 __ Ldr(param_count_smi,
2198 MemOperand(caller_fp,
2199 ArgumentsAdaptorFrameConstants::kLengthOffset));
2200 __ SmiUntag(param_count, param_count_smi);
2201 __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
2202 __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
2205 __ Push(params, param_count_smi, rest_index_smi);
2206 __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
2210 void RegExpExecStub::Generate(MacroAssembler* masm) {
2211 #ifdef V8_INTERPRETED_REGEXP
2212 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2213 #else // V8_INTERPRETED_REGEXP
2215 // Stack frame on entry.
2216 // jssp[0]: last_match_info (expected JSArray)
2217 // jssp[8]: previous index
2218 // jssp[16]: subject string
2219 // jssp[24]: JSRegExp object
2222 // Use of registers for this function.
2224 // Variable registers:
2225 // x10-x13 used as scratch registers
2226 // w0 string_type type of subject string
2227 // x2 jsstring_length subject string length
2228 // x3 jsregexp_object JSRegExp object
2229 // w4 string_encoding Latin1 or UC16
2230 // w5 sliced_string_offset if the string is a SlicedString
2231 // offset to the underlying string
2232 // w6 string_representation groups attributes of the string:
2234 // - type of the string
2235 // - is a short external string
2236 Register string_type = w0;
2237 Register jsstring_length = x2;
2238 Register jsregexp_object = x3;
2239 Register string_encoding = w4;
2240 Register sliced_string_offset = w5;
2241 Register string_representation = w6;
2243 // These are in callee save registers and will be preserved by the call
2244 // to the native RegExp code, as this code is called using the normal
2245 // C calling convention. When calling directly from generated code the
2246 // native RegExp code will not do a GC and therefore the content of
2247 // these registers are safe to use after the call.
2249 // x19 subject subject string
2250 // x20 regexp_data RegExp data (FixedArray)
2251 // x21 last_match_info_elements info relative to the last match
2253 // x22 code_object generated regexp code
2254 Register subject = x19;
2255 Register regexp_data = x20;
2256 Register last_match_info_elements = x21;
2257 Register code_object = x22;
2259 // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
  CPURegList used_callee_saved_registers(subject,
                                         regexp_data,
                                         last_match_info_elements,
                                         code_object);
2264 __ PushCPURegList(used_callee_saved_registers);
2271 // jssp[32]: last_match_info (JSArray)
2272 // jssp[40]: previous index
2273 // jssp[48]: subject string
2274 // jssp[56]: JSRegExp object
2276 const int kLastMatchInfoOffset = 4 * kPointerSize;
2277 const int kPreviousIndexOffset = 5 * kPointerSize;
2278 const int kSubjectOffset = 6 * kPointerSize;
2279 const int kJSRegExpOffset = 7 * kPointerSize;
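  // Explanatory note: these offsets are the stub's original argument slots
  // (jssp[0..24] on entry) shifted up by the four callee-saved registers
  // pushed above, e.g. kLastMatchInfoOffset == (4 + 0) * kPointerSize and
  // kJSRegExpOffset == (4 + 3) * kPointerSize.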
2281 // Ensure that a RegExp stack is allocated.
2282 ExternalReference address_of_regexp_stack_memory_address =
2283 ExternalReference::address_of_regexp_stack_memory_address(isolate());
2284 ExternalReference address_of_regexp_stack_memory_size =
2285 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2286 __ Mov(x10, address_of_regexp_stack_memory_size);
2287 __ Ldr(x10, MemOperand(x10));
2288 __ Cbz(x10, &runtime);
2290 // Check that the first argument is a JSRegExp object.
2291 DCHECK(jssp.Is(__ StackPointer()));
2292 __ Peek(jsregexp_object, kJSRegExpOffset);
2293 __ JumpIfSmi(jsregexp_object, &runtime);
2294 __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
2296 // Check that the RegExp has been compiled (data contains a fixed array).
2297 __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
2298 if (FLAG_debug_code) {
2299 STATIC_ASSERT(kSmiTag == 0);
2300 __ Tst(regexp_data, kSmiTagMask);
2301 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2302 __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
2303 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2306 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2307 __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2308 __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
  // Check that the number of captures fits in the static offsets vector
  // buffer. We always have at least one capture for the whole match, plus
  // additional
2313 // ones due to capturing parentheses. A capture takes 2 registers.
2314 // The number of capture registers then is (number_of_captures + 1) * 2.
  __ Ldrsw(x10,
           UntagSmiFieldMemOperand(regexp_data,
                                   JSRegExp::kIrregexpCaptureCountOffset));
2318 // Check (number_of_captures + 1) * 2 <= offsets vector size
2319 // number_of_captures * 2 <= offsets vector size - 2
2320 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2321 __ Add(x10, x10, x10);
2322 __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
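  // Illustrative numbers: a regexp with 3 capturing groups needs
  // (3 + 1) * 2 == 8 offset slots; the comparison above checks the equivalent
  // condition 3 * 2 <= kJSRegexpStaticOffsetsVectorSize - 2, i.e. the original
  // inequality with both sides reduced by 2.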
2325 // Initialize offset for possibly sliced string.
2326 __ Mov(sliced_string_offset, 0);
2328 DCHECK(jssp.Is(__ StackPointer()));
2329 __ Peek(subject, kSubjectOffset);
2330 __ JumpIfSmi(subject, &runtime);
2332 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2333 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2335 __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
2337 // Handle subject string according to its encoding and representation:
2338 // (1) Sequential string? If yes, go to (5).
2339 // (2) Anything but sequential or cons? If yes, go to (6).
2340 // (3) Cons string. If the string is flat, replace subject with first string.
2341 // Otherwise bailout.
2342 // (4) Is subject external? If yes, go to (7).
2343 // (5) Sequential string. Load regexp code according to encoding.
2347 // Deferred code at the end of the stub:
2348 // (6) Not a long external string? If yes, go to (8).
2349 // (7) External string. Make it, offset-wise, look like a sequential string.
2351 // (8) Short external string or not a string? If yes, bail out to runtime.
2352 // (9) Sliced string. Replace subject with parent. Go to (4).
2354 Label check_underlying; // (4)
2355 Label seq_string; // (5)
2356 Label not_seq_nor_cons; // (6)
2357 Label external_string; // (7)
2358 Label not_long_external; // (8)
2360 // (1) Sequential string? If yes, go to (5).
  __ And(string_representation,
         string_type,
         kIsNotStringMask |
             kStringRepresentationMask |
             kShortExternalStringMask);
2366 // We depend on the fact that Strings of type
2367 // SeqString and not ShortExternalString are defined
2368 // by the following pattern:
2369 // string_type: 0XX0 XX00
2372 // | | is a SeqString
2373 // | is not a short external String
2375 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2376 STATIC_ASSERT(kShortExternalStringTag != 0);
2377 __ Cbz(string_representation, &seq_string); // Go to (5).
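  // For instance, a SeqOneByteString has kStringTag and kSeqStringTag (both
  // zero by the STATIC_ASSERT above) and no short-external bit, so the masked
  // string_representation is zero and the Cbz takes the branch to (5).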
2379 // (2) Anything but sequential or cons? If yes, go to (6).
2380 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2381 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2382 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2383 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2384 __ Cmp(string_representation, kExternalStringTag);
  __ B(ge, &not_seq_nor_cons);  // Go to (6).
2387 // (3) Cons string. Check that it's flat.
2388 __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
2389 __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
2390 // Replace subject with first string.
2391 __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2393 // (4) Is subject external? If yes, go to (7).
2394 __ Bind(&check_underlying);
2395 // Reload the string type.
2396 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2397 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2398 STATIC_ASSERT(kSeqStringTag == 0);
2399 // The underlying external string is never a short external string.
2400 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2401 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2402 __ TestAndBranchIfAnySet(string_type.X(),
2403 kStringRepresentationMask,
2404 &external_string); // Go to (7).
2406 // (5) Sequential string. Load regexp code according to encoding.
2407 __ Bind(&seq_string);
2409 // Check that the third argument is a positive smi less than the subject
2410 // string length. A negative value will be greater (unsigned comparison).
2411 DCHECK(jssp.Is(__ StackPointer()));
2412 __ Peek(x10, kPreviousIndexOffset);
2413 __ JumpIfNotSmi(x10, &runtime);
2414 __ Cmp(jsstring_length, x10);
2417 // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
2418 // before entering the exit frame.
2419 __ SmiUntag(x1, x10);
2421 // The third bit determines the string encoding in string_type.
2422 STATIC_ASSERT(kOneByteStringTag == 0x04);
2423 STATIC_ASSERT(kTwoByteStringTag == 0x00);
2424 STATIC_ASSERT(kStringEncodingMask == 0x04);
2426 // Find the code object based on the assumptions above.
  // kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent; add an offset
  // of kPointerSize to reach the latter.
2429 DCHECK_EQ(JSRegExp::kDataOneByteCodeOffset + kPointerSize,
2430 JSRegExp::kDataUC16CodeOffset);
2431 __ Mov(x10, kPointerSize);
2432 // We will need the encoding later: Latin1 = 0x04
  __ Ands(string_encoding, string_type, kStringEncodingMask);
  // Keep the kPointerSize offset only for UC16 (flags eq); for Latin1 the
  // encoding bits are set, so clear the offset and load the one-byte code.
  __ CzeroX(x10, ne);
  __ Add(x10, regexp_data, x10);
2437 __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
2439 // (E) Carry on. String handling is done.
2441 // Check that the irregexp code has been generated for the actual string
2442 // encoding. If it has, the field contains a code object otherwise it contains
2443 // a smi (code flushing support).
2444 __ JumpIfSmi(code_object, &runtime);
2446 // All checks done. Now push arguments for native regexp code.
2447 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
2451 // Isolates: note we add an additional parameter here (isolate pointer).
2452 __ EnterExitFrame(false, x10, 1);
2453 DCHECK(csp.Is(__ StackPointer()));
2455 // We have 9 arguments to pass to the regexp code, therefore we have to pass
2456 // one on the stack and the rest as registers.
  // Note that the placement of the argument on the stack isn't standard
  // AAPCS64:
2460 // csp[0]: Space for the return address placed by DirectCEntryStub.
2461 // csp[8]: Argument 9, the current isolate address.
2463 __ Mov(x10, ExternalReference::isolate_address(isolate()));
2464 __ Poke(x10, kPointerSize);
2466 Register length = w11;
2467 Register previous_index_in_bytes = w12;
2468 Register start = x13;
2470 // Load start of the subject string.
2471 __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
2472 // Load the length from the original subject string from the previous stack
2473 // frame. Therefore we have to use fp, which points exactly to two pointer
2474 // sizes below the previous sp. (Because creating a new stack frame pushes
2475 // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
2476 __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2477 __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
  // Handle UC16 encoding: two bytes make one character.
  // string_encoding: if Latin1: 0x04
  //                  if UC16:   0x00
2482 STATIC_ASSERT(kStringEncodingMask == 0x04);
2483 __ Ubfx(string_encoding, string_encoding, 2, 1);
2484 __ Eor(string_encoding, string_encoding, 1);
  // string_encoding: if Latin1: 0
  //                  if UC16:   1
2488 // Convert string positions from characters to bytes.
2489 // Previous index is in x1.
2490 __ Lsl(previous_index_in_bytes, w1, string_encoding);
2491 __ Lsl(length, length, string_encoding);
2492 __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
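  // Illustrative values: a Latin1 string has string_encoding == 0x04, so the
  // Ubfx/Eor sequence above yields a shift of 0 (one byte per character); a
  // UC16 string has 0x00, yielding a shift of 1 (two bytes per character) for
  // the three Lsl instructions.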
2494 // Argument 1 (x0): Subject string.
2495 __ Mov(x0, subject);
2497 // Argument 2 (x1): Previous index, already there.
2499 // Argument 3 (x2): Get the start of input.
2500 // Start of input = start of string + previous index + substring offset
2503 __ Add(w10, previous_index_in_bytes, sliced_string_offset);
2504 __ Add(x2, start, Operand(w10, UXTW));
2507 // End of input = start of input + (length of input - previous index)
2508 __ Sub(w10, length, previous_index_in_bytes);
2509 __ Add(x3, x2, Operand(w10, UXTW));
2511 // Argument 5 (x4): static offsets vector buffer.
2512 __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
  // Argument 6 (x5): Set the number of capture registers to zero to force
  // global regexps to behave as non-global. This stub is not used for global
  // regexps.
  __ Mov(x5, 0);
2519 // Argument 7 (x6): Start (high end) of backtracking stack memory area.
2520 __ Mov(x10, address_of_regexp_stack_memory_address);
2521 __ Ldr(x10, MemOperand(x10));
2522 __ Mov(x11, address_of_regexp_stack_memory_size);
2523 __ Ldr(x11, MemOperand(x11));
2524 __ Add(x6, x10, x11);
2526 // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
2529 // Locate the code entry and call it.
2530 __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
2531 DirectCEntryStub stub(isolate());
2532 stub.GenerateCall(masm, code_object);
2534 __ LeaveExitFrame(false, x10, true);
2536 // The generated regexp code returns an int32 in w0.
2537 Label failure, exception;
2538 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
2539 __ CompareAndBranch(w0,
2540 NativeRegExpMacroAssembler::EXCEPTION,
2543 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
2545 // Success: process the result from the native regexp code.
2546 Register number_of_capture_registers = x12;
2548 // Calculate number of capture registers (number_of_captures + 1) * 2
2549 // and store it in the last match info.
  __ Ldrsw(x10,
           UntagSmiFieldMemOperand(regexp_data,
                                   JSRegExp::kIrregexpCaptureCountOffset));
2553 __ Add(x10, x10, x10);
2554 __ Add(number_of_capture_registers, x10, 2);
2556 // Check that the fourth object is a JSArray object.
2557 DCHECK(jssp.Is(__ StackPointer()));
2558 __ Peek(x10, kLastMatchInfoOffset);
2559 __ JumpIfSmi(x10, &runtime);
2560 __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
2562 // Check that the JSArray is the fast case.
2563 __ Ldr(last_match_info_elements,
2564 FieldMemOperand(x10, JSArray::kElementsOffset));
  __ Ldr(x10,
         FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2567 __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
2569 // Check that the last match info has space for the capture registers and the
2570 // additional information (overhead).
2571 // (number_of_captures + 1) * 2 + overhead <= last match info size
2572 // (number_of_captures * 2) + 2 + overhead <= last match info size
2573 // number_of_capture_registers + overhead <= last match info size
  __ Ldrsw(x10,
           UntagSmiFieldMemOperand(last_match_info_elements,
                                   FixedArray::kLengthOffset));
2577 __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
2581 // Store the capture count.
2582 __ SmiTag(x10, number_of_capture_registers);
  __ Str(x10,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastCaptureCountOffset));
2586 // Store last subject and last input.
  __ Str(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastSubjectOffset));
2590 // Use x10 as the subject string in order to only need
2591 // one RecordWriteStub.
2592 __ Mov(x10, subject);
2593 __ RecordWriteField(last_match_info_elements,
2594 RegExpImpl::kLastSubjectOffset,
  __ Str(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastInputOffset));
2602 __ Mov(x10, subject);
2603 __ RecordWriteField(last_match_info_elements,
2604 RegExpImpl::kLastInputOffset,
2610 Register last_match_offsets = x13;
2611 Register offsets_vector_index = x14;
2612 Register current_offset = x15;
2614 // Get the static offsets vector filled by the native regexp code
2615 // and fill the last match info.
2616 ExternalReference address_of_static_offsets_vector =
2617 ExternalReference::address_of_static_offsets_vector(isolate());
2618 __ Mov(offsets_vector_index, address_of_static_offsets_vector);
2620 Label next_capture, done;
2621 // Capture register counter starts from number of capture registers and
2622 // iterates down to zero (inclusive).
2623 __ Add(last_match_offsets,
2624 last_match_info_elements,
2625 RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
2626 __ Bind(&next_capture);
2627 __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
2629 // Read two 32 bit values from the static offsets vector buffer into
2631 __ Ldr(current_offset,
2632 MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
2633 // Store the smi values in the last match info.
2634 __ SmiTag(x10, current_offset);
2635 // Clearing the 32 bottom bits gives us a Smi.
2636 STATIC_ASSERT(kSmiTag == 0);
2637 __ Bic(x11, current_offset, kSmiShiftMask);
  __ Stp(x10, x11,
         MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
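  // Explanatory note: the 64-bit load above fetched two consecutive 32-bit
  // capture offsets. SmiTag shifts the low word into the upper half of x10,
  // forming a smi, and Bic clears the low 32 bits of x11 so the offset already
  // sitting in the upper half is a valid smi too; the Stp then writes both
  // smis into the last match info at once.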
2641 __ B(&next_capture);
2644 // Return last match info.
2645 __ Peek(x0, kLastMatchInfoOffset);
2646 __ PopCPURegList(used_callee_saved_registers);
2647 // Drop the 4 arguments of the stub from the stack.
2651 __ Bind(&exception);
2652 Register exception_value = x0;
  // A stack overflow (on the backtrack stack) may have occurred
2654 // in the RegExp code but no exception has been created yet.
2655 // If there is no pending exception, handle that in the runtime system.
2656 __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
  __ Mov(x11,
         Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                   isolate())));
2660 __ Ldr(exception_value, MemOperand(x11));
2661 __ Cmp(x10, exception_value);
2664 __ Str(x10, MemOperand(x11)); // Clear pending exception.
2666 // Check if the exception is a termination. If so, throw as uncatchable.
2667 Label termination_exception;
2668 __ JumpIfRoot(exception_value,
2669 Heap::kTerminationExceptionRootIndex,
2670 &termination_exception);
2672 __ Throw(exception_value, x10, x11, x12, x13);
2674 __ Bind(&termination_exception);
2675 __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
2678 __ Mov(x0, Operand(isolate()->factory()->null_value()));
2679 __ PopCPURegList(used_callee_saved_registers);
2680 // Drop the 4 arguments of the stub from the stack.
2685 __ PopCPURegList(used_callee_saved_registers);
2686 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2688 // Deferred code for string handling.
2689 // (6) Not a long external string? If yes, go to (8).
  __ Bind(&not_seq_nor_cons);
2691 // Compare flags are still set.
  __ B(ne, &not_long_external);  // Go to (8).
2694 // (7) External string. Make it, offset-wise, look like a sequential string.
2695 __ Bind(&external_string);
2696 if (masm->emit_debug_code()) {
2697 // Assert that we do not have a cons or slice (indirect strings) here.
2698 // Sequential strings have already been ruled out.
2699 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2700 __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2701 __ Tst(x10, kIsIndirectStringMask);
2702 __ Check(eq, kExternalStringExpectedButNotFound);
2703 __ And(x10, x10, kStringRepresentationMask);
2705 __ Check(ne, kExternalStringExpectedButNotFound);
  __ Ldr(subject,
         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2709 // Move the pointer so that offset-wise, it looks like a sequential string.
2710 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2711 __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
2712 __ B(&seq_string); // Go to (5).
2714 // (8) If this is a short external string or not a string, bail out to
  __ Bind(&not_long_external);
2717 STATIC_ASSERT(kShortExternalStringTag != 0);
2718 __ TestAndBranchIfAnySet(string_representation,
2719 kShortExternalStringMask | kIsNotStringMask,
2722 // (9) Sliced string. Replace subject with parent.
2723 __ Ldr(sliced_string_offset,
2724 UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
2725 __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2726 __ B(&check_underlying); // Go to (4).
2731 static void GenerateRecordCallTarget(MacroAssembler* masm,
2734 Register feedback_vector,
2737 Register scratch2) {
2738 ASM_LOCATION("GenerateRecordCallTarget");
2739 DCHECK(!AreAliased(scratch1, scratch2,
2740 argc, function, feedback_vector, index));
2741 // Cache the called function in a feedback vector slot. Cache states are
2742 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
2743 // argc : number of arguments to the construct function
2744 // function : the function to call
2745 // feedback_vector : the feedback vector
2746 // index : slot in feedback vector (smi)
2747 Label initialize, done, miss, megamorphic, not_array_function;
2749 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2750 masm->isolate()->heap()->megamorphic_symbol());
2751 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2752 masm->isolate()->heap()->uninitialized_symbol());
2754 // Load the cache state.
2755 __ Add(scratch1, feedback_vector,
2756 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
2757 __ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
2759 // A monomorphic cache hit or an already megamorphic state: invoke the
2760 // function without changing the state.
2761 __ Cmp(scratch1, function);
2764 if (!FLAG_pretenuring_call_new) {
2765 // If we came here, we need to see if we are the array function.
2766 // If we didn't have a matching function, and we didn't find the megamorph
2767 // sentinel, then we have in the slot either some other function or an
2768 // AllocationSite. Do a map check on the object in scratch1 register.
2769 __ Ldr(scratch2, FieldMemOperand(scratch1, AllocationSite::kMapOffset));
2770 __ JumpIfNotRoot(scratch2, Heap::kAllocationSiteMapRootIndex, &miss);
2772 // Make sure the function is the Array() function
2773 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
2774 __ Cmp(function, scratch1);
2775 __ B(ne, &megamorphic);
  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
  // megamorphic.
2783 __ JumpIfRoot(scratch1, Heap::kuninitialized_symbolRootIndex, &initialize);
2784 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2785 // write-barrier is needed.
2786 __ Bind(&megamorphic);
2787 __ Add(scratch1, feedback_vector,
2788 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
2789 __ LoadRoot(scratch2, Heap::kmegamorphic_symbolRootIndex);
2790 __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
2793 // An uninitialized cache is patched with the function or sentinel to
2794 // indicate the ElementsKind if function is the Array constructor.
2795 __ Bind(&initialize);
2797 if (!FLAG_pretenuring_call_new) {
2798 // Make sure the function is the Array() function
2799 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
2800 __ Cmp(function, scratch1);
2801 __ B(ne, ¬_array_function);
2803 // The target function is the Array constructor,
2804 // Create an AllocationSite if we don't already have it, store it in the
2807 FrameScope scope(masm, StackFrame::INTERNAL);
2808 CreateAllocationSiteStub create_stub(masm->isolate());
2810 // Arguments register must be smi-tagged to call out.
2812 __ Push(argc, function, feedback_vector, index);
2814 // CreateAllocationSiteStub expect the feedback vector in x2 and the slot
2816 DCHECK(feedback_vector.Is(x2) && index.Is(x3));
2817 __ CallStub(&create_stub);
2819 __ Pop(index, feedback_vector, function, argc);
2824 __ Bind(¬_array_function);
2827 // An uninitialized cache is patched with the function.
2829 __ Add(scratch1, feedback_vector,
2830 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
2831 __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
2832 __ Str(function, MemOperand(scratch1, 0));
2835 __ RecordWrite(feedback_vector, scratch1, function, kLRHasNotBeenSaved,
2836 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
2843 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2844 // Do not transform the receiver for strict mode functions.
2845 __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2846 __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
2847 __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, cont);
2849 // Do not transform the receiver for native (Compilerhints already in x3).
2850 __ Tbnz(w4, SharedFunctionInfo::kNative, cont);
2854 static void EmitSlowCase(MacroAssembler* masm,
2858 Label* non_function) {
2859 // Check for function proxy.
2860 // x10 : function type.
2861 __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, non_function);
2862 __ Push(function); // put proxy as additional argument
2863 __ Mov(x0, argc + 1);
2865 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
2867 Handle<Code> adaptor =
2868 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2869 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2872 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2873 // of the original receiver from the call site).
2874 __ Bind(non_function);
2875 __ Poke(function, argc * kXRegSize);
2876 __ Mov(x0, argc); // Set up the number of arguments.
2878 __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
2879 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2880 RelocInfo::CODE_TARGET);
2884 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2885 // Wrap the receiver and patch it back onto the stack.
2886 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
2888 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2891 __ Poke(x0, argc * kPointerSize);
2896 static void CallFunctionNoFeedback(MacroAssembler* masm,
2897 int argc, bool needs_checks,
2898 bool call_as_method) {
2899 // x1 function the function to call
2900 Register function = x1;
2902 Label slow, non_function, wrap, cont;
2904 // TODO(jbramley): This function has a lot of unnamed registers. Name them,
2905 // and tidy things up a bit.
2908 // Check that the function is really a JavaScript function.
2909 __ JumpIfSmi(function, &non_function);
2911 // Goto slow case if we do not have a function.
2912 __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
2915 // Fast-case: Invoke the function now.
2916 // x1 function pushed function
2917 ParameterCount actual(argc);
2919 if (call_as_method) {
2921 EmitContinueIfStrictOrNative(masm, &cont);
2924 // Compute the receiver in sloppy mode.
2925 __ Peek(x3, argc * kPointerSize);
2928 __ JumpIfSmi(x3, &wrap);
2929 __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
2937 __ InvokeFunction(function,
2942 // Slow-case: Non-function called.
2944 EmitSlowCase(masm, argc, function, type, &non_function);
2947 if (call_as_method) {
2949 EmitWrapCase(masm, argc, &cont);
2954 void CallFunctionStub::Generate(MacroAssembler* masm) {
2955 ASM_LOCATION("CallFunctionStub::Generate");
2956 CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2960 void CallConstructStub::Generate(MacroAssembler* masm) {
2961 ASM_LOCATION("CallConstructStub::Generate");
2962 // x0 : number of arguments
2963 // x1 : the function to call
2964 // x2 : feedback vector
2965 // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
2966 Register function = x1;
2967 Label slow, non_function_call;
2969 // Check that the function is not a smi.
2970 __ JumpIfSmi(function, &non_function_call);
2971 // Check that the function is a JSFunction.
2972 Register object_type = x10;
2973 __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
2976 if (RecordCallTarget()) {
2977 GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5);
2979 __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
2980 if (FLAG_pretenuring_call_new) {
2981 // Put the AllocationSite from the feedback vector into x2.
2982 // By adding kPointerSize we encode that we know the AllocationSite
2983 // entry is at the feedback vector slot given by x3 + 1.
2984 __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize + kPointerSize));
2986 Label feedback_register_initialized;
2987 // Put the AllocationSite from the feedback vector into x2, or undefined.
2988 __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
2989 __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
2990 __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
2991 &feedback_register_initialized);
2992 __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
2993 __ bind(&feedback_register_initialized);
2996 __ AssertUndefinedOrAllocationSite(x2, x5);
2999 if (IsSuperConstructorCall()) {
3000 __ Mov(x4, Operand(1 * kPointerSize));
3001 __ Add(x4, x4, Operand(x0, LSL, kPointerSizeLog2));
3004 __ Mov(x3, function);
3007 // Jump to the function-specific construct stub.
3008 Register jump_reg = x4;
3009 Register shared_func_info = jump_reg;
3010 Register cons_stub = jump_reg;
3011 Register cons_stub_code = jump_reg;
3012 __ Ldr(shared_func_info,
3013 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ Ldr(cons_stub,
         FieldMemOperand(shared_func_info,
                         SharedFunctionInfo::kConstructStubOffset));
3017 __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
3018 __ Br(cons_stub_code);
3022 __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
3023 __ B(ne, &non_function_call);
3024 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3027 __ Bind(&non_function_call);
3028 __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3031 // Set expected number of arguments to zero (not changing x0).
3033 __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3034 RelocInfo::CODE_TARGET);
3038 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
3039 __ Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3040 __ Ldr(vector, FieldMemOperand(vector,
3041 JSFunction::kSharedFunctionInfoOffset));
3042 __ Ldr(vector, FieldMemOperand(vector,
3043 SharedFunctionInfo::kFeedbackVectorOffset));
3047 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
3052 Register function = x1;
3053 Register feedback_vector = x2;
3054 Register index = x3;
3055 Register scratch = x4;
3057 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch);
3058 __ Cmp(function, scratch);
3061 __ Mov(x0, Operand(arg_count()));
3063 __ Add(scratch, feedback_vector,
3064 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3065 __ Ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3067 // Verify that scratch contains an AllocationSite
  Register map = x5;  // Scratch register for the map check below.
  __ Ldr(map, FieldMemOperand(scratch, HeapObject::kMapOffset));
3070 __ JumpIfNotRoot(map, Heap::kAllocationSiteMapRootIndex, &miss);
3072 Register allocation_site = feedback_vector;
3073 __ Mov(allocation_site, scratch);
3075 Register original_constructor = x3;
3076 __ Mov(original_constructor, function);
3077 ArrayConstructorStub stub(masm->isolate(), arg_count());
3078 __ TailCallStub(&stub);
3083 // The slow case, we need this no matter what to complete a call after a miss.
  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
3093 void CallICStub::Generate(MacroAssembler* masm) {
3094 ASM_LOCATION("CallICStub");
3097 // x3 - slot id (Smi)
3099 const int with_types_offset =
3100 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
3101 const int generic_offset =
3102 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
3103 Label extra_checks_or_miss, slow_start;
3104 Label slow, non_function, wrap, cont;
3105 Label have_js_function;
3106 int argc = arg_count();
3107 ParameterCount actual(argc);
3109 Register function = x1;
3110 Register feedback_vector = x2;
3111 Register index = x3;
3114 // The checks. First, does x1 match the recorded monomorphic target?
3115 __ Add(x4, feedback_vector,
3116 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3117 __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
3119 // We don't know that we have a weak cell. We might have a private symbol
3120 // or an AllocationSite, but the memory is safe to examine.
3121 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
3123 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
3124 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
3125 // computed, meaning that it can't appear to be a pointer. If the low bit is
3126 // 0, then hash is computed, but the 0 bit prevents the field from appearing
3128 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
3129 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
3130 WeakCell::kValueOffset &&
3131 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
3133 __ Ldr(x5, FieldMemOperand(x4, WeakCell::kValueOffset));
3134 __ Cmp(x5, function);
3135 __ B(ne, &extra_checks_or_miss);
3137 // The compare above could have been a SMI/SMI comparison. Guard against this
3138 // convincing us that we have a monomorphic JSFunction.
3139 __ JumpIfSmi(function, &extra_checks_or_miss);
3141 __ bind(&have_js_function);
3142 if (CallAsMethod()) {
3143 EmitContinueIfStrictOrNative(masm, &cont);
3145 // Compute the receiver in sloppy mode.
3146 __ Peek(x3, argc * kPointerSize);
3148 __ JumpIfSmi(x3, &wrap);
3149 __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
3154 __ InvokeFunction(function,
3160 EmitSlowCase(masm, argc, function, type, &non_function);
3162 if (CallAsMethod()) {
3164 EmitWrapCase(masm, argc, &cont);
3167 __ bind(&extra_checks_or_miss);
3168 Label uninitialized, miss;
3170 __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &slow_start);
  // The following cases attempt to handle MISS cases without going to the
  // runtime.
3174 if (FLAG_trace_ic) {
3178 __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
3180 // We are going megamorphic. If the feedback is a JSFunction, it is fine
3181 // to handle it here. More complex cases are dealt with in the runtime.
3182 __ AssertNotSmi(x4);
3183 __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
3184 __ Add(x4, feedback_vector,
3185 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3186 __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
3187 __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
3188 // We have to update statistics for runtime profiling.
3189 __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
3190 __ Subs(x4, x4, Operand(Smi::FromInt(1)));
3191 __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
3192 __ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
3193 __ Adds(x4, x4, Operand(Smi::FromInt(1)));
3194 __ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
3197 __ bind(&uninitialized);
3199 // We are going monomorphic, provided we actually have a JSFunction.
3200 __ JumpIfSmi(function, &miss);
3202 // Goto miss case if we do not have a function.
3203 __ JumpIfNotObjectType(function, x5, x5, JS_FUNCTION_TYPE, &miss);
3205 // Make sure the function is not the Array() function, which requires special
3206 // behavior on MISS.
3207 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, x5);
3208 __ Cmp(function, x5);
3212 __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
3213 __ Adds(x4, x4, Operand(Smi::FromInt(1)));
3214 __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
3216 // Store the function. Use a stub since we need a frame for allocation.
3221 FrameScope scope(masm, StackFrame::INTERNAL);
3222 CreateWeakCellStub create_stub(masm->isolate());
3224 __ CallStub(&create_stub);
3228 __ B(&have_js_function);
  // We are here because tracing is on or we encountered a MISS case we can't
  // handle here.
3236 __ bind(&slow_start);
3238 // Check that the function is really a JavaScript function.
3239 __ JumpIfSmi(function, &non_function);
3241 // Goto slow case if we do not have a function.
3242 __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
3243 __ B(&have_js_function);
3247 void CallICStub::GenerateMiss(MacroAssembler* masm) {
3248 ASM_LOCATION("CallICStub[Miss]");
3250 FrameScope scope(masm, StackFrame::INTERNAL);
3252 // Push the receiver and the function and feedback info.
3253 __ Push(x1, x2, x3);
3256 IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
3257 : IC::kCallIC_Customization_Miss;
3259 ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
3260 __ CallExternalReference(miss, 3);
  // Move the result to x1 and exit the internal frame.
  __ Mov(x1, x0);
3267 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3268 // If the receiver is a smi trigger the non-string case.
3269 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
3270 __ JumpIfSmi(object_, receiver_not_string_);
3272 // Fetch the instance type of the receiver into result register.
3273 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3274 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3276 // If the receiver is not a string trigger the non-string case.
3277 __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
3280 // If the index is non-smi trigger the non-smi case.
3281 __ JumpIfNotSmi(index_, &index_not_smi_);
3283 __ Bind(&got_smi_index_);
3284 // Check for index out of range.
3285 __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
3286 __ Cmp(result_, Operand::UntagSmi(index_));
3287 __ B(ls, index_out_of_range_);
3289 __ SmiUntag(index_);
3291 StringCharLoadGenerator::Generate(masm,
3301 void StringCharCodeAtGenerator::GenerateSlow(
3302 MacroAssembler* masm,
3303 const RuntimeCallHelper& call_helper) {
3304 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3306 __ Bind(&index_not_smi_);
3307 // If index is a heap number, try converting it to an integer.
3308 __ JumpIfNotHeapNumber(index_, index_not_number_);
3309 call_helper.BeforeCall(masm);
3310 // Save object_ on the stack and pass index_ as argument for runtime call.
3311 __ Push(object_, index_);
3312 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3313 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3315 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3316 // NumberToSmi discards numbers that are not exact integers.
3317 __ CallRuntime(Runtime::kNumberToSmi, 1);
3319 // Save the conversion result before the pop instructions below
3320 // have a chance to overwrite it.
3323 // Reload the instance type.
3324 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3325 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3326 call_helper.AfterCall(masm);
3328 // If index is still not a smi, it must be out of range.
3329 __ JumpIfNotSmi(index_, index_out_of_range_);
3330 // Otherwise, return to the fast path.
3331 __ B(&got_smi_index_);
3333 // Call runtime. We get here when the receiver is a string and the
3334 // index is a number, but the code of getting the actual character
3335 // is too complex (e.g., when the string needs to be flattened).
3336 __ Bind(&call_runtime_);
3337 call_helper.BeforeCall(masm);
3339 __ Push(object_, index_);
3340 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
3341 __ Mov(result_, x0);
3342 call_helper.AfterCall(masm);
3345 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3349 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3350 __ JumpIfNotSmi(code_, &slow_case_);
3351 __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
3352 __ B(hi, &slow_case_);
3354 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3355 // At this point code register contains smi tagged one-byte char code.
3356 __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
3357 __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3358 __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
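// The single-character string cache maps one-byte char codes to their string
// objects; an undefined entry means the string has not been cached yet, so we
// fall back to the slow case.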
3363 void StringCharFromCodeGenerator::GenerateSlow(
3364 MacroAssembler* masm,
3365 const RuntimeCallHelper& call_helper) {
3366 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3368 __ Bind(&slow_case_);
3369 call_helper.BeforeCall(masm);
3371 __ CallRuntime(Runtime::kCharFromCode, 1);
3372 __ Mov(result_, x0);
3373 call_helper.AfterCall(masm);
3376 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3380 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3381 // Inputs are in x1 (lhs) and x0 (rhs).
3382 DCHECK(state() == CompareICState::SMI);
3383 ASM_LOCATION("CompareICStub[Smis]");
3385 // Bail out (to 'miss') unless both x0 and x1 are smis.
3386 __ JumpIfEitherNotSmi(x0, x1, &miss);
3388 if (GetCondition() == eq) {
3389 // For equality we do not care about the sign of the result.
3392 // Untag before subtracting to avoid handling overflow.
3394 __ Sub(x0, x1, Operand::UntagSmi(x0));
3403 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3404 DCHECK(state() == CompareICState::NUMBER);
3405 ASM_LOCATION("CompareICStub[HeapNumbers]");
3407 Label unordered, maybe_undefined1, maybe_undefined2;
3408 Label miss, handle_lhs, values_in_d_regs;
3409 Label untag_rhs, untag_lhs;
3411 Register result = x0;
3414 FPRegister rhs_d = d0;
3415 FPRegister lhs_d = d1;
3417 if (left() == CompareICState::SMI) {
3418 __ JumpIfNotSmi(lhs, &miss);
3420 if (right() == CompareICState::SMI) {
3421 __ JumpIfNotSmi(rhs, &miss);
3424 __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
3425 __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
3427 // Load rhs if it's a heap number.
3428 __ JumpIfSmi(rhs, &handle_lhs);
3429 __ JumpIfNotHeapNumber(rhs, &maybe_undefined1);
3430 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
3432 // Load lhs if it's a heap number.
3433 __ Bind(&handle_lhs);
3434 __ JumpIfSmi(lhs, &values_in_d_regs);
3435 __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
3436 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
3438 __ Bind(&values_in_d_regs);
3439 __ Fcmp(lhs_d, rhs_d);
3440 __ B(vs, &unordered); // Overflow flag set if either is NaN.
3441 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
3442 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
3443 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
3446 __ Bind(&unordered);
3447 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3448 CompareICState::GENERIC, CompareICState::GENERIC);
3449 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3451 __ Bind(&maybe_undefined1);
3452 if (Token::IsOrderedRelationalCompareOp(op())) {
3453 __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
3454 __ JumpIfSmi(lhs, &unordered);
3455 __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
3459 __ Bind(&maybe_undefined2);
3460 if (Token::IsOrderedRelationalCompareOp(op())) {
3461 __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
3469 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3470 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3471 ASM_LOCATION("CompareICStub[InternalizedStrings]");
3474 Register result = x0;
3478 // Check that both operands are heap objects.
3479 __ JumpIfEitherSmi(lhs, rhs, &miss);
3481 // Check that both operands are internalized strings.
3482 Register rhs_map = x10;
3483 Register lhs_map = x11;
3484 Register rhs_type = x10;
3485 Register lhs_type = x11;
3486 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3487 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3488 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
3489 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
3491 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
3492 __ Orr(x12, lhs_type, rhs_type);
3493 __ TestAndBranchIfAnySet(
3494 x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
3496 // Internalized strings are compared by identity.
3497 STATIC_ASSERT(EQUAL == 0);
3499 __ Cset(result, ne);
3507 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3508 DCHECK(state() == CompareICState::UNIQUE_NAME);
3509 ASM_LOCATION("CompareICStub[UniqueNames]");
3510 DCHECK(GetCondition() == eq);
3513 Register result = x0;
3517 Register lhs_instance_type = w2;
3518 Register rhs_instance_type = w3;
3520 // Check that both operands are heap objects.
3521 __ JumpIfEitherSmi(lhs, rhs, &miss);
3523 // Check that both operands are unique names. This leaves the instance
3524 // types loaded in lhs_instance_type and rhs_instance_type.
3525 __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
3526 __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
3527 __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
3528 __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
3530 // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
3531 // should have kInternalizedTag set.
3532 __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
3533 __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
3535 // Unique names are compared by identity.
3536 STATIC_ASSERT(EQUAL == 0);
3538 __ Cset(result, ne);
3546 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3547 DCHECK(state() == CompareICState::STRING);
3548 ASM_LOCATION("CompareICStub[Strings]");
3552 bool equality = Token::IsEqualityOp(op());
3554 Register result = x0;
3558 // Check that both operands are heap objects.
3559 __ JumpIfEitherSmi(rhs, lhs, &miss);
3561 // Check that both operands are strings.
3562 Register rhs_map = x10;
3563 Register lhs_map = x11;
3564 Register rhs_type = x10;
3565 Register lhs_type = x11;
3566 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3567 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3568 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
3569 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
3570 STATIC_ASSERT(kNotStringTag != 0);
3571 __ Orr(x12, lhs_type, rhs_type);
3572 __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
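// Or-ing the two instance types lets a single bit test reject the case where
// either operand is not a string.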
3574 // Fast check for identical strings.
3577 __ B(ne, &not_equal);
3578 __ Mov(result, EQUAL);
3581 __ Bind(&not_equal);
3582 // Handle strings that are not identical.
3584 // Check that both strings are internalized strings. If they are, we're done
3585 // because we already know they are not identical. We know they are both strings.
3588 DCHECK(GetCondition() == eq);
3589 STATIC_ASSERT(kInternalizedTag == 0);
3590 Label not_internalized_strings;
3591 __ Orr(x12, lhs_type, rhs_type);
3592 __ TestAndBranchIfAnySet(
3593 x12, kIsNotInternalizedMask, &not_internalized_strings);
3594 // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
3596 __ Bind(&not_internalized_strings);
3599 // Check that both strings are sequential one-byte.
3601 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x12,
3604 // Compare flat one-byte strings. Returns when done.
3606 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
3609 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
3613 // Handle more complex cases in runtime.
3617 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3619 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3627 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3628 DCHECK(state() == CompareICState::OBJECT);
3629 ASM_LOCATION("CompareICStub[Objects]");
3633 Register result = x0;
3637 __ JumpIfEitherSmi(rhs, lhs, &miss);
3639 __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
3640 __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
3642 DCHECK(GetCondition() == eq);
3643 __ Sub(result, rhs, lhs);
3651 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3652 ASM_LOCATION("CompareICStub[KnownObjects]");
3655 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
3657 Register result = x0;
3661 __ JumpIfEitherSmi(rhs, lhs, &miss);
3663 Register rhs_map = x10;
3664 Register lhs_map = x11;
3666 __ GetWeakValue(map, cell);
3667 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3668 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3669 __ Cmp(rhs_map, map);
3671 __ Cmp(lhs_map, map);
3674 __ Sub(result, rhs, lhs);
3682 // This method handles the case where a compare stub had the wrong
3683 // implementation. It calls a miss handler, which re-writes the stub. All other
3684 // CompareICStub::Generate* methods should fall back into this one if their
3685 // operands were not the expected types.
3686 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3687 ASM_LOCATION("CompareICStub[Miss]");
3689 Register stub_entry = x11;
3691 ExternalReference miss =
3692 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3694 FrameScope scope(masm, StackFrame::INTERNAL);
3697 Register right = x0;
3698 // Preserve some caller-saved registers.
3699 __ Push(x1, x0, lr);
3700 // Push the arguments.
3701 __ Mov(op, Smi::FromInt(this->op()));
3702 __ Push(left, right, op);
3704 // Call the miss handler. This also pops the arguments.
3705 __ CallExternalReference(miss, 3);
3707 // Compute the entry point of the rewritten stub.
3708 __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
3709 // Restore caller-saved registers.
3713 // Tail-call to the new stub.
3714 __ Jump(stub_entry);
3718 void SubStringStub::Generate(MacroAssembler* masm) {
3719 ASM_LOCATION("SubStringStub::Generate");
3722 // Stack frame on entry.
3723 // lr: return address
3724 // jssp[0]: substring "to" offset
3725 // jssp[8]: substring "from" offset
3726 // jssp[16]: pointer to string object
3728 // This stub is called from the native-call %_SubString(...), so
3729 // nothing can be assumed about the arguments. It is tested that:
3730 // "string" is a sequential string,
3731 // both "from" and "to" are smis, and
3732 // 0 <= from <= to <= string.length (in debug mode.)
3733 // If any of these assumptions fail, we call the runtime system.
3735 static const int kToOffset = 0 * kPointerSize;
3736 static const int kFromOffset = 1 * kPointerSize;
3737 static const int kStringOffset = 2 * kPointerSize;
3740 Register from = x15;
3741 Register input_string = x10;
3742 Register input_length = x11;
3743 Register input_type = x12;
3744 Register result_string = x0;
3745 Register result_length = x1;
3748 __ Peek(to, kToOffset);
3749 __ Peek(from, kFromOffset);
3751 // Check that both from and to are smis. If not, jump to runtime.
3752 __ JumpIfEitherNotSmi(from, to, &runtime);
3756 // Calculate difference between from and to. If to < from, branch to runtime.
3757 __ Subs(result_length, to, from);
3760 // Check from is positive.
3761 __ Tbnz(from, kWSignBit, &runtime);
3763 // Make sure first argument is a string.
3764 __ Peek(input_string, kStringOffset);
3765 __ JumpIfSmi(input_string, &runtime);
3766 __ IsObjectJSStringType(input_string, input_type, &runtime);
3769 __ Cmp(result_length, 1);
3770 __ B(eq, &single_char);
3772 // Short-cut for the case of trivial substring.
3774 __ Ldrsw(input_length,
3775 UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
3777 __ Cmp(result_length, input_length);
3778 __ CmovX(x0, input_string, eq);
3779 // Return original string.
3780 __ B(eq, &return_x0);
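// The conditional move above already selected the original string into x0 when
// the requested length equals the input length, so this branch returns it
// without copying.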
3782 // Longer than original string's length or negative: unsafe arguments.
3785 // Shorter than original string's length: an actual substring.
3787 // x0 to substring end character offset
3788 // x1 result_length length of substring result
3789 // x10 input_string pointer to input string object
3790 // x10 unpacked_string pointer to unpacked string object
3791 // x11 input_length length of input string
3792 // x12 input_type instance type of input string
3793 // x15 from substring start character offset
3795 // Deal with different string types: update the index if necessary and put
3796 // the underlying string into register unpacked_string.
3797 Label underlying_unpacked, sliced_string, seq_or_external_string;
3798 Label update_instance_type;
3799 // If the string is not indirect, it can only be sequential or external.
3800 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3801 STATIC_ASSERT(kIsIndirectStringMask != 0);
3803 // Test for string types, and branch/fall through to appropriate unpacking code.
3805 __ Tst(input_type, kIsIndirectStringMask);
3806 __ B(eq, &seq_or_external_string);
3807 __ Tst(input_type, kSlicedNotConsMask);
3808 __ B(ne, &sliced_string);
3810 Register unpacked_string = input_string;
3812 // Cons string. Check whether it is flat, then fetch first part.
3813 __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
3814 __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
3815 __ Ldr(unpacked_string,
3816 FieldMemOperand(input_string, ConsString::kFirstOffset));
3817 __ B(&update_instance_type);
3819 __ Bind(&sliced_string);
3820 // Sliced string. Fetch parent and correct start index by offset.
3822 UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
3823 __ Add(from, from, temp);
3824 __ Ldr(unpacked_string,
3825 FieldMemOperand(input_string, SlicedString::kParentOffset));
3827 __ Bind(&update_instance_type);
3828 __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
3829 __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
3830 // Now control must go to &underlying_unpacked. Since no code is generated
3831 // before then, we fall through instead of generating a useless branch.
3833 __ Bind(&seq_or_external_string);
3834 // Sequential or external string. Registers unpacked_string and input_string
3835 // alias, so there's nothing to do here.
3836 // Note that if code is added here, the above code must be updated.
3838 // x0 result_string pointer to result string object (uninit)
3839 // x1 result_length length of substring result
3840 // x10 unpacked_string pointer to unpacked string object
3841 // x11 input_length length of input string
3842 // x12 input_type instance type of input string
3843 // x15 from substring start character offset
3844 __ Bind(&underlying_unpacked);
3846 if (FLAG_string_slices) {
3848 __ Cmp(result_length, SlicedString::kMinLength);
3849 // Short slice. Copy instead of slicing.
3850 __ B(lt, &copy_routine);
3851 // Allocate new sliced string. At this point we do not reload the instance
3852 // type including the string encoding because we simply rely on the info
3853 // provided by the original string. It does not matter if the original
3854 // string's encoding is wrong because we always have to recheck encoding of
3855 // the newly created string's parent anyway due to externalized strings.
3856 Label two_byte_slice, set_slice_header;
3857 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3858 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3859 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
3860 __ AllocateOneByteSlicedString(result_string, result_length, x3, x4,
3862 __ B(&set_slice_header);
3864 __ Bind(&two_byte_slice);
3865 __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
3868 __ Bind(&set_slice_header);
3870 __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
3871 __ Str(unpacked_string,
3872 FieldMemOperand(result_string, SlicedString::kParentOffset));
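// A sliced string stores only its parent and the start offset; no characters
// are copied on this path.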
3875 __ Bind(&copy_routine);
3878 // x0 result_string pointer to result string object (uninit)
3879 // x1 result_length length of substring result
3880 // x10 unpacked_string pointer to unpacked string object
3881 // x11 input_length length of input string
3882 // x12 input_type instance type of input string
3883 // x13 unpacked_char0 pointer to first char of unpacked string (uninit)
3884 // x13 substring_char0 pointer to first char of substring (uninit)
3885 // x14 result_char0 pointer to first char of result (uninit)
3886 // x15 from substring start character offset
3887 Register unpacked_char0 = x13;
3888 Register substring_char0 = x13;
3889 Register result_char0 = x14;
3890 Label two_byte_sequential, sequential_string, allocate_result;
3891 STATIC_ASSERT(kExternalStringTag != 0);
3892 STATIC_ASSERT(kSeqStringTag == 0);
3894 __ Tst(input_type, kExternalStringTag);
3895 __ B(eq, &sequential_string);
3897 __ Tst(input_type, kShortExternalStringTag);
3899 __ Ldr(unpacked_char0,
3900 FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
3901 // unpacked_char0 points to the first character of the underlying string.
3902 __ B(&allocate_result);
3904 __ Bind(&sequential_string);
3905 // Locate first character of underlying subject string.
3906 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3907 __ Add(unpacked_char0, unpacked_string,
3908 SeqOneByteString::kHeaderSize - kHeapObjectTag);
3910 __ Bind(&allocate_result);
3911 // Sequential one-byte string. Allocate the result.
3912 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3913 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
3915 // Allocate and copy the resulting one-byte string.
3916 __ AllocateOneByteString(result_string, result_length, x3, x4, x5, &runtime);
3918 // Locate first character of substring to copy.
3919 __ Add(substring_char0, unpacked_char0, from);
3921 // Locate first character of result.
3922 __ Add(result_char0, result_string,
3923 SeqOneByteString::kHeaderSize - kHeapObjectTag);
3925 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3926 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
3929 // Allocate and copy the resulting two-byte string.
3930 __ Bind(&two_byte_sequential);
3931 __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
3933 // Locate first character of substring to copy.
3934 __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
3936 // Locate first character of result.
3937 __ Add(result_char0, result_string,
3938 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
3940 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3941 __ Add(result_length, result_length, result_length);
3942 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
3944 __ Bind(&return_x0);
3945 Counters* counters = isolate()->counters();
3946 __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
3951 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3953 __ bind(&single_char);
3954 // x1: result_length
3955 // x10: input_string
3957 // x15: from (untagged)
3959 StringCharAtGenerator generator(input_string, from, result_length, x0,
3960 &runtime, &runtime, &runtime,
3961 STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
3962 generator.GenerateFast(masm);
3965 generator.SkipSlow(masm, &runtime);
3969 void ToNumberStub::Generate(MacroAssembler* masm) {
3970 // The ToNumber stub takes one argument in x0.
3972 __ JumpIfNotSmi(x0, &not_smi);
3976 Label not_heap_number;
3977 __ Ldr(x1, FieldMemOperand(x0, HeapObject::kMapOffset));
3978 __ Ldrb(x1, FieldMemOperand(x1, Map::kInstanceTypeOffset));
3980 // x1: instance type
3981 __ Cmp(x1, HEAP_NUMBER_TYPE);
3982 __ B(ne, &not_heap_number);
3984 __ Bind(&not_heap_number);
3986 Label not_string, slow_string;
3987 __ Cmp(x1, FIRST_NONSTRING_TYPE);
3988 __ B(hs, &not_string);
3989 // Check if string has a cached array index.
3990 __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
3991 __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
3992 __ B(ne, &slow_string);
3993 __ IndexFromHash(x2, x0);
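// IndexFromHash decodes the array index cached in the hash field and leaves it
// in x0 as a smi, which is the ToNumber result for such strings.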
3995 __ Bind(&slow_string);
3996 __ Push(x0); // Push argument.
3997 __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
3998 __ Bind(&not_string);
4001 __ Cmp(x1, ODDBALL_TYPE);
4002 __ B(ne, &not_oddball);
4003 __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
4005 __ Bind(&not_oddball);
4007 __ Push(x0); // Push argument.
4008 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
4012 void StringHelper::GenerateFlatOneByteStringEquals(
4013 MacroAssembler* masm, Register left, Register right, Register scratch1,
4014 Register scratch2, Register scratch3) {
4015 DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
4016 Register result = x0;
4017 Register left_length = scratch1;
4018 Register right_length = scratch2;
4020 // Compare lengths. If lengths differ, strings can't be equal. Lengths are
4021 // smis, and don't need to be untagged.
4022 Label strings_not_equal, check_zero_length;
4023 __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
4024 __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
4025 __ Cmp(left_length, right_length);
4026 __ B(eq, &check_zero_length);
4028 __ Bind(&strings_not_equal);
4029 __ Mov(result, Smi::FromInt(NOT_EQUAL));
4032 // Check if the length is zero. If so, the strings must be equal (and empty.)
4033 Label compare_chars;
4034 __ Bind(&check_zero_length);
4035 STATIC_ASSERT(kSmiTag == 0);
4036 __ Cbnz(left_length, &compare_chars);
4037 __ Mov(result, Smi::FromInt(EQUAL));
4040 // Compare characters. Falls through if all characters are equal.
4041 __ Bind(&compare_chars);
4042 GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
4043 scratch3, &strings_not_equal);
4045 // Characters in strings are equal.
4046 __ Mov(result, Smi::FromInt(EQUAL));
4051 void StringHelper::GenerateCompareFlatOneByteStrings(
4052 MacroAssembler* masm, Register left, Register right, Register scratch1,
4053 Register scratch2, Register scratch3, Register scratch4) {
4054 DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
4055 Label result_not_equal, compare_lengths;
4057 // Find minimum length and length difference.
4058 Register length_delta = scratch3;
4059 __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
4060 __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
4061 __ Subs(length_delta, scratch1, scratch2);
4063 Register min_length = scratch1;
4064 __ Csel(min_length, scratch2, scratch1, gt);
4065 __ Cbz(min_length, &compare_lengths);
4068 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
4069 scratch4, &result_not_equal);
4071 // Compare lengths - strings up to min-length are equal.
4072 __ Bind(&compare_lengths);
4074 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
4076 // Use length_delta as result if it's zero.
4077 Register result = x0;
4078 __ Subs(result, length_delta, 0);
4080 __ Bind(&result_not_equal);
4081 Register greater = x10;
4082 Register less = x11;
4083 __ Mov(greater, Smi::FromInt(GREATER));
4084 __ Mov(less, Smi::FromInt(LESS));
4085 __ CmovX(result, greater, gt);
4086 __ CmovX(result, less, lt);
4091 void StringHelper::GenerateOneByteCharsCompareLoop(
4092 MacroAssembler* masm, Register left, Register right, Register length,
4093 Register scratch1, Register scratch2, Label* chars_not_equal) {
4094 DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
4096 // Change index to run from -length to -1 by adding length to string
4097 // start. This means that the loop ends when index reaches zero, which
4098 // doesn't need an additional compare.
4099 __ SmiUntag(length);
4100 __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
4101 __ Add(left, left, scratch1);
4102 __ Add(right, right, scratch1);
4104 Register index = length;
4105 __ Neg(index, length); // index = -length;
4110 __ Ldrb(scratch1, MemOperand(left, index));
4111 __ Ldrb(scratch2, MemOperand(right, index));
4112 __ Cmp(scratch1, scratch2);
4113 __ B(ne, chars_not_equal);
4114 __ Add(index, index, 1);
4115 __ Cbnz(index, &loop);
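// A mismatch exits through chars_not_equal above; falling through here means
// all compared characters were equal.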
4119 void StringCompareStub::Generate(MacroAssembler* masm) {
4122 Counters* counters = isolate()->counters();
4124 // Stack frame on entry.
4125 // sp[0]: right string
4126 // sp[8]: left string
4127 Register right = x10;
4128 Register left = x11;
4129 Register result = x0;
4130 __ Pop(right, left);
4133 __ Subs(result, right, left);
4134 __ B(ne, &not_same);
4135 STATIC_ASSERT(EQUAL == 0);
4136 __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
4141 // Check that both objects are sequential one-byte strings.
4142 __ JumpIfEitherIsNotSequentialOneByteStrings(left, right, x12, x13, &runtime);
4144 // Compare flat one-byte strings natively. Remove arguments from stack first,
4145 // as this function will generate a return.
4146 __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
4147 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, x12, x13,
4152 // Push arguments back on to the stack.
4153 // sp[0] = right string
4154 // sp[8] = left string.
4155 __ Push(left, right);
4157 // Call the runtime.
4158 // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
4159 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
4163 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
4164 // ----------- S t a t e -------------
4167 // -- lr : return address
4168 // -----------------------------------
4170 // Load x2 with the allocation site. We stick an undefined dummy value here
4171 // and replace it with the real allocation site later when we instantiate this
4172 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
4173 __ LoadObject(x2, handle(isolate()->heap()->undefined_value()));
4175 // Make sure that we actually patched the allocation site.
4176 if (FLAG_debug_code) {
4177 __ AssertNotSmi(x2, kExpectedAllocationSite);
4178 __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
4179 __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
4180 kExpectedAllocationSite);
4183 // Tail call into the stub that handles binary operations with allocation
4185 BinaryOpWithAllocationSiteStub stub(isolate(), state());
4186 __ TailCallStub(&stub);
4190 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4191 // We need some extra registers for this stub; they have been allocated,
4192 // but we need to save them before using them.
4195 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4196 Label dont_need_remembered_set;
4198 Register val = regs_.scratch0();
4199 __ Ldr(val, MemOperand(regs_.address()));
4200 __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
4202 __ CheckPageFlagSet(regs_.object(), val, 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4203 &dont_need_remembered_set);
4205 // First notify the incremental marker if necessary, then update the remembered set.
4207 CheckNeedsToInformIncrementalMarker(
4208 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4209 InformIncrementalMarker(masm);
4210 regs_.Restore(masm); // Restore the extra scratch registers we used.
4212 __ RememberedSetHelper(object(), address(),
4213 value(), // scratch1
4214 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
4216 __ Bind(&dont_need_remembered_set);
4219 CheckNeedsToInformIncrementalMarker(
4220 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4221 InformIncrementalMarker(masm);
4222 regs_.Restore(masm); // Restore the extra scratch registers we used.
4227 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4228 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
4230 x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
4231 DCHECK(!address.Is(regs_.object()));
4232 DCHECK(!address.Is(x0));
4233 __ Mov(address, regs_.address());
4234 __ Mov(x0, regs_.object());
4235 __ Mov(x1, address);
4236 __ Mov(x2, ExternalReference::isolate_address(isolate()));
4238 AllowExternalCallThatCantCauseGC scope(masm);
4239 ExternalReference function =
4240 ExternalReference::incremental_marking_record_write_function(
4242 __ CallCFunction(function, 3, 0);
4244 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
4248 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4249 MacroAssembler* masm,
4250 OnNoNeedToInformIncrementalMarker on_no_need,
4253 Label need_incremental;
4254 Label need_incremental_pop_scratch;
4256 Register mem_chunk = regs_.scratch0();
4257 Register counter = regs_.scratch1();
4258 __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
4260 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
4261 __ Subs(counter, counter, 1);
4263 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
4264 __ B(mi, &need_incremental);
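// Each page carries a write barrier budget in kWriteBarrierCounterOffset; once
// the counter goes negative we stop taking this fast path and inform the
// incremental marker instead.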
4266 // If the object is not black we don't have to inform the incremental marker.
4267 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4269 regs_.Restore(masm); // Restore the extra scratch registers we used.
4270 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4271 __ RememberedSetHelper(object(), address(),
4272 value(), // scratch1
4273 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
4279 // Get the value from the slot.
4280 Register val = regs_.scratch0();
4281 __ Ldr(val, MemOperand(regs_.address()));
4283 if (mode == INCREMENTAL_COMPACTION) {
4284 Label ensure_not_white;
4286 __ CheckPageFlagClear(val, regs_.scratch1(),
4287 MemoryChunk::kEvacuationCandidateMask,
4290 __ CheckPageFlagClear(regs_.object(),
4292 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4295 __ Bind(&ensure_not_white);
4298 // We need extra registers for this, so we push the object and the address
4299 // register temporarily.
4300 __ Push(regs_.address(), regs_.object());
4301 __ EnsureNotWhite(val,
4302 regs_.scratch1(), // Scratch.
4303 regs_.object(), // Scratch.
4304 regs_.address(), // Scratch.
4305 regs_.scratch2(), // Scratch.
4306 &need_incremental_pop_scratch);
4307 __ Pop(regs_.object(), regs_.address());
4309 regs_.Restore(masm); // Restore the extra scratch registers we used.
4310 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4311 __ RememberedSetHelper(object(), address(),
4312 value(), // scratch1
4313 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
4318 __ Bind(&need_incremental_pop_scratch);
4319 __ Pop(regs_.object(), regs_.address());
4321 __ Bind(&need_incremental);
4322 // Fall through when we need to inform the incremental marker.
4326 void RecordWriteStub::Generate(MacroAssembler* masm) {
4327 Label skip_to_incremental_noncompacting;
4328 Label skip_to_incremental_compacting;
4330 // We patch the first two instructions back and forth between a nop and a
4331 // real branch when we start and stop incremental heap marking.
4332 // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops are generated.
4334 // See RecordWriteStub::Patch for details.
4336 InstructionAccurateScope scope(masm, 2);
4337 __ adr(xzr, &skip_to_incremental_noncompacting);
4338 __ adr(xzr, &skip_to_incremental_compacting);
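// 'adr xzr, <label>' computes the label address into the zero register, so it
// behaves as a nop while still encoding its target; RecordWriteStub::Patch can
// later rewrite these instructions into real branches.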
4341 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4342 __ RememberedSetHelper(object(), address(),
4343 value(), // scratch1
4344 save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
4348 __ Bind(&skip_to_incremental_noncompacting);
4349 GenerateIncremental(masm, INCREMENTAL);
4351 __ Bind(&skip_to_incremental_compacting);
4352 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4356 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4357 // x0 value element value to store
4358 // x3 index_smi element index as smi
4359 // sp[0] array_index_smi array literal index in function as smi
4360 // sp[1] array array literal
4362 Register value = x0;
4363 Register index_smi = x3;
4365 Register array = x1;
4366 Register array_map = x2;
4367 Register array_index_smi = x4;
4368 __ PeekPair(array_index_smi, array, 0);
4369 __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
4371 Label double_elements, smi_element, fast_elements, slow_elements;
4372 Register bitfield2 = x10;
4373 __ Ldrb(bitfield2, FieldMemOperand(array_map, Map::kBitField2Offset));
4375 // Jump if array's ElementsKind is not FAST*_SMI_ELEMENTS, FAST_ELEMENTS or
4376 // FAST_HOLEY_ELEMENTS.
4377 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4378 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4379 STATIC_ASSERT(FAST_ELEMENTS == 2);
4380 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4381 __ Cmp(bitfield2, Map::kMaximumBitField2FastHoleyElementValue);
4382 __ B(hi, &double_elements);
4384 __ JumpIfSmi(value, &smi_element);
4386 // Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
4387 __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::ElementsKindBits::kShift),
4390 // Store into the array literal requires an elements transition. Call into the runtime to handle it.
4392 __ Bind(&slow_elements);
4393 __ Push(array, index_smi, value);
4394 __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4395 __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
4396 __ Push(x11, array_index_smi);
4397 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4399 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4400 __ Bind(&fast_elements);
4401 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4402 __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
4403 __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
4404 __ Str(value, MemOperand(x11));
4405 // Update the write barrier for the array store.
4406 __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
4407 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4410 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4411 // and value is Smi.
4412 __ Bind(&smi_element);
4413 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4414 __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
4415 __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
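// Storing a smi can never create a pointer that the GC needs to track, so no
// write barrier is required on this path.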
4418 __ Bind(&double_elements);
4419 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4420 __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0,
4426 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4427 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4428 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4429 int parameter_count_offset =
4430 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4431 __ Ldr(x1, MemOperand(fp, parameter_count_offset));
4432 if (function_mode() == JS_FUNCTION_STUB_MODE) {
4435 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4437 // Return to IC Miss stub, continuation still on stack.
4442 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4443 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4444 VectorLoadStub stub(isolate(), state());
4445 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4449 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4450 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4451 VectorKeyedLoadStub stub(isolate());
4452 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4456 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
4457 EmitLoadTypeFeedbackVector(masm, x2);
4458 CallICStub stub(isolate(), state());
4459 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4463 void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
4464 EmitLoadTypeFeedbackVector(masm, x2);
4465 CallIC_ArrayStub stub(isolate(), state());
4466 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4470 // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
4471 // a "Push lr" instruction, followed by a call.
4472 static const unsigned int kProfileEntryHookCallSize =
4473 Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
4476 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4477 if (masm->isolate()->function_entry_hook() != NULL) {
4478 ProfileEntryHookStub stub(masm->isolate());
4479 Assembler::BlockConstPoolScope no_const_pools(masm);
4480 DontEmitDebugCodeScope no_debug_code(masm);
4481 Label entry_hook_call_start;
4482 __ Bind(&entry_hook_call_start);
4485 DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
4486 kProfileEntryHookCallSize);
4493 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4494 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
4496 // Save all kCallerSaved registers (including lr), since this can be called from anywhere.
4498 // TODO(jbramley): What about FP registers?
4499 __ PushCPURegList(kCallerSaved);
4500 DCHECK(kCallerSaved.IncludesAliasOf(lr));
4501 const int kNumSavedRegs = kCallerSaved.Count();
4503 // Compute the function's address as the first argument.
4504 __ Sub(x0, lr, kProfileEntryHookCallSize);
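// lr points just past the entry hook call sequence in the instrumented
// function, so subtracting the sequence size recovers that function's entry
// address for the hook.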
4506 #if V8_HOST_ARCH_ARM64
4507 uintptr_t entry_hook =
4508 reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
4509 __ Mov(x10, entry_hook);
4511 // Under the simulator we need to indirect the entry hook through a trampoline
4512 // function at a known address.
4513 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4514 __ Mov(x10, Operand(ExternalReference(&dispatcher,
4515 ExternalReference::BUILTIN_CALL,
4517 // It additionally takes an isolate as a third parameter
4518 __ Mov(x2, ExternalReference::isolate_address(isolate()));
4521 // The caller's return address is above the saved temporaries.
4522 // Grab its location for the second argument to the hook.
4523 __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
4526 // Create a dummy frame, as CallCFunction requires this.
4527 FrameScope frame(masm, StackFrame::MANUAL);
4528 __ CallCFunction(x10, 2, 0);
4531 __ PopCPURegList(kCallerSaved);
4536 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4537 // When calling into C++ code the stack pointer must be csp.
4538 // Therefore this code must use csp for peek/poke operations when the
4539 // stub is generated. When the stub is called
4540 // (via DirectCEntryStub::GenerateCall), the caller must set up an ExitFrame
4541 // and configure the stack pointer *before* doing the call.
4542 const Register old_stack_pointer = __ StackPointer();
4543 __ SetStackPointer(csp);
4545 // Put return address on the stack (accessible to GC through exit frame pc).
4547 // Call the C++ function.
4549 // Return to calling code.
4551 __ AssertFPCRState();
4554 __ SetStackPointer(old_stack_pointer);
4557 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4559 // Make sure the caller configured the stack pointer (see comment in
4560 // DirectCEntryStub::Generate).
4561 DCHECK(csp.Is(__ StackPointer()));
4564 reinterpret_cast<intptr_t>(GetCode().location());
4565 __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
4566 __ Mov(x10, target);
4567 // Branch to the stub.
4572 // Probe the name dictionary in the 'elements' register.
4573 // Jump to the 'done' label if a property with the given name is found.
4574 // Jump to the 'miss' label otherwise.
4576 // If the lookup was successful, 'scratch2' will be equal to elements + 8 * index.
4577 // 'elements' and 'name' registers are preserved on miss.
4578 void NameDictionaryLookupStub::GeneratePositiveLookup(
4579 MacroAssembler* masm,
4585 Register scratch2) {
4586 DCHECK(!AreAliased(elements, name, scratch1, scratch2));
4588 // Assert that name contains a string.
4589 __ AssertName(name);
4591 // Compute the capacity mask.
4592 __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
4593 __ Sub(scratch1, scratch1, 1);
4595 // Generate an unrolled loop that performs a few probes before giving up.
4596 for (int i = 0; i < kInlinedProbes; i++) {
4597 // Compute the masked index: (hash + i + i * i) & mask.
4598 __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4600 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4601 // the hash in a separate instruction. The value hash + i + i * i is right
4602 // shifted in the following and instruction.
4603 DCHECK(NameDictionary::GetProbeOffset(i) <
4604 1 << (32 - Name::kHashFieldOffset));
4605 __ Add(scratch2, scratch2, Operand(
4606 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4608 __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
4610 // Scale the index by multiplying by the element size.
4611 DCHECK(NameDictionary::kEntrySize == 3);
4612 __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
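// index + (index << 1) == index * 3, matching the three-pointer dictionary
// entries (key, value, details).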
4614 // Check if the key is identical to the name.
4615 UseScratchRegisterScope temps(masm);
4616 Register scratch3 = temps.AcquireX();
4617 __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
4618 __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
4619 __ Cmp(name, scratch3);
4623 // The inlined probes didn't find the entry.
4624 // Call the complete stub to scan the whole dictionary.
4626 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
4627 spill_list.Combine(lr);
4628 spill_list.Remove(scratch1);
4629 spill_list.Remove(scratch2);
4631 __ PushCPURegList(spill_list);
4634 DCHECK(!elements.is(x1));
4636 __ Mov(x0, elements);
4638 __ Mov(x0, elements);
4643 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4645 __ Cbz(x0, &not_found);
4646 __ Mov(scratch2, x2); // Move entry index into scratch2.
4647 __ PopCPURegList(spill_list);
4650 __ Bind(&not_found);
4651 __ PopCPURegList(spill_list);
4656 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4660 Register properties,
4662 Register scratch0) {
4663 DCHECK(!AreAliased(receiver, properties, scratch0));
4664 DCHECK(name->IsUniqueName());
4665 // If names of slots in range from 1 to kProbes - 1 for the hash value are
4666 // not equal to the name and kProbes-th slot is not used (its name is the
4667 // undefined value), it guarantees the hash table doesn't contain the
4668 // property. It's true even if some slots represent deleted properties
4669 // (their names are the hole value).
4670 for (int i = 0; i < kInlinedProbes; i++) {
4671 // scratch0 points to properties hash.
4672 // Compute the masked index: (hash + i + i * i) & mask.
4673 Register index = scratch0;
4674 // Capacity is smi 2^n.
4675 __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
4676 __ Sub(index, index, 1);
4677 __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
4679 // Scale the index by multiplying by the entry size.
4680 DCHECK(NameDictionary::kEntrySize == 3);
4681 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
4683 Register entity_name = scratch0;
4684 // Having undefined at this place means the name is not contained.
4685 Register tmp = index;
4686 __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
4687 __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4689 __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
4691 // Stop if we found the property.
4692 __ Cmp(entity_name, Operand(name));
4696 __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
4698 // Check if the entry name is not a unique name.
4699 __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4700 __ Ldrb(entity_name,
4701 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4702 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
4706 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
4707 spill_list.Combine(lr);
4708 spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
4710 __ PushCPURegList(spill_list);
4712 __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4713 __ Mov(x1, Operand(name));
4714 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
4716 // Move stub return value to scratch0. Note that scratch0 is not included in
4717 // spill_list and won't be clobbered by PopCPURegList.
4718 __ Mov(scratch0, x0);
4719 __ PopCPURegList(spill_list);
4721 __ Cbz(scratch0, done);
4726 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4727 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4728 // we cannot call anything that could cause a GC from this stub.
4730 // Arguments are in x0 and x1:
4731 // x0: property dictionary.
4732 // x1: the name of the property we are looking for.
4734 // Return value is in x0 and is zero if the lookup failed, non-zero otherwise.
4735 // If the lookup is successful, x2 will contain the index of the entry.
4737 Register result = x0;
4738 Register dictionary = x0;
4740 Register index = x2;
4743 Register undefined = x5;
4744 Register entry_key = x6;
4746 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4748 __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
4749 __ Sub(mask, mask, 1);
4751 __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4752 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4754 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4755 // Compute the masked index: (hash + i + i * i) & mask.
4756 // Capacity is smi 2^n.
4758 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4759 // the hash in a separate instruction. The value hash + i + i * i is right
4760 // shifted in the following And instruction.
4761 DCHECK(NameDictionary::GetProbeOffset(i) <
4762 1 << (32 - Name::kHashFieldOffset));
4764 NameDictionary::GetProbeOffset(i) << Name::kHashShift);
4766 __ Mov(index, hash);
4768 __ And(index, mask, Operand(index, LSR, Name::kHashShift));
4770 // Scale the index by multiplying by the entry size.
4771 DCHECK(NameDictionary::kEntrySize == 3);
4772 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
4774 __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
4775 __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
4777 // Having undefined at this place means the name is not contained.
4778 __ Cmp(entry_key, undefined);
4779 __ B(eq, &not_in_dictionary);
4781 // Stop if we found the property.
4782 __ Cmp(entry_key, key);
4783 __ B(eq, &in_dictionary);
4785 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
4786 // Check if the entry name is not a unique name.
4787 __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4788 __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4789 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
4793 __ Bind(&maybe_in_dictionary);
4794 // If we are doing negative lookup then probing failure should be
4795 // treated as a lookup success. For positive lookup, probing failure
4796 // should be treated as lookup failure.
4797 if (mode() == POSITIVE_LOOKUP) {
4802 __ Bind(&in_dictionary);
4806 __ Bind(&not_in_dictionary);
4813 static void CreateArrayDispatch(MacroAssembler* masm,
4814 AllocationSiteOverrideMode mode) {
4815 ASM_LOCATION("CreateArrayDispatch");
4816 if (mode == DISABLE_ALLOCATION_SITES) {
4817 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4818 __ TailCallStub(&stub);
4820 } else if (mode == DONT_OVERRIDE) {
4823 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
4824 for (int i = 0; i <= last_index; ++i) {
4826 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
4827 // TODO(jbramley): Is this the best way to handle this? Can we make the
4828 // tail calls conditional, rather than hopping over each one?
4829 __ CompareAndBranch(kind, candidate_kind, ne, &next);
4830 T stub(masm->isolate(), candidate_kind);
4831 __ TailCallStub(&stub);
4835 // If we reached this point there is a problem.
4836 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4844 // TODO(jbramley): If this needs to be a special case, make it a proper template
4845 // specialization, and not a separate function.
4846 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4847 AllocationSiteOverrideMode mode) {
4848 ASM_LOCATION("CreateArrayDispatchOneArgument");
4850 // x1 - constructor?
4851 // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4852 // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4853 // sp[0] - last argument
4855 Register allocation_site = x2;
4858 Label normal_sequence;
4859 if (mode == DONT_OVERRIDE) {
4860 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4861 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4862 STATIC_ASSERT(FAST_ELEMENTS == 2);
4863 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4864 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
4865 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4867 // Is the low bit set? If so, the array is holey.
4868 __ Tbnz(kind, 0, &normal_sequence);
4871 // Look at the last argument.
4872 // TODO(jbramley): What does a 0 argument represent?
4874 __ Cbz(x10, &normal_sequence);
4876 if (mode == DISABLE_ALLOCATION_SITES) {
4877 ElementsKind initial = GetInitialFastElementsKind();
4878 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4880 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4882 DISABLE_ALLOCATION_SITES);
4883 __ TailCallStub(&stub_holey);
4885 __ Bind(&normal_sequence);
4886 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4888 DISABLE_ALLOCATION_SITES);
4889 __ TailCallStub(&stub);
4890 } else if (mode == DONT_OVERRIDE) {
4891 // We are going to create a holey array, but our kind is non-holey.
4892 // Fix kind and retry (only if we have an allocation site in the slot).
4893 __ Orr(kind, kind, 1);
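// Setting bit 0 turns the packed kind into its holey counterpart, relying on
// the packed/holey interleaving asserted above.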
4895 if (FLAG_debug_code) {
4896 __ Ldr(x10, FieldMemOperand(allocation_site, 0));
4897 __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
4899 __ Assert(eq, kExpectedAllocationSite);
4902 // Save the resulting elements kind in type info. We can't just store 'kind'
4903 // in the AllocationSite::transition_info field because elements kind is
4904 // restricted to a portion of the field; upper bits need to be left alone.
4905 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4906 __ Ldr(x11, FieldMemOperand(allocation_site,
4907 AllocationSite::kTransitionInfoOffset));
4908 __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
4909 __ Str(x11, FieldMemOperand(allocation_site,
4910 AllocationSite::kTransitionInfoOffset));
4912 __ Bind(&normal_sequence);
4914 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
4915 for (int i = 0; i <= last_index; ++i) {
4917 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
4918 __ CompareAndBranch(kind, candidate_kind, ne, &next);
4919 ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind);
4920 __ TailCallStub(&stub);
4924 // If we reached this point there is a problem.
4925 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4933 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4934 int to_index = GetSequenceIndexFromFastElementsKind(
4935 TERMINAL_FAST_ELEMENTS_KIND);
4936 for (int i = 0; i <= to_index; ++i) {
4937 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4938 T stub(isolate, kind);
4940 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4941 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4948 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4949 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4951 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4953 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4958 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4960 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4961 for (int i = 0; i < 2; i++) {
4962 // For internal arrays we only need a few things
4963 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4965 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4967 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4973 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4974 MacroAssembler* masm,
4975 AllocationSiteOverrideMode mode) {
4977 if (argument_count() == ANY) {
4978 Label zero_case, n_case;
4979 __ Cbz(argc, &zero_case);
4984 CreateArrayDispatchOneArgument(masm, mode);
4986 __ Bind(&zero_case);
4988 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4992 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4994 } else if (argument_count() == NONE) {
4995 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4996 } else if (argument_count() == ONE) {
4997 CreateArrayDispatchOneArgument(masm, mode);
4998 } else if (argument_count() == MORE_THAN_ONE) {
4999 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5006 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5007 ASM_LOCATION("ArrayConstructorStub::Generate");
5008 // ----------- S t a t e -------------
5009 // -- x0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
5010 // -- x1 : constructor
5011 // -- x2 : AllocationSite or undefined
5012 // -- x3 : original constructor
5013 // -- sp[0] : last argument
5014 // -----------------------------------
5015 Register constructor = x1;
5016 Register allocation_site = x2;
5017 Register original_constructor = x3;
5019 if (FLAG_debug_code) {
5020 // The array construct code is only set for the global and natives
5021 // builtin Array functions which always have maps.
5023 Label unexpected_map, map_ok;
5024 // Initial map for the builtin Array function should be a map.
5025 __ Ldr(x10, FieldMemOperand(constructor,
5026 JSFunction::kPrototypeOrInitialMapOffset));
5027 // The smi check will catch both a NULL and a Smi.
5028 __ JumpIfSmi(x10, &unexpected_map);
5029 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
5030 __ Bind(&unexpected_map);
5031 __ Abort(kUnexpectedInitialMapForArrayFunction);
5034 // We should either have undefined in the allocation_site register or a
5035 // valid AllocationSite.
5036 __ AssertUndefinedOrAllocationSite(allocation_site, x10);
5040 __ Cmp(original_constructor, constructor);
5041 __ B(ne, &subclassing);
5045 // Get the elements kind and case on that.
5046 __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
5049 UntagSmiFieldMemOperand(allocation_site,
5050 AllocationSite::kTransitionInfoOffset));
5051 __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
5052 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5055 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5057 // Subclassing support.
5058 __ Bind(&subclassing);
5059 __ Push(constructor, original_constructor);
5061 switch (argument_count()) {
5064 __ add(x0, x0, Operand(2));
5067 __ Mov(x0, Operand(2));
5070 __ Mov(x0, Operand(3));
5073 __ JumpToExternalReference(
5074 ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
5078 void InternalArrayConstructorStub::GenerateCase(
5079 MacroAssembler* masm, ElementsKind kind) {
5080 Label zero_case, n_case;
5083 __ Cbz(argc, &zero_case);
5084 __ CompareAndBranch(argc, 1, ne, &n_case);
5087 if (IsFastPackedElementsKind(kind)) {
5090 // We might need to create a holey array; look at the first argument.
5092 __ Cbz(x10, &packed_case);
5094 InternalArraySingleArgumentConstructorStub
5095 stub1_holey(isolate(), GetHoleyElementsKind(kind));
5096 __ TailCallStub(&stub1_holey);
5098 __ Bind(&packed_case);
5100 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
5101 __ TailCallStub(&stub1);
5103 __ Bind(&zero_case);
5105 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
5106 __ TailCallStub(&stub0);
5110 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
5111 __ TailCallStub(&stubN);
5115 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5116 // ----------- S t a t e -------------
5118 // -- x1 : constructor
5119 // -- sp[0] : return address
5120 // -- sp[4] : last argument
5121 // -----------------------------------
5123 Register constructor = x1;
5125 if (FLAG_debug_code) {
5126 // The array construct code is only set for the global and natives
5127 // builtin Array functions, which always have maps.
5129 Label unexpected_map, map_ok;
5130 // Initial map for the builtin Array function should be a map.
5131 __ Ldr(x10, FieldMemOperand(constructor,
5132 JSFunction::kPrototypeOrInitialMapOffset));
5133 // The following Smi check catches both a NULL initial map and a Smi.
5134 __ JumpIfSmi(x10, &unexpected_map);
5135 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
5136 __ Bind(&unexpected_map);
5137 __ Abort(kUnexpectedInitialMapForArrayFunction);
    __ Bind(&map_ok);
  }
  Register kind = x3;
5142 // Figure out the right elements kind.
5143 __ Ldr(x10, FieldMemOperand(constructor,
5144                             JSFunction::kPrototypeOrInitialMapOffset));
5146 // Retrieve elements_kind from map.
5147 __ LoadElementsKindFromMap(kind, x10);
5149 if (FLAG_debug_code) {
5151   __ Cmp(x3, FAST_ELEMENTS);
5152   __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
5153   __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
  }
5156 Label fast_elements_case;
5157 __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
5158 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
5160 __ Bind(&fast_elements_case);
5161 GenerateCase(masm, FAST_ELEMENTS);
5165 // The number of registers that CallApiFunctionAndReturn will need to save on
5166 // the stack. The space for these registers needs to be allocated in the
5167 // ExitFrame before calling CallApiFunctionAndReturn.
5168 static const int kCallApiFunctionSpillSpace = 4;
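// These slots are used to preserve x19-x22, which CallApiFunctionAndReturn
// saves with Poke and restores with Peek around the API call.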
5171 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
5172 return ref0.address() - ref1.address();
5176 // Calls an API function. Allocates HandleScope, extracts returned value
5177 // from handle and propagates exceptions.
5178 // 'stack_space' is the space to be unwound on exit (it includes the space for
5179 // the JS call arguments and the additional space allocated for the fast call).
5180 // 'spill_offset' is the offset from the stack pointer where
5181 // CallApiFunctionAndReturn can spill registers.
5182 static void CallApiFunctionAndReturn(
5183 MacroAssembler* masm, Register function_address,
5184 ExternalReference thunk_ref, int stack_space,
5185 MemOperand* stack_space_operand, int spill_offset,
5186 MemOperand return_value_operand, MemOperand* context_restore_operand) {
5187 ASM_LOCATION("CallApiFunctionAndReturn");
5188 Isolate* isolate = masm->isolate();
5189 ExternalReference next_address =
5190 ExternalReference::handle_scope_next_address(isolate);
5191 const int kNextOffset = 0;
5192 const int kLimitOffset = AddressOffset(
5193 ExternalReference::handle_scope_limit_address(isolate), next_address);
5194 const int kLevelOffset = AddressOffset(
5195 ExternalReference::handle_scope_level_address(isolate), next_address);
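  // The handle scope limit and level fields live at fixed offsets from the
  // 'next' field, so all three can be addressed from the single base register
  // loaded with next_address below, using the offsets computed above.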
5197 DCHECK(function_address.is(x1) || function_address.is(x2));
5199 Label profiler_disabled;
5200 Label end_profiler_check;
5201 __ Mov(x10, ExternalReference::is_profiling_address(isolate));
5202 __ Ldrb(w10, MemOperand(x10));
5203 __ Cbz(w10, &profiler_disabled);
5204 __ Mov(x3, thunk_ref);
5205 __ B(&end_profiler_check);
5207 __ Bind(&profiler_disabled);
5208 __ Mov(x3, function_address);
5209 __ Bind(&end_profiler_check);
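  // x3 now holds the call target: the profiling thunk when the profiler is
  // active (so the invocation gets logged), or the API function itself
  // otherwise.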
5211 // Save the callee-save registers we are going to use.
5212 // TODO(all): Is this necessary? ARM doesn't do it.
5213 STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
5214 __ Poke(x19, (spill_offset + 0) * kXRegSize);
5215 __ Poke(x20, (spill_offset + 1) * kXRegSize);
5216 __ Poke(x21, (spill_offset + 2) * kXRegSize);
5217 __ Poke(x22, (spill_offset + 3) * kXRegSize);
5219 // Allocate HandleScope in callee-save registers.
5220 // We will need to restore the HandleScope after the call to the API function;
5221 // by allocating it in callee-save registers it will be preserved by C code.
5222 Register handle_scope_base = x22;
5223 Register next_address_reg = x19;
5224 Register limit_reg = x20;
5225 Register level_reg = w21;
5227 __ Mov(handle_scope_base, next_address);
5228 __ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
5229 __ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
5230 __ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
5231 __ Add(level_reg, level_reg, 1);
5232 __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
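  // The handle scope is now "open": next and limit are cached in callee-saved
  // registers and the level has been incremented. After the call, next is
  // restored and the level decremented; a changed limit means handle scope
  // extensions were allocated and must be deleted (see delete_allocated_handles
  // below).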
5234 if (FLAG_log_timer_events) {
5235 FrameScope frame(masm, StackFrame::MANUAL);
5236 __ PushSafepointRegisters();
5237 __ Mov(x0, ExternalReference::isolate_address(isolate));
5238   __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                       1);
5240   __ PopSafepointRegisters();
  }
5243 // Native call returns to the DirectCEntry stub which redirects to the
5244 // return address pushed on stack (could have moved after GC).
5245 // DirectCEntry stub itself is generated early and never moves.
5246 DirectCEntryStub stub(isolate);
5247 stub.GenerateCall(masm, x3);
5249 if (FLAG_log_timer_events) {
5250 FrameScope frame(masm, StackFrame::MANUAL);
5251 __ PushSafepointRegisters();
5252 __ Mov(x0, ExternalReference::isolate_address(isolate));
5253   __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                       1);
5255   __ PopSafepointRegisters();
  }
5258 Label promote_scheduled_exception;
5259 Label exception_handled;
5260 Label delete_allocated_handles;
5261 Label leave_exit_frame;
5262 Label return_value_loaded;
5264 // Load value from ReturnValue.
5265 __ Ldr(x0, return_value_operand);
5266 __ Bind(&return_value_loaded);
5267 // No more valid handles (the result handle was the last one). Restore
5268 // previous handle scope.
5269 __ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
5270 if (__ emit_debug_code()) {
5271 __ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
5272 __ Cmp(w1, level_reg);
5273   __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
5275 __ Sub(level_reg, level_reg, 1);
5276 __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
5277 __ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
5278 __ Cmp(limit_reg, x1);
5279 __ B(ne, &delete_allocated_handles);
5281 __ Bind(&leave_exit_frame);
5282 // Restore callee-saved registers.
5283 __ Peek(x19, (spill_offset + 0) * kXRegSize);
5284 __ Peek(x20, (spill_offset + 1) * kXRegSize);
5285 __ Peek(x21, (spill_offset + 2) * kXRegSize);
5286 __ Peek(x22, (spill_offset + 3) * kXRegSize);
5288 // Check if the function scheduled an exception.
5289 __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
5290 __ Ldr(x5, MemOperand(x5));
5291 __ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
5292 &promote_scheduled_exception);
5293 __ Bind(&exception_handled);
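  // The scheduled exception slot holds the hole value when no exception is
  // pending; otherwise we branched to promote_scheduled_exception, which calls
  // Runtime::kPromoteScheduledException and jumps back here.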
5295 bool restore_context = context_restore_operand != NULL;
5296 if (restore_context) {
5297   __ Ldr(cp, *context_restore_operand);
  }

5300 if (stack_space_operand != NULL) {
5301   __ Ldr(w2, *stack_space_operand);
  }
5304 __ LeaveExitFrame(false, x1, !restore_context);
5305 if (stack_space_operand != NULL) {
    __ Drop(x2, 1);
  } else {
5308   __ Drop(stack_space);
  }
  __ Ret();
5312 __ Bind(&promote_scheduled_exception);
  {
5314   FrameScope frame(masm, StackFrame::INTERNAL);
5315   __ CallExternalReference(
5316       ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
  }
5318 __ B(&exception_handled);
5320 // HandleScope limit has changed. Delete allocated extensions.
5321 __ Bind(&delete_allocated_handles);
5322 __ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
5323 // Save the return value in a callee-save register.
5324 Register saved_result = x19;
5325 __ Mov(saved_result, x0);
5326 __ Mov(x0, ExternalReference::isolate_address(isolate));
5327 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                     1);
5329 __ Mov(x0, saved_result);
5330 __ B(&leave_exit_frame);
5334 static void CallApiFunctionStubHelper(MacroAssembler* masm,
5335 const ParameterCount& argc,
5336 bool return_first_arg,
5337 bool call_data_undefined) {
5338 // ----------- S t a t e -------------
 //  -- x0 : callee
5340 //  -- x4 : call_data
 //  -- x2 : holder
5342 //  -- x1 : api_function_address
5343 //  -- x3 : number of arguments if argc is a register
 //  -- cp : context
 //  --
5346 //  -- sp[0] : last argument
 //  -- ...
5348 //  -- sp[(argc - 1) * 8] : first argument
5349 //  -- sp[argc * 8] : receiver
5350 // -----------------------------------
5352 Register callee = x0;
5353 Register call_data = x4;
5354 Register holder = x2;
5355 Register api_function_address = x1;
5356 Register context = cp;
5358 typedef FunctionCallbackArguments FCA;
5360 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5361 STATIC_ASSERT(FCA::kCalleeIndex == 5);
5362 STATIC_ASSERT(FCA::kDataIndex == 4);
5363 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5364 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5365 STATIC_ASSERT(FCA::kIsolateIndex == 1);
5366 STATIC_ASSERT(FCA::kHolderIndex == 0);
5367 STATIC_ASSERT(FCA::kArgsLength == 7);
5369 DCHECK(argc.is_immediate() || x3.is(argc.reg()));
5371 // FunctionCallbackArguments: context, callee and call data.
5372 __ Push(context, callee, call_data);
5374 // Load context from callee
5375 __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
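  // call_data has already been pushed above, so its register is now free to
  // hold the undefined value that fills the ReturnValue and ReturnValueDefault
  // slots. If the call data itself is known to be undefined, the register
  // already holds the right value and the load can be skipped.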
5377 if (!call_data_undefined) {
5378   __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
  }
5380 Register isolate_reg = x5;
5381 __ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
5383 // FunctionCallbackArguments:
5384 // return value, return value default, isolate, holder.
5385 __ Push(call_data, call_data, isolate_reg, holder);
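  // The FunctionCallbackArguments array is now complete on the stack; from the
  // stack pointer upwards: holder, isolate, return value default, return value,
  // call data, callee, context - matching the FCA indices asserted above.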
5387 // Prepare arguments.
  Register args = x6;
5389 __ Mov(args, masm->StackPointer());
5391 // Allocate the v8::FunctionCallbackInfo structure in the arguments' space,
5392 // since it's not controlled by GC.
5393 const int kApiStackSpace = 4;
5395 // Allocate space so that CallApiFunctionAndReturn can store some scratch
5396 // registers on the stack.
5397 const int kCallApiFunctionSpillSpace = 4;
5399 FrameScope frame_scope(masm, StackFrame::MANUAL);
5400 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
5402 DCHECK(!AreAliased(x0, api_function_address));
5403 // x0 = FunctionCallbackInfo&
5404 // Arguments is after the return address.
5405 __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
5406 if (argc.is_immediate()) {
5407   // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
    __ Add(x10, args,
5409           Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
5410 __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
5411 // FunctionCallbackInfo::length_ = argc and
5412 // FunctionCallbackInfo::is_construct_call = 0
5413 __ Mov(x10, argc.immediate());
5414 __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
  } else {
5416   // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
5417 __ Add(x10, args, Operand(argc.reg(), LSL, kPointerSizeLog2));
5418 __ Add(x10, x10, (FCA::kArgsLength - 1) * kPointerSize);
5419 __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
5420 // FunctionCallbackInfo::length_ = argc and
5421 // FunctionCallbackInfo::is_construct_call
5422 __ Add(x10, argc.reg(), FCA::kArgsLength + 1);
5423 __ Mov(x10, Operand(x10, LSL, kPointerSizeLog2));
5424   __ Stp(argc.reg(), x10, MemOperand(x0, 2 * kPointerSize));
  }
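  // FunctionCallbackInfo layout written above: slots 0 and 1 hold implicit_args_
  // and values_, slots 2 and 3 hold length_ and is_construct_call. values_
  // points at the first JS argument, i.e. kArgsLength - 1 + argc slots above
  // implicit_args_ (8 slots for argc == 2, for example). For a register argc,
  // is_construct_call also encodes the number of bytes to drop on return; it is
  // read back through stack_space_operand below.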
5427 ExternalReference thunk_ref =
5428 ExternalReference::invoke_function_callback(masm->isolate());
5430 AllowExternalCallThatCantCauseGC scope(masm);
5431 MemOperand context_restore_operand(
5432 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5433 // Store callbacks return the first JS argument instead of the return value.
5434 int return_value_offset = 0;
5435 if (return_first_arg) {
5436   return_value_offset = 2 + FCA::kArgsLength;
  } else {
5438   return_value_offset = 2 + FCA::kReturnValueOffset;
  }
5440 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5441 int stack_space = 0;
5442 MemOperand is_construct_call_operand =
5443 MemOperand(masm->StackPointer(), 4 * kPointerSize);
5444 MemOperand* stack_space_operand = &is_construct_call_operand;
5445 if (argc.is_immediate()) {
5446   stack_space = argc.immediate() + FCA::kArgsLength + 1;
5447   stack_space_operand = NULL;
  }
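  // With an immediate argument count the unwind amount is known statically:
  // argc JS arguments, the kArgsLength FCA slots and the receiver. Otherwise it
  // is read at run time from the is_construct_call slot written above.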
5450 const int spill_offset = 1 + kApiStackSpace;
5451 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
5452 stack_space_operand, spill_offset,
5453 return_value_operand, &context_restore_operand);
5457 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5458 bool call_data_undefined = this->call_data_undefined();
5459 CallApiFunctionStubHelper(masm, ParameterCount(x3), false,
5460 call_data_undefined);
5464 void CallApiAccessorStub::Generate(MacroAssembler* masm) {
5465 bool is_store = this->is_store();
5466 int argc = this->argc();
5467 bool call_data_undefined = this->call_data_undefined();
5468 CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
5469 call_data_undefined);
5473 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5474 // ----------- S t a t e -------------
 //  -- sp[0] : name
5476 //  -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object
 //  -- ...
5478 //  -- x2 : api_function_address
5479 // -----------------------------------
5481 Register api_function_address = ApiGetterDescriptor::function_address();
5482 DCHECK(api_function_address.is(x2));
5484 __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
5485 __ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
5487 const int kApiStackSpace = 1;
5489 // Allocate space so that CallApiFunctionAndReturn can store some scratch
5490 // registers on the stack.
5491 const int kCallApiFunctionSpillSpace = 4;
5493 FrameScope frame_scope(masm, StackFrame::MANUAL);
5494 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
5496 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5497 // x1 (internal::Object** args_) as the data.
5498 __ Poke(x1, 1 * kPointerSize);
5499 __ Add(x1, masm->StackPointer(), 1 * kPointerSize); // x1 = AccessorInfo&
5501 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5503 ExternalReference thunk_ref =
5504 ExternalReference::invoke_accessor_getter_callback(isolate());
5506 const int spill_offset = 1 + kApiStackSpace;
5507 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
5508 kStackUnwindSpace, NULL, spill_offset,
5509 MemOperand(fp, 6 * kPointerSize), NULL);
5515 } } // namespace v8::internal
5517 #endif // V8_TARGET_ARCH_ARM64