1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #if V8_TARGET_ARCH_ARM64
9 #include "src/bootstrapper.h"
10 #include "src/code-stubs.h"
11 #include "src/regexp-macro-assembler.h"
12 #include "src/stub-cache.h"
18 void FastNewClosureStub::InitializeInterfaceDescriptor(
19 CodeStubInterfaceDescriptor* descriptor) {
21 static Register registers[] = { x2 };
22 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
23 descriptor->register_params_ = registers;
24 descriptor->deoptimization_handler_ =
25 Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
29 void FastNewContextStub::InitializeInterfaceDescriptor(
30 CodeStubInterfaceDescriptor* descriptor) {
32 static Register registers[] = { x1 };
33 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
34 descriptor->register_params_ = registers;
35 descriptor->deoptimization_handler_ = NULL;
39 void ToNumberStub::InitializeInterfaceDescriptor(
40 CodeStubInterfaceDescriptor* descriptor) {
42 static Register registers[] = { x0 };
43 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
44 descriptor->register_params_ = registers;
45 descriptor->deoptimization_handler_ = NULL;
49 void NumberToStringStub::InitializeInterfaceDescriptor(
50 CodeStubInterfaceDescriptor* descriptor) {
52 static Register registers[] = { x0 };
53 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
54 descriptor->register_params_ = registers;
55 descriptor->deoptimization_handler_ =
56 Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
60 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
61 CodeStubInterfaceDescriptor* descriptor) {
62 // x3: array literals array
63 // x2: array literal index
64 // x1: constant elements
65 static Register registers[] = { x3, x2, x1 };
66 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
67 descriptor->register_params_ = registers;
68 static Representation representations[] = {
69 Representation::Tagged(),
70 Representation::Smi(),
71 Representation::Tagged() };
72 descriptor->register_param_representations_ = representations;
73 descriptor->deoptimization_handler_ =
74 Runtime::FunctionForId(
75 Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
79 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
80 CodeStubInterfaceDescriptor* descriptor) {
81 // x3: object literals array
82 // x2: object literal index
83 // x1: constant properties
84 // x0: object literal flags
85 static Register registers[] = { x3, x2, x1, x0 };
86 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
87 descriptor->register_params_ = registers;
88 descriptor->deoptimization_handler_ =
89 Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
93 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
94 CodeStubInterfaceDescriptor* descriptor) {
95 // x2: feedback vector
96 // x3: call feedback slot
97 static Register registers[] = { x2, x3 };
98 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
99 descriptor->register_params_ = registers;
100 descriptor->deoptimization_handler_ = NULL;
104 void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
105 CodeStubInterfaceDescriptor* descriptor) {
106 static Register registers[] = { x1, x0 };
107 descriptor->register_param_count_ = 2;
108 descriptor->register_params_ = registers;
109 descriptor->deoptimization_handler_ =
110 Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
114 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
115 CodeStubInterfaceDescriptor* descriptor) {
118 static Register registers[] = { x1, x0 };
119 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
120 descriptor->register_params_ = registers;
121 descriptor->deoptimization_handler_ =
122 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
126 void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
127 CodeStubInterfaceDescriptor* descriptor) {
130 static Register registers[] = { x1, x0 };
131 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
132 descriptor->register_params_ = registers;
133 descriptor->deoptimization_handler_ =
134 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
138 void RegExpConstructResultStub::InitializeInterfaceDescriptor(
139 CodeStubInterfaceDescriptor* descriptor) {
141 // x1: index (of last match)
143 static Register registers[] = { x2, x1, x0 };
144 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
145 descriptor->register_params_ = registers;
146 descriptor->deoptimization_handler_ =
147 Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
151 void LoadFieldStub::InitializeInterfaceDescriptor(
152 CodeStubInterfaceDescriptor* descriptor) {
154 static Register registers[] = { x0 };
155 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
156 descriptor->register_params_ = registers;
157 descriptor->deoptimization_handler_ = NULL;
161 void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
162 CodeStubInterfaceDescriptor* descriptor) {
164 static Register registers[] = { x1 };
165 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
166 descriptor->register_params_ = registers;
167 descriptor->deoptimization_handler_ = NULL;
171 void StringLengthStub::InitializeInterfaceDescriptor(
172 CodeStubInterfaceDescriptor* descriptor) {
173 static Register registers[] = { x0, x2 };
174 descriptor->register_param_count_ = 2;
175 descriptor->register_params_ = registers;
176 descriptor->deoptimization_handler_ = NULL;
180 void KeyedStringLengthStub::InitializeInterfaceDescriptor(
181 CodeStubInterfaceDescriptor* descriptor) {
182 static Register registers[] = { x1, x0 };
183 descriptor->register_param_count_ = 2;
184 descriptor->register_params_ = registers;
185 descriptor->deoptimization_handler_ = NULL;
189 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
190 CodeStubInterfaceDescriptor* descriptor) {
194 static Register registers[] = { x2, x1, x0 };
195 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
196 descriptor->register_params_ = registers;
197 descriptor->deoptimization_handler_ =
198 FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
202 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
203 CodeStubInterfaceDescriptor* descriptor) {
204 // x0: value (js_array)
206 static Register registers[] = { x0, x1 };
207 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
208 descriptor->register_params_ = registers;
210 Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
211 descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
215 void CompareNilICStub::InitializeInterfaceDescriptor(
216 CodeStubInterfaceDescriptor* descriptor) {
217 // x0: value to compare
218 static Register registers[] = { x0 };
219 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
220 descriptor->register_params_ = registers;
221 descriptor->deoptimization_handler_ =
222 FUNCTION_ADDR(CompareNilIC_Miss);
223 descriptor->SetMissHandler(
224 ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
228 static void InitializeArrayConstructorDescriptor(
229 CodeStubInterfaceDescriptor* descriptor,
230 int constant_stack_parameter_count) {
232 // x2: allocation site with elements kind
233 // x0: number of arguments to the constructor function
234 static Register registers_variable_args[] = { x1, x2, x0 };
235 static Register registers_no_args[] = { x1, x2 };
237 if (constant_stack_parameter_count == 0) {
238 descriptor->register_param_count_ =
239 sizeof(registers_no_args) / sizeof(registers_no_args[0]);
240 descriptor->register_params_ = registers_no_args;
242 // stack param count needs (constructor pointer, and single argument)
243 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
244 descriptor->stack_parameter_count_ = x0;
245 descriptor->register_param_count_ =
246 sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
247 descriptor->register_params_ = registers_variable_args;
248 static Representation representations[] = {
249 Representation::Tagged(),
250 Representation::Tagged(),
251 Representation::Integer32() };
252 descriptor->register_param_representations_ = representations;
255 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
256 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
257 descriptor->deoptimization_handler_ =
258 Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
262 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
263 CodeStubInterfaceDescriptor* descriptor) {
264 InitializeArrayConstructorDescriptor(descriptor, 0);
268 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
269 CodeStubInterfaceDescriptor* descriptor) {
270 InitializeArrayConstructorDescriptor(descriptor, 1);
274 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
275 CodeStubInterfaceDescriptor* descriptor) {
276 InitializeArrayConstructorDescriptor(descriptor, -1);
280 static void InitializeInternalArrayConstructorDescriptor(
281 CodeStubInterfaceDescriptor* descriptor,
282 int constant_stack_parameter_count) {
283 // x1: constructor function
284 // x0: number of arguments to the constructor function
285 static Register registers_variable_args[] = { x1, x0 };
286 static Register registers_no_args[] = { x1 };
288 if (constant_stack_parameter_count == 0) {
289 descriptor->register_param_count_ =
290 sizeof(registers_no_args) / sizeof(registers_no_args[0]);
291 descriptor->register_params_ = registers_no_args;
293 // stack param count needs (constructor pointer, and single argument)
294 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
295 descriptor->stack_parameter_count_ = x0;
296 descriptor->register_param_count_ =
297 sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
298 descriptor->register_params_ = registers_variable_args;
299 static Representation representations[] = {
300 Representation::Tagged(),
301 Representation::Integer32() };
302 descriptor->register_param_representations_ = representations;
305 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
306 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
307 descriptor->deoptimization_handler_ =
308 Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
312 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
313 CodeStubInterfaceDescriptor* descriptor) {
314 InitializeInternalArrayConstructorDescriptor(descriptor, 0);
318 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
319 CodeStubInterfaceDescriptor* descriptor) {
320 InitializeInternalArrayConstructorDescriptor(descriptor, 1);
324 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
325 CodeStubInterfaceDescriptor* descriptor) {
326 InitializeInternalArrayConstructorDescriptor(descriptor, -1);
330 void ToBooleanStub::InitializeInterfaceDescriptor(
331 CodeStubInterfaceDescriptor* descriptor) {
333 static Register registers[] = { x0 };
334 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
335 descriptor->register_params_ = registers;
336 descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
337 descriptor->SetMissHandler(
338 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
342 void StoreGlobalStub::InitializeInterfaceDescriptor(
343 CodeStubInterfaceDescriptor* descriptor) {
347 static Register registers[] = { x1, x2, x0 };
348 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
349 descriptor->register_params_ = registers;
350 descriptor->deoptimization_handler_ =
351 FUNCTION_ADDR(StoreIC_MissFromStubFailure);
355 void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
356 CodeStubInterfaceDescriptor* descriptor) {
361 static Register registers[] = { x0, x3, x1, x2 };
362 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
363 descriptor->register_params_ = registers;
364 descriptor->deoptimization_handler_ =
365 FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
369 void BinaryOpICStub::InitializeInterfaceDescriptor(
370 CodeStubInterfaceDescriptor* descriptor) {
373 static Register registers[] = { x1, x0 };
374 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
375 descriptor->register_params_ = registers;
376 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
377 descriptor->SetMissHandler(
378 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
382 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
383 CodeStubInterfaceDescriptor* descriptor) {
384 // x2: allocation site
387 static Register registers[] = { x2, x1, x0 };
388 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
389 descriptor->register_params_ = registers;
390 descriptor->deoptimization_handler_ =
391 FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
395 void StringAddStub::InitializeInterfaceDescriptor(
396 CodeStubInterfaceDescriptor* descriptor) {
399 static Register registers[] = { x1, x0 };
400 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
401 descriptor->register_params_ = registers;
402 descriptor->deoptimization_handler_ =
403 Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
407 void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
408 static PlatformCallInterfaceDescriptor default_descriptor =
409 PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
411 static PlatformCallInterfaceDescriptor noInlineDescriptor =
412 PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
415 CallInterfaceDescriptor* descriptor =
416 isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
417 static Register registers[] = { x1, // JSFunction
419 x0, // actual number of arguments
420 x2, // expected number of arguments
422 static Representation representations[] = {
423 Representation::Tagged(), // JSFunction
424 Representation::Tagged(), // context
425 Representation::Integer32(), // actual number of arguments
426 Representation::Integer32(), // expected number of arguments
428 descriptor->register_param_count_ = 4;
429 descriptor->register_params_ = registers;
430 descriptor->param_representations_ = representations;
431 descriptor->platform_specific_descriptor_ = &default_descriptor;
434 CallInterfaceDescriptor* descriptor =
435 isolate->call_descriptor(Isolate::KeyedCall);
436 static Register registers[] = { cp, // context
439 static Representation representations[] = {
440 Representation::Tagged(), // context
441 Representation::Tagged(), // key
443 descriptor->register_param_count_ = 2;
444 descriptor->register_params_ = registers;
445 descriptor->param_representations_ = representations;
446 descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
449 CallInterfaceDescriptor* descriptor =
450 isolate->call_descriptor(Isolate::NamedCall);
451 static Register registers[] = { cp, // context
454 static Representation representations[] = {
455 Representation::Tagged(), // context
456 Representation::Tagged(), // name
458 descriptor->register_param_count_ = 2;
459 descriptor->register_params_ = registers;
460 descriptor->param_representations_ = representations;
461 descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
464 CallInterfaceDescriptor* descriptor =
465 isolate->call_descriptor(Isolate::CallHandler);
466 static Register registers[] = { cp, // context
469 static Representation representations[] = {
470 Representation::Tagged(), // context
471 Representation::Tagged(), // receiver
473 descriptor->register_param_count_ = 2;
474 descriptor->register_params_ = registers;
475 descriptor->param_representations_ = representations;
476 descriptor->platform_specific_descriptor_ = &default_descriptor;
479 CallInterfaceDescriptor* descriptor =
480 isolate->call_descriptor(Isolate::ApiFunctionCall);
481 static Register registers[] = { x0, // callee
484 x1, // api_function_address
487 static Representation representations[] = {
488 Representation::Tagged(), // callee
489 Representation::Tagged(), // call_data
490 Representation::Tagged(), // holder
491 Representation::External(), // api_function_address
492 Representation::Tagged(), // context
494 descriptor->register_param_count_ = 5;
495 descriptor->register_params_ = registers;
496 descriptor->param_representations_ = representations;
497 descriptor->platform_specific_descriptor_ = &default_descriptor;
502 #define __ ACCESS_MASM(masm)
505 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
506 // Update the static counter each time a new code stub is generated.
507 isolate()->counters()->code_stubs()->Increment();
509 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
510 int param_count = descriptor->register_param_count_;
512 // Call the runtime system in a fresh internal frame.
513 FrameScope scope(masm, StackFrame::INTERNAL);
514 ASSERT((descriptor->register_param_count_ == 0) ||
515 x0.Is(descriptor->register_params_[param_count - 1]));
518 MacroAssembler::PushPopQueue queue(masm);
519 for (int i = 0; i < param_count; ++i) {
520 queue.Queue(descriptor->register_params_[i]);
524 ExternalReference miss = descriptor->miss_handler();
525 __ CallExternalReference(miss, descriptor->register_param_count_);
532 void DoubleToIStub::Generate(MacroAssembler* masm) {
534 Register input = source();
535 Register result = destination();
536 ASSERT(is_truncating());
538 ASSERT(result.Is64Bits());
539 ASSERT(jssp.Is(masm->StackPointer()));
541 int double_offset = offset();
543 DoubleRegister double_scratch = d0; // only used if !skip_fastpath()
544 Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
546 GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
548 __ Push(scratch1, scratch2);
549 // Account for saved regs if input is jssp.
550 if (input.is(jssp)) double_offset += 2 * kPointerSize;
552 if (!skip_fastpath()) {
553 __ Push(double_scratch);
554 if (input.is(jssp)) double_offset += 1 * kDoubleSize;
555 __ Ldr(double_scratch, MemOperand(input, double_offset));
556 // Try to convert with a FPU convert instruction. This handles all
557 // non-saturating cases.
558 __ TryConvertDoubleToInt64(result, double_scratch, &done);
559 __ Fmov(result, double_scratch);
561 __ Ldr(result, MemOperand(input, double_offset));
564 // If we reach here we need to manually convert the input to an int32.
566 // Extract the exponent.
567 Register exponent = scratch1;
568 __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
569 HeapNumber::kExponentBits);
571 // It the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
572 // the mantissa gets shifted completely out of the int32_t result.
573 __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
574 __ CzeroX(result, ge);
577 // The Fcvtzs sequence handles all cases except where the conversion causes
578 // signed overflow in the int64_t target. Since we've already handled
579 // exponents >= 84, we can guarantee that 63 <= exponent < 84.
581 if (masm->emit_debug_code()) {
582 __ Cmp(exponent, HeapNumber::kExponentBias + 63);
583 // Exponents less than this should have been handled by the Fcvt case.
584 __ Check(ge, kUnexpectedValue);
587 // Isolate the mantissa bits, and set the implicit '1'.
588 Register mantissa = scratch2;
589 __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
590 __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
592 // Negate the mantissa if necessary.
593 __ Tst(result, kXSignMask);
594 __ Cneg(mantissa, mantissa, ne);
596 // Shift the mantissa bits in the correct place. We know that we have to shift
597 // it left here, because exponent >= 63 >= kMantissaBits.
598 __ Sub(exponent, exponent,
599 HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
600 __ Lsl(result, mantissa, exponent);
603 if (!skip_fastpath()) {
604 __ Pop(double_scratch);
606 __ Pop(scratch2, scratch1);
611 // See call site for description.
612 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
616 FPRegister double_scratch,
619 ASSERT(!AreAliased(left, right, scratch));
620 Label not_identical, return_equal, heap_number;
621 Register result = x0;
624 __ B(ne, ¬_identical);
626 // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
627 // so we do the second best thing - test it ourselves.
628 // They are both equal and they are not both Smis so both of them are not
629 // Smis. If it's not a heap number, then return equal.
630 if ((cond == lt) || (cond == gt)) {
631 __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
634 Register right_type = scratch;
635 __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
637 // Comparing JS objects with <=, >= is complicated.
639 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
641 // Normally here we fall through to return_equal, but undefined is
642 // special: (undefined == undefined) == true, but
643 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
644 if ((cond == le) || (cond == ge)) {
645 __ Cmp(right_type, ODDBALL_TYPE);
646 __ B(ne, &return_equal);
647 __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
649 // undefined <= undefined should fail.
650 __ Mov(result, GREATER);
652 // undefined >= undefined should fail.
653 __ Mov(result, LESS);
660 __ Bind(&return_equal);
662 __ Mov(result, GREATER); // Things aren't less than themselves.
663 } else if (cond == gt) {
664 __ Mov(result, LESS); // Things aren't greater than themselves.
666 __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves.
670 // Cases lt and gt have been handled earlier, and case ne is never seen, as
671 // it is handled in the parser (see Parser::ParseBinaryExpression). We are
672 // only concerned with cases ge, le and eq here.
673 if ((cond != lt) && (cond != gt)) {
674 ASSERT((cond == ge) || (cond == le) || (cond == eq));
675 __ Bind(&heap_number);
676 // Left and right are identical pointers to a heap number object. Return
677 // non-equal if the heap number is a NaN, and equal otherwise. Comparing
678 // the number to itself will set the overflow flag iff the number is NaN.
679 __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
680 __ Fcmp(double_scratch, double_scratch);
681 __ B(vc, &return_equal); // Not NaN, so treat as normal heap number.
684 __ Mov(result, GREATER);
686 __ Mov(result, LESS);
691 // No fall through here.
692 if (FLAG_debug_code) {
696 __ Bind(¬_identical);
700 // See call site for description.
701 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
707 ASSERT(!AreAliased(left, right, left_type, right_type, scratch));
709 if (masm->emit_debug_code()) {
710 // We assume that the arguments are not identical.
712 __ Assert(ne, kExpectedNonIdenticalObjects);
715 // If either operand is a JS object or an oddball value, then they are not
716 // equal since their pointers are different.
717 // There is no test for undetectability in strict equality.
718 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
719 Label right_non_object;
721 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
722 __ B(lt, &right_non_object);
724 // Return non-zero - x0 already contains a non-zero pointer.
725 ASSERT(left.is(x0) || right.is(x0));
726 Label return_not_equal;
727 __ Bind(&return_not_equal);
730 __ Bind(&right_non_object);
732 // Check for oddballs: true, false, null, undefined.
733 __ Cmp(right_type, ODDBALL_TYPE);
735 // If right is not ODDBALL, test left. Otherwise, set eq condition.
736 __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
738 // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
739 // Otherwise, right or left is ODDBALL, so set a ge condition.
740 __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
742 __ B(ge, &return_not_equal);
744 // Internalized strings are unique, so they can only be equal if they are the
745 // same object. We have already tested that case, so if left and right are
746 // both internalized strings, they cannot be equal.
747 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
748 __ Orr(scratch, left_type, right_type);
749 __ TestAndBranchIfAllClear(
750 scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
754 // See call site for description.
755 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
763 ASSERT(!AreAliased(left, right, scratch));
764 ASSERT(!AreAliased(left_d, right_d));
765 ASSERT((left.is(x0) && right.is(x1)) ||
766 (right.is(x0) && left.is(x1)));
767 Register result = x0;
769 Label right_is_smi, done;
770 __ JumpIfSmi(right, &right_is_smi);
772 // Left is the smi. Check whether right is a heap number.
774 // If right is not a number and left is a smi, then strict equality cannot
775 // succeed. Return non-equal.
776 Label is_heap_number;
777 __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
779 // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
780 if (!right.is(result)) {
781 __ Mov(result, NOT_EQUAL);
784 __ Bind(&is_heap_number);
786 // Smi compared non-strictly with a non-smi, non-heap-number. Call the
788 __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
791 // Left is the smi. Right is a heap number. Load right value into right_d, and
792 // convert left smi into double in left_d.
793 __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
794 __ SmiUntagToDouble(left_d, left);
797 __ Bind(&right_is_smi);
798 // Right is a smi. Check whether the non-smi left is a heap number.
800 // If left is not a number and right is a smi then strict equality cannot
801 // succeed. Return non-equal.
802 Label is_heap_number;
803 __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
805 // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
806 if (!left.is(result)) {
807 __ Mov(result, NOT_EQUAL);
810 __ Bind(&is_heap_number);
812 // Smi compared non-strictly with a non-smi, non-heap-number. Call the
814 __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
817 // Right is the smi. Left is a heap number. Load left value into left_d, and
818 // convert right smi into double in right_d.
819 __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
820 __ SmiUntagToDouble(right_d, right);
822 // Fall through to both_loaded_as_doubles.
827 // Fast negative check for internalized-to-internalized equality.
828 // See call site for description.
829 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
836 Label* possible_strings,
837 Label* not_both_strings) {
838 ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
839 Register result = x0;
842 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
843 // TODO(all): reexamine this branch sequence for optimisation wrt branch
845 __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
846 __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
847 __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
848 __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
850 // Both are internalized. We already checked that they weren't the same
851 // pointer, so they are not equal.
852 __ Mov(result, NOT_EQUAL);
855 __ Bind(&object_test);
857 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
859 // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
860 // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
861 __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
863 __ B(lt, not_both_strings);
865 // If both objects are undetectable, they are equal. Otherwise, they are not
866 // equal, since they are different objects and an object is not equal to
869 // Returning here, so we can corrupt right_type and left_type.
870 Register right_bitfield = right_type;
871 Register left_bitfield = left_type;
872 __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
873 __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
874 __ And(result, right_bitfield, left_bitfield);
875 __ And(result, result, 1 << Map::kIsUndetectable);
876 __ Eor(result, result, 1 << Map::kIsUndetectable);
881 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
884 CompareIC::State expected,
887 if (expected == CompareIC::SMI) {
888 __ JumpIfNotSmi(input, fail);
889 } else if (expected == CompareIC::NUMBER) {
890 __ JumpIfSmi(input, &ok);
891 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
894 // We could be strict about internalized/non-internalized here, but as long as
895 // hydrogen doesn't care, the stub doesn't have to care either.
900 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
903 Register result = x0;
904 Condition cond = GetCondition();
907 ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
908 ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);
910 Label slow; // Call builtin.
911 Label not_smis, both_loaded_as_doubles;
912 Label not_two_smis, smi_done;
913 __ JumpIfEitherNotSmi(lhs, rhs, ¬_two_smis);
915 __ Sub(result, lhs, Operand::UntagSmi(rhs));
918 __ Bind(¬_two_smis);
920 // NOTICE! This code is only reached after a smi-fast-case check, so it is
921 // certain that at least one operand isn't a smi.
923 // Handle the case where the objects are identical. Either returns the answer
924 // or goes to slow. Only falls through if the objects were not identical.
925 EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
927 // If either is a smi (we know that at least one is not a smi), then they can
928 // only be strictly equal if the other is a HeapNumber.
929 __ JumpIfBothNotSmi(lhs, rhs, ¬_smis);
931 // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
933 // 1) Return the answer.
934 // 2) Branch to the slow case.
935 // 3) Fall through to both_loaded_as_doubles.
936 // In case 3, we have found out that we were dealing with a number-number
937 // comparison. The double values of the numbers have been loaded, right into
938 // rhs_d, left into lhs_d.
939 FPRegister rhs_d = d0;
940 FPRegister lhs_d = d1;
941 EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());
943 __ Bind(&both_loaded_as_doubles);
944 // The arguments have been converted to doubles and stored in rhs_d and
947 __ Fcmp(lhs_d, rhs_d);
948 __ B(vs, &nan); // Overflow flag set if either is NaN.
949 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
950 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
951 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
955 // Left and/or right is a NaN. Load the result register with whatever makes
956 // the comparison fail, since comparisons with NaN always fail (except ne,
957 // which is filtered out at a higher level.)
959 if ((cond == lt) || (cond == le)) {
960 __ Mov(result, GREATER);
962 __ Mov(result, LESS);
967 // At this point we know we are dealing with two different objects, and
968 // neither of them is a smi. The objects are in rhs_ and lhs_.
970 // Load the maps and types of the objects.
971 Register rhs_map = x10;
972 Register rhs_type = x11;
973 Register lhs_map = x12;
974 Register lhs_type = x13;
975 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
976 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
977 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
978 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
981 // This emits a non-equal return sequence for some object types, or falls
982 // through if it was not lucky.
983 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
986 Label check_for_internalized_strings;
987 Label flat_string_check;
988 // Check for heap number comparison. Branch to earlier double comparison code
989 // if they are heap numbers, otherwise, branch to internalized string check.
990 __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
991 __ B(ne, &check_for_internalized_strings);
992 __ Cmp(lhs_map, rhs_map);
994 // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
996 __ B(ne, &flat_string_check);
998 // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
1000 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1001 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1002 __ B(&both_loaded_as_doubles);
1004 __ Bind(&check_for_internalized_strings);
1005 // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
1006 // of internalized strings.
1007 if ((cond == eq) && !strict()) {
1008 // Returns an answer for two internalized strings or two detectable objects.
1009 // Otherwise branches to the string case or not both strings case.
1010 EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
1012 &flat_string_check, &slow);
1015 // Check for both being sequential ASCII strings, and inline if that is the
1017 __ Bind(&flat_string_check);
1018 __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
1021 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
1024 StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
1027 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
1028 x10, x11, x12, x13);
1031 // Never fall through to here.
1032 if (FLAG_debug_code) {
1039 // Figure out which native to call and setup the arguments.
1040 Builtins::JavaScript native;
1042 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1044 native = Builtins::COMPARE;
1045 int ncr; // NaN compare result
1046 if ((cond == lt) || (cond == le)) {
1049 ASSERT((cond == gt) || (cond == ge)); // remaining cases
1052 __ Mov(x10, Smi::FromInt(ncr));
1056 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1057 // tagged as a small integer.
1058 __ InvokeBuiltin(native, JUMP_FUNCTION);
1065 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1066 CPURegList saved_regs = kCallerSaved;
1067 CPURegList saved_fp_regs = kCallerSavedFP;
1069 // We don't allow a GC during a store buffer overflow so there is no need to
1070 // store the registers in any particular way, but we do have to store and
1073 // We don't care if MacroAssembler scratch registers are corrupted.
1074 saved_regs.Remove(*(masm->TmpList()));
1075 saved_fp_regs.Remove(*(masm->FPTmpList()));
1077 __ PushCPURegList(saved_regs);
1078 if (save_doubles_ == kSaveFPRegs) {
1079 __ PushCPURegList(saved_fp_regs);
1082 AllowExternalCallThatCantCauseGC scope(masm);
1083 __ Mov(x0, ExternalReference::isolate_address(isolate()));
1085 ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);
1087 if (save_doubles_ == kSaveFPRegs) {
1088 __ PopCPURegList(saved_fp_regs);
1090 __ PopCPURegList(saved_regs);
1095 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
1097 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
1099 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
1104 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
1105 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
1106 UseScratchRegisterScope temps(masm);
1107 Register saved_lr = temps.UnsafeAcquire(to_be_pushed_lr());
1108 Register return_address = temps.AcquireX();
1109 __ Mov(return_address, lr);
1110 // Restore lr with the value it had before the call to this stub (the value
1111 // which must be pushed).
1112 __ Mov(lr, saved_lr);
1113 if (save_doubles_ == kSaveFPRegs) {
1114 __ PushSafepointRegistersAndDoubles();
1116 __ PushSafepointRegisters();
1118 __ Ret(return_address);
1122 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
1123 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
1124 UseScratchRegisterScope temps(masm);
1125 Register return_address = temps.AcquireX();
1126 // Preserve the return address (lr will be clobbered by the pop).
1127 __ Mov(return_address, lr);
1128 if (save_doubles_ == kSaveFPRegs) {
1129 __ PopSafepointRegistersAndDoubles();
1131 __ PopSafepointRegisters();
1133 __ Ret(return_address);
1137 void MathPowStub::Generate(MacroAssembler* masm) {
1139 // jssp[0]: Exponent (as a tagged value).
1140 // jssp[1]: Base (as a tagged value).
1142 // The (tagged) result will be returned in x0, as a heap number.
1144 Register result_tagged = x0;
1145 Register base_tagged = x10;
1146 Register exponent_tagged = x11;
1147 Register exponent_integer = x12;
1148 Register scratch1 = x14;
1149 Register scratch0 = x15;
1150 Register saved_lr = x19;
1151 FPRegister result_double = d0;
1152 FPRegister base_double = d0;
1153 FPRegister exponent_double = d1;
1154 FPRegister base_double_copy = d2;
1155 FPRegister scratch1_double = d6;
1156 FPRegister scratch0_double = d7;
1158 // A fast-path for integer exponents.
1159 Label exponent_is_smi, exponent_is_integer;
1160 // Bail out to runtime.
1162 // Allocate a heap number for the result, and return it.
1165 // Unpack the inputs.
1166 if (exponent_type_ == ON_STACK) {
1168 Label unpack_exponent;
1170 __ Pop(exponent_tagged, base_tagged);
1172 __ JumpIfSmi(base_tagged, &base_is_smi);
1173 __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
1174 // base_tagged is a heap number, so load its double value.
1175 __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
1176 __ B(&unpack_exponent);
1177 __ Bind(&base_is_smi);
1178 // base_tagged is a SMI, so untag it and convert it to a double.
1179 __ SmiUntagToDouble(base_double, base_tagged);
1181 __ Bind(&unpack_exponent);
1182 // x10 base_tagged The tagged base (input).
1183 // x11 exponent_tagged The tagged exponent (input).
1184 // d1 base_double The base as a double.
1185 __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
1186 __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
1187 // exponent_tagged is a heap number, so load its double value.
1188 __ Ldr(exponent_double,
1189 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
1190 } else if (exponent_type_ == TAGGED) {
1191 __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
1192 __ Ldr(exponent_double,
1193 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
1196 // Handle double (heap number) exponents.
1197 if (exponent_type_ != INTEGER) {
1198 // Detect integer exponents stored as doubles and handle those in the
1199 // integer fast-path.
1200 __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
1201 scratch0_double, &exponent_is_integer);
1203 if (exponent_type_ == ON_STACK) {
1204 FPRegister half_double = d3;
1205 FPRegister minus_half_double = d4;
1206 // Detect square root case. Crankshaft detects constant +/-0.5 at compile
1207 // time and uses DoMathPowHalf instead. We then skip this check for
1208 // non-constant cases of +/-0.5 as these hardly occur.
1210 __ Fmov(minus_half_double, -0.5);
1211 __ Fmov(half_double, 0.5);
1212 __ Fcmp(minus_half_double, exponent_double);
1213 __ Fccmp(half_double, exponent_double, NZFlag, ne);
1214 // Condition flags at this point:
1215 // 0.5; nZCv // Identified by eq && pl
1216 // -0.5: NZcv // Identified by eq && mi
1217 // other: ?z?? // Identified by ne
1218 __ B(ne, &call_runtime);
1220 // The exponent is 0.5 or -0.5.
1222 // Given that exponent is known to be either 0.5 or -0.5, the following
1223 // special cases could apply (according to ECMA-262 15.8.2.13):
1225 // base.isNaN(): The result is NaN.
1226 // (base == +INFINITY) || (base == -INFINITY)
1227 // exponent == 0.5: The result is +INFINITY.
1228 // exponent == -0.5: The result is +0.
1229 // (base == +0) || (base == -0)
1230 // exponent == 0.5: The result is +0.
1231 // exponent == -0.5: The result is +INFINITY.
1232 // (base < 0) && base.isFinite(): The result is NaN.
1234 // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
1235 // where base is -INFINITY or -0.
1237 // Add +0 to base. This has no effect other than turning -0 into +0.
1238 __ Fadd(base_double, base_double, fp_zero);
1239 // The operation -0+0 results in +0 in all cases except where the
1240 // FPCR rounding mode is 'round towards minus infinity' (RM). The
1241 // ARM64 simulator does not currently simulate FPCR (where the rounding
1242 // mode is set), so test the operation with some debug code.
1243 if (masm->emit_debug_code()) {
1244 UseScratchRegisterScope temps(masm);
1245 Register temp = temps.AcquireX();
1246 __ Fneg(scratch0_double, fp_zero);
1247 // Verify that we correctly generated +0.0 and -0.0.
1248 // bits(+0.0) = 0x0000000000000000
1249 // bits(-0.0) = 0x8000000000000000
1250 __ Fmov(temp, fp_zero);
1251 __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
1252 __ Fmov(temp, scratch0_double);
1253 __ Eor(temp, temp, kDSignMask);
1254 __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
1255 // Check that -0.0 + 0.0 == +0.0.
1256 __ Fadd(scratch0_double, scratch0_double, fp_zero);
1257 __ Fmov(temp, scratch0_double);
1258 __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
1261 // If base is -INFINITY, make it +INFINITY.
1262 // * Calculate base - base: All infinities will become NaNs since both
1263 // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
1264 // * If the result is NaN, calculate abs(base).
1265 __ Fsub(scratch0_double, base_double, base_double);
1266 __ Fcmp(scratch0_double, 0.0);
1267 __ Fabs(scratch1_double, base_double);
1268 __ Fcsel(base_double, scratch1_double, base_double, vs);
1270 // Calculate the square root of base.
1271 __ Fsqrt(result_double, base_double);
1272 __ Fcmp(exponent_double, 0.0);
1273 __ B(ge, &done); // Finish now for exponents of 0.5.
1274 // Find the inverse for exponents of -0.5.
1275 __ Fmov(scratch0_double, 1.0);
1276 __ Fdiv(result_double, scratch0_double, result_double);
1281 AllowExternalCallThatCantCauseGC scope(masm);
1282 __ Mov(saved_lr, lr);
1284 ExternalReference::power_double_double_function(isolate()),
1286 __ Mov(lr, saved_lr);
1290 // Handle SMI exponents.
1291 __ Bind(&exponent_is_smi);
1292 // x10 base_tagged The tagged base (input).
1293 // x11 exponent_tagged The tagged exponent (input).
1294 // d1 base_double The base as a double.
1295 __ SmiUntag(exponent_integer, exponent_tagged);
1298 __ Bind(&exponent_is_integer);
1299 // x10 base_tagged The tagged base (input).
1300 // x11 exponent_tagged The tagged exponent (input).
1301 // x12 exponent_integer The exponent as an integer.
1302 // d1 base_double The base as a double.
1304 // Find abs(exponent). For negative exponents, we can find the inverse later.
1305 Register exponent_abs = x13;
1306 __ Cmp(exponent_integer, 0);
1307 __ Cneg(exponent_abs, exponent_integer, mi);
1308 // x13 exponent_abs The value of abs(exponent_integer).
1310 // Repeatedly multiply to calculate the power.
1312 // For each bit n (exponent_integer{n}) {
1313 // if (exponent_integer{n}) {
1317 // if (remaining bits in exponent_integer are all zero) {
1321 Label power_loop, power_loop_entry, power_loop_exit;
1322 __ Fmov(scratch1_double, base_double);
1323 __ Fmov(base_double_copy, base_double);
1324 __ Fmov(result_double, 1.0);
1325 __ B(&power_loop_entry);
1327 __ Bind(&power_loop);
1328 __ Fmul(scratch1_double, scratch1_double, scratch1_double);
1329 __ Lsr(exponent_abs, exponent_abs, 1);
1330 __ Cbz(exponent_abs, &power_loop_exit);
1332 __ Bind(&power_loop_entry);
1333 __ Tbz(exponent_abs, 0, &power_loop);
1334 __ Fmul(result_double, result_double, scratch1_double);
1337 __ Bind(&power_loop_exit);
1339 // If the exponent was positive, result_double holds the result.
1340 __ Tbz(exponent_integer, kXSignBit, &done);
1342 // The exponent was negative, so find the inverse.
1343 __ Fmov(scratch0_double, 1.0);
1344 __ Fdiv(result_double, scratch0_double, result_double);
1345 // ECMA-262 only requires Math.pow to return an 'implementation-dependent
1346 // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
1347 // to calculate the subnormal value 2^-1074. This method of calculating
1348 // negative powers doesn't work because 2^1074 overflows to infinity. To
1349 // catch this corner-case, we bail out if the result was 0. (This can only
1350 // occur if the divisor is infinity or the base is zero.)
1351 __ Fcmp(result_double, 0.0);
1354 if (exponent_type_ == ON_STACK) {
1355 // Bail out to runtime code.
1356 __ Bind(&call_runtime);
1357 // Put the arguments back on the stack.
1358 __ Push(base_tagged, exponent_tagged);
1359 __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
1363 __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
1365 ASSERT(result_tagged.is(x0));
1366 __ IncrementCounter(
1367 isolate()->counters()->math_pow(), 1, scratch0, scratch1);
1370 AllowExternalCallThatCantCauseGC scope(masm);
1371 __ Mov(saved_lr, lr);
1372 __ Fmov(base_double, base_double_copy);
1373 __ Scvtf(exponent_double, exponent_integer);
1375 ExternalReference::power_double_double_function(isolate()),
1377 __ Mov(lr, saved_lr);
1379 __ IncrementCounter(
1380 isolate()->counters()->math_pow(), 1, scratch0, scratch1);
1386 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1387 // It is important that the following stubs are generated in this order
1388 // because pregenerated stubs can only call other pregenerated stubs.
1389 // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
1391 CEntryStub::GenerateAheadOfTime(isolate);
1392 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1393 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1394 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1395 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1396 BinaryOpICStub::GenerateAheadOfTime(isolate);
1397 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
1398 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
1399 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1403 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1404 StoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
1406 StoreRegistersStateStub stub2(isolate, kSaveFPRegs);
1411 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1412 RestoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
1414 RestoreRegistersStateStub stub2(isolate, kSaveFPRegs);
1419 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1420 // Floating-point code doesn't get special handling in ARM64, so there's
1421 // nothing to do here.
1426 bool CEntryStub::NeedsImmovableCode() {
1427 // CEntryStub stores the return address on the stack before calling into
1428 // C++ code. In some cases, the VM accesses this address, but it is not used
1429 // when the C++ code returns to the stub because LR holds the return address
1430 // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
1431 // returning to dead code.
1432 // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
1433 // find any comment to confirm this, and I don't hit any crashes whatever
1434 // this function returns. The anaylsis should be properly confirmed.
1439 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1440 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
1442 CEntryStub stub_fp(isolate, 1, kSaveFPRegs);
1447 void CEntryStub::Generate(MacroAssembler* masm) {
1448 // The Abort mechanism relies on CallRuntime, which in turn relies on
1449 // CEntryStub, so until this stub has been generated, we have to use a
1450 // fall-back Abort mechanism.
1452 // Note that this stub must be generated before any use of Abort.
1453 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
1455 ASM_LOCATION("CEntryStub::Generate entry");
1456 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1458 // Register parameters:
1459 // x0: argc (including receiver, untagged)
1462 // The stack on entry holds the arguments and the receiver, with the receiver
1463 // at the highest address:
1465 // jssp]argc-1]: receiver
1466 // jssp[argc-2]: arg[argc-2]
1471 // The arguments are in reverse order, so that arg[argc-2] is actually the
1472 // first argument to the target function and arg[0] is the last.
1473 ASSERT(jssp.Is(__ StackPointer()));
1474 const Register& argc_input = x0;
1475 const Register& target_input = x1;
1477 // Calculate argv, argc and the target address, and store them in
1478 // callee-saved registers so we can retry the call without having to reload
1480 // TODO(jbramley): If the first call attempt succeeds in the common case (as
1481 // it should), then we might be better off putting these parameters directly
1482 // into their argument registers, rather than using callee-saved registers and
1483 // preserving them on the stack.
1484 const Register& argv = x21;
1485 const Register& argc = x22;
1486 const Register& target = x23;
1488 // Derive argv from the stack pointer so that it points to the first argument
1489 // (arg[argc-2]), or just below the receiver in case there are no arguments.
1490 // - Adjust for the arg[] array.
1491 Register temp_argv = x11;
1492 __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
1493 // - Adjust for the receiver.
1494 __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
1496 // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
1498 FrameScope scope(masm, StackFrame::MANUAL);
1499 __ EnterExitFrame(save_doubles_, x10, 3);
1500 ASSERT(csp.Is(__ StackPointer()));
1502 // Poke callee-saved registers into reserved space.
1503 __ Poke(argv, 1 * kPointerSize);
1504 __ Poke(argc, 2 * kPointerSize);
1505 __ Poke(target, 3 * kPointerSize);
1507 // We normally only keep tagged values in callee-saved registers, as they
1508 // could be pushed onto the stack by called stubs and functions, and on the
1509 // stack they can confuse the GC. However, we're only calling C functions
1510 // which can push arbitrary data onto the stack anyway, and so the GC won't
1511 // examine that part of the stack.
1512 __ Mov(argc, argc_input);
1513 __ Mov(target, target_input);
1514 __ Mov(argv, temp_argv);
1518 // x23 : call target
1520 // The stack (on entry) holds the arguments and the receiver, with the
1521 // receiver at the highest address:
1523 // argv[8]: receiver
1524 // argv -> argv[0]: arg[argc-2]
1526 // argv[...]: arg[1]
1527 // argv[...]: arg[0]
1529 // Immediately below (after) this is the exit frame, as constructed by
1531 // fp[8]: CallerPC (lr)
1532 // fp -> fp[0]: CallerFP (old fp)
1533 // fp[-8]: Space reserved for SPOffset.
1534 // fp[-16]: CodeObject()
1535 // csp[...]: Saved doubles, if saved_doubles is true.
1536 // csp[32]: Alignment padding, if necessary.
1537 // csp[24]: Preserved x23 (used for target).
1538 // csp[16]: Preserved x22 (used for argc).
1539 // csp[8]: Preserved x21 (used for argv).
1540 // csp -> csp[0]: Space reserved for the return address.
1542 // After a successful call, the exit frame, preserved registers (x21-x23) and
1543 // the arguments (including the receiver) are dropped or popped as
1544 // appropriate. The stub then returns.
1546 // After an unsuccessful call, the exit frame and suchlike are left
1547 // untouched, and the stub either throws an exception by jumping to one of
1548 // the exception_returned label.
1550 ASSERT(csp.Is(__ StackPointer()));
1552 // Prepare AAPCS64 arguments to pass to the builtin.
1555 __ Mov(x2, ExternalReference::isolate_address(isolate()));
1557 Label return_location;
1558 __ Adr(x12, &return_location);
1561 if (__ emit_debug_code()) {
1562 // Verify that the slot below fp[kSPOffset]-8 points to the return location
1563 // (currently in x12).
1564 UseScratchRegisterScope temps(masm);
1565 Register temp = temps.AcquireX();
1566 __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
1567 __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
1569 __ Check(eq, kReturnAddressNotFoundInFrame);
1572 // Call the builtin.
1574 __ Bind(&return_location);
1576 // x0 result The return code from the call.
1580 const Register& result = x0;
1582 // Check result for exception sentinel.
1583 Label exception_returned;
1584 __ CompareRoot(result, Heap::kExceptionRootIndex);
1585 __ B(eq, &exception_returned);
1587 // The call succeeded, so unwind the stack and return.
1589 // Restore callee-saved registers x21-x23.
1592 __ Peek(argv, 1 * kPointerSize);
1593 __ Peek(argc, 2 * kPointerSize);
1594 __ Peek(target, 3 * kPointerSize);
1596 __ LeaveExitFrame(save_doubles_, x10, true);
1597 ASSERT(jssp.Is(__ StackPointer()));
1598 // Pop or drop the remaining stack slots and return from the stub.
1599 // jssp[24]: Arguments array (of size argc), including receiver.
1600 // jssp[16]: Preserved x23 (used for target).
1601 // jssp[8]: Preserved x22 (used for argc).
1602 // jssp[0]: Preserved x21 (used for argv).
1604 __ AssertFPCRState();
1607 // The stack pointer is still csp if we aren't returning, and the frame
1608 // hasn't changed (except for the return address).
1609 __ SetStackPointer(csp);
1611 // Handling of exception.
1612 __ Bind(&exception_returned);
1614 // Retrieve the pending exception.
1615 ExternalReference pending_exception_address(
1616 Isolate::kPendingExceptionAddress, isolate());
1617 const Register& exception = result;
1618 const Register& exception_address = x11;
1619 __ Mov(exception_address, Operand(pending_exception_address));
1620 __ Ldr(exception, MemOperand(exception_address));
1622 // Clear the pending exception.
1623 __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
1624 __ Str(x10, MemOperand(exception_address));
1626 // x0 exception The exception descriptor.
1631 // Special handling of termination exceptions, which are uncatchable by
1633 Label throw_termination_exception;
1634 __ Cmp(exception, Operand(isolate()->factory()->termination_exception()));
1635 __ B(eq, &throw_termination_exception);
1637 // We didn't execute a return case, so the stack frame hasn't been updated
1638 // (except for the return address slot). However, we don't need to initialize
1639 // jssp because the throw method will immediately overwrite it when it
1640 // unwinds the stack.
1641 __ SetStackPointer(jssp);
1643 ASM_LOCATION("Throw normal");
1647 __ Throw(x0, x10, x11, x12, x13);
1649 __ Bind(&throw_termination_exception);
1650 ASM_LOCATION("Throw termination");
1654 __ ThrowUncatchable(x0, x10, x11, x12, x13);
1658 // This is the entry point from C++. 5 arguments are provided in x0-x4.
1659 // See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
1668 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
1669 ASSERT(jssp.Is(__ StackPointer()));
1670 Register code_entry = x0;
1672 // Enable instruction instrumentation. This only works on the simulator, and
1673 // will have no effect on the model or real hardware.
1674 __ EnableInstrumentation();
1676 Label invoke, handler_entry, exit;
1678 // Push callee-saved registers and synchronize the system stack pointer (csp)
1679 // and the JavaScript stack pointer (jssp).
1681 // We must not write to jssp until after the PushCalleeSavedRegisters()
1682 // call, since jssp is itself a callee-saved register.
1683 __ SetStackPointer(csp);
1684 __ PushCalleeSavedRegisters();
1686 __ SetStackPointer(jssp);
1688 // Configure the FPCR. We don't restore it, so this is technically not allowed
1689 // according to AAPCS64. However, we only set default-NaN mode and this will
1690 // be harmless for most C code. Also, it works for ARM.
1693 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1695 // Set up the reserved register for 0.0.
1696 __ Fmov(fp_zero, 0.0);
1698 // Build an entry frame (see layout below).
1699 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
1700 int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
1701 __ Mov(x13, bad_frame_pointer);
1702 __ Mov(x12, Smi::FromInt(marker));
1703 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1704 __ Ldr(x10, MemOperand(x11));
1706 __ Push(x13, xzr, x12, x10);
1708 __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
1710 // Push the JS entry frame marker. Also set js_entry_sp if this is the
1711 // outermost JS call.
1712 Label non_outermost_js, done;
1713 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1714 __ Mov(x10, ExternalReference(js_entry_sp));
1715 __ Ldr(x11, MemOperand(x10));
1716 __ Cbnz(x11, &non_outermost_js);
1717 __ Str(fp, MemOperand(x10));
1718 __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1721 __ Bind(&non_outermost_js);
1722 // We spare one instruction by pushing xzr since the marker is 0.
1723 ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
1727 // The frame set up looks like this:
1728 // jssp[0] : JS entry frame marker.
1729 // jssp[1] : C entry FP.
1730 // jssp[2] : stack frame marker.
1731 // jssp[3] : stack frmae marker.
1732 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1735 // Jump to a faked try block that does the invoke, with a faked catch
1736 // block that sets the pending exception.
1739 // Prevent the constant pool from being emitted between the record of the
1740 // handler_entry position and the first instruction of the sequence here.
1741 // There is no risk because Assembler::Emit() emits the instruction before
1742 // checking for constant pool emission, but we do not want to depend on
1745 Assembler::BlockPoolsScope block_pools(masm);
1746 __ bind(&handler_entry);
1747 handler_offset_ = handler_entry.pos();
1748 // Caught exception: Store result (exception) in the pending exception
1749 // field in the JSEnv and return a failure sentinel. Coming in here the
1750 // fp will be invalid because the PushTryHandler below sets it to 0 to
1751 // signal the existence of the JSEntry frame.
1752 __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1755 __ Str(code_entry, MemOperand(x10));
1756 __ LoadRoot(x0, Heap::kExceptionRootIndex);
1759 // Invoke: Link this frame into the handler chain. There's only one
1760 // handler block in this code object, so its index is 0.
1762 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1763 // If an exception not caught by another handler occurs, this handler
1764 // returns control to the code after the B(&invoke) above, which
1765 // restores all callee-saved registers (including cp and fp) to their
1766 // saved values before returning a failure to C.
1768 // Clear any pending exceptions.
1769 __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
1770 __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1772 __ Str(x10, MemOperand(x11));
1774 // Invoke the function by calling through the JS entry trampoline builtin.
1775 // Notice that we cannot store a reference to the trampoline code directly in
1776 // this stub, because runtime stubs are not traversed when doing GC.
1778 // Expected registers by Builtins::JSEntryTrampoline
1784 ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
1785 : Builtins::kJSEntryTrampoline,
1789 // Call the JSEntryTrampoline.
1790 __ Ldr(x11, MemOperand(x10)); // Dereference the address.
1791 __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
1794 // Unlink this frame from the handler chain.
1799 // x0 holds the result.
1800 // The stack pointer points to the top of the entry frame pushed on entry from
1801 // C++ (at the beginning of this stub):
1802 // jssp[0] : JS entry frame marker.
1803 // jssp[1] : C entry FP.
1804 // jssp[2] : stack frame marker.
1805 // jssp[3] : stack frmae marker.
1806 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1808 // Check if the current stack frame is marked as the outermost JS frame.
1809 Label non_outermost_js_2;
1811 __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1812 __ B(ne, &non_outermost_js_2);
1813 __ Mov(x11, ExternalReference(js_entry_sp));
1814 __ Str(xzr, MemOperand(x11));
1815 __ Bind(&non_outermost_js_2);
1817 // Restore the top frame descriptors from the stack.
1819 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1820 __ Str(x10, MemOperand(x11));
1822 // Reset the stack to the callee saved registers.
1823 __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
1824 // Restore the callee-saved registers and return.
1825 ASSERT(jssp.Is(__ StackPointer()));
1827 __ SetStackPointer(csp);
1828 __ PopCalleeSavedRegisters();
1829 // After this point, we must not modify jssp because it is a callee-saved
1830 // register which we have just restored.
1835 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1838 if (kind() == Code::KEYED_LOAD_IC) {
1839 // ----------- S t a t e -------------
1840 // -- lr : return address
1843 // -----------------------------------
1846 __ Cmp(key, Operand(isolate()->factory()->prototype_string()));
1849 ASSERT(kind() == Code::LOAD_IC);
1850 // ----------- S t a t e -------------
1851 // -- lr : return address
1854 // -- sp[0] : receiver
1855 // -----------------------------------
1859 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
1862 StubCompiler::TailCallBuiltin(masm,
1863 BaseLoadStoreStubCompiler::MissBuiltin(kind()));
1867 void InstanceofStub::Generate(MacroAssembler* masm) {
1869 // jssp[0]: function.
1872 // Returns result in x0. Zero indicates instanceof, smi 1 indicates not
1875 Register result = x0;
1876 Register function = right();
1877 Register object = left();
1878 Register scratch1 = x6;
1879 Register scratch2 = x7;
1880 Register res_true = x8;
1881 Register res_false = x9;
1882 // Only used if there was an inline map check site. (See
1883 // LCodeGen::DoInstanceOfKnownGlobal().)
1884 Register map_check_site = x4;
1885 // Delta for the instructions generated between the inline map check and the
1886 // instruction setting the result.
1887 const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
1889 Label not_js_object, slow;
1891 if (!HasArgsInRegisters()) {
1892 __ Pop(function, object);
1895 if (ReturnTrueFalseObject()) {
1896 __ LoadTrueFalseRoots(res_true, res_false);
1898 // This is counter-intuitive, but correct.
1899 __ Mov(res_true, Smi::FromInt(0));
1900 __ Mov(res_false, Smi::FromInt(1));
1903 // Check that the left hand side is a JS object and load its map as a side
1906 __ JumpIfSmi(object, ¬_js_object);
1907 __ IsObjectJSObjectType(object, map, scratch2, ¬_js_object);
1909 // If there is a call site cache, don't look in the global cache, but do the
1910 // real lookup and update the call site cache.
1911 if (!HasCallSiteInlineCheck()) {
1913 __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
1914 __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
1915 __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
1920 // Get the prototype of the function.
1921 Register prototype = x13;
1922 __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
1923 MacroAssembler::kMissOnBoundFunction);
1925 // Check that the function prototype is a JS object.
1926 __ JumpIfSmi(prototype, &slow);
1927 __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
1929 // Update the global instanceof or call site inlined cache with the current
1930 // map and function. The cached answer will be set when it is known below.
1931 if (HasCallSiteInlineCheck()) {
1932 // Patch the (relocated) inlined map check.
1933 __ GetRelocatedValueLocation(map_check_site, scratch1);
1934 // We have a cell, so need another level of dereferencing.
1935 __ Ldr(scratch1, MemOperand(scratch1));
1936 __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
1938 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1939 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1942 Label return_true, return_result;
1944 // Loop through the prototype chain looking for the function prototype.
1945 Register chain_map = x1;
1946 Register chain_prototype = x14;
1947 Register null_value = x15;
1949 __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
1950 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
1951 // Speculatively set a result.
1952 __ Mov(result, res_false);
1956 // If the chain prototype is the object prototype, return true.
1957 __ Cmp(chain_prototype, prototype);
1958 __ B(eq, &return_true);
1960 // If the chain prototype is null, we've reached the end of the chain, so
1962 __ Cmp(chain_prototype, null_value);
1963 __ B(eq, &return_result);
1965 // Otherwise, load the next prototype in the chain, and loop.
1966 __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
1967 __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
1971 // Return sequence when no arguments are on the stack.
1972 // We cannot fall through to here.
1973 __ Bind(&return_true);
1974 __ Mov(result, res_true);
1975 __ Bind(&return_result);
1976 if (HasCallSiteInlineCheck()) {
1977 ASSERT(ReturnTrueFalseObject());
1978 __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
1979 __ GetRelocatedValueLocation(map_check_site, scratch2);
1980 __ Str(result, MemOperand(scratch2));
1982 __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
1986 Label object_not_null, object_not_null_or_smi;
1988 __ Bind(¬_js_object);
1989 Register object_type = x14;
1990 // x0 result result return register (uninit)
1991 // x10 function pointer to function
1992 // x11 object pointer to object
1993 // x14 object_type type of object (uninit)
1995 // Before null, smi and string checks, check that the rhs is a function.
1996 // For a non-function rhs, an exception must be thrown.
1997 __ JumpIfSmi(function, &slow);
1998 __ JumpIfNotObjectType(
1999 function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
2001 __ Mov(result, res_false);
2003 // Null is not instance of anything.
2004 __ Cmp(object_type, Operand(isolate()->factory()->null_value()));
2005 __ B(ne, &object_not_null);
2008 __ Bind(&object_not_null);
2009 // Smi values are not instances of anything.
2010 __ JumpIfNotSmi(object, &object_not_null_or_smi);
2013 __ Bind(&object_not_null_or_smi);
2014 // String values are not instances of anything.
2015 __ IsObjectJSStringType(object, scratch2, &slow);
2018 // Slow-case. Tail call builtin.
2021 FrameScope scope(masm, StackFrame::INTERNAL);
2022 // Arguments have either been passed into registers or have been previously
2023 // popped. We need to push them before calling builtin.
2024 __ Push(object, function);
2025 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2027 if (ReturnTrueFalseObject()) {
2028 // Reload true/false because they were clobbered in the builtin call.
2029 __ LoadTrueFalseRoots(res_true, res_false);
2031 __ Csel(result, res_true, res_false, eq);
2037 Register InstanceofStub::left() {
2038 // Object to check (instanceof lhs).
2043 Register InstanceofStub::right() {
2044 // Constructor function (instanceof rhs).
2049 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2050 Register arg_count = x0;
2053 // The displacement is the offset of the last parameter (if any) relative
2054 // to the frame pointer.
2055 static const int kDisplacement =
2056 StandardFrameConstants::kCallerSPOffset - kPointerSize;
2058 // Check that the key is a smi.
2060 __ JumpIfNotSmi(key, &slow);
2062 // Check if the calling frame is an arguments adaptor frame.
2063 Register local_fp = x11;
2064 Register caller_fp = x11;
2065 Register caller_ctx = x12;
2067 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2068 __ Ldr(caller_ctx, MemOperand(caller_fp,
2069 StandardFrameConstants::kContextOffset));
2070 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2071 __ Csel(local_fp, fp, caller_fp, ne);
2072 __ B(ne, &skip_adaptor);
2074 // Load the actual arguments limit found in the arguments adaptor frame.
2075 __ Ldr(arg_count, MemOperand(caller_fp,
2076 ArgumentsAdaptorFrameConstants::kLengthOffset));
2077 __ Bind(&skip_adaptor);
2079 // Check index against formal parameters count limit. Use unsigned comparison
2080 // to get negative check for free: branch if key < 0 or key >= arg_count.
2081 __ Cmp(key, arg_count);
2084 // Read the argument from the stack and return it.
2085 __ Sub(x10, arg_count, key);
2086 __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
2087 __ Ldr(x0, MemOperand(x10, kDisplacement));
2090 // Slow case: handle non-smi or out-of-bounds access to arguments by calling
2091 // the runtime system.
2094 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2098 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
2099 // Stack layout on entry.
2100 // jssp[0]: number of parameters (tagged)
2101 // jssp[8]: address of receiver argument
2102 // jssp[16]: function
2104 // Check if the calling frame is an arguments adaptor frame.
2106 Register caller_fp = x10;
2107 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2108 // Load and untag the context.
2109 STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
2110 __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
2111 (kSmiShift / kBitsPerByte)));
2112 __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
2115 // Patch the arguments.length and parameters pointer in the current frame.
2116 __ Ldr(x11, MemOperand(caller_fp,
2117 ArgumentsAdaptorFrameConstants::kLengthOffset));
2118 __ Poke(x11, 0 * kXRegSize);
2119 __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
2120 __ Add(x10, x10, StandardFrameConstants::kCallerSPOffset);
2121 __ Poke(x10, 1 * kXRegSize);
2124 __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
2128 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
2129 // Stack layout on entry.
2130 // jssp[0]: number of parameters (tagged)
2131 // jssp[8]: address of receiver argument
2132 // jssp[16]: function
2134 // Returns pointer to result object in x0.
2136 // Note: arg_count_smi is an alias of param_count_smi.
2137 Register arg_count_smi = x3;
2138 Register param_count_smi = x3;
2139 Register param_count = x7;
2140 Register recv_arg = x14;
2141 Register function = x4;
2142 __ Pop(param_count_smi, recv_arg, function);
2143 __ SmiUntag(param_count, param_count_smi);
2145 // Check if the calling frame is an arguments adaptor frame.
2146 Register caller_fp = x11;
2147 Register caller_ctx = x12;
2149 Label adaptor_frame, try_allocate;
2150 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2151 __ Ldr(caller_ctx, MemOperand(caller_fp,
2152 StandardFrameConstants::kContextOffset));
2153 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2154 __ B(eq, &adaptor_frame);
2156 // No adaptor, parameter count = argument count.
2158 // x1 mapped_params number of mapped params, min(params, args) (uninit)
2159 // x2 arg_count number of function arguments (uninit)
2160 // x3 arg_count_smi number of function arguments (smi)
2161 // x4 function function pointer
2162 // x7 param_count number of function parameters
2163 // x11 caller_fp caller's frame pointer
2164 // x14 recv_arg pointer to receiver arguments
2166 Register arg_count = x2;
2167 __ Mov(arg_count, param_count);
2168 __ B(&try_allocate);
2170 // We have an adaptor frame. Patch the parameters pointer.
2171 __ Bind(&adaptor_frame);
2172 __ Ldr(arg_count_smi,
2173 MemOperand(caller_fp,
2174 ArgumentsAdaptorFrameConstants::kLengthOffset));
2175 __ SmiUntag(arg_count, arg_count_smi);
2176 __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
2177 __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
2179 // Compute the mapped parameter count = min(param_count, arg_count)
2180 Register mapped_params = x1;
2181 __ Cmp(param_count, arg_count);
2182 __ Csel(mapped_params, param_count, arg_count, lt);
2184 __ Bind(&try_allocate);
2186 // x0 alloc_obj pointer to allocated objects: param map, backing
2187 // store, arguments (uninit)
2188 // x1 mapped_params number of mapped parameters, min(params, args)
2189 // x2 arg_count number of function arguments
2190 // x3 arg_count_smi number of function arguments (smi)
2191 // x4 function function pointer
2192 // x7 param_count number of function parameters
2193 // x10 size size of objects to allocate (uninit)
2194 // x14 recv_arg pointer to receiver arguments
2196 // Compute the size of backing store, parameter map, and arguments object.
2197 // 1. Parameter map, has two extra words containing context and backing
2199 const int kParameterMapHeaderSize =
2200 FixedArray::kHeaderSize + 2 * kPointerSize;
2202 // Calculate the parameter map size, assuming it exists.
2203 Register size = x10;
2204 __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
2205 __ Add(size, size, kParameterMapHeaderSize);
2207 // If there are no mapped parameters, set the running size total to zero.
2208 // Otherwise, use the parameter map size calculated earlier.
2209 __ Cmp(mapped_params, 0);
2210 __ CzeroX(size, eq);
2212 // 2. Add the size of the backing store and arguments object.
2213 __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
2215 FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize);
2217 // Do the allocation of all three objects in one go. Assign this to x0, as it
2218 // will be returned to the caller.
2219 Register alloc_obj = x0;
2220 __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
2222 // Get the arguments boilerplate from the current (global) context.
2224 // x0 alloc_obj pointer to allocated objects (param map, backing
2225 // store, arguments)
2226 // x1 mapped_params number of mapped parameters, min(params, args)
2227 // x2 arg_count number of function arguments
2228 // x3 arg_count_smi number of function arguments (smi)
2229 // x4 function function pointer
2230 // x7 param_count number of function parameters
2231 // x11 args_offset offset to args (or aliased args) boilerplate (uninit)
2232 // x14 recv_arg pointer to receiver arguments
2234 Register global_object = x10;
2235 Register global_ctx = x10;
2236 Register args_offset = x11;
2237 Register aliased_args_offset = x10;
2238 __ Ldr(global_object, GlobalObjectMemOperand());
2239 __ Ldr(global_ctx, FieldMemOperand(global_object,
2240 GlobalObject::kNativeContextOffset));
2243 ContextMemOperand(global_ctx,
2244 Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX));
2245 __ Ldr(aliased_args_offset,
2246 ContextMemOperand(global_ctx,
2247 Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
2248 __ Cmp(mapped_params, 0);
2249 __ CmovX(args_offset, aliased_args_offset, ne);
2251 // Copy the JS object part.
2252 __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
2253 JSObject::kHeaderSize / kPointerSize);
2255 // Set up the callee in-object property.
2256 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2257 const int kCalleeOffset = JSObject::kHeaderSize +
2258 Heap::kArgumentsCalleeIndex * kPointerSize;
2259 __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
2261 // Use the length and set that as an in-object property.
2262 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2263 const int kLengthOffset = JSObject::kHeaderSize +
2264 Heap::kArgumentsLengthIndex * kPointerSize;
2265 __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
2267 // Set up the elements pointer in the allocated arguments object.
2268 // If we allocated a parameter map, "elements" will point there, otherwise
2269 // it will point to the backing store.
2271 // x0 alloc_obj pointer to allocated objects (param map, backing
2272 // store, arguments)
2273 // x1 mapped_params number of mapped parameters, min(params, args)
2274 // x2 arg_count number of function arguments
2275 // x3 arg_count_smi number of function arguments (smi)
2276 // x4 function function pointer
2277 // x5 elements pointer to parameter map or backing store (uninit)
2278 // x6 backing_store pointer to backing store (uninit)
2279 // x7 param_count number of function parameters
2280 // x14 recv_arg pointer to receiver arguments
2282 Register elements = x5;
2283 __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
2284 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2286 // Initialize parameter map. If there are no mapped arguments, we're done.
2287 Label skip_parameter_map;
2288 __ Cmp(mapped_params, 0);
2289 // Set up backing store address, because it is needed later for filling in
2290 // the unmapped arguments.
2291 Register backing_store = x6;
2292 __ CmovX(backing_store, elements, eq);
2293 __ B(eq, &skip_parameter_map);
2295 __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
2296 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
2297 __ Add(x10, mapped_params, 2);
2299 __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
2300 __ Str(cp, FieldMemOperand(elements,
2301 FixedArray::kHeaderSize + 0 * kPointerSize));
2302 __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
2303 __ Add(x10, x10, kParameterMapHeaderSize);
2304 __ Str(x10, FieldMemOperand(elements,
2305 FixedArray::kHeaderSize + 1 * kPointerSize));
2307 // Copy the parameter slots and the holes in the arguments.
2308 // We need to fill in mapped_parameter_count slots. Then index the context,
2309 // where parameters are stored in reverse order, at:
2311 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
2313 // The mapped parameter thus needs to get indices:
2315 // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
2316 // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
2318 // We loop from right to left.
2320 // x0 alloc_obj pointer to allocated objects (param map, backing
2321 // store, arguments)
2322 // x1 mapped_params number of mapped parameters, min(params, args)
2323 // x2 arg_count number of function arguments
2324 // x3 arg_count_smi number of function arguments (smi)
2325 // x4 function function pointer
2326 // x5 elements pointer to parameter map or backing store (uninit)
2327 // x6 backing_store pointer to backing store (uninit)
2328 // x7 param_count number of function parameters
2329 // x11 loop_count parameter loop counter (uninit)
2330 // x12 index parameter index (smi, uninit)
2331 // x13 the_hole hole value (uninit)
2332 // x14 recv_arg pointer to receiver arguments
2334 Register loop_count = x11;
2335 Register index = x12;
2336 Register the_hole = x13;
2337 Label parameters_loop, parameters_test;
2338 __ Mov(loop_count, mapped_params);
2339 __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
2340 __ Sub(index, index, mapped_params);
2342 __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
2343 __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
2344 __ Add(backing_store, backing_store, kParameterMapHeaderSize);
2346 __ B(¶meters_test);
2348 __ Bind(¶meters_loop);
2349 __ Sub(loop_count, loop_count, 1);
2350 __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
2351 __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
2352 __ Str(index, MemOperand(elements, x10));
2353 __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
2354 __ Str(the_hole, MemOperand(backing_store, x10));
2355 __ Add(index, index, Smi::FromInt(1));
2356 __ Bind(¶meters_test);
2357 __ Cbnz(loop_count, ¶meters_loop);
2359 __ Bind(&skip_parameter_map);
2360 // Copy arguments header and remaining slots (if there are any.)
2361 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
2362 __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
2363 __ Str(arg_count_smi, FieldMemOperand(backing_store,
2364 FixedArray::kLengthOffset));
2366 // x0 alloc_obj pointer to allocated objects (param map, backing
2367 // store, arguments)
2368 // x1 mapped_params number of mapped parameters, min(params, args)
2369 // x2 arg_count number of function arguments
2370 // x4 function function pointer
2371 // x3 arg_count_smi number of function arguments (smi)
2372 // x6 backing_store pointer to backing store (uninit)
2373 // x14 recv_arg pointer to receiver arguments
2375 Label arguments_loop, arguments_test;
2376 __ Mov(x10, mapped_params);
2377 __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
2378 __ B(&arguments_test);
2380 __ Bind(&arguments_loop);
2381 __ Sub(recv_arg, recv_arg, kPointerSize);
2382 __ Ldr(x11, MemOperand(recv_arg));
2383 __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
2384 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
2385 __ Add(x10, x10, 1);
2387 __ Bind(&arguments_test);
2388 __ Cmp(x10, arg_count);
2389 __ B(lt, &arguments_loop);
2393 // Do the runtime call to allocate the arguments object.
2395 __ Push(function, recv_arg, arg_count_smi);
2396 __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
2400 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2401 // Stack layout on entry.
2402 // jssp[0]: number of parameters (tagged)
2403 // jssp[8]: address of receiver argument
2404 // jssp[16]: function
2406 // Returns pointer to result object in x0.
2408 // Get the stub arguments from the frame, and make an untagged copy of the
2410 Register param_count_smi = x1;
2411 Register params = x2;
2412 Register function = x3;
2413 Register param_count = x13;
2414 __ Pop(param_count_smi, params, function);
2415 __ SmiUntag(param_count, param_count_smi);
2417 // Test if arguments adaptor needed.
2418 Register caller_fp = x11;
2419 Register caller_ctx = x12;
2420 Label try_allocate, runtime;
2421 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2422 __ Ldr(caller_ctx, MemOperand(caller_fp,
2423 StandardFrameConstants::kContextOffset));
2424 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2425 __ B(ne, &try_allocate);
2427 // x1 param_count_smi number of parameters passed to function (smi)
2428 // x2 params pointer to parameters
2429 // x3 function function pointer
2430 // x11 caller_fp caller's frame pointer
2431 // x13 param_count number of parameters passed to function
2433 // Patch the argument length and parameters pointer.
2434 __ Ldr(param_count_smi,
2435 MemOperand(caller_fp,
2436 ArgumentsAdaptorFrameConstants::kLengthOffset));
2437 __ SmiUntag(param_count, param_count_smi);
2438 __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
2439 __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
2441 // Try the new space allocation. Start out with computing the size of the
2442 // arguments object and the elements array in words.
2443 Register size = x10;
2444 __ Bind(&try_allocate);
2445 __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
2446 __ Cmp(param_count, 0);
2447 __ CzeroX(size, eq);
2448 __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize);
2450 // Do the allocation of both objects in one go. Assign this to x0, as it will
2451 // be returned to the caller.
2452 Register alloc_obj = x0;
2453 __ Allocate(size, alloc_obj, x11, x12, &runtime,
2454 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2456 // Get the arguments boilerplate from the current (native) context.
2457 Register global_object = x10;
2458 Register global_ctx = x10;
2459 Register args_offset = x4;
2460 __ Ldr(global_object, GlobalObjectMemOperand());
2461 __ Ldr(global_ctx, FieldMemOperand(global_object,
2462 GlobalObject::kNativeContextOffset));
2464 ContextMemOperand(global_ctx,
2465 Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX));
2467 // x0 alloc_obj pointer to allocated objects: parameter array and
2469 // x1 param_count_smi number of parameters passed to function (smi)
2470 // x2 params pointer to parameters
2471 // x3 function function pointer
2472 // x4 args_offset offset to arguments boilerplate
2473 // x13 param_count number of parameters passed to function
2475 // Copy the JS object part.
2476 __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
2477 JSObject::kHeaderSize / kPointerSize);
2479 // Set the smi-tagged length as an in-object property.
2480 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2481 const int kLengthOffset = JSObject::kHeaderSize +
2482 Heap::kArgumentsLengthIndex * kPointerSize;
2483 __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
2485 // If there are no actual arguments, we're done.
2487 __ Cbz(param_count, &done);
2489 // Set up the elements pointer in the allocated arguments object and
2490 // initialize the header in the elements fixed array.
2491 Register elements = x5;
2492 __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize);
2493 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2494 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
2495 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
2496 __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
2498 // x0 alloc_obj pointer to allocated objects: parameter array and
2500 // x1 param_count_smi number of parameters passed to function (smi)
2501 // x2 params pointer to parameters
2502 // x3 function function pointer
2503 // x4 array pointer to array slot (uninit)
2504 // x5 elements pointer to elements array of alloc_obj
2505 // x13 param_count number of parameters passed to function
2507 // Copy the fixed array slots.
2509 Register array = x4;
2510 // Set up pointer to first array slot.
2511 __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
2514 // Pre-decrement the parameters pointer by kPointerSize on each iteration.
2515 // Pre-decrement in order to skip receiver.
2516 __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
2517 // Post-increment elements by kPointerSize on each iteration.
2518 __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
2519 __ Sub(param_count, param_count, 1);
2520 __ Cbnz(param_count, &loop);
2522 // Return from stub.
2526 // Do the runtime call to allocate the arguments object.
2528 __ Push(function, params, param_count_smi);
2529 __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
2533 void RegExpExecStub::Generate(MacroAssembler* masm) {
2534 #ifdef V8_INTERPRETED_REGEXP
2535 __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
2536 #else // V8_INTERPRETED_REGEXP
2538 // Stack frame on entry.
2539 // jssp[0]: last_match_info (expected JSArray)
2540 // jssp[8]: previous index
2541 // jssp[16]: subject string
2542 // jssp[24]: JSRegExp object
2545 // Use of registers for this function.
2547 // Variable registers:
2548 // x10-x13 used as scratch registers
2549 // w0 string_type type of subject string
2550 // x2 jsstring_length subject string length
2551 // x3 jsregexp_object JSRegExp object
2552 // w4 string_encoding ASCII or UC16
2553 // w5 sliced_string_offset if the string is a SlicedString
2554 // offset to the underlying string
2555 // w6 string_representation groups attributes of the string:
2557 // - type of the string
2558 // - is a short external string
2559 Register string_type = w0;
2560 Register jsstring_length = x2;
2561 Register jsregexp_object = x3;
2562 Register string_encoding = w4;
2563 Register sliced_string_offset = w5;
2564 Register string_representation = w6;
2566 // These are in callee save registers and will be preserved by the call
2567 // to the native RegExp code, as this code is called using the normal
2568 // C calling convention. When calling directly from generated code the
2569 // native RegExp code will not do a GC and therefore the content of
2570 // these registers are safe to use after the call.
2572 // x19 subject subject string
2573 // x20 regexp_data RegExp data (FixedArray)
2574 // x21 last_match_info_elements info relative to the last match
2576 // x22 code_object generated regexp code
2577 Register subject = x19;
2578 Register regexp_data = x20;
2579 Register last_match_info_elements = x21;
2580 Register code_object = x22;
2582 // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
2583 CPURegList used_callee_saved_registers(subject,
2585 last_match_info_elements,
2587 __ PushCPURegList(used_callee_saved_registers);
2594 // jssp[32]: last_match_info (JSArray)
2595 // jssp[40]: previous index
2596 // jssp[48]: subject string
2597 // jssp[56]: JSRegExp object
2599 const int kLastMatchInfoOffset = 4 * kPointerSize;
2600 const int kPreviousIndexOffset = 5 * kPointerSize;
2601 const int kSubjectOffset = 6 * kPointerSize;
2602 const int kJSRegExpOffset = 7 * kPointerSize;
2604 // Ensure that a RegExp stack is allocated.
2605 ExternalReference address_of_regexp_stack_memory_address =
2606 ExternalReference::address_of_regexp_stack_memory_address(isolate());
2607 ExternalReference address_of_regexp_stack_memory_size =
2608 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2609 __ Mov(x10, address_of_regexp_stack_memory_size);
2610 __ Ldr(x10, MemOperand(x10));
2611 __ Cbz(x10, &runtime);
2613 // Check that the first argument is a JSRegExp object.
2614 ASSERT(jssp.Is(__ StackPointer()));
2615 __ Peek(jsregexp_object, kJSRegExpOffset);
2616 __ JumpIfSmi(jsregexp_object, &runtime);
2617 __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
2619 // Check that the RegExp has been compiled (data contains a fixed array).
2620 __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
2621 if (FLAG_debug_code) {
2622 STATIC_ASSERT(kSmiTag == 0);
2623 __ Tst(regexp_data, kSmiTagMask);
2624 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2625 __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
2626 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2629 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2630 __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2631 __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
2634 // Check that the number of captures fit in the static offsets vector buffer.
2635 // We have always at least one capture for the whole match, plus additional
2636 // ones due to capturing parentheses. A capture takes 2 registers.
2637 // The number of capture registers then is (number_of_captures + 1) * 2.
2639 UntagSmiFieldMemOperand(regexp_data,
2640 JSRegExp::kIrregexpCaptureCountOffset));
2641 // Check (number_of_captures + 1) * 2 <= offsets vector size
2642 // number_of_captures * 2 <= offsets vector size - 2
2643 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2644 __ Add(x10, x10, x10);
2645 __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
2648 // Initialize offset for possibly sliced string.
2649 __ Mov(sliced_string_offset, 0);
2651 ASSERT(jssp.Is(__ StackPointer()));
2652 __ Peek(subject, kSubjectOffset);
2653 __ JumpIfSmi(subject, &runtime);
2655 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2656 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2658 __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
2660 // Handle subject string according to its encoding and representation:
2661 // (1) Sequential string? If yes, go to (5).
2662 // (2) Anything but sequential or cons? If yes, go to (6).
2663 // (3) Cons string. If the string is flat, replace subject with first string.
2664 // Otherwise bailout.
2665 // (4) Is subject external? If yes, go to (7).
2666 // (5) Sequential string. Load regexp code according to encoding.
2670 // Deferred code at the end of the stub:
2671 // (6) Not a long external string? If yes, go to (8).
2672 // (7) External string. Make it, offset-wise, look like a sequential string.
2674 // (8) Short external string or not a string? If yes, bail out to runtime.
2675 // (9) Sliced string. Replace subject with parent. Go to (4).
2677 Label check_underlying; // (4)
2678 Label seq_string; // (5)
2679 Label not_seq_nor_cons; // (6)
2680 Label external_string; // (7)
2681 Label not_long_external; // (8)
2683 // (1) Sequential string? If yes, go to (5).
2684 __ And(string_representation,
2687 kStringRepresentationMask |
2688 kShortExternalStringMask);
2689 // We depend on the fact that Strings of type
2690 // SeqString and not ShortExternalString are defined
2691 // by the following pattern:
2692 // string_type: 0XX0 XX00
2695 // | | is a SeqString
2696 // | is not a short external String
2698 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2699 STATIC_ASSERT(kShortExternalStringTag != 0);
2700 __ Cbz(string_representation, &seq_string); // Go to (5).
2702 // (2) Anything but sequential or cons? If yes, go to (6).
2703 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2704 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2705 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2706 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2707 __ Cmp(string_representation, kExternalStringTag);
2708 __ B(ge, ¬_seq_nor_cons); // Go to (6).
2710 // (3) Cons string. Check that it's flat.
2711 __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
2712 __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
2713 // Replace subject with first string.
2714 __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2716 // (4) Is subject external? If yes, go to (7).
2717 __ Bind(&check_underlying);
2718 // Reload the string type.
2719 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2720 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2721 STATIC_ASSERT(kSeqStringTag == 0);
2722 // The underlying external string is never a short external string.
2723 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2724 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2725 __ TestAndBranchIfAnySet(string_type.X(),
2726 kStringRepresentationMask,
2727 &external_string); // Go to (7).
2729 // (5) Sequential string. Load regexp code according to encoding.
2730 __ Bind(&seq_string);
2732 // Check that the third argument is a positive smi less than the subject
2733 // string length. A negative value will be greater (unsigned comparison).
2734 ASSERT(jssp.Is(__ StackPointer()));
2735 __ Peek(x10, kPreviousIndexOffset);
2736 __ JumpIfNotSmi(x10, &runtime);
2737 __ Cmp(jsstring_length, x10);
2740 // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
2741 // before entering the exit frame.
2742 __ SmiUntag(x1, x10);
2744 // The third bit determines the string encoding in string_type.
2745 STATIC_ASSERT(kOneByteStringTag == 0x04);
2746 STATIC_ASSERT(kTwoByteStringTag == 0x00);
2747 STATIC_ASSERT(kStringEncodingMask == 0x04);
2749 // Find the code object based on the assumptions above.
2750 // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
2751 // of kPointerSize to reach the latter.
2752 ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
2753 JSRegExp::kDataUC16CodeOffset);
2754 __ Mov(x10, kPointerSize);
2755 // We will need the encoding later: ASCII = 0x04
2757 __ Ands(string_encoding, string_type, kStringEncodingMask);
2759 __ Add(x10, regexp_data, x10);
2760 __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
2762 // (E) Carry on. String handling is done.
2764 // Check that the irregexp code has been generated for the actual string
2765 // encoding. If it has, the field contains a code object otherwise it contains
2766 // a smi (code flushing support).
2767 __ JumpIfSmi(code_object, &runtime);
2769 // All checks done. Now push arguments for native regexp code.
2770 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
2774 // Isolates: note we add an additional parameter here (isolate pointer).
2775 __ EnterExitFrame(false, x10, 1);
2776 ASSERT(csp.Is(__ StackPointer()));
2778 // We have 9 arguments to pass to the regexp code, therefore we have to pass
2779 // one on the stack and the rest as registers.
2781 // Note that the placement of the argument on the stack isn't standard
2783 // csp[0]: Space for the return address placed by DirectCEntryStub.
2784 // csp[8]: Argument 9, the current isolate address.
2786 __ Mov(x10, ExternalReference::isolate_address(isolate()));
2787 __ Poke(x10, kPointerSize);
2789 Register length = w11;
2790 Register previous_index_in_bytes = w12;
2791 Register start = x13;
2793 // Load start of the subject string.
2794 __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
2795 // Load the length from the original subject string from the previous stack
2796 // frame. Therefore we have to use fp, which points exactly to two pointer
2797 // sizes below the previous sp. (Because creating a new stack frame pushes
2798 // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
2799 __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2800 __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
2802 // Handle UC16 encoding, two bytes make one character.
2803 // string_encoding: if ASCII: 0x04
2805 STATIC_ASSERT(kStringEncodingMask == 0x04);
2806 __ Ubfx(string_encoding, string_encoding, 2, 1);
2807 __ Eor(string_encoding, string_encoding, 1);
2808 // string_encoding: if ASCII: 0
2811 // Convert string positions from characters to bytes.
2812 // Previous index is in x1.
2813 __ Lsl(previous_index_in_bytes, w1, string_encoding);
2814 __ Lsl(length, length, string_encoding);
2815 __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
2817 // Argument 1 (x0): Subject string.
2818 __ Mov(x0, subject);
2820 // Argument 2 (x1): Previous index, already there.
2822 // Argument 3 (x2): Get the start of input.
2823 // Start of input = start of string + previous index + substring offset
2826 __ Add(w10, previous_index_in_bytes, sliced_string_offset);
2827 __ Add(x2, start, Operand(w10, UXTW));
2830 // End of input = start of input + (length of input - previous index)
2831 __ Sub(w10, length, previous_index_in_bytes);
2832 __ Add(x3, x2, Operand(w10, UXTW));
2834 // Argument 5 (x4): static offsets vector buffer.
2835 __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
2837 // Argument 6 (x5): Set the number of capture registers to zero to force
2838 // global regexps to behave as non-global. This stub is not used for global
2842 // Argument 7 (x6): Start (high end) of backtracking stack memory area.
2843 __ Mov(x10, address_of_regexp_stack_memory_address);
2844 __ Ldr(x10, MemOperand(x10));
2845 __ Mov(x11, address_of_regexp_stack_memory_size);
2846 __ Ldr(x11, MemOperand(x11));
2847 __ Add(x6, x10, x11);
2849 // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
2852 // Locate the code entry and call it.
2853 __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
2854 DirectCEntryStub stub(isolate());
2855 stub.GenerateCall(masm, code_object);
2857 __ LeaveExitFrame(false, x10, true);
2859 // The generated regexp code returns an int32 in w0.
2860 Label failure, exception;
2861 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
2862 __ CompareAndBranch(w0,
2863 NativeRegExpMacroAssembler::EXCEPTION,
2866 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
2868 // Success: process the result from the native regexp code.
2869 Register number_of_capture_registers = x12;
2871 // Calculate number of capture registers (number_of_captures + 1) * 2
2872 // and store it in the last match info.
2874 UntagSmiFieldMemOperand(regexp_data,
2875 JSRegExp::kIrregexpCaptureCountOffset));
2876 __ Add(x10, x10, x10);
2877 __ Add(number_of_capture_registers, x10, 2);
2879 // Check that the fourth object is a JSArray object.
2880 ASSERT(jssp.Is(__ StackPointer()));
2881 __ Peek(x10, kLastMatchInfoOffset);
2882 __ JumpIfSmi(x10, &runtime);
2883 __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
2885 // Check that the JSArray is the fast case.
2886 __ Ldr(last_match_info_elements,
2887 FieldMemOperand(x10, JSArray::kElementsOffset));
2889 FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2890 __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
2892 // Check that the last match info has space for the capture registers and the
2893 // additional information (overhead).
2894 // (number_of_captures + 1) * 2 + overhead <= last match info size
2895 // (number_of_captures * 2) + 2 + overhead <= last match info size
2896 // number_of_capture_registers + overhead <= last match info size
2898 UntagSmiFieldMemOperand(last_match_info_elements,
2899 FixedArray::kLengthOffset));
2900 __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
2904 // Store the capture count.
2905 __ SmiTag(x10, number_of_capture_registers);
2907 FieldMemOperand(last_match_info_elements,
2908 RegExpImpl::kLastCaptureCountOffset));
2909 // Store last subject and last input.
2911 FieldMemOperand(last_match_info_elements,
2912 RegExpImpl::kLastSubjectOffset));
2913 // Use x10 as the subject string in order to only need
2914 // one RecordWriteStub.
2915 __ Mov(x10, subject);
2916 __ RecordWriteField(last_match_info_elements,
2917 RegExpImpl::kLastSubjectOffset,
2923 FieldMemOperand(last_match_info_elements,
2924 RegExpImpl::kLastInputOffset));
2925 __ Mov(x10, subject);
2926 __ RecordWriteField(last_match_info_elements,
2927 RegExpImpl::kLastInputOffset,
2933 Register last_match_offsets = x13;
2934 Register offsets_vector_index = x14;
2935 Register current_offset = x15;
2937 // Get the static offsets vector filled by the native regexp code
2938 // and fill the last match info.
2939 ExternalReference address_of_static_offsets_vector =
2940 ExternalReference::address_of_static_offsets_vector(isolate());
2941 __ Mov(offsets_vector_index, address_of_static_offsets_vector);
2943 Label next_capture, done;
2944 // Capture register counter starts from number of capture registers and
2945 // iterates down to zero (inclusive).
2946 __ Add(last_match_offsets,
2947 last_match_info_elements,
2948 RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
2949 __ Bind(&next_capture);
2950 __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
2952 // Read two 32 bit values from the static offsets vector buffer into
2954 __ Ldr(current_offset,
2955 MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
2956 // Store the smi values in the last match info.
2957 __ SmiTag(x10, current_offset);
2958 // Clearing the 32 bottom bits gives us a Smi.
2959 STATIC_ASSERT(kSmiShift == 32);
2960 __ And(x11, current_offset, ~kWRegMask);
2963 MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
2964 __ B(&next_capture);
2967 // Return last match info.
2968 __ Peek(x0, kLastMatchInfoOffset);
2969 __ PopCPURegList(used_callee_saved_registers);
2970 // Drop the 4 arguments of the stub from the stack.
2974 __ Bind(&exception);
2975 Register exception_value = x0;
2976 // A stack overflow (on the backtrack stack) may have occured
2977 // in the RegExp code but no exception has been created yet.
2978 // If there is no pending exception, handle that in the runtime system.
2979 __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
2981 Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2983 __ Ldr(exception_value, MemOperand(x11));
2984 __ Cmp(x10, exception_value);
2987 __ Str(x10, MemOperand(x11)); // Clear pending exception.
2989 // Check if the exception is a termination. If so, throw as uncatchable.
2990 Label termination_exception;
2991 __ JumpIfRoot(exception_value,
2992 Heap::kTerminationExceptionRootIndex,
2993 &termination_exception);
2995 __ Throw(exception_value, x10, x11, x12, x13);
2997 __ Bind(&termination_exception);
2998 __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
3001 __ Mov(x0, Operand(isolate()->factory()->null_value()));
3002 __ PopCPURegList(used_callee_saved_registers);
3003 // Drop the 4 arguments of the stub from the stack.
3008 __ PopCPURegList(used_callee_saved_registers);
3009 __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
3011 // Deferred code for string handling.
3012 // (6) Not a long external string? If yes, go to (8).
3013 __ Bind(¬_seq_nor_cons);
3014 // Compare flags are still set.
3015 __ B(ne, ¬_long_external); // Go to (8).
3017 // (7) External string. Make it, offset-wise, look like a sequential string.
3018 __ Bind(&external_string);
3019 if (masm->emit_debug_code()) {
3020 // Assert that we do not have a cons or slice (indirect strings) here.
3021 // Sequential strings have already been ruled out.
3022 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
3023 __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
3024 __ Tst(x10, kIsIndirectStringMask);
3025 __ Check(eq, kExternalStringExpectedButNotFound);
3026 __ And(x10, x10, kStringRepresentationMask);
3028 __ Check(ne, kExternalStringExpectedButNotFound);
3031 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
3032 // Move the pointer so that offset-wise, it looks like a sequential string.
3033 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3034 __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
3035 __ B(&seq_string); // Go to (5).
3037 // (8) If this is a short external string or not a string, bail out to
3039 __ Bind(¬_long_external);
3040 STATIC_ASSERT(kShortExternalStringTag != 0);
3041 __ TestAndBranchIfAnySet(string_representation,
3042 kShortExternalStringMask | kIsNotStringMask,
3045 // (9) Sliced string. Replace subject with parent.
3046 __ Ldr(sliced_string_offset,
3047 UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
3048 __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
3049 __ B(&check_underlying); // Go to (4).
3054 static void GenerateRecordCallTarget(MacroAssembler* masm,
3057 Register feedback_vector,
3060 Register scratch2) {
3061 ASM_LOCATION("GenerateRecordCallTarget");
3062 ASSERT(!AreAliased(scratch1, scratch2,
3063 argc, function, feedback_vector, index));
3064 // Cache the called function in a feedback vector slot. Cache states are
3065 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
3066 // argc : number of arguments to the construct function
3067 // function : the function to call
3068 // feedback_vector : the feedback vector
3069 // index : slot in feedback vector (smi)
3070 Label initialize, done, miss, megamorphic, not_array_function;
3072 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
3073 masm->isolate()->heap()->megamorphic_symbol());
3074 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
3075 masm->isolate()->heap()->uninitialized_symbol());
3077 // Load the cache state.
3078 __ Add(scratch1, feedback_vector,
3079 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3080 __ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
3082 // A monomorphic cache hit or an already megamorphic state: invoke the
3083 // function without changing the state.
3084 __ Cmp(scratch1, function);
3087 if (!FLAG_pretenuring_call_new) {
3088 // If we came here, we need to see if we are the array function.
3089 // If we didn't have a matching function, and we didn't find the megamorph
3090 // sentinel, then we have in the slot either some other function or an
3091 // AllocationSite. Do a map check on the object in scratch1 register.
3092 __ Ldr(scratch2, FieldMemOperand(scratch1, AllocationSite::kMapOffset));
3093 __ JumpIfNotRoot(scratch2, Heap::kAllocationSiteMapRootIndex, &miss);
3095 // Make sure the function is the Array() function
3096 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
3097 __ Cmp(function, scratch1);
3098 __ B(ne, &megamorphic);
3104 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
3106 __ JumpIfRoot(scratch1, Heap::kUninitializedSymbolRootIndex, &initialize);
3107 // MegamorphicSentinel is an immortal immovable object (undefined) so no
3108 // write-barrier is needed.
3109 __ Bind(&megamorphic);
3110 __ Add(scratch1, feedback_vector,
3111 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3112 __ LoadRoot(scratch2, Heap::kMegamorphicSymbolRootIndex);
3113 __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
3116 // An uninitialized cache is patched with the function or sentinel to
3117 // indicate the ElementsKind if function is the Array constructor.
3118 __ Bind(&initialize);
3120 if (!FLAG_pretenuring_call_new) {
3121 // Make sure the function is the Array() function
3122 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
3123 __ Cmp(function, scratch1);
3124 __ B(ne, ¬_array_function);
3126 // The target function is the Array constructor,
3127 // Create an AllocationSite if we don't already have it, store it in the
3130 FrameScope scope(masm, StackFrame::INTERNAL);
3131 CreateAllocationSiteStub create_stub(masm->isolate());
3133 // Arguments register must be smi-tagged to call out.
3135 __ Push(argc, function, feedback_vector, index);
3137 // CreateAllocationSiteStub expect the feedback vector in x2 and the slot
3139 ASSERT(feedback_vector.Is(x2) && index.Is(x3));
3140 __ CallStub(&create_stub);
3142 __ Pop(index, feedback_vector, function, argc);
3147 __ Bind(¬_array_function);
3150 // An uninitialized cache is patched with the function.
3152 __ Add(scratch1, feedback_vector,
3153 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3154 __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
3155 __ Str(function, MemOperand(scratch1, 0));
3158 __ RecordWrite(feedback_vector, scratch1, function, kLRHasNotBeenSaved,
3159 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
3166 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
3167 // Do not transform the receiver for strict mode functions.
3168 __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
3169 __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
3170 __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, cont);
3172 // Do not transform the receiver for native (Compilerhints already in x3).
3173 __ Tbnz(w4, SharedFunctionInfo::kNative, cont);
3177 static void EmitSlowCase(MacroAssembler* masm,
3181 Label* non_function) {
3182 // Check for function proxy.
3183 // x10 : function type.
3184 __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, non_function);
3185 __ Push(function); // put proxy as additional argument
3186 __ Mov(x0, argc + 1);
3188 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
3190 Handle<Code> adaptor =
3191 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3192 __ Jump(adaptor, RelocInfo::CODE_TARGET);
3195 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3196 // of the original receiver from the call site).
3197 __ Bind(non_function);
3198 __ Poke(function, argc * kXRegSize);
3199 __ Mov(x0, argc); // Set up the number of arguments.
3201 __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
3202 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3203 RelocInfo::CODE_TARGET);
3207 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
3208 // Wrap the receiver and patch it back onto the stack.
3209 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
3211 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
3214 __ Poke(x0, argc * kPointerSize);
3219 static void CallFunctionNoFeedback(MacroAssembler* masm,
3220 int argc, bool needs_checks,
3221 bool call_as_method) {
3222 // x1 function the function to call
3223 Register function = x1;
3225 Label slow, non_function, wrap, cont;
3227 // TODO(jbramley): This function has a lot of unnamed registers. Name them,
3228 // and tidy things up a bit.
3231 // Check that the function is really a JavaScript function.
3232 __ JumpIfSmi(function, &non_function);
3234 // Goto slow case if we do not have a function.
3235 __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
3238 // Fast-case: Invoke the function now.
3239 // x1 function pushed function
3240 ParameterCount actual(argc);
3242 if (call_as_method) {
3244 EmitContinueIfStrictOrNative(masm, &cont);
3247 // Compute the receiver in sloppy mode.
3248 __ Peek(x3, argc * kPointerSize);
3251 __ JumpIfSmi(x3, &wrap);
3252 __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
3260 __ InvokeFunction(function,
3265 // Slow-case: Non-function called.
3267 EmitSlowCase(masm, argc, function, type, &non_function);
3270 if (call_as_method) {
3272 EmitWrapCase(masm, argc, &cont);
3277 void CallFunctionStub::Generate(MacroAssembler* masm) {
3278 ASM_LOCATION("CallFunctionStub::Generate");
3279 CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
3283 void CallConstructStub::Generate(MacroAssembler* masm) {
3284 ASM_LOCATION("CallConstructStub::Generate");
3285 // x0 : number of arguments
3286 // x1 : the function to call
3287 // x2 : feedback vector
3288 // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
3289 Register function = x1;
3290 Label slow, non_function_call;
3292 // Check that the function is not a smi.
3293 __ JumpIfSmi(function, &non_function_call);
3294 // Check that the function is a JSFunction.
3295 Register object_type = x10;
3296 __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
3299 if (RecordCallTarget()) {
3300 GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5);
3302 __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
3303 if (FLAG_pretenuring_call_new) {
3304 // Put the AllocationSite from the feedback vector into x2.
3305 // By adding kPointerSize we encode that we know the AllocationSite
3306 // entry is at the feedback vector slot given by x3 + 1.
3307 __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize + kPointerSize));
3309 Label feedback_register_initialized;
3310 // Put the AllocationSite from the feedback vector into x2, or undefined.
3311 __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
3312 __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
3313 __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
3314 &feedback_register_initialized);
3315 __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
3316 __ bind(&feedback_register_initialized);
3319 __ AssertUndefinedOrAllocationSite(x2, x5);
3322 // Jump to the function-specific construct stub.
3323 Register jump_reg = x4;
3324 Register shared_func_info = jump_reg;
3325 Register cons_stub = jump_reg;
3326 Register cons_stub_code = jump_reg;
3327 __ Ldr(shared_func_info,
3328 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3330 FieldMemOperand(shared_func_info,
3331 SharedFunctionInfo::kConstructStubOffset));
3332 __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
3333 __ Br(cons_stub_code);
3337 __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
3338 __ B(ne, &non_function_call);
3339 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3342 __ Bind(&non_function_call);
3343 __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3346 // Set expected number of arguments to zero (not changing x0).
3348 __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3349 RelocInfo::CODE_TARGET);
3353 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
3354 __ Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3355 __ Ldr(vector, FieldMemOperand(vector,
3356 JSFunction::kSharedFunctionInfoOffset));
3357 __ Ldr(vector, FieldMemOperand(vector,
3358 SharedFunctionInfo::kFeedbackVectorOffset));
3362 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
3366 Register function = x1;
3367 Register feedback_vector = x2;
3368 Register index = x3;
3369 Register scratch = x4;
3371 EmitLoadTypeFeedbackVector(masm, feedback_vector);
3373 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch);
3374 __ Cmp(function, scratch);
3377 Register allocation_site = feedback_vector;
3378 __ Mov(x0, Operand(arg_count()));
3380 __ Add(scratch, feedback_vector,
3381 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3382 __ Ldr(allocation_site, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3384 // Verify that x2 contains an AllocationSite
3385 __ AssertUndefinedOrAllocationSite(allocation_site, scratch);
3386 ArrayConstructorStub stub(masm->isolate(), arg_count());
3387 __ TailCallStub(&stub);
3390 GenerateMiss(masm, IC::kCallIC_Customization_Miss);
3392 // The slow case, we need this no matter what to complete a call after a miss.
3393 CallFunctionNoFeedback(masm,
3402 void CallICStub::Generate(MacroAssembler* masm) {
3403 ASM_LOCATION("CallICStub");
3406 // x3 - slot id (Smi)
3407 Label extra_checks_or_miss, slow_start;
3408 Label slow, non_function, wrap, cont;
3409 Label have_js_function;
3410 int argc = state_.arg_count();
3411 ParameterCount actual(argc);
3413 Register function = x1;
3414 Register feedback_vector = x2;
3415 Register index = x3;
3418 EmitLoadTypeFeedbackVector(masm, feedback_vector);
3420 // The checks. First, does x1 match the recorded monomorphic target?
3421 __ Add(x4, feedback_vector,
3422 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3423 __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
3425 __ Cmp(x4, function);
3426 __ B(ne, &extra_checks_or_miss);
3428 __ bind(&have_js_function);
3429 if (state_.CallAsMethod()) {
3430 EmitContinueIfStrictOrNative(masm, &cont);
3432 // Compute the receiver in sloppy mode.
3433 __ Peek(x3, argc * kPointerSize);
3435 __ JumpIfSmi(x3, &wrap);
3436 __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
3441 __ InvokeFunction(function,
3447 EmitSlowCase(masm, argc, function, type, &non_function);
3449 if (state_.CallAsMethod()) {
3451 EmitWrapCase(masm, argc, &cont);
3454 __ bind(&extra_checks_or_miss);
3457 __ JumpIfRoot(x4, Heap::kMegamorphicSymbolRootIndex, &slow_start);
3458 __ JumpIfRoot(x4, Heap::kUninitializedSymbolRootIndex, &miss);
3460 if (!FLAG_trace_ic) {
3461 // We are going megamorphic, and we don't want to visit the runtime.
3462 __ Add(x4, feedback_vector,
3463 Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3464 __ LoadRoot(x5, Heap::kMegamorphicSymbolRootIndex);
3465 __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
3469 // We are here because tracing is on or we are going monomorphic.
3471 GenerateMiss(masm, IC::kCallIC_Miss);
3474 __ bind(&slow_start);
3476 // Check that the function is really a JavaScript function.
3477 __ JumpIfSmi(function, &non_function);
3479 // Goto slow case if we do not have a function.
3480 __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
3481 __ B(&have_js_function);
3485 void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
3486 ASM_LOCATION("CallICStub[Miss]");
3488 // Get the receiver of the function from the stack; 1 ~ return address.
3489 __ Peek(x4, (state_.arg_count() + 1) * kPointerSize);
3492 FrameScope scope(masm, StackFrame::INTERNAL);
3494 // Push the receiver and the function and feedback info.
3495 __ Push(x4, x1, x2, x3);
3498 ExternalReference miss = ExternalReference(IC_Utility(id),
3500 __ CallExternalReference(miss, 4);
3502 // Move result to edi and exit the internal frame.
3508 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3509 // If the receiver is a smi trigger the non-string case.
3510 __ JumpIfSmi(object_, receiver_not_string_);
3512 // Fetch the instance type of the receiver into result register.
3513 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3514 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3516 // If the receiver is not a string trigger the non-string case.
3517 __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
3519 // If the index is non-smi trigger the non-smi case.
3520 __ JumpIfNotSmi(index_, &index_not_smi_);
3522 __ Bind(&got_smi_index_);
3523 // Check for index out of range.
3524 __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
3525 __ Cmp(result_, Operand::UntagSmi(index_));
3526 __ B(ls, index_out_of_range_);
3528 __ SmiUntag(index_);
3530 StringCharLoadGenerator::Generate(masm,
3540 void StringCharCodeAtGenerator::GenerateSlow(
3541 MacroAssembler* masm,
3542 const RuntimeCallHelper& call_helper) {
3543 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3545 __ Bind(&index_not_smi_);
3546 // If index is a heap number, try converting it to an integer.
3549 Heap::kHeapNumberMapRootIndex,
3552 call_helper.BeforeCall(masm);
3553 // Save object_ on the stack and pass index_ as argument for runtime call.
3554 __ Push(object_, index_);
3555 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3556 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3558 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3559 // NumberToSmi discards numbers that are not exact integers.
3560 __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
3562 // Save the conversion result before the pop instructions below
3563 // have a chance to overwrite it.
3566 // Reload the instance type.
3567 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3568 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3569 call_helper.AfterCall(masm);
3571 // If index is still not a smi, it must be out of range.
3572 __ JumpIfNotSmi(index_, index_out_of_range_);
3573 // Otherwise, return to the fast path.
3574 __ B(&got_smi_index_);
3576 // Call runtime. We get here when the receiver is a string and the
3577 // index is a number, but the code of getting the actual character
3578 // is too complex (e.g., when the string needs to be flattened).
3579 __ Bind(&call_runtime_);
3580 call_helper.BeforeCall(masm);
3582 __ Push(object_, index_);
3583 __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
3584 __ Mov(result_, x0);
3585 call_helper.AfterCall(masm);
3588 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3592 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3593 __ JumpIfNotSmi(code_, &slow_case_);
3594 __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
3595 __ B(hi, &slow_case_);
3597 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3598 // At this point code register contains smi tagged ASCII char code.
3599 STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
3600 __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
3601 __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3602 __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
3607 void StringCharFromCodeGenerator::GenerateSlow(
3608 MacroAssembler* masm,
3609 const RuntimeCallHelper& call_helper) {
3610 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3612 __ Bind(&slow_case_);
3613 call_helper.BeforeCall(masm);
3615 __ CallRuntime(Runtime::kCharFromCode, 1);
3616 __ Mov(result_, x0);
3617 call_helper.AfterCall(masm);
3620 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3624 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
3625 // Inputs are in x0 (lhs) and x1 (rhs).
3626 ASSERT(state_ == CompareIC::SMI);
3627 ASM_LOCATION("ICCompareStub[Smis]");
3629 // Bail out (to 'miss') unless both x0 and x1 are smis.
3630 __ JumpIfEitherNotSmi(x0, x1, &miss);
3632 if (GetCondition() == eq) {
3633 // For equality we do not care about the sign of the result.
3636 // Untag before subtracting to avoid handling overflow.
3638 __ Sub(x0, x1, Operand::UntagSmi(x0));
3647 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
3648 ASSERT(state_ == CompareIC::NUMBER);
3649 ASM_LOCATION("ICCompareStub[HeapNumbers]");
3651 Label unordered, maybe_undefined1, maybe_undefined2;
3652 Label miss, handle_lhs, values_in_d_regs;
3653 Label untag_rhs, untag_lhs;
3655 Register result = x0;
3658 FPRegister rhs_d = d0;
3659 FPRegister lhs_d = d1;
3661 if (left_ == CompareIC::SMI) {
3662 __ JumpIfNotSmi(lhs, &miss);
3664 if (right_ == CompareIC::SMI) {
3665 __ JumpIfNotSmi(rhs, &miss);
3668 __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
3669 __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
3671 // Load rhs if it's a heap number.
3672 __ JumpIfSmi(rhs, &handle_lhs);
3673 __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3675 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
3677 // Load lhs if it's a heap number.
3678 __ Bind(&handle_lhs);
3679 __ JumpIfSmi(lhs, &values_in_d_regs);
3680 __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3682 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
3684 __ Bind(&values_in_d_regs);
3685 __ Fcmp(lhs_d, rhs_d);
3686 __ B(vs, &unordered); // Overflow flag set if either is NaN.
3687 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
3688 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
3689 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
3692 __ Bind(&unordered);
3693 ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
3694 CompareIC::GENERIC);
3695 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3697 __ Bind(&maybe_undefined1);
3698 if (Token::IsOrderedRelationalCompareOp(op_)) {
3699 __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
3700 __ JumpIfSmi(lhs, &unordered);
3701 __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
3705 __ Bind(&maybe_undefined2);
3706 if (Token::IsOrderedRelationalCompareOp(op_)) {
3707 __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
3715 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3716 ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
3717 ASM_LOCATION("ICCompareStub[InternalizedStrings]");
3720 Register result = x0;
3724 // Check that both operands are heap objects.
3725 __ JumpIfEitherSmi(lhs, rhs, &miss);
3727 // Check that both operands are internalized strings.
3728 Register rhs_map = x10;
3729 Register lhs_map = x11;
3730 Register rhs_type = x10;
3731 Register lhs_type = x11;
3732 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3733 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3734 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
3735 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
3737 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
3738 __ Orr(x12, lhs_type, rhs_type);
3739 __ TestAndBranchIfAnySet(
3740 x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
3742 // Internalized strings are compared by identity.
3743 STATIC_ASSERT(EQUAL == 0);
3745 __ Cset(result, ne);
3753 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
3754 ASSERT(state_ == CompareIC::UNIQUE_NAME);
3755 ASM_LOCATION("ICCompareStub[UniqueNames]");
3756 ASSERT(GetCondition() == eq);
3759 Register result = x0;
3763 Register lhs_instance_type = w2;
3764 Register rhs_instance_type = w3;
3766 // Check that both operands are heap objects.
3767 __ JumpIfEitherSmi(lhs, rhs, &miss);
3769 // Check that both operands are unique names. This leaves the instance
3770 // types loaded in tmp1 and tmp2.
3771 __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
3772 __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
3773 __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
3774 __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
3776 // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
3777 // should have kInternalizedTag set.
3778 __ JumpIfNotUniqueName(lhs_instance_type, &miss);
3779 __ JumpIfNotUniqueName(rhs_instance_type, &miss);
3781 // Unique names are compared by identity.
3782 STATIC_ASSERT(EQUAL == 0);
3784 __ Cset(result, ne);
3792 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
3793 ASSERT(state_ == CompareIC::STRING);
3794 ASM_LOCATION("ICCompareStub[Strings]");
3798 bool equality = Token::IsEqualityOp(op_);
3800 Register result = x0;
3804 // Check that both operands are heap objects.
3805 __ JumpIfEitherSmi(rhs, lhs, &miss);
3807 // Check that both operands are strings.
3808 Register rhs_map = x10;
3809 Register lhs_map = x11;
3810 Register rhs_type = x10;
3811 Register lhs_type = x11;
3812 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3813 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3814 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
3815 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
3816 STATIC_ASSERT(kNotStringTag != 0);
3817 __ Orr(x12, lhs_type, rhs_type);
3818 __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
3820 // Fast check for identical strings.
3823 __ B(ne, ¬_equal);
3824 __ Mov(result, EQUAL);
3827 __ Bind(¬_equal);
3828 // Handle not identical strings
3830 // Check that both strings are internalized strings. If they are, we're done
3831 // because we already know they are not identical. We know they are both
3834 ASSERT(GetCondition() == eq);
3835 STATIC_ASSERT(kInternalizedTag == 0);
3836 Label not_internalized_strings;
3837 __ Orr(x12, lhs_type, rhs_type);
3838 __ TestAndBranchIfAnySet(
3839 x12, kIsNotInternalizedMask, ¬_internalized_strings);
3840 // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
3842 __ Bind(¬_internalized_strings);
3845 // Check that both strings are sequential ASCII.
3847 __ JumpIfBothInstanceTypesAreNotSequentialAscii(
3848 lhs_type, rhs_type, x12, x13, &runtime);
3850 // Compare flat ASCII strings. Returns when done.
3852 StringCompareStub::GenerateFlatAsciiStringEquals(
3853 masm, lhs, rhs, x10, x11, x12);
3855 StringCompareStub::GenerateCompareFlatAsciiStrings(
3856 masm, lhs, rhs, x10, x11, x12, x13);
3859 // Handle more complex cases in runtime.
3863 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3865 __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
3873 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
3874 ASSERT(state_ == CompareIC::OBJECT);
3875 ASM_LOCATION("ICCompareStub[Objects]");
3879 Register result = x0;
3883 __ JumpIfEitherSmi(rhs, lhs, &miss);
3885 __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
3886 __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
3888 ASSERT(GetCondition() == eq);
3889 __ Sub(result, rhs, lhs);
3897 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
3898 ASM_LOCATION("ICCompareStub[KnownObjects]");
3902 Register result = x0;
3906 __ JumpIfEitherSmi(rhs, lhs, &miss);
3908 Register rhs_map = x10;
3909 Register lhs_map = x11;
3910 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3911 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3912 __ Cmp(rhs_map, Operand(known_map_));
3914 __ Cmp(lhs_map, Operand(known_map_));
3917 __ Sub(result, rhs, lhs);
3925 // This method handles the case where a compare stub had the wrong
3926 // implementation. It calls a miss handler, which re-writes the stub. All other
3927 // ICCompareStub::Generate* methods should fall back into this one if their
3928 // operands were not the expected types.
3929 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
3930 ASM_LOCATION("ICCompareStub[Miss]");
3932 Register stub_entry = x11;
3934 ExternalReference miss =
3935 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3937 FrameScope scope(masm, StackFrame::INTERNAL);
3940 Register right = x0;
3941 // Preserve some caller-saved registers.
3942 __ Push(x1, x0, lr);
3943 // Push the arguments.
3944 __ Mov(op, Smi::FromInt(op_));
3945 __ Push(left, right, op);
3947 // Call the miss handler. This also pops the arguments.
3948 __ CallExternalReference(miss, 3);
3950 // Compute the entry point of the rewritten stub.
3951 __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
3952 // Restore caller-saved registers.
3956 // Tail-call to the new stub.
3957 __ Jump(stub_entry);
3961 void StringHelper::GenerateHashInit(MacroAssembler* masm,
3963 Register character) {
3964 ASSERT(!AreAliased(hash, character));
3966 // hash = character + (character << 10);
3967 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
3968 // Untag smi seed and add the character.
3969 __ Add(hash, character, Operand(hash, LSR, kSmiShift));
3971 // Compute hashes modulo 2^32 using a 32-bit W register.
3972 Register hash_w = hash.W();
3974 // hash += hash << 10;
3975 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
3976 // hash ^= hash >> 6;
3977 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
3981 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3983 Register character) {
3984 ASSERT(!AreAliased(hash, character));
3986 // hash += character;
3987 __ Add(hash, hash, character);
3989 // Compute hashes modulo 2^32 using a 32-bit W register.
3990 Register hash_w = hash.W();
3992 // hash += hash << 10;
3993 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
3994 // hash ^= hash >> 6;
3995 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
3999 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
4002 // Compute hashes modulo 2^32 using a 32-bit W register.
4003 Register hash_w = hash.W();
4004 Register scratch_w = scratch.W();
4005 ASSERT(!AreAliased(hash_w, scratch_w));
4007 // hash += hash << 3;
4008 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
4009 // hash ^= hash >> 11;
4010 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
4011 // hash += hash << 15;
4012 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
4014 __ Ands(hash_w, hash_w, String::kHashBitMask);
4016 // if (hash == 0) hash = 27;
4017 __ Mov(scratch_w, StringHasher::kZeroHash);
4018 __ Csel(hash_w, scratch_w, hash_w, eq);
4022 void SubStringStub::Generate(MacroAssembler* masm) {
4023 ASM_LOCATION("SubStringStub::Generate");
4026 // Stack frame on entry.
4027 // lr: return address
4028 // jssp[0]: substring "to" offset
4029 // jssp[8]: substring "from" offset
4030 // jssp[16]: pointer to string object
4032 // This stub is called from the native-call %_SubString(...), so
4033 // nothing can be assumed about the arguments. It is tested that:
4034 // "string" is a sequential string,
4035 // both "from" and "to" are smis, and
4036 // 0 <= from <= to <= string.length (in debug mode.)
4037 // If any of these assumptions fail, we call the runtime system.
4039 static const int kToOffset = 0 * kPointerSize;
4040 static const int kFromOffset = 1 * kPointerSize;
4041 static const int kStringOffset = 2 * kPointerSize;
4044 Register from = x15;
4045 Register input_string = x10;
4046 Register input_length = x11;
4047 Register input_type = x12;
4048 Register result_string = x0;
4049 Register result_length = x1;
4052 __ Peek(to, kToOffset);
4053 __ Peek(from, kFromOffset);
4055 // Check that both from and to are smis. If not, jump to runtime.
4056 __ JumpIfEitherNotSmi(from, to, &runtime);
4060 // Calculate difference between from and to. If to < from, branch to runtime.
4061 __ Subs(result_length, to, from);
4064 // Check from is positive.
4065 __ Tbnz(from, kWSignBit, &runtime);
4067 // Make sure first argument is a string.
4068 __ Peek(input_string, kStringOffset);
4069 __ JumpIfSmi(input_string, &runtime);
4070 __ IsObjectJSStringType(input_string, input_type, &runtime);
4073 __ Cmp(result_length, 1);
4074 __ B(eq, &single_char);
4076 // Short-cut for the case of trivial substring.
4078 __ Ldrsw(input_length,
4079 UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
4081 __ Cmp(result_length, input_length);
4082 __ CmovX(x0, input_string, eq);
4083 // Return original string.
4084 __ B(eq, &return_x0);
4086 // Longer than original string's length or negative: unsafe arguments.
4089 // Shorter than original string's length: an actual substring.
4091 // x0 to substring end character offset
4092 // x1 result_length length of substring result
4093 // x10 input_string pointer to input string object
4094 // x10 unpacked_string pointer to unpacked string object
4095 // x11 input_length length of input string
4096 // x12 input_type instance type of input string
4097 // x15 from substring start character offset
4099 // Deal with different string types: update the index if necessary and put
4100 // the underlying string into register unpacked_string.
4101 Label underlying_unpacked, sliced_string, seq_or_external_string;
4102 Label update_instance_type;
4103 // If the string is not indirect, it can only be sequential or external.
4104 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
4105 STATIC_ASSERT(kIsIndirectStringMask != 0);
4107 // Test for string types, and branch/fall through to appropriate unpacking
4109 __ Tst(input_type, kIsIndirectStringMask);
4110 __ B(eq, &seq_or_external_string);
4111 __ Tst(input_type, kSlicedNotConsMask);
4112 __ B(ne, &sliced_string);
4114 Register unpacked_string = input_string;
4116 // Cons string. Check whether it is flat, then fetch first part.
4117 __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
4118 __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
4119 __ Ldr(unpacked_string,
4120 FieldMemOperand(input_string, ConsString::kFirstOffset));
4121 __ B(&update_instance_type);
4123 __ Bind(&sliced_string);
4124 // Sliced string. Fetch parent and correct start index by offset.
4126 UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
4127 __ Add(from, from, temp);
4128 __ Ldr(unpacked_string,
4129 FieldMemOperand(input_string, SlicedString::kParentOffset));
4131 __ Bind(&update_instance_type);
4132 __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
4133 __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
4134 // Now control must go to &underlying_unpacked. Since the no code is generated
4135 // before then we fall through instead of generating a useless branch.
4137 __ Bind(&seq_or_external_string);
4138 // Sequential or external string. Registers unpacked_string and input_string
4139 // alias, so there's nothing to do here.
4140 // Note that if code is added here, the above code must be updated.
4142 // x0 result_string pointer to result string object (uninit)
4143 // x1 result_length length of substring result
4144 // x10 unpacked_string pointer to unpacked string object
4145 // x11 input_length length of input string
4146 // x12 input_type instance type of input string
4147 // x15 from substring start character offset
4148 __ Bind(&underlying_unpacked);
4150 if (FLAG_string_slices) {
4152 __ Cmp(result_length, SlicedString::kMinLength);
4153 // Short slice. Copy instead of slicing.
4154 __ B(lt, ©_routine);
4155 // Allocate new sliced string. At this point we do not reload the instance
4156 // type including the string encoding because we simply rely on the info
4157 // provided by the original string. It does not matter if the original
4158 // string's encoding is wrong because we always have to recheck encoding of
4159 // the newly created string's parent anyway due to externalized strings.
4160 Label two_byte_slice, set_slice_header;
4161 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
4162 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
4163 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
4164 __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
4166 __ B(&set_slice_header);
4168 __ Bind(&two_byte_slice);
4169 __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
4172 __ Bind(&set_slice_header);
4174 __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
4175 __ Str(unpacked_string,
4176 FieldMemOperand(result_string, SlicedString::kParentOffset));
4179 __ Bind(©_routine);
4182 // x0 result_string pointer to result string object (uninit)
4183 // x1 result_length length of substring result
4184 // x10 unpacked_string pointer to unpacked string object
4185 // x11 input_length length of input string
4186 // x12 input_type instance type of input string
4187 // x13 unpacked_char0 pointer to first char of unpacked string (uninit)
4188 // x13 substring_char0 pointer to first char of substring (uninit)
4189 // x14 result_char0 pointer to first char of result (uninit)
4190 // x15 from substring start character offset
4191 Register unpacked_char0 = x13;
4192 Register substring_char0 = x13;
4193 Register result_char0 = x14;
4194 Label two_byte_sequential, sequential_string, allocate_result;
4195 STATIC_ASSERT(kExternalStringTag != 0);
4196 STATIC_ASSERT(kSeqStringTag == 0);
4198 __ Tst(input_type, kExternalStringTag);
4199 __ B(eq, &sequential_string);
4201 __ Tst(input_type, kShortExternalStringTag);
4203 __ Ldr(unpacked_char0,
4204 FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
4205 // unpacked_char0 points to the first character of the underlying string.
4206 __ B(&allocate_result);
4208 __ Bind(&sequential_string);
4209 // Locate first character of underlying subject string.
4210 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
4211 __ Add(unpacked_char0, unpacked_string,
4212 SeqOneByteString::kHeaderSize - kHeapObjectTag);
4214 __ Bind(&allocate_result);
4215 // Sequential ASCII string. Allocate the result.
4216 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
4217 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
4219 // Allocate and copy the resulting ASCII string.
4220 __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
4222 // Locate first character of substring to copy.
4223 __ Add(substring_char0, unpacked_char0, from);
4225 // Locate first character of result.
4226 __ Add(result_char0, result_string,
4227 SeqOneByteString::kHeaderSize - kHeapObjectTag);
4229 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4230 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
4233 // Allocate and copy the resulting two-byte string.
4234 __ Bind(&two_byte_sequential);
4235 __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
4237 // Locate first character of substring to copy.
4238 __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
4240 // Locate first character of result.
4241 __ Add(result_char0, result_string,
4242 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
4244 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4245 __ Add(result_length, result_length, result_length);
4246 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
4248 __ Bind(&return_x0);
4249 Counters* counters = isolate()->counters();
4250 __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
4255 __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
4257 __ bind(&single_char);
4258 // x1: result_length
4259 // x10: input_string
4261 // x15: from (untagged)
4263 StringCharAtGenerator generator(
4264 input_string, from, result_length, x0,
4265 &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
4266 generator.GenerateFast(masm);
4269 generator.SkipSlow(masm, &runtime);
4273 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
4278 Register scratch3) {
4279 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
4280 Register result = x0;
4281 Register left_length = scratch1;
4282 Register right_length = scratch2;
4284 // Compare lengths. If lengths differ, strings can't be equal. Lengths are
4285 // smis, and don't need to be untagged.
4286 Label strings_not_equal, check_zero_length;
4287 __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
4288 __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
4289 __ Cmp(left_length, right_length);
4290 __ B(eq, &check_zero_length);
4292 __ Bind(&strings_not_equal);
4293 __ Mov(result, Smi::FromInt(NOT_EQUAL));
4296 // Check if the length is zero. If so, the strings must be equal (and empty.)
4297 Label compare_chars;
4298 __ Bind(&check_zero_length);
4299 STATIC_ASSERT(kSmiTag == 0);
4300 __ Cbnz(left_length, &compare_chars);
4301 __ Mov(result, Smi::FromInt(EQUAL));
4304 // Compare characters. Falls through if all characters are equal.
4305 __ Bind(&compare_chars);
4306 GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
4307 scratch3, &strings_not_equal);
4309 // Characters in strings are equal.
4310 __ Mov(result, Smi::FromInt(EQUAL));
4315 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
4321 Register scratch4) {
4322 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
4323 Label result_not_equal, compare_lengths;
4325 // Find minimum length and length difference.
4326 Register length_delta = scratch3;
4327 __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
4328 __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
4329 __ Subs(length_delta, scratch1, scratch2);
4331 Register min_length = scratch1;
4332 __ Csel(min_length, scratch2, scratch1, gt);
4333 __ Cbz(min_length, &compare_lengths);
4336 GenerateAsciiCharsCompareLoop(masm,
4337 left, right, min_length, scratch2, scratch4,
4340 // Compare lengths - strings up to min-length are equal.
4341 __ Bind(&compare_lengths);
4343 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
4345 // Use length_delta as result if it's zero.
4346 Register result = x0;
4347 __ Subs(result, length_delta, 0);
4349 __ Bind(&result_not_equal);
4350 Register greater = x10;
4351 Register less = x11;
4352 __ Mov(greater, Smi::FromInt(GREATER));
4353 __ Mov(less, Smi::FromInt(LESS));
4354 __ CmovX(result, greater, gt);
4355 __ CmovX(result, less, lt);
4360 void StringCompareStub::GenerateAsciiCharsCompareLoop(
4361 MacroAssembler* masm,
4367 Label* chars_not_equal) {
4368 ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
4370 // Change index to run from -length to -1 by adding length to string
4371 // start. This means that loop ends when index reaches zero, which
4372 // doesn't need an additional compare.
4373 __ SmiUntag(length);
4374 __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
4375 __ Add(left, left, scratch1);
4376 __ Add(right, right, scratch1);
4378 Register index = length;
4379 __ Neg(index, length); // index = -length;
4384 __ Ldrb(scratch1, MemOperand(left, index));
4385 __ Ldrb(scratch2, MemOperand(right, index));
4386 __ Cmp(scratch1, scratch2);
4387 __ B(ne, chars_not_equal);
4388 __ Add(index, index, 1);
4389 __ Cbnz(index, &loop);
4393 void StringCompareStub::Generate(MacroAssembler* masm) {
4396 Counters* counters = isolate()->counters();
4398 // Stack frame on entry.
4399 // sp[0]: right string
4400 // sp[8]: left string
4401 Register right = x10;
4402 Register left = x11;
4403 Register result = x0;
4404 __ Pop(right, left);
4407 __ Subs(result, right, left);
4408 __ B(ne, ¬_same);
4409 STATIC_ASSERT(EQUAL == 0);
4410 __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
4415 // Check that both objects are sequential ASCII strings.
4416 __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
4418 // Compare flat ASCII strings natively. Remove arguments from stack first,
4419 // as this function will generate a return.
4420 __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
4421 GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
4425 // Push arguments back on to the stack.
4426 // sp[0] = right string
4427 // sp[8] = left string.
4428 __ Push(left, right);
4430 // Call the runtime.
4431 // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
4432 __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
4436 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
4437 // ----------- S t a t e -------------
4440 // -- lr : return address
4441 // -----------------------------------
4443 // Load x2 with the allocation site. We stick an undefined dummy value here
4444 // and replace it with the real allocation site later when we instantiate this
4445 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
4446 __ LoadObject(x2, handle(isolate()->heap()->undefined_value()));
4448 // Make sure that we actually patched the allocation site.
4449 if (FLAG_debug_code) {
4450 __ AssertNotSmi(x2, kExpectedAllocationSite);
4451 __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
4452 __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
4453 kExpectedAllocationSite);
4456 // Tail call into the stub that handles binary operations with allocation
4458 BinaryOpWithAllocationSiteStub stub(isolate(), state_);
4459 __ TailCallStub(&stub);
4463 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4464 // We need some extra registers for this stub, they have been allocated
4465 // but we need to save them before using them.
4468 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4469 Label dont_need_remembered_set;
4471 Register value = regs_.scratch0();
4472 __ Ldr(value, MemOperand(regs_.address()));
4473 __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
4475 __ CheckPageFlagSet(regs_.object(),
4477 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4478 &dont_need_remembered_set);
4480 // First notify the incremental marker if necessary, then update the
4482 CheckNeedsToInformIncrementalMarker(
4483 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4484 InformIncrementalMarker(masm);
4485 regs_.Restore(masm); // Restore the extra scratch registers we used.
4487 __ RememberedSetHelper(object_,
4491 MacroAssembler::kReturnAtEnd);
4493 __ Bind(&dont_need_remembered_set);
4496 CheckNeedsToInformIncrementalMarker(
4497 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4498 InformIncrementalMarker(masm);
4499 regs_.Restore(masm); // Restore the extra scratch registers we used.
4504 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4505 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
4507 x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
4508 ASSERT(!address.Is(regs_.object()));
4509 ASSERT(!address.Is(x0));
4510 __ Mov(address, regs_.address());
4511 __ Mov(x0, regs_.object());
4512 __ Mov(x1, address);
4513 __ Mov(x2, ExternalReference::isolate_address(isolate()));
4515 AllowExternalCallThatCantCauseGC scope(masm);
4516 ExternalReference function =
4517 ExternalReference::incremental_marking_record_write_function(
4519 __ CallCFunction(function, 3, 0);
4521 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
4525 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4526 MacroAssembler* masm,
4527 OnNoNeedToInformIncrementalMarker on_no_need,
4530 Label need_incremental;
4531 Label need_incremental_pop_scratch;
4533 Register mem_chunk = regs_.scratch0();
4534 Register counter = regs_.scratch1();
4535 __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
4537 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
4538 __ Subs(counter, counter, 1);
4540 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
4541 __ B(mi, &need_incremental);
4543 // If the object is not black we don't have to inform the incremental marker.
4544 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4546 regs_.Restore(masm); // Restore the extra scratch registers we used.
4547 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4548 __ RememberedSetHelper(object_,
4552 MacroAssembler::kReturnAtEnd);
4558 // Get the value from the slot.
4559 Register value = regs_.scratch0();
4560 __ Ldr(value, MemOperand(regs_.address()));
4562 if (mode == INCREMENTAL_COMPACTION) {
4563 Label ensure_not_white;
4565 __ CheckPageFlagClear(value,
4567 MemoryChunk::kEvacuationCandidateMask,
4570 __ CheckPageFlagClear(regs_.object(),
4572 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4575 __ Bind(&ensure_not_white);
4578 // We need extra registers for this, so we push the object and the address
4579 // register temporarily.
4580 __ Push(regs_.address(), regs_.object());
4581 __ EnsureNotWhite(value,
4582 regs_.scratch1(), // Scratch.
4583 regs_.object(), // Scratch.
4584 regs_.address(), // Scratch.
4585 regs_.scratch2(), // Scratch.
4586 &need_incremental_pop_scratch);
4587 __ Pop(regs_.object(), regs_.address());
4589 regs_.Restore(masm); // Restore the extra scratch registers we used.
4590 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4591 __ RememberedSetHelper(object_,
4595 MacroAssembler::kReturnAtEnd);
4600 __ Bind(&need_incremental_pop_scratch);
4601 __ Pop(regs_.object(), regs_.address());
4603 __ Bind(&need_incremental);
4604 // Fall through when we need to inform the incremental marker.
4608 void RecordWriteStub::Generate(MacroAssembler* masm) {
4609 Label skip_to_incremental_noncompacting;
4610 Label skip_to_incremental_compacting;
4612 // We patch these two first instructions back and forth between a nop and
4613 // real branch when we start and stop incremental heap marking.
4614 // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
4616 // See RecordWriteStub::Patch for details.
4618 InstructionAccurateScope scope(masm, 2);
4619 __ adr(xzr, &skip_to_incremental_noncompacting);
4620 __ adr(xzr, &skip_to_incremental_compacting);
4623 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4624 __ RememberedSetHelper(object_,
4628 MacroAssembler::kReturnAtEnd);
4632 __ Bind(&skip_to_incremental_noncompacting);
4633 GenerateIncremental(masm, INCREMENTAL);
4635 __ Bind(&skip_to_incremental_compacting);
4636 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4640 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4641 // x0 value element value to store
4642 // x3 index_smi element index as smi
4643 // sp[0] array_index_smi array literal index in function as smi
4644 // sp[1] array array literal
4646 Register value = x0;
4647 Register index_smi = x3;
4649 Register array = x1;
4650 Register array_map = x2;
4651 Register array_index_smi = x4;
4652 __ PeekPair(array_index_smi, array, 0);
4653 __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
4655 Label double_elements, smi_element, fast_elements, slow_elements;
4656 Register bitfield2 = x10;
4657 __ Ldrb(bitfield2, FieldMemOperand(array_map, Map::kBitField2Offset));
4659 // Jump if array's ElementsKind is not FAST*_SMI_ELEMENTS, FAST_ELEMENTS or
4660 // FAST_HOLEY_ELEMENTS.
4661 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4662 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4663 STATIC_ASSERT(FAST_ELEMENTS == 2);
4664 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4665 __ Cmp(bitfield2, Map::kMaximumBitField2FastHoleyElementValue);
4666 __ B(hi, &double_elements);
4668 __ JumpIfSmi(value, &smi_element);
4670 // Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
4671 __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::ElementsKindBits::kShift),
4674 // Store into the array literal requires an elements transition. Call into
4676 __ Bind(&slow_elements);
4677 __ Push(array, index_smi, value);
4678 __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4679 __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
4680 __ Push(x11, array_index_smi);
4681 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4683 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4684 __ Bind(&fast_elements);
4685 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4686 __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
4687 __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
4688 __ Str(value, MemOperand(x11));
4689 // Update the write barrier for the array store.
4690 __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
4691 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4694 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4695 // and value is Smi.
4696 __ Bind(&smi_element);
4697 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4698 __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
4699 __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
4702 __ Bind(&double_elements);
4703 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4704 __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0,
4710 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4711 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4712 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4713 int parameter_count_offset =
4714 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4715 __ Ldr(x1, MemOperand(fp, parameter_count_offset));
4716 if (function_mode_ == JS_FUNCTION_STUB_MODE) {
4719 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4721 // Return to IC Miss stub, continuation still on stack.
4726 static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
4727 // The entry hook is a "BumpSystemStackPointer" instruction (sub),
4728 // followed by a "Push lr" instruction, followed by a call.
4730 Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
4731 if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
4732 // If ALWAYS_ALIGN_CSP then there will be an extra bic instruction in
4733 // "BumpSystemStackPointer".
4734 size += kInstructionSize;
4740 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4741 if (masm->isolate()->function_entry_hook() != NULL) {
4742 ProfileEntryHookStub stub(masm->isolate());
4743 Assembler::BlockConstPoolScope no_const_pools(masm);
4744 DontEmitDebugCodeScope no_debug_code(masm);
4745 Label entry_hook_call_start;
4746 __ Bind(&entry_hook_call_start);
4749 ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
4750 GetProfileEntryHookCallSize(masm));
4757 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4758 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
4760 // Save all kCallerSaved registers (including lr), since this can be called
4762 // TODO(jbramley): What about FP registers?
4763 __ PushCPURegList(kCallerSaved);
4764 ASSERT(kCallerSaved.IncludesAliasOf(lr));
4765 const int kNumSavedRegs = kCallerSaved.Count();
4767 // Compute the function's address as the first argument.
4768 __ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
4770 #if V8_HOST_ARCH_ARM64
4771 uintptr_t entry_hook =
4772 reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
4773 __ Mov(x10, entry_hook);
4775 // Under the simulator we need to indirect the entry hook through a trampoline
4776 // function at a known address.
4777 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4778 __ Mov(x10, Operand(ExternalReference(&dispatcher,
4779 ExternalReference::BUILTIN_CALL,
4781 // It additionally takes an isolate as a third parameter
4782 __ Mov(x2, ExternalReference::isolate_address(isolate()));
4785 // The caller's return address is above the saved temporaries.
4786 // Grab its location for the second argument to the hook.
4787 __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
4790 // Create a dummy frame, as CallCFunction requires this.
4791 FrameScope frame(masm, StackFrame::MANUAL);
4792 __ CallCFunction(x10, 2, 0);
4795 __ PopCPURegList(kCallerSaved);
4800 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4801 // When calling into C++ code the stack pointer must be csp.
4802 // Therefore this code must use csp for peek/poke operations when the
4803 // stub is generated. When the stub is called
4804 // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame
4805 // and configure the stack pointer *before* doing the call.
4806 const Register old_stack_pointer = __ StackPointer();
4807 __ SetStackPointer(csp);
4809 // Put return address on the stack (accessible to GC through exit frame pc).
4811 // Call the C++ function.
4813 // Return to calling code.
4815 __ AssertFPCRState();
4818 __ SetStackPointer(old_stack_pointer);
4821 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4823 // Make sure the caller configured the stack pointer (see comment in
4824 // DirectCEntryStub::Generate).
4825 ASSERT(csp.Is(__ StackPointer()));
4828 reinterpret_cast<intptr_t>(GetCode().location());
4829 __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
4830 __ Mov(x10, target);
4831 // Branch to the stub.
4836 // Probe the name dictionary in the 'elements' register.
4837 // Jump to the 'done' label if a property with the given name is found.
4838 // Jump to the 'miss' label otherwise.
4840 // If lookup was successful 'scratch2' will be equal to elements + 4 * index.
4841 // 'elements' and 'name' registers are preserved on miss.
4842 void NameDictionaryLookupStub::GeneratePositiveLookup(
4843 MacroAssembler* masm,
4849 Register scratch2) {
4850 ASSERT(!AreAliased(elements, name, scratch1, scratch2));
4852 // Assert that name contains a string.
4853 __ AssertName(name);
4855 // Compute the capacity mask.
4856 __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
4857 __ Sub(scratch1, scratch1, 1);
4859 // Generate an unrolled loop that performs a few probes before giving up.
4860 for (int i = 0; i < kInlinedProbes; i++) {
4861 // Compute the masked index: (hash + i + i * i) & mask.
4862 __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4864 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4865 // the hash in a separate instruction. The value hash + i + i * i is right
4866 // shifted in the following and instruction.
4867 ASSERT(NameDictionary::GetProbeOffset(i) <
4868 1 << (32 - Name::kHashFieldOffset));
4869 __ Add(scratch2, scratch2, Operand(
4870 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4872 __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
4874 // Scale the index by multiplying by the element size.
4875 ASSERT(NameDictionary::kEntrySize == 3);
4876 __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
4878 // Check if the key is identical to the name.
4879 UseScratchRegisterScope temps(masm);
4880 Register scratch3 = temps.AcquireX();
4881 __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
4882 __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
4883 __ Cmp(name, scratch3);
4887 // The inlined probes didn't find the entry.
4888 // Call the complete stub to scan the whole dictionary.
4890 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
4891 spill_list.Combine(lr);
4892 spill_list.Remove(scratch1);
4893 spill_list.Remove(scratch2);
4895 __ PushCPURegList(spill_list);
4898 ASSERT(!elements.is(x1));
4900 __ Mov(x0, elements);
4902 __ Mov(x0, elements);
4907 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4909 __ Cbz(x0, ¬_found);
4910 __ Mov(scratch2, x2); // Move entry index into scratch2.
4911 __ PopCPURegList(spill_list);
4914 __ Bind(¬_found);
4915 __ PopCPURegList(spill_list);
4920 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4924 Register properties,
4926 Register scratch0) {
4927 ASSERT(!AreAliased(receiver, properties, scratch0));
4928 ASSERT(name->IsUniqueName());
4929 // If names of slots in range from 1 to kProbes - 1 for the hash value are
4930 // not equal to the name and kProbes-th slot is not used (its name is the
4931 // undefined value), it guarantees the hash table doesn't contain the
4932 // property. It's true even if some slots represent deleted properties
4933 // (their names are the hole value).
4934 for (int i = 0; i < kInlinedProbes; i++) {
4935 // scratch0 points to properties hash.
4936 // Compute the masked index: (hash + i + i * i) & mask.
4937 Register index = scratch0;
4938 // Capacity is smi 2^n.
4939 __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
4940 __ Sub(index, index, 1);
4941 __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
4943 // Scale the index by multiplying by the entry size.
4944 ASSERT(NameDictionary::kEntrySize == 3);
4945 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
4947 Register entity_name = scratch0;
4948 // Having undefined at this place means the name is not contained.
4949 Register tmp = index;
4950 __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
4951 __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4953 __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
4955 // Stop if found the property.
4956 __ Cmp(entity_name, Operand(name));
4960 __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
4962 // Check if the entry name is not a unique name.
4963 __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4964 __ Ldrb(entity_name,
4965 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4966 __ JumpIfNotUniqueName(entity_name, miss);
4970 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
4971 spill_list.Combine(lr);
4972 spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
4974 __ PushCPURegList(spill_list);
4976 __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4977 __ Mov(x1, Operand(name));
4978 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
4980 // Move stub return value to scratch0. Note that scratch0 is not included in
4981 // spill_list and won't be clobbered by PopCPURegList.
4982 __ Mov(scratch0, x0);
4983 __ PopCPURegList(spill_list);
4985 __ Cbz(scratch0, done);
4990 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4991 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4992 // we cannot call anything that could cause a GC from this stub.
4994 // Arguments are in x0 and x1:
4995 // x0: property dictionary.
4996 // x1: the name of the property we are looking for.
4998 // Return value is in x0 and is zero if lookup failed, non zero otherwise.
4999 // If the lookup is successful, x2 will contains the index of the entry.
5001 Register result = x0;
5002 Register dictionary = x0;
5004 Register index = x2;
5007 Register undefined = x5;
5008 Register entry_key = x6;
5010 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
5012 __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
5013 __ Sub(mask, mask, 1);
5015 __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
5016 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5018 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
5019 // Compute the masked index: (hash + i + i * i) & mask.
5020 // Capacity is smi 2^n.
5022 // Add the probe offset (i + i * i) left shifted to avoid right shifting
5023 // the hash in a separate instruction. The value hash + i + i * i is right
5024 // shifted in the following and instruction.
5025 ASSERT(NameDictionary::GetProbeOffset(i) <
5026 1 << (32 - Name::kHashFieldOffset));
5028 NameDictionary::GetProbeOffset(i) << Name::kHashShift);
5030 __ Mov(index, hash);
5032 __ And(index, mask, Operand(index, LSR, Name::kHashShift));
5034 // Scale the index by multiplying by the entry size.
5035 ASSERT(NameDictionary::kEntrySize == 3);
5036 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
5038 __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
5039 __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
5041 // Having undefined at this place means the name is not contained.
5042 __ Cmp(entry_key, undefined);
5043 __ B(eq, ¬_in_dictionary);
5045 // Stop if found the property.
5046 __ Cmp(entry_key, key);
5047 __ B(eq, &in_dictionary);
5049 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
5050 // Check if the entry name is not a unique name.
5051 __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
5052 __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
5053 __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
5057 __ Bind(&maybe_in_dictionary);
5058 // If we are doing negative lookup then probing failure should be
5059 // treated as a lookup success. For positive lookup, probing failure
5060 // should be treated as lookup failure.
5061 if (mode_ == POSITIVE_LOOKUP) {
5066 __ Bind(&in_dictionary);
5070 __ Bind(¬_in_dictionary);
5077 static void CreateArrayDispatch(MacroAssembler* masm,
5078 AllocationSiteOverrideMode mode) {
5079 ASM_LOCATION("CreateArrayDispatch");
5080 if (mode == DISABLE_ALLOCATION_SITES) {
5081 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
5082 __ TailCallStub(&stub);
5084 } else if (mode == DONT_OVERRIDE) {
5087 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
5088 for (int i = 0; i <= last_index; ++i) {
5090 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
5091 // TODO(jbramley): Is this the best way to handle this? Can we make the
5092 // tail calls conditional, rather than hopping over each one?
5093 __ CompareAndBranch(kind, candidate_kind, ne, &next);
5094 T stub(masm->isolate(), candidate_kind);
5095 __ TailCallStub(&stub);
5099 // If we reached this point there is a problem.
5100 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5108 // TODO(jbramley): If this needs to be a special case, make it a proper template
5109 // specialization, and not a separate function.
5110 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
5111 AllocationSiteOverrideMode mode) {
5112 ASM_LOCATION("CreateArrayDispatchOneArgument");
5114 // x1 - constructor?
5115 // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
5116 // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
5117 // sp[0] - last argument
5119 Register allocation_site = x2;
5122 Label normal_sequence;
5123 if (mode == DONT_OVERRIDE) {
5124 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
5125 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
5126 STATIC_ASSERT(FAST_ELEMENTS == 2);
5127 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
5128 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
5129 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
5131 // Is the low bit set? If so, the array is holey.
5132 __ Tbnz(kind, 0, &normal_sequence);
5135 // Look at the last argument.
5136 // TODO(jbramley): What does a 0 argument represent?
5138 __ Cbz(x10, &normal_sequence);
5140 if (mode == DISABLE_ALLOCATION_SITES) {
5141 ElementsKind initial = GetInitialFastElementsKind();
5142 ElementsKind holey_initial = GetHoleyElementsKind(initial);
5144 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
5146 DISABLE_ALLOCATION_SITES);
5147 __ TailCallStub(&stub_holey);
5149 __ Bind(&normal_sequence);
5150 ArraySingleArgumentConstructorStub stub(masm->isolate(),
5152 DISABLE_ALLOCATION_SITES);
5153 __ TailCallStub(&stub);
5154 } else if (mode == DONT_OVERRIDE) {
5155 // We are going to create a holey array, but our kind is non-holey.
5156 // Fix kind and retry (only if we have an allocation site in the slot).
5157 __ Orr(kind, kind, 1);
5159 if (FLAG_debug_code) {
5160 __ Ldr(x10, FieldMemOperand(allocation_site, 0));
5161 __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
5163 __ Assert(eq, kExpectedAllocationSite);
5166 // Save the resulting elements kind in type info. We can't just store 'kind'
5167 // in the AllocationSite::transition_info field because elements kind is
5168 // restricted to a portion of the field; upper bits need to be left alone.
5169 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5170 __ Ldr(x11, FieldMemOperand(allocation_site,
5171 AllocationSite::kTransitionInfoOffset));
5172 __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
5173 __ Str(x11, FieldMemOperand(allocation_site,
5174 AllocationSite::kTransitionInfoOffset));
5176 __ Bind(&normal_sequence);
5178 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
5179 for (int i = 0; i <= last_index; ++i) {
5181 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
5182 __ CompareAndBranch(kind, candidate_kind, ne, &next);
5183 ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind);
5184 __ TailCallStub(&stub);
5188 // If we reached this point there is a problem.
5189 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5197 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
5198 int to_index = GetSequenceIndexFromFastElementsKind(
5199 TERMINAL_FAST_ELEMENTS_KIND);
5200 for (int i = 0; i <= to_index; ++i) {
5201 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5202 T stub(isolate, kind);
5204 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
5205 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
5212 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
5213 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5215 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5217 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5222 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
5224 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
5225 for (int i = 0; i < 2; i++) {
5226 // For internal arrays we only need a few things
5227 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
5229 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
5231 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
5237 void ArrayConstructorStub::GenerateDispatchToArrayStub(
5238 MacroAssembler* masm,
5239 AllocationSiteOverrideMode mode) {
5241 if (argument_count_ == ANY) {
5242 Label zero_case, n_case;
5243 __ Cbz(argc, &zero_case);
5248 CreateArrayDispatchOneArgument(masm, mode);
5250 __ Bind(&zero_case);
5252 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5256 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5258 } else if (argument_count_ == NONE) {
5259 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5260 } else if (argument_count_ == ONE) {
5261 CreateArrayDispatchOneArgument(masm, mode);
5262 } else if (argument_count_ == MORE_THAN_ONE) {
5263 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5270 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5271 ASM_LOCATION("ArrayConstructorStub::Generate");
5272 // ----------- S t a t e -------------
5273 // -- x0 : argc (only if argument_count_ == ANY)
5274 // -- x1 : constructor
5275 // -- x2 : AllocationSite or undefined
5276 // -- sp[0] : return address
5277 // -- sp[4] : last argument
5278 // -----------------------------------
5279 Register constructor = x1;
5280 Register allocation_site = x2;
5282 if (FLAG_debug_code) {
5283 // The array construct code is only set for the global and natives
5284 // builtin Array functions which always have maps.
5286 Label unexpected_map, map_ok;
5287 // Initial map for the builtin Array function should be a map.
5288 __ Ldr(x10, FieldMemOperand(constructor,
5289 JSFunction::kPrototypeOrInitialMapOffset));
5290 // Will both indicate a NULL and a Smi.
5291 __ JumpIfSmi(x10, &unexpected_map);
5292 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
5293 __ Bind(&unexpected_map);
5294 __ Abort(kUnexpectedInitialMapForArrayFunction);
5297 // We should either have undefined in the allocation_site register or a
5298 // valid AllocationSite.
5299 __ AssertUndefinedOrAllocationSite(allocation_site, x10);
5304 // Get the elements kind and case on that.
5305 __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
5308 UntagSmiFieldMemOperand(allocation_site,
5309 AllocationSite::kTransitionInfoOffset));
5310 __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
5311 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5314 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5318 void InternalArrayConstructorStub::GenerateCase(
5319 MacroAssembler* masm, ElementsKind kind) {
5320 Label zero_case, n_case;
5323 __ Cbz(argc, &zero_case);
5324 __ CompareAndBranch(argc, 1, ne, &n_case);
5327 if (IsFastPackedElementsKind(kind)) {
5330 // We might need to create a holey array; look at the first argument.
5332 __ Cbz(x10, &packed_case);
5334 InternalArraySingleArgumentConstructorStub
5335 stub1_holey(isolate(), GetHoleyElementsKind(kind));
5336 __ TailCallStub(&stub1_holey);
5338 __ Bind(&packed_case);
5340 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
5341 __ TailCallStub(&stub1);
5343 __ Bind(&zero_case);
5345 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
5346 __ TailCallStub(&stub0);
5350 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
5351 __ TailCallStub(&stubN);
5355 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5356 // ----------- S t a t e -------------
5358 // -- x1 : constructor
5359 // -- sp[0] : return address
5360 // -- sp[4] : last argument
5361 // -----------------------------------
5363 Register constructor = x1;
5365 if (FLAG_debug_code) {
5366 // The array construct code is only set for the global and natives
5367 // builtin Array functions which always have maps.
5369 Label unexpected_map, map_ok;
5370 // Initial map for the builtin Array function should be a map.
5371 __ Ldr(x10, FieldMemOperand(constructor,
5372 JSFunction::kPrototypeOrInitialMapOffset));
5373 // Will both indicate a NULL and a Smi.
5374 __ JumpIfSmi(x10, &unexpected_map);
5375 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
5376 __ Bind(&unexpected_map);
5377 __ Abort(kUnexpectedInitialMapForArrayFunction);
5382 // Figure out the right elements kind
5383 __ Ldr(x10, FieldMemOperand(constructor,
5384 JSFunction::kPrototypeOrInitialMapOffset));
5386 // Retrieve elements_kind from map.
5387 __ LoadElementsKindFromMap(kind, x10);
5389 if (FLAG_debug_code) {
5391 __ Cmp(x3, FAST_ELEMENTS);
5392 __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
5393 __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
5396 Label fast_elements_case;
5397 __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
5398 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
5400 __ Bind(&fast_elements_case);
5401 GenerateCase(masm, FAST_ELEMENTS);
5405 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5406 // ----------- S t a t e -------------
5408 // -- x4 : call_data
5410 // -- x1 : api_function_address
5413 // -- sp[0] : last argument
5415 // -- sp[(argc - 1) * 8] : first argument
5416 // -- sp[argc * 8] : receiver
5417 // -----------------------------------
5419 Register callee = x0;
5420 Register call_data = x4;
5421 Register holder = x2;
5422 Register api_function_address = x1;
5423 Register context = cp;
5425 int argc = ArgumentBits::decode(bit_field_);
5426 bool is_store = IsStoreBits::decode(bit_field_);
5427 bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
5429 typedef FunctionCallbackArguments FCA;
5431 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5432 STATIC_ASSERT(FCA::kCalleeIndex == 5);
5433 STATIC_ASSERT(FCA::kDataIndex == 4);
5434 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5435 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5436 STATIC_ASSERT(FCA::kIsolateIndex == 1);
5437 STATIC_ASSERT(FCA::kHolderIndex == 0);
5438 STATIC_ASSERT(FCA::kArgsLength == 7);
5440 // FunctionCallbackArguments: context, callee and call data.
5441 __ Push(context, callee, call_data);
5443 // Load context from callee
5444 __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5446 if (!call_data_undefined) {
5447 __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
5449 Register isolate_reg = x5;
5450 __ Mov(isolate_reg, ExternalReference::isolate_address(isolate()));
5452 // FunctionCallbackArguments:
5453 // return value, return value default, isolate, holder.
5454 __ Push(call_data, call_data, isolate_reg, holder);
5456 // Prepare arguments.
5458 __ Mov(args, masm->StackPointer());
5460 // Allocate the v8::Arguments structure in the arguments' space, since it's
5461 // not controlled by GC.
5462 const int kApiStackSpace = 4;
5464 // Allocate space for CallApiFunctionAndReturn can store some scratch
5465 // registeres on the stack.
5466 const int kCallApiFunctionSpillSpace = 4;
5468 FrameScope frame_scope(masm, StackFrame::MANUAL);
5469 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
5471 ASSERT(!AreAliased(x0, api_function_address));
5472 // x0 = FunctionCallbackInfo&
5473 // Arguments is after the return address.
5474 __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
5475 // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
5476 __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
5477 __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
5478 // FunctionCallbackInfo::length_ = argc and
5479 // FunctionCallbackInfo::is_construct_call = 0
5481 __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
5483 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
5484 ExternalReference thunk_ref =
5485 ExternalReference::invoke_function_callback(isolate());
5487 AllowExternalCallThatCantCauseGC scope(masm);
5488 MemOperand context_restore_operand(
5489 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5490 // Stores return the first js argument
5491 int return_value_offset = 0;
5493 return_value_offset = 2 + FCA::kArgsLength;
5495 return_value_offset = 2 + FCA::kReturnValueOffset;
5497 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5499 const int spill_offset = 1 + kApiStackSpace;
5500 __ CallApiFunctionAndReturn(api_function_address,
5504 return_value_operand,
5505 &context_restore_operand);
5509 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5510 // ----------- S t a t e -------------
5512 // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object
5514 // -- x2 : api_function_address
5515 // -----------------------------------
5517 Register api_function_address = x2;
5519 __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
5520 __ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
5522 const int kApiStackSpace = 1;
5524 // Allocate space for CallApiFunctionAndReturn can store some scratch
5525 // registeres on the stack.
5526 const int kCallApiFunctionSpillSpace = 4;
5528 FrameScope frame_scope(masm, StackFrame::MANUAL);
5529 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
5531 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5532 // x1 (internal::Object** args_) as the data.
5533 __ Poke(x1, 1 * kPointerSize);
5534 __ Add(x1, masm->StackPointer(), 1 * kPointerSize); // x1 = AccessorInfo&
5536 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5538 ExternalReference thunk_ref =
5539 ExternalReference::invoke_accessor_getter_callback(isolate());
5541 const int spill_offset = 1 + kApiStackSpace;
5542 __ CallApiFunctionAndReturn(api_function_address,
5546 MemOperand(fp, 6 * kPointerSize),
5553 } } // namespace v8::internal
5555 #endif // V8_TARGET_ARCH_ARM64