// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {
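
// Each InitializeInterfaceDescriptor below records the calling convention of
// one hydrogen code stub: which registers carry its parameters and which
// runtime function, if any, serves as the deoptimization (miss) fallback.
// A deoptimization_handler_ of NULL means the stub has no runtime fallback.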

void FastNewClosureStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rbx };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
}


void FastNewContextStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rdi };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void ToNumberStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rax };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void NumberToStringStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rax };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kNumberToString)->entry;
}

void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rax, rbx, rcx };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
}


void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rax, rbx, rcx, rdx };
  descriptor->register_param_count_ = 4;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}


void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rbx };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}

void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rdx, rax };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
}


void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rdx, rax };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
}


void RegExpConstructResultStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rcx, rbx, rax };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
}

void LoadFieldStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rax };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rdx };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rdx, rcx, rax };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
}

void TransitionElementsKindStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rax, rbx };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
}
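
// The two helpers below are shared by the Array and InternalArray constructor
// stubs. A constant_stack_parameter_count of 0 selects the register-only
// convention; any other value (1 for the single-argument stubs, -1 when the
// count is only known at runtime) makes the stub pass the actual argument
// count in rax and marks the descriptor with PASS_ARGUMENTS.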

static void InitializeArrayConstructorDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // register state
  // rax -- number of arguments
  // rdi -- function
  // rbx -- allocation site with elements kind
  static Register registers_variable_args[] = { rdi, rbx, rax };
  static Register registers_no_args[] = { rdi, rbx };

  if (constant_stack_parameter_count == 0) {
    descriptor->register_param_count_ = 2;
    descriptor->register_params_ = registers_no_args;
  } else {
    // stack param count needs (constructor pointer, and single argument)
    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
    descriptor->stack_parameter_count_ = rax;
    descriptor->register_param_count_ = 3;
    descriptor->register_params_ = registers_variable_args;
  }

  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
}

static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // register state
  // rax -- number of arguments
  // rdi -- constructor function
  static Register registers_variable_args[] = { rdi, rax };
  static Register registers_no_args[] = { rdi };

  if (constant_stack_parameter_count == 0) {
    descriptor->register_param_count_ = 1;
    descriptor->register_params_ = registers_no_args;
  } else {
    // stack param count needs (constructor pointer, and single argument)
    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
    descriptor->stack_parameter_count_ = rax;
    descriptor->register_param_count_ = 2;
    descriptor->register_params_ = registers_variable_args;
  }

  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
}

void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
}


void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
}

void CompareNilICStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rax };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(CompareNilIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
}

void ToBooleanStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rax };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(ToBooleanIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
}

void StoreGlobalStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rdx, rcx, rax };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
}

void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rax, rbx, rcx, rdx };
  descriptor->register_param_count_ = 4;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
}

void BinaryOpICStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rdx, rax };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
}

void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rcx, rdx, rax };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
}

void StringAddStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { rdx, rax };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kStringAdd)->entry;
}
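
// The call interface descriptors below record, for each kind of call that
// optimized code emits directly, the register assigned to every parameter
// and its value representation, so that caller and callee agree on the
// calling convention.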

void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
    static Register registers[] = { rdi,  // JSFunction
                                    rsi,  // context
                                    rax,  // actual number of arguments
                                    rbx,  // expected number of arguments
    };
    static Representation representations[] = {
        Representation::Tagged(),     // JSFunction
        Representation::Tagged(),     // context
        Representation::Integer32(),  // actual number of arguments
        Representation::Integer32(),  // expected number of arguments
    };
    descriptor->register_param_count_ = 4;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::KeyedCall);
    static Register registers[] = { rsi,  // context
                                    rcx,  // key
    };
    static Representation representations[] = {
        Representation::Tagged(),  // context
        Representation::Tagged(),  // key
    };
    descriptor->register_param_count_ = 2;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::NamedCall);
    static Register registers[] = { rsi,  // context
                                    rcx,  // name
    };
    static Representation representations[] = {
        Representation::Tagged(),  // context
        Representation::Tagged(),  // name
    };
    descriptor->register_param_count_ = 2;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::CallHandler);
    static Register registers[] = { rsi,  // context
                                    rdx,  // receiver
    };
    static Representation representations[] = {
        Representation::Tagged(),  // context
        Representation::Tagged(),  // receiver
    };
    descriptor->register_param_count_ = 2;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::ApiFunctionCall);
    static Register registers[] = { rax,  // callee
                                    rbx,  // call_data
                                    rcx,  // holder
                                    rdx,  // api_function_address
                                    rsi,  // context
    };
    static Representation representations[] = {
        Representation::Tagged(),    // callee
        Representation::Tagged(),    // call_data
        Representation::Tagged(),    // holder
        Representation::External(),  // api_function_address
        Representation::Tagged(),    // context
    };
    descriptor->register_param_count_ = 5;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
  }
}
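
// ACCESS_MASM makes "__ insn(...)" below expand to "masm->insn(...)", the
// usual V8 shorthand for emitting code through the MacroAssembler.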
#define __ ACCESS_MASM(masm)

void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
  // Update the static counter each time a new code stub is generated.
  Isolate* isolate = masm->isolate();
  isolate->counters()->code_stubs()->Increment();

  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
  int param_count = descriptor->register_param_count_;
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    ASSERT(descriptor->register_param_count_ == 0 ||
           rax.is(descriptor->register_params_[param_count - 1]));
    // Push arguments
    for (int i = 0; i < param_count; ++i) {
      __ push(descriptor->register_params_[i]);
    }
    ExternalReference miss = descriptor->miss_handler();
    __ CallExternalReference(miss, descriptor->register_param_count_);
  }

  __ Ret();
}

void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  __ PushCallerSaved(save_doubles_);
  const int argument_count = 1;
  __ PrepareCallCFunction(argument_count);
  __ LoadAddress(arg_reg_1,
                 ExternalReference::isolate_address(masm->isolate()));

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(masm->isolate()),
      argument_count);
  __ PopCallerSaved(save_doubles_);
  __ ret(0);
}

class FloatingPointHelper : public AllStatic {
 public:
  enum ConvertUndefined {
    CONVERT_UNDEFINED_TO_ZERO,
    BAILOUT_ON_UNDEFINED
  };
  // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
  // If the operands are not both numbers, jump to not_numbers.
  // Leaves rdx and rax unchanged.  SmiOperands assumes both are smis.
  // NumberOperands assumes both are smis or heap numbers.
  static void LoadSSE2UnknownOperands(MacroAssembler* masm,
                                      Label* not_numbers);
};
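
// DoubleToIStub truncates a heap-number double to a 32-bit integer (the
// ECMA-262 ToInt32 operation, i.e. modulo 2^32). The double is read from
// memory at source() + offset() and the result is left in destination().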

void DoubleToIStub::Generate(MacroAssembler* masm) {
  Register input_reg = this->source();
  Register final_result_reg = this->destination();
  ASSERT(is_truncating());

  Label check_negative, process_64_bits, done;

  int double_offset = offset();

  // Account for return address and saved regs if input is rsp.
  if (input_reg.is(rsp)) double_offset += 3 * kPointerSize;

  MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
  MemOperand exponent_operand(MemOperand(input_reg,
                                         double_offset + kDoubleSize / 2));
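  // The 64-bit double is addressed as two 32-bit halves (little-endian): the
  // word at double_offset holds the low 32 mantissa bits, and the word at
  // double_offset + 4 holds the sign, the 11 exponent bits and the upper
  // 20 mantissa bits.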
  Register scratch1;
  Register scratch_candidates[3] = { rbx, rdx, rdi };
  for (int i = 0; i < 3; i++) {
    scratch1 = scratch_candidates[i];
    if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
  }

  // Since we must use rcx for shifts below, use some other register (rax)
  // to calculate the result if ecx is the requested return register.
  Register result_reg = final_result_reg.is(rcx) ? rax : final_result_reg;
  // Save ecx if it isn't the return register and therefore volatile, or if it
  // is the return register, then save the temp register we use in its stead
  // for the result.
  Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
  __ push(scratch1);
  __ push(save_reg);

  bool stash_exponent_copy = !input_reg.is(rsp);
  __ movl(scratch1, mantissa_operand);
  __ movsd(xmm0, mantissa_operand);
  __ movl(rcx, exponent_operand);
  if (stash_exponent_copy) __ push(rcx);

  __ andl(rcx, Immediate(HeapNumber::kExponentMask));
  __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
  __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
  __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
  __ j(below, &process_64_bits);
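  // Two cases from here on: if the unbiased exponent is below kMantissaBits
  // (52), the exact 64-bit conversion at process_64_bits suffices; otherwise
  // only the low 32 bits of the result survive the ToInt32 truncation, and
  // they are exactly the low mantissa word shifted left by
  // (exponent - kExponentBias - 52), computed below.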

  // Result is entirely in lower 32-bits of mantissa
  int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
  __ subl(rcx, Immediate(delta));
  __ xorl(result_reg, result_reg);
  __ cmpl(rcx, Immediate(31));
  __ j(above, &done);
  __ shll_cl(scratch1);
  __ jmp(&check_negative);

  __ bind(&process_64_bits);
  __ cvttsd2siq(result_reg, xmm0);
  __ jmp(&done, Label::kNear);

  // If the double was negative, negate the integer result.
  __ bind(&check_negative);
  __ movl(result_reg, scratch1);
  __ negl(result_reg);
  if (stash_exponent_copy) {
    __ cmpl(MemOperand(rsp, 0), Immediate(0));
  } else {
    __ cmpl(exponent_operand, Immediate(0));
  }
  __ cmovl(greater, result_reg, scratch1);

  // Restore registers
  __ bind(&done);
  if (stash_exponent_copy) {
    __ addq(rsp, Immediate(kDoubleSize));
  }
  if (!final_result_reg.is(result_reg)) {
    ASSERT(final_result_reg.is(rcx));
    __ movl(final_result_reg, result_reg);
  }
  __ pop(save_reg);
  __ pop(scratch1);
  __ ret(0);
}

void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
                                                  Label* not_numbers) {
  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
  // Load operand in rdx into xmm0, or branch to not_numbers.
  __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
  __ JumpIfSmi(rdx, &load_smi_rdx);
  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
  __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
  // Load operand in rax into xmm1, or branch to not_numbers.
  __ JumpIfSmi(rax, &load_smi_rax);

  __ bind(&load_nonsmi_rax);
  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
  __ j(not_equal, not_numbers);
  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
  __ jmp(&done);

  __ bind(&load_smi_rdx);
  __ SmiToInteger32(kScratchRegister, rdx);
  __ Cvtlsi2sd(xmm0, kScratchRegister);
  __ JumpIfNotSmi(rax, &load_nonsmi_rax);

  __ bind(&load_smi_rax);
  __ SmiToInteger32(kScratchRegister, rax);
  __ Cvtlsi2sd(xmm1, kScratchRegister);
  __ bind(&done);
}

void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = rdx;
  const Register base = rax;
  const Register scratch = rcx;
  const XMMRegister double_result = xmm3;
  const XMMRegister double_base = xmm2;
  const XMMRegister double_exponent = xmm1;
  const XMMRegister double_scratch = xmm4;

  Label call_runtime, done, exponent_not_smi, int_exponent;

  // Save 1 in double_result - we need this several times later on.
  __ movp(scratch, Immediate(1));
  __ Cvtlsi2sd(double_result, scratch);

  if (exponent_type_ == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack.
    StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
    __ movp(base, args.GetArgumentOperand(0));
    __ movp(exponent, args.GetArgumentOperand(1));
    __ JumpIfSmi(base, &base_is_smi, Label::kNear);
    __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &call_runtime);

    __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent, Label::kNear);

    __ bind(&base_is_smi);
    __ SmiToInteger32(base, base);
    __ Cvtlsi2sd(double_base, base);
    __ bind(&unpack_exponent);

    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
    __ SmiToInteger32(exponent, exponent);
    __ jmp(&int_exponent);

    __ bind(&exponent_not_smi);
    __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &call_runtime);
    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type_ == TAGGED) {
    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
    __ SmiToInteger32(exponent, exponent);
    __ jmp(&int_exponent);

    __ bind(&exponent_not_smi);
    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type_ != INTEGER) {
    Label fast_power, try_arithmetic_simplification;
    // Detect integer exponents stored as double.
    __ DoubleToI(exponent, double_exponent, double_scratch,
                 TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
    __ jmp(&int_exponent);

    __ bind(&try_arithmetic_simplification);
    __ cvttsd2si(exponent, double_exponent);
    // Skip to runtime if possibly NaN (indicated by the indefinite integer).
    __ cmpl(exponent, Immediate(0x80000000u));
    __ j(equal, &call_runtime);

    if (exponent_type_ == ON_STACK) {
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label continue_sqrt, continue_rsqrt, not_plus_half;

      // Load double_scratch with 0.5.
      __ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
      __ movq(double_scratch, scratch);
      // Already ruled out NaNs for exponent.
      __ ucomisd(double_scratch, double_exponent);
      __ j(not_equal, &not_plus_half, Label::kNear);

      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      // According to IEEE-754, double-precision -Infinity has the highest
      // 12 bits set and the lowest 52 bits cleared.
      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
      __ movq(double_scratch, scratch);
      __ ucomisd(double_scratch, double_base);
      // Comparing -Infinity with NaN results in "unordered", which sets the
      // zero flag as if both were equal. However, it also sets the carry flag.
      __ j(not_equal, &continue_sqrt, Label::kNear);
      __ j(carry, &continue_sqrt, Label::kNear);

      // Set result to Infinity in the special case.
      __ xorps(double_result, double_result);
      __ subsd(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&continue_sqrt);
      // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
      __ xorps(double_scratch, double_scratch);
      __ addsd(double_scratch, double_base);  // Convert -0 to 0.
      __ sqrtsd(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&not_plus_half);
      // Load double_scratch with -0.5 by subtracting 1.
      __ subsd(double_scratch, double_result);
      // Already ruled out NaNs for exponent.
      __ ucomisd(double_scratch, double_exponent);
      __ j(not_equal, &fast_power, Label::kNear);

      // Calculates reciprocal of square root of base. Check for the special
      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      // According to IEEE-754, double-precision -Infinity has the highest
      // 12 bits set and the lowest 52 bits cleared.
      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
      __ movq(double_scratch, scratch);
      __ ucomisd(double_scratch, double_base);
      // Comparing -Infinity with NaN results in "unordered", which sets the
      // zero flag as if both were equal. However, it also sets the carry flag.
      __ j(not_equal, &continue_rsqrt, Label::kNear);
      __ j(carry, &continue_rsqrt, Label::kNear);

      // Set result to 0 in the special case.
      __ xorps(double_result, double_result);
      __ jmp(&done);

      __ bind(&continue_rsqrt);
      // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
      __ xorps(double_exponent, double_exponent);
      __ addsd(double_exponent, double_base);  // Convert -0 to +0.
      __ sqrtsd(double_exponent, double_exponent);
      __ divsd(double_result, double_exponent);
      __ jmp(&done);
    }

    // Using FPU instructions to calculate power.
    Label fast_power_failed;
    __ bind(&fast_power);
    __ fnclex();  // Clear flags to catch exceptions later.
    // Transfer (B)ase and (E)xponent onto the FPU register stack.
    __ subq(rsp, Immediate(kDoubleSize));
    __ movsd(Operand(rsp, 0), double_exponent);
    __ fld_d(Operand(rsp, 0));  // E
    __ movsd(Operand(rsp, 0), double_base);
    __ fld_d(Operand(rsp, 0));  // B, E

    // Exponent is in st(1) and base is in st(0)
    // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
    // FYL2X calculates st(1) * log2(st(0))
    __ fyl2x();    // X
    __ fld(0);     // X, X
    __ frndint();  // rnd(X), X
    __ fsub(1);    // rnd(X), X-rnd(X)
    __ fxch(1);    // X - rnd(X), rnd(X)
    // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
    __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
    __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
    __ faddp(1);   // 2^(X-rnd(X)), rnd(X)
    // FSCALE calculates st(0) * 2^st(1)
    __ fscale();   // 2^X, rnd(X)
    __ fstp(1);    // 2^X

    // Bail out to runtime in case of exceptions in the status word.
    __ fnstsw_ax();
    __ testb(rax, Immediate(0x5F));  // Check for all but precision exception.
    __ j(not_zero, &fast_power_failed, Label::kNear);
    __ fstp_d(Operand(rsp, 0));
    __ movsd(double_result, Operand(rsp, 0));
    __ addq(rsp, Immediate(kDoubleSize));
    __ jmp(&done);

    __ bind(&fast_power_failed);
    __ fninit();
    __ addq(rsp, Immediate(kDoubleSize));
    __ jmp(&call_runtime);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);
  const XMMRegister double_scratch2 = double_exponent;
  // Back up exponent as we need to check if exponent is negative later.
  __ movp(scratch, exponent);  // Back up exponent.
  __ movsd(double_scratch, double_base);  // Back up base.
  __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.
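
  // What follows is binary exponentiation: scratch holds |exponent| and is
  // shifted right one bit per iteration, double_scratch is squared on each
  // round, and double_result picks up a factor of double_scratch whenever
  // the bit shifted out of scratch was 1.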

  // Get absolute value of exponent.
  Label no_neg, while_true, while_false;
  __ testl(scratch, scratch);
  __ j(positive, &no_neg, Label::kNear);
  __ negl(scratch);
  __ bind(&no_neg);

  __ j(zero, &while_false, Label::kNear);
  __ shrl(scratch, Immediate(1));
  // Above condition means CF==0 && ZF==0.  This means that the
  // bit that has been shifted out is 0 and the result is not 0.
  __ j(above, &while_true, Label::kNear);
  __ movsd(double_result, double_scratch);
  __ j(zero, &while_false, Label::kNear);

  __ bind(&while_true);
  __ shrl(scratch, Immediate(1));
  __ mulsd(double_scratch, double_scratch);
  __ j(above, &while_true, Label::kNear);
  __ mulsd(double_result, double_scratch);
  __ j(not_zero, &while_true);

  __ bind(&while_false);
  // If the exponent is negative, return 1/result.
  __ testl(exponent, exponent);
  __ j(greater, &done);
  __ divsd(double_scratch2, double_result);
  __ movsd(double_result, double_scratch2);
  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ xorps(double_scratch2, double_scratch2);
  __ ucomisd(double_scratch2, double_result);
  // double_exponent aliased as double_scratch2 has already been overwritten
  // and may not have contained the exponent value in the first place when the
  // input was a smi.  We reset it with exponent value before bailing out.
  __ j(not_equal, &done);
  __ Cvtlsi2sd(double_exponent, exponent);

  // Returning or bailing out.
  Counters* counters = masm->isolate()->counters();
  if (exponent_type_ == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in rax.
    __ bind(&done);
    __ AllocateHeapNumber(rax, rcx, &call_runtime);
    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
    __ IncrementCounter(counters->math_pow(), 1);
    __ ret(2 * kPointerSize);
  } else {
    __ bind(&call_runtime);
    // Move base to the correct argument register.  Exponent is already in xmm1.
    __ movsd(xmm0, double_base);
    ASSERT(double_exponent.is(xmm1));
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(2);
      __ CallCFunction(
          ExternalReference::power_double_double_function(masm->isolate()), 2);
    }
    // Return value is in xmm0.
    __ movsd(double_result, xmm0);

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1);
    __ ret(0);
  }
}

void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver;
  if (kind() == Code::KEYED_LOAD_IC) {
    // ----------- S t a t e -------------
    //  -- rax    : key
    //  -- rdx    : receiver
    //  -- rsp[0] : return address
    // -----------------------------------
    __ Cmp(rax, masm->isolate()->factory()->prototype_string());
    __ j(not_equal, &miss);
    receiver = rdx;
  } else {
    ASSERT(kind() == Code::LOAD_IC);
    // ----------- S t a t e -------------
    //  -- rax    : receiver
    //  -- rcx    : name
    //  -- rsp[0] : return address
    // -----------------------------------
    receiver = rax;
  }

  StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8, r9, &miss);
  __ bind(&miss);
  StubCompiler::TailCallBuiltin(
      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}

void StringLengthStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver;
  if (kind() == Code::KEYED_LOAD_IC) {
    // ----------- S t a t e -------------
    //  -- rax    : key
    //  -- rdx    : receiver
    //  -- rsp[0] : return address
    // -----------------------------------
    __ Cmp(rax, masm->isolate()->factory()->length_string());
    __ j(not_equal, &miss);
    receiver = rdx;
  } else {
    ASSERT(kind() == Code::LOAD_IC);
    // ----------- S t a t e -------------
    //  -- rax    : receiver
    //  -- rcx    : name
    //  -- rsp[0] : return address
    // -----------------------------------
    receiver = rax;
  }

  StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
  __ bind(&miss);
  StubCompiler::TailCallBuiltin(
      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}

void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  //
  // This accepts as a receiver anything JSArray::SetElementsLength accepts
  // (currently anything except for external arrays which means anything with
  // elements of FixedArray type).  Value must be a number, but only smis are
  // accepted as the most common case.

  Label miss;

  Register receiver = rdx;
  Register value = rax;
  Register scratch = rbx;
  if (kind() == Code::KEYED_STORE_IC) {
    __ Cmp(rcx, masm->isolate()->factory()->length_string());
    __ j(not_equal, &miss);
  }

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Check that the object is a JS array.
  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
  __ j(not_equal, &miss);

  // Check that elements are FixedArray.
  // We rely on StoreIC_ArrayLength below to deal with all types of
  // fast elements (including COW).
  __ movp(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
  __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
  __ j(not_equal, &miss);

  // Check that the array has fast properties, otherwise the length
  // property might have been redefined.
  __ movp(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
  __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
                 Heap::kHashTableMapRootIndex);
  __ j(equal, &miss);

  // Check that value is a smi.
  __ JumpIfNotSmi(value, &miss);

  // Prepare tail call to StoreIC_ArrayLength.
  __ PopReturnAddressTo(scratch);
  __ push(receiver);
  __ push(value);
  __ PushReturnAddressFrom(scratch);

  ExternalReference ref =
      ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
  __ TailCallExternalReference(ref, 2, 1);

  __ bind(&miss);

  StubCompiler::TailCallBuiltin(
      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}

void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The key is in rdx and the parameter count is in rax.

  // Check that the key is a smi.
  Label slow;
  __ JumpIfNotSmi(rdx, &slow);

  // Check if the calling frame is an arguments adaptor frame.  We look at the
  // context offset, and if the frame is not a regular one, then we find a
  // Smi instead of the context.  We can't use SmiCompare here, because that
  // only works for comparing two smis.
  Label adaptor;
  __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor);

  // Check index against formal parameters count limit passed in
  // through register rax. Use unsigned comparison to get negative
  // check for free.
  __ cmpq(rdx, rax);
  __ j(above_equal, &slow);

  // Read the argument from the stack and return it.
  __ SmiSub(rax, rax, rdx);
  __ SmiToInteger32(rax, rax);
  StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ movp(rax, args.GetArgumentOperand(0));
  __ Ret();

  // Arguments adaptor case: Check index against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ cmpq(rdx, rcx);
  __ j(above_equal, &slow);

  // Read the argument from the stack and return it.
  __ SmiSub(rcx, rcx, rdx);
  __ SmiToInteger32(rcx, rcx);
  StackArgumentsAccessor adaptor_args(rbx, rcx,
                                      ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ movp(rax, adaptor_args.GetArgumentOperand(0));
  __ Ret();

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ PopReturnAddressTo(rbx);
  __ push(rdx);
  __ PushReturnAddressFrom(rbx);
  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}

void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
  // Stack layout:
  //  rsp[0]  : return address
  //  rsp[8]  : number of parameters (tagged)
  //  rsp[16] : receiver displacement
  //  rsp[24] : function
  // Registers used over the whole function:
  //  rbx: the mapped parameter count (untagged)
  //  rax: the allocated object (tagged).

  Factory* factory = masm->isolate()->factory();

  StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ SmiToInteger64(rbx, args.GetArgumentOperand(2));
  // rbx = parameter count (untagged)

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  Label adaptor_frame, try_allocate;
  __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor_frame);

  // No adaptor, parameter count = argument count.
  __ movp(rcx, rbx);
  __ jmp(&try_allocate, Label::kNear);

  // We have an adaptor frame. Patch the parameters pointer.
  __ bind(&adaptor_frame);
  __ SmiToInteger64(rcx,
                    Operand(rdx,
                            ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
                      StandardFrameConstants::kCallerSPOffset));
  __ movp(args.GetArgumentOperand(1), rdx);

  // rbx = parameter count (untagged)
  // rcx = argument count (untagged)
  // Compute the mapped parameter count = min(rbx, rcx) in rbx.
  __ cmpq(rbx, rcx);
  __ j(less_equal, &try_allocate, Label::kNear);
  __ movp(rbx, rcx);

  __ bind(&try_allocate);

  // Compute the sizes of backing store, parameter map, and arguments object.
  // 1. Parameter map, has 2 extra words containing context and backing store.
  const int kParameterMapHeaderSize =
      FixedArray::kHeaderSize + 2 * kPointerSize;
  Label no_parameter_map;
  __ xor_(r8, r8);
  __ testq(rbx, rbx);
  __ j(zero, &no_parameter_map, Label::kNear);
  __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
  __ bind(&no_parameter_map);

  // 2. Backing store.
  __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));

  // 3. Arguments object.
  __ addq(r8, Immediate(Heap::kArgumentsObjectSize));

  // Do the allocation of all three objects in one go.
  __ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);

  // rax = address of new object(s) (tagged)
  // rcx = argument count (untagged)
  // Get the arguments boilerplate from the current native context into rdi.
  Label has_mapped_parameters, copy;
  __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
  __ testq(rbx, rbx);
  __ j(not_zero, &has_mapped_parameters, Label::kNear);

  const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
  __ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
  __ jmp(&copy, Label::kNear);

  const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
  __ bind(&has_mapped_parameters);
  __ movp(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
  __ bind(&copy);

  // rax = address of new object (tagged)
  // rbx = mapped parameter count (untagged)
  // rcx = argument count (untagged)
  // rdi = address of boilerplate object (tagged)
  // Copy the JS object part.
  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
    __ movp(rdx, FieldOperand(rdi, i));
    __ movp(FieldOperand(rax, i), rdx);
  }

  // Set up the callee in-object property.
  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
  __ movp(rdx, args.GetArgumentOperand(0));
  __ movp(FieldOperand(rax, JSObject::kHeaderSize +
                       Heap::kArgumentsCalleeIndex * kPointerSize),
          rdx);

  // Use the length (smi tagged) and set that as an in-object property too.
  // Note: rcx is tagged from here on.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ Integer32ToSmi(rcx, rcx);
  __ movp(FieldOperand(rax, JSObject::kHeaderSize +
                       Heap::kArgumentsLengthIndex * kPointerSize),
          rcx);

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, edi will point there, otherwise to the
  // backing store.
  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
  __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);

  // rax = address of new object (tagged)
  // rbx = mapped parameter count (untagged)
  // rcx = argument count (tagged)
  // rdi = address of parameter map or backing store (tagged)

  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  __ testq(rbx, rbx);
  __ j(zero, &skip_parameter_map);

  __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
  // rbx contains the untagged argument count. Add 2 and tag to write.
  __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
  __ Integer64PlusConstantToSmi(r9, rbx, 2);
  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
  __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameter thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left.
  Label parameters_loop, parameters_test;

  // Load tagged parameter count into r9.
  __ Integer32ToSmi(r9, rbx);
  __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
  __ addq(r8, args.GetArgumentOperand(2));
  __ subq(r8, r9);
  __ Move(r11, factory->the_hole_value());
  __ movp(rdx, rdi);
  __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
  // r9 = loop variable (tagged)
  // r8 = mapping index (tagged)
  // r11 = the hole value
  // rdx = address of parameter map (tagged)
  // rdi = address of backing store (tagged)
  __ jmp(&parameters_test, Label::kNear);

  __ bind(&parameters_loop);
  __ SmiSubConstant(r9, r9, Smi::FromInt(1));
  __ SmiToInteger64(kScratchRegister, r9);
  __ movp(FieldOperand(rdx, kScratchRegister,
                       times_pointer_size,
                       kParameterMapHeaderSize),
          r8);
  __ movp(FieldOperand(rdi, kScratchRegister,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          r11);
  __ SmiAddConstant(r8, r8, Smi::FromInt(1));
  __ bind(&parameters_test);
  __ SmiTest(r9);
  __ j(not_zero, &parameters_loop, Label::kNear);

  __ bind(&skip_parameter_map);

  // rcx = argument count (tagged)
  // rdi = address of backing store (tagged)
  // Copy arguments header and remaining slots (if there are any).
  __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
          factory->fixed_array_map());
  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);

  Label arguments_loop, arguments_test;
  __ movp(r8, rbx);
  __ movp(rdx, args.GetArgumentOperand(1));
  // Untag rcx for the loop below.
  __ SmiToInteger64(rcx, rcx);
  __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
  __ subq(rdx, kScratchRegister);
  __ jmp(&arguments_test, Label::kNear);

  __ bind(&arguments_loop);
  __ subq(rdx, Immediate(kPointerSize));
  __ movp(r9, Operand(rdx, 0));
  __ movp(FieldOperand(rdi, r8,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          r9);
  __ addq(r8, Immediate(1));

  __ bind(&arguments_test);
  __ cmpq(r8, rcx);
  __ j(less, &arguments_loop, Label::kNear);

  // Return and remove the on-stack parameters.
  __ ret(3 * kPointerSize);

  // Do the runtime call to allocate the arguments object.
  // rcx = argument count (untagged)
  __ bind(&runtime);
  __ Integer32ToSmi(rcx, rcx);
  __ movp(args.GetArgumentOperand(2), rcx);  // Patch argument count.
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}

void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
  // rsp[0]  : return address
  // rsp[8]  : number of parameters
  // rsp[16] : receiver displacement
  // rsp[24] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &runtime);

  // Patch the arguments.length and the parameters pointer.
  StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ movp(args.GetArgumentOperand(2), rcx);
  __ SmiToInteger64(rcx, rcx);
  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
                      StandardFrameConstants::kCallerSPOffset));
  __ movp(args.GetArgumentOperand(1), rdx);

  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}

void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // rsp[0]  : return address
  // rsp[8]  : number of parameters
  // rsp[16] : receiver displacement
  // rsp[24] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor_frame);

  // Get the length from the frame.
  StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ movp(rcx, args.GetArgumentOperand(2));
  __ SmiToInteger64(rcx, rcx);
  __ jmp(&try_allocate);

  // Patch the arguments.length and the parameters pointer.
  __ bind(&adaptor_frame);
  __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ movp(args.GetArgumentOperand(2), rcx);
  __ SmiToInteger64(rcx, rcx);
  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
                      StandardFrameConstants::kCallerSPOffset));
  __ movp(args.GetArgumentOperand(1), rdx);

  // Try the new space allocation. Start out with computing the size of
  // the arguments object and the elements array.
  Label add_arguments_object;
  __ bind(&try_allocate);
  __ testq(rcx, rcx);
  __ j(zero, &add_arguments_object, Label::kNear);
  __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
  __ bind(&add_arguments_object);
  __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));

  // Do the allocation of both objects in one go.
  __ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);

  // Get the arguments boilerplate from the current native context.
  __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
  const int offset =
      Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
  __ movp(rdi, Operand(rdi, offset));

  // Copy the JS object part.
  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
    __ movp(rbx, FieldOperand(rdi, i));
    __ movp(FieldOperand(rax, i), rbx);
  }

  // Get the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ movp(rcx, args.GetArgumentOperand(2));
  __ movp(FieldOperand(rax, JSObject::kHeaderSize +
                       Heap::kArgumentsLengthIndex * kPointerSize),
          rcx);

  // If there are no actual arguments, we're done.
  Label done;
  __ testq(rcx, rcx);
  __ j(zero, &done);

  // Get the parameters pointer from the stack.
  __ movp(rdx, args.GetArgumentOperand(1));

  // Set up the elements pointer in the allocated arguments object and
  // initialize the header in the elements fixed array.
  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
  __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
  __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);

  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
  // Untag the length for the loop below.
  __ SmiToInteger64(rcx, rcx);

  // Copy the fixed array slots.
  Label loop;
  __ bind(&loop);
  __ movp(rbx, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
  __ addq(rdi, Immediate(kPointerSize));
  __ subq(rdx, Immediate(kPointerSize));
  __ decq(rcx);
  __ j(not_zero, &loop);

  // Return and remove the on-stack parameters.
  __ bind(&done);
  __ ret(3 * kPointerSize);

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}

void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Just jump directly to runtime if native RegExp is not selected at compile
  // time, or if the regexp entry in generated code has been turned off by a
  // runtime switch.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  rsp[0]  : return address
  //  rsp[8]  : last_match_info (expected JSArray)
  //  rsp[16] : previous index
  //  rsp[24] : subject string
  //  rsp[32] : JSRegExp object

  enum RegExpExecStubArgumentIndices {
    JS_REG_EXP_OBJECT_ARGUMENT_INDEX,
    SUBJECT_STRING_ARGUMENT_INDEX,
    PREVIOUS_INDEX_ARGUMENT_INDEX,
    LAST_MATCH_INFO_ARGUMENT_INDEX,
    REG_EXP_EXEC_ARGUMENT_COUNT
  };

  StackArgumentsAccessor args(rsp, REG_EXP_EXEC_ARGUMENT_COUNT,
                              ARGUMENTS_DONT_CONTAIN_RECEIVER);
  Label runtime;
  // Ensure that a RegExp stack is allocated.
  Isolate* isolate = masm->isolate();
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(isolate);
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(isolate);
  __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
  __ testq(kScratchRegister, kScratchRegister);
  __ j(zero, &runtime);

  // Check that the first argument is a JSRegExp object.
  __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
  __ JumpIfSmi(rax, &runtime);
  __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
  __ j(not_equal, &runtime);

  // Check that the RegExp has been compiled (data contains a fixed array).
  __ movp(rax, FieldOperand(rax, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    Condition is_smi = masm->CheckSmi(rax);
    __ Check(NegateCondition(is_smi),
             kUnexpectedTypeForRegExpDataFixedArrayExpected);
    __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
    __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
  }

  // rax: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
  __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
  __ j(not_equal, &runtime);

  // rax: RegExp data (FixedArray)
  // Check that the number of captures fit in the static offsets vector buffer.
  __ SmiToInteger32(rdx,
                    FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
  // Check (number_of_captures + 1) * 2 <= offsets vector size
  // Or     number_of_captures <= offsets vector size / 2 - 1
  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
  __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1));
  __ j(above, &runtime);

  // Reset offset for possibly sliced string.
  __ Set(r14, 0);
  __ movp(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
  __ JumpIfSmi(rdi, &runtime);
  __ movp(r15, rdi);  // Make a copy of the original subject string.
  __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
  // rax: RegExp data (FixedArray)
  // rdi: subject string
  // r15: subject string
  // Handle subject string according to its encoding and representation:
  // (1) Sequential two byte?  If yes, go to (9).
  // (2) Sequential one byte?  If yes, go to (6).
  // (3) Anything but sequential or cons?  If yes, go to (7).
  // (4) Cons string.  If the string is flat, replace subject with first string.
  //     Otherwise bailout.
  // (5a) Is subject sequential two byte?  If yes, go to (9).
  // (5b) Is subject external?  If yes, go to (8).
  // (6) One byte sequential.  Load regexp code for one byte.
  // (E) Carry on.

  // Deferred code at the end of the stub:
  // (7) Not a long external string?  If yes, go to (10).
  // (8) External string.  Make it, offset-wise, look like a sequential string.
  // (8a) Is the external string one byte?  If yes, go to (6).
  // (9) Two byte sequential.  Load regexp code for two byte. Go to (E).
  // (10) Short external string or not a string?  If yes, bail out to runtime.
  // (11) Sliced string.  Replace subject with parent. Go to (5a).

  Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
      external_string /* 8 */, check_underlying /* 5a */,
      not_seq_nor_cons /* 7 */, check_code /* E */,
      not_long_external /* 10 */;

  // (1) Sequential two byte?  If yes, go to (9).
  __ andb(rbx, Immediate(kIsNotStringMask |
                         kStringRepresentationMask |
                         kStringEncodingMask |
                         kShortExternalStringMask));
  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
  __ j(zero, &seq_two_byte_string);  // Go to (9).

  // (2) Sequential one byte?  If yes, go to (6).
  // Any other sequential string must be one byte.
  __ andb(rbx, Immediate(kIsNotStringMask |
                         kStringRepresentationMask |
                         kShortExternalStringMask));
  __ j(zero, &seq_one_byte_string, Label::kNear);  // Go to (6).

  // (3) Anything but sequential or cons?  If yes, go to (7).
  // We check whether the subject string is a cons, since sequential strings
  // have already been covered.
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
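  // Given the tag ordering asserted above, a single unsigned comparison
  // against kExternalStringTag classifies the remaining cases: below it only
  // a cons string is possible; at or above it the subject is external,
  // sliced, short external or not a string at all, all handled in the
  // deferred code below.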
  __ cmpq(rbx, Immediate(kExternalStringTag));
  __ j(greater_equal, &not_seq_nor_cons);  // Go to (7).

  // (4) Cons string.  Check that it's flat.
  // Replace subject with first string and reload instance type.
  __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
                 Heap::kempty_stringRootIndex);
  __ j(not_equal, &runtime);
  __ movp(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
  __ bind(&check_underlying);
  __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));

  // (5a) Is subject sequential two byte?  If yes, go to (9).
  __ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
  __ j(zero, &seq_two_byte_string);  // Go to (9).
  // (5b) Is subject external?  If yes, go to (8).
  __ testb(rbx, Immediate(kStringRepresentationMask));
  // The underlying external string is never a short external string.
  STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
  STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
  __ j(not_zero, &external_string);  // Go to (8).

  // (6) One byte sequential.  Load regexp code for one byte.
  __ bind(&seq_one_byte_string);
  // rax: RegExp data (FixedArray)
  __ movp(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
  __ Set(rcx, 1);  // Type is one byte.

  // (E) Carry on.  String handling is done.
  __ bind(&check_code);
  // r11: irregexp code
  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
  __ JumpIfSmi(r11, &runtime);

  // rdi: sequential subject string (or look-alike, external string)
  // r15: original subject string
  // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);

  // Load used arguments before starting to push arguments for call to native
  // RegExp code to avoid handling changing stack height.
  // We have to use r15 instead of rdi to load the length because rdi might
  // have been only made to look like a sequential string when it actually
  // is an external string.
  __ movp(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
  __ JumpIfNotSmi(rbx, &runtime);
  __ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
  __ j(above_equal, &runtime);
  __ SmiToInteger64(rbx, rbx);

  // rdi: subject string
  // rbx: previous index
  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);

  // All checks done. Now push arguments for native regexp code.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->regexp_entry_native(), 1);

  // Isolates: note we add an additional parameter here (isolate pointer).
  static const int kRegExpExecuteArguments = 9;
  int argument_slots_on_stack =
      masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
  __ EnterApiExitFrame(argument_slots_on_stack);
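
  // The native regexp entry point is a C function taking nine arguments, so
  // ArgumentStackSlotsForCFunctionCall returns how many of them must live on
  // the stack for the host ABI: Win64 passes the first four arguments in
  // registers (and reserves shadow space), the System V AMD64 ABI passes the
  // first six, and the stores below fill in the stack-passed remainder.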
1603 // Argument 9: Pass current isolate address.
1604 __ LoadAddress(kScratchRegister,
1605 ExternalReference::isolate_address(masm->isolate()));
1606 __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kRegisterSize),
1609 // Argument 8: Indicate that this is a direct call from JavaScript.
  __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kRegisterSize),
          Immediate(1));
1613 // Argument 7: Start (high end) of backtracking stack memory area.
1614 __ Move(kScratchRegister, address_of_regexp_stack_memory_address);
1615 __ movp(r9, Operand(kScratchRegister, 0));
1616 __ Move(kScratchRegister, address_of_regexp_stack_memory_size);
1617 __ addq(r9, Operand(kScratchRegister, 0));
1618 __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r9);
1620 // Argument 6: Set the number of capture registers to zero to force global
1621 // regexps to behave as non-global. This does not affect non-global regexps.
1622 // Argument 6 is passed in r9 on Linux and on the stack on Windows.
  __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kRegisterSize),
          Immediate(0));
  // Argument 5: static offsets vector buffer.
  __ LoadAddress(r8,
                 ExternalReference::address_of_static_offsets_vector(isolate));
1633 // Argument 5 passed in r8 on Linux and on the stack on Windows.
1635 __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kRegisterSize), r8);
1638 // rdi: subject string
1639 // rbx: previous index
1640 // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
1642 // r14: slice offset
1643 // r15: original subject string
1645 // Argument 2: Previous index.
1646 __ movp(arg_reg_2, rbx);
1648 // Argument 4: End of string data
1649 // Argument 3: Start of string data
1650 Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
1651 // Prepare start and end index of the input.
1652 // Load the length from the original sliced string if that is the case.
  __ addq(rbx, r14);
  __ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset));
1655 __ addq(r14, arg_reg_3); // Using arg3 as scratch.
1657 // rbx: start index of the input
1658 // r14: end index of the input
1659 // r15: original subject string
1660 __ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
1661 __ j(zero, &setup_two_byte, Label::kNear);
  __ lea(arg_reg_4,
         FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
  __ lea(arg_reg_3,
         FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
1666 __ jmp(&setup_rest, Label::kNear);
1667 __ bind(&setup_two_byte);
  __ lea(arg_reg_4,
         FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
  __ lea(arg_reg_3,
         FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
1672 __ bind(&setup_rest);
1674 // Argument 1: Original subject string.
1675 // The original subject is in the previous stack frame. Therefore we have to
1676 // use rbp, which points exactly to one pointer size below the previous rsp.
1677 // (Because creating a new stack frame pushes the previous rbp onto the stack
1678 // and thereby moves up rsp by one kPointerSize.)
1679 __ movp(arg_reg_1, r15);
1681 // Locate the code entry and call it.
1682 __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
  __ call(r11);

  __ LeaveApiExitFrame(true);
1687 // Check the result.
1690 __ cmpl(rax, Immediate(1));
  // We expect exactly one result since we force the called regexp to behave
  // as non-global.
  __ j(equal, &success, Label::kNear);
1694 __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
1695 __ j(equal, &exception);
1696 __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
1697 // If none of the above, it can only be retry.
1698 // Handle that in the runtime system.
1699 __ j(not_equal, &runtime);
1701 // For failure return null.
1702 __ LoadRoot(rax, Heap::kNullValueRootIndex);
1703 __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
  // Load RegExp data.
  __ bind(&success);
1707 __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
1708 __ movp(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
1709 __ SmiToInteger32(rax,
1710 FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
1711 // Calculate number of capture registers (number_of_captures + 1) * 2.
1712 __ leal(rdx, Operand(rax, rax, times_1, 2));
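  // The leal above computes rax + rax*1 + 2, i.e. (number_of_captures + 1)*2:
  // each capture group, plus the implicit whole-match capture, needs a start
  // and an end offset. For example, /a(b)(c)/ has 2 groups => 6 registers.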
1714 // rdx: Number of capture registers
1715 // Check that the fourth object is a JSArray object.
1716 __ movp(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
1717 __ JumpIfSmi(r15, &runtime);
1718 __ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
1719 __ j(not_equal, &runtime);
1720 // Check that the JSArray is in fast case.
1721 __ movp(rbx, FieldOperand(r15, JSArray::kElementsOffset));
1722 __ movp(rax, FieldOperand(rbx, HeapObject::kMapOffset));
1723 __ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
1724 __ j(not_equal, &runtime);
1725 // Check that the last match info has space for the capture registers and the
1726 // additional information. Ensure no overflow in add.
1727 STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
1728 __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
1729 __ subl(rax, Immediate(RegExpImpl::kLastMatchOverhead));
  __ cmpl(rdx, rax);
  __ j(greater, &runtime);
1733 // rbx: last_match_info backing store (FixedArray)
1734 // rdx: number of capture registers
1735 // Store the capture count.
1736 __ Integer32ToSmi(kScratchRegister, rdx);
  __ movp(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
          kScratchRegister);
1739 // Store last subject and last input.
1740 __ movp(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
1741 __ movp(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
  __ movp(rcx, rax);
  __ RecordWriteField(
      rbx, RegExpImpl::kLastSubjectOffset, rax, rdi, kDontSaveFPRegs);
  __ movp(rax, rcx);
1749 __ movp(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
  __ RecordWriteField(
      rbx, RegExpImpl::kLastInputOffset, rax, rdi, kDontSaveFPRegs);
1756 // Get the static offsets vector filled by the native regexp code.
  __ LoadAddress(rcx,
                 ExternalReference::address_of_static_offsets_vector(isolate));
1760 // rbx: last_match_info backing store (FixedArray)
1761 // rcx: offsets vector
1762 // rdx: number of capture registers
1763 Label next_capture, done;
1764 // Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
1766 __ bind(&next_capture);
1767 __ subq(rdx, Immediate(1));
1768 __ j(negative, &done, Label::kNear);
1769 // Read the value from the static offsets vector buffer and make it a smi.
1770 __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
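  // The offsets vector stores raw int32 offsets, hence the times_int_size
  // (i.e. times_4) scale when indexing it with the capture register counter.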
1771 __ Integer32ToSmi(rdi, rdi);
1772 // Store the smi value in the last match info.
  __ movp(FieldOperand(rbx,
                       rdx,
                       times_pointer_size,
                       RegExpImpl::kFirstCaptureOffset),
          rdi);
1778 __ jmp(&next_capture);
  __ bind(&done);

  // Return last match info.
  __ movp(rax, r15);
1783 __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
1785 __ bind(&exception);
  // Result must now be exception. If there is no pending exception, a
  // stack overflow (on the backtrack stack) was detected in RegExp code, but
  // the exception has not been created yet. Handle that in the runtime system.
1789 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1790 ExternalReference pending_exception_address(
1791 Isolate::kPendingExceptionAddress, isolate);
1792 Operand pending_exception_operand =
1793 masm->ExternalOperand(pending_exception_address, rbx);
1794 __ movp(rax, pending_exception_operand);
1795 __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
  __ cmpq(rax, rdx);
  __ j(equal, &runtime);
1798 __ movp(pending_exception_operand, rdx);
1800 __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
1801 Label termination_exception;
1802 __ j(equal, &termination_exception, Label::kNear);
  __ Throw(rax);

  __ bind(&termination_exception);
1806 __ ThrowUncatchable(rax);
  // Do the runtime call to execute the regexp.
  __ bind(&runtime);
1810 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
1812 // Deferred code for string handling.
1813 // (7) Not a long external string? If yes, go to (10).
  __ bind(&not_seq_nor_cons);
1815 // Compare flags are still set from (3).
  __ j(greater, &not_long_external, Label::kNear);  // Go to (10).
1818 // (8) External string. Short external strings have been ruled out.
1819 __ bind(&external_string);
1820 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
1821 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
1822 if (FLAG_debug_code) {
1823 // Assert that we do not have a cons or slice (indirect strings) here.
1824 // Sequential strings have already been ruled out.
1825 __ testb(rbx, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
1828 __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
1829 // Move the pointer so that offset-wise, it looks like a sequential string.
1830 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1831 __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
1832 STATIC_ASSERT(kTwoByteStringTag == 0);
1833 // (8a) Is the external string one byte? If yes, go to (6).
1834 __ testb(rbx, Immediate(kStringEncodingMask));
  __ j(not_zero, &seq_one_byte_string);  // Go to (6).
1837 // rdi: subject string (flat two-byte)
1838 // rax: RegExp data (FixedArray)
  // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
1840 __ bind(&seq_two_byte_string);
1841 __ movp(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
1842 __ Set(rcx, 0); // Type is two byte.
1843 __ jmp(&check_code); // Go to (E).
1845 // (10) Not a string or a short external string? If yes, bail out to runtime.
  __ bind(&not_long_external);
1847 // Catch non-string subject or short external string.
  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
1849 __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
1850 __ j(not_zero, &runtime);
1852 // (11) Sliced string. Replace subject with parent. Go to (5a).
1853 // Load offset into r14 and replace subject string with parent.
1854 __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
1855 __ movp(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
1856 __ jmp(&check_underlying);
1857 #endif // V8_INTERPRETED_REGEXP
1861 static int NegativeComparisonResult(Condition cc) {
1862 ASSERT(cc != equal);
1863 ASSERT((cc == less) || (cc == less_equal)
1864 || (cc == greater) || (cc == greater_equal));
1865 return (cc == greater || cc == greater_equal) ? LESS : GREATER;
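  // For example, NegativeComparisonResult(less) == GREATER: materializing
  // GREATER in the result register makes a "less" comparison come out false,
  // which is the required outcome for unordered operands such as undefined
  // or NaN.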
static void CheckInputType(MacroAssembler* masm,
                           Register input,
                           CompareIC::State expected,
                           Label* fail) {
  Label ok;
1874 if (expected == CompareIC::SMI) {
1875 __ JumpIfNotSmi(input, fail);
1876 } else if (expected == CompareIC::NUMBER) {
1877 __ JumpIfSmi(input, &ok);
1878 __ CompareMap(input, masm->isolate()->factory()->heap_number_map());
    __ j(not_equal, fail);
  }
1881 // We could be strict about internalized/non-internalized here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}
static void BranchIfNotInternalizedString(MacroAssembler* masm,
                                          Label* label,
                                          Register object,
                                          Register scratch) {
1891 __ JumpIfSmi(object, label);
1892 __ movp(scratch, FieldOperand(object, HeapObject::kMapOffset));
  __ movzxbl(scratch,
             FieldOperand(scratch, Map::kInstanceTypeOffset));
1895 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
1896 __ testb(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  __ j(not_zero, label);
}
1901 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
1902 Label check_unequal_objects, done;
1903 Condition cc = GetCondition();
1904 Factory* factory = masm->isolate()->factory();
  Label miss;
  CheckInputType(masm, rdx, left_, &miss);
1908 CheckInputType(masm, rax, right_, &miss);
1910 // Compare two smis.
1911 Label non_smi, smi_done;
1912 __ JumpIfNotBothSmi(rax, rdx, &non_smi);
  __ subq(rdx, rax);  // Return on the result of the subtraction.
  __ j(no_overflow, &smi_done);
  __ not_(rdx);  // Correct sign in case of overflow. rdx cannot be 0 here.
  __ bind(&smi_done);
  __ movp(rax, rdx);
  __ ret(0);
  __ bind(&non_smi);
1921 // The compare stub returns a positive, negative, or zero 64-bit integer
1922 // value in rax, corresponding to result of comparing the two inputs.
1923 // NOTICE! This code is only reached after a smi-fast-case check, so
1924 // it is certain that at least one operand isn't a smi.
1926 // Two identical objects are equal unless they are both NaN or undefined.
1928 Label not_identical;
  __ cmpq(rax, rdx);
  __ j(not_equal, &not_identical, Label::kNear);
  if (cc != equal) {
    // Check for undefined. undefined OP undefined is false even though
1934 // undefined == undefined.
1935 Label check_for_nan;
1936 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
1937 __ j(not_equal, &check_for_nan, Label::kNear);
    __ Set(rax, NegativeComparisonResult(cc));
    __ ret(0);
    __ bind(&check_for_nan);
  }
1943 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
1944 // so we do the second best thing - test it ourselves.
  Label heap_number;
  // If it's not a heap number, then return equal for (in)equality operator.
1947 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
1948 factory->heap_number_map());
1949 __ j(equal, &heap_number, Label::kNear);
  if (cc != equal) {
    // Call runtime on identical objects. Otherwise return equal.
1952 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
    __ j(above_equal, &not_identical, Label::kNear);
  }
  __ Set(rax, EQUAL);
  __ ret(0);

  __ bind(&heap_number);
1959 // It is a heap number, so return equal if it's not NaN.
1960 // For NaN, return 1 for every condition except greater and
1961 // greater-equal. Return -1 for them, so the comparison yields
1962 // false for all conditions except not-equal.
  __ Set(rax, EQUAL);
  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1965 __ ucomisd(xmm0, xmm0);
1966 __ setcc(parity_even, rax);
1967 // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
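  // (ucomisd of a NaN against itself is unordered and sets the parity flag,
  // so setcc(parity_even) yields 1 exactly when the value is NaN.)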
  if (cc == greater_equal || cc == greater) {
    __ neg(rax);
  }
  __ ret(0);
  __ bind(&not_identical);
1976 if (cc == equal) { // Both strict and non-strict.
1977 Label slow; // Fallthrough label.
1979 // If we're doing a strict equality comparison, we don't have to do
1980 // type conversion, so we generate code to do fast comparison for objects
    // and oddballs. Non-smi numbers and strings still go through the usual
    // slow-path code.
1984 // If either is a Smi (we know that not both are), then they can only
1985 // be equal if the other is a HeapNumber. If so, use the slow case.
    Label not_smis;
    __ SelectNonSmi(rbx, rax, rdx, &not_smis);
1990 // Check if the non-smi operand is a heap number.
1991 __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
1992 factory->heap_number_map());
    // If heap number, handle it in the slow case.
    __ j(equal, &slow);
    // Return non-equal. ebx (the lower half of rbx) is not zero.
    __ movq(rax, rbx);
    __ ret(0);

    __ bind(&not_smis);
2002 // If either operand is a JSObject or an oddball value, then they are not
2003 // equal since their pointers are different
2004 // There is no test for undetectability in strict equality.
2006 // If the first object is a JS object, we have done pointer comparison.
2007 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
2008 Label first_non_object;
2009 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
2010 __ j(below, &first_non_object, Label::kNear);
  // Return non-zero (eax (not rax) is not zero).
2012 Label return_not_equal;
2013 STATIC_ASSERT(kHeapObjectTag != 0);
  __ bind(&return_not_equal);
  __ ret(0);
2017 __ bind(&first_non_object);
2018 // Check for oddballs: true, false, null, undefined.
2019 __ CmpInstanceType(rcx, ODDBALL_TYPE);
2020 __ j(equal, &return_not_equal);
2022 __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
2023 __ j(above_equal, &return_not_equal);
2025 // Check for oddballs: true, false, null, undefined.
2026 __ CmpInstanceType(rcx, ODDBALL_TYPE);
2027 __ j(equal, &return_not_equal);
    // Fall through to the general case.
    __ bind(&slow);
  }
2034 // Generate the number comparison code.
  Label non_number_comparison;
  Label unordered;
2037 FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
  __ xorl(rax, rax);
  __ xorl(rcx, rcx);
  __ ucomisd(xmm0, xmm1);
2042 // Don't base result on EFLAGS when a NaN is involved.
2043 __ j(parity_even, &unordered, Label::kNear);
2044 // Return a result of -1, 0, or 1, based on EFLAGS.
2045 __ setcc(above, rax);
  __ setcc(below, rcx);
  __ subq(rax, rcx);
  __ ret(0);
2050 // If one of the numbers was NaN, then the result is always false.
2051 // The cc is never not-equal.
2052 __ bind(&unordered);
2053 ASSERT(cc != not_equal);
  if (cc == less || cc == less_equal) {
    __ Set(rax, 1);
  } else {
    __ Set(rax, -1);
  }
  __ ret(0);
2061 // The number comparison code did not provide a valid result.
2062 __ bind(&non_number_comparison);
2064 // Fast negative check for internalized-to-internalized equality.
2065 Label check_for_strings;
2067 BranchIfNotInternalizedString(
2068 masm, &check_for_strings, rax, kScratchRegister);
2069 BranchIfNotInternalizedString(
2070 masm, &check_for_strings, rdx, kScratchRegister);
2072 // We've already checked for object identity, so if both operands are
  // internalized strings they aren't equal. Register eax (not rax) already
  // holds a non-zero value, which indicates not equal, so just return.
  __ ret(0);
2078 __ bind(&check_for_strings);
2080 __ JumpIfNotBothSequentialAsciiStrings(
2081 rdx, rax, rcx, rbx, &check_unequal_objects);
  // Inline comparison of ASCII strings.
  if (cc == equal) {
    StringCompareStub::GenerateFlatAsciiStringEquals(masm, rdx, rax, rcx, rbx);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx,
                                                       rbx, rdi, r8);
  }
2101 __ Abort(kUnexpectedFallThroughFromStringComparison);
2104 __ bind(&check_unequal_objects);
2105 if (cc == equal && !strict()) {
2106 // Not strict equality. Objects are unequal if
2107 // they are both JSObjects and not undetectable,
2108 // and their pointers are different.
2109 Label not_both_objects, return_unequal;
2110 // At most one is a smi, so we can test for smi by adding the two.
2111 // A smi plus a heap object has the low bit set, a heap object plus
2112 // a heap object has the low bit clear.
2113 STATIC_ASSERT(kSmiTag == 0);
2114 STATIC_ASSERT(kSmiTagMask == 1);
2115 __ lea(rcx, Operand(rax, rdx, times_1, 0));
2116 __ testb(rcx, Immediate(kSmiTagMask));
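    // Worked example: the smi tag is 0 and the heap-object tag is 1, so
    // smi + heap object gives an odd sum (low bit set) while heap object +
    // heap object gives an even sum; only the latter falls through here.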
    __ j(not_zero, &not_both_objects, Label::kNear);
2118 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
    __ j(below, &not_both_objects, Label::kNear);
2120 __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
    __ j(below, &not_both_objects, Label::kNear);
2122 __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
2123 Immediate(1 << Map::kIsUndetectable));
2124 __ j(zero, &return_unequal, Label::kNear);
2125 __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
2126 Immediate(1 << Map::kIsUndetectable));
2127 __ j(zero, &return_unequal, Label::kNear);
2128 // The objects are both undetectable, so they both compare as the value
2129 // undefined, and are equal.
    __ Set(rax, EQUAL);
    __ bind(&return_unequal);
2132 // Return non-equal by returning the non-zero object pointer in rax,
    // or return equal if we fell through to here.
    __ ret(0);
    __ bind(&not_both_objects);
  }
2138 // Push arguments below the return address to prepare jump to builtin.
  __ PopReturnAddressTo(rcx);
  __ push(rdx);
  __ push(rax);
2143 // Figure out which native to call and setup the arguments.
  Builtins::JavaScript builtin;
  if (cc == equal) {
    builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    builtin = Builtins::COMPARE;
    __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
  }
2152 __ PushReturnAddressFrom(rcx);
2154 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
2155 // tagged as a small integer.
  __ InvokeBuiltin(builtin, JUMP_FUNCTION);

  __ bind(&miss);
  GenerateMiss(masm);
}
2163 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2164 // Cache the called function in a global property cell. Cache states
  // are uninitialized, monomorphic (indicated by a JSFunction), and
  // megamorphic.
2167 // rax : number of arguments to the construct function
2168 // rbx : cache cell for call target
2169 // rdi : the function to call
2170 Isolate* isolate = masm->isolate();
2171 Label initialize, done, miss, megamorphic, not_array_function;
2173 // Load the cache state into rcx.
2174 __ movp(rcx, FieldOperand(rbx, Cell::kValueOffset));
2176 // A monomorphic cache hit or an already megamorphic state: invoke the
2177 // function without changing the state.
  __ cmpq(rcx, rdi);
  __ j(equal, &done);
  __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
  __ j(equal, &done);
2183 // If we came here, we need to see if we are the array function.
2184 // If we didn't have a matching function, and we didn't find the megamorph
2185 // sentinel, then we have in the cell either some other function or an
2186 // AllocationSite. Do a map check on the object in rcx.
2187 Handle<Map> allocation_site_map =
2188 masm->isolate()->factory()->allocation_site_map();
2189 __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
2190 __ j(not_equal, &miss);
2192 // Make sure the function is the Array() function
2193 __ LoadArrayFunction(rcx);
  __ cmpq(rdi, rcx);
  __ j(not_equal, &megamorphic);
  __ jmp(&done);

  __ bind(&miss);
  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
  // megamorphic.
2202 __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
2203 __ j(equal, &initialize);
2204 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2205 // write-barrier is needed.
2206 __ bind(&megamorphic);
2207 __ Move(FieldOperand(rbx, Cell::kValueOffset),
          TypeFeedbackCells::MegamorphicSentinel(isolate));
  __ jmp(&done);
2211 // An uninitialized cache is patched with the function or sentinel to
2212 // indicate the ElementsKind if function is the Array constructor.
2213 __ bind(&initialize);
2214 // Make sure the function is the Array() function
  __ LoadArrayFunction(rcx);
  __ cmpq(rdi, rcx);
  __ j(not_equal, &not_array_function);
2219 // The target function is the Array constructor,
2220 // Create an AllocationSite if we don't already have it, store it in the cell
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
2224 // Arguments register must be smi-tagged to call out.
    __ Integer32ToSmi(rax, rax);
    __ push(rax);
    __ push(rdi);
    __ push(rbx);
2230 CreateAllocationSiteStub create_stub;
2231 __ CallStub(&create_stub);
    __ pop(rbx);
    __ pop(rdi);
    __ pop(rax);
    __ SmiToInteger32(rax, rax);
  }
  __ jmp(&done);
  __ bind(&not_array_function);
2241 __ movp(FieldOperand(rbx, Cell::kValueOffset), rdi);
  // No need for a write barrier here - cells are rescanned.

  __ bind(&done);
}
2248 void CallFunctionStub::Generate(MacroAssembler* masm) {
2249 // rbx : cache cell for call target
2250 // rdi : the function to call
2251 Isolate* isolate = masm->isolate();
2252 Label slow, non_function, wrap, cont;
2253 StackArgumentsAccessor args(rsp, argc_);
2255 if (NeedsChecks()) {
2256 // Check that the function really is a JavaScript function.
2257 __ JumpIfSmi(rdi, &non_function);
    // Go to the slow case if we do not have a function.
2260 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
2261 __ j(not_equal, &slow);
2263 if (RecordCallTarget()) {
      GenerateRecordCallTarget(masm);
    }
  }
2268 // Fast-case: Just invoke the function.
2269 ParameterCount actual(argc_);
2271 if (CallAsMethod()) {
2272 if (NeedsChecks()) {
2273 // Do not transform the receiver for strict mode functions.
2274 __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
2275 __ testb(FieldOperand(rcx, SharedFunctionInfo::kStrictModeByteOffset),
2276 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
2277 __ j(not_equal, &cont);
2279 // Do not transform the receiver for natives.
2280 // SharedFunctionInfo is already loaded into rcx.
2281 __ testb(FieldOperand(rcx, SharedFunctionInfo::kNativeByteOffset),
2282 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
      __ j(not_equal, &cont);
    }
2286 // Load the receiver from the stack.
2287 __ movp(rax, args.GetReceiverOperand());
2289 if (NeedsChecks()) {
2290 __ JumpIfSmi(rax, &wrap);
      __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
      __ j(below, &wrap);
    } else {
      __ JumpIfSmi(rax, &wrap);
    }

    __ bind(&cont);
  }
2300 __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
2302 if (NeedsChecks()) {
    // Slow-case: Non-function called.
    __ bind(&slow);
2305 if (RecordCallTarget()) {
2306 // If there is a call target cache, mark it megamorphic in the
2307 // non-function case. MegamorphicSentinel is an immortal immovable
2308 // object (undefined) so no write barrier is needed.
2309 __ Move(FieldOperand(rbx, Cell::kValueOffset),
              TypeFeedbackCells::MegamorphicSentinel(isolate));
    }
2312 // Check for function proxy.
2313 __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
2314 __ j(not_equal, &non_function);
2315 __ PopReturnAddressTo(rcx);
2316 __ push(rdi); // put proxy as additional argument under return address
2317 __ PushReturnAddressFrom(rcx);
    __ Set(rax, argc_ + 1);
    __ Set(rbx, 0);
2320 __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
2322 Handle<Code> adaptor =
2323 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2324 __ jmp(adaptor, RelocInfo::CODE_TARGET);
2327 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2328 // of the original receiver from the call site).
2329 __ bind(&non_function);
  __ movp(args.GetReceiverOperand(), rdi);
  __ Set(rax, argc_);
  __ Set(rbx, 0);
2333 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
2334 Handle<Code> adaptor =
2335 isolate->builtins()->ArgumentsAdaptorTrampoline();
2336 __ Jump(adaptor, RelocInfo::CODE_TARGET);
  if (CallAsMethod()) {
    __ bind(&wrap);
2341 // Wrap the receiver and patch it back onto the stack.
2342 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
      __ push(rdi);
      __ push(rax);
      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
      __ pop(rdi);
    }
    __ movp(args.GetReceiverOperand(), rax);
    __ jmp(&cont);
  }
}
2354 void CallConstructStub::Generate(MacroAssembler* masm) {
2355 // rax : number of arguments
2356 // rbx : cache cell for call target
2357 // rdi : constructor function
2358 Label slow, non_function_call;
2360 // Check that function is not a smi.
2361 __ JumpIfSmi(rdi, &non_function_call);
2362 // Check that function is a JSFunction.
2363 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
2364 __ j(not_equal, &slow);
2366 if (RecordCallTarget()) {
    GenerateRecordCallTarget(masm);
  }
2370 // Jump to the function-specific construct stub.
2371 Register jmp_reg = rcx;
2372 __ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
2373 __ movp(jmp_reg, FieldOperand(jmp_reg,
2374 SharedFunctionInfo::kConstructStubOffset));
  __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
  __ jmp(jmp_reg);
2378 // rdi: called object
2379 // rax: number of arguments
  // rcx: object map
  Label do_call;
  __ bind(&slow);
  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
2384 __ j(not_equal, &non_function_call);
  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
  __ jmp(&do_call);
2388 __ bind(&non_function_call);
  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
  __ bind(&do_call);
  // Set expected number of arguments to zero (not changing rax).
  __ Set(rbx, 0);
2393 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET);
}
bool CEntryStub::NeedsImmovableCode() {
  return false;
}
2403 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
2404 CEntryStub::GenerateAheadOfTime(isolate);
2405 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
2406 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
2407 // It is important that the store buffer overflow stubs are generated first.
2408 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
2409 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
2410 BinaryOpICStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
void CodeStub::GenerateFPStubs(Isolate* isolate) {
}
2419 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
2420 CEntryStub stub(1, kDontSaveFPRegs);
2421 stub.GetCode(isolate);
2422 CEntryStub save_doubles(1, kSaveFPRegs);
  save_doubles.GetCode(isolate);
}
static void JumpIfOOM(MacroAssembler* masm,
                      Register value,
                      Register scratch,
                      Label* oom_label) {
2431 __ movp(scratch, value);
2432 STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
2433 STATIC_ASSERT(kFailureTag == 3);
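  // Both the failure tag (bits 0..1) and the OUT_OF_MEMORY_EXCEPTION type
  // (bits 2..3) are 0b11 per the asserts above, so an out-of-memory failure
  // is exactly a value whose low nibble is 0xf, which is what is tested below.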
2434 __ and_(scratch, Immediate(0xf));
2435 __ cmpq(scratch, Immediate(0xf));
  __ j(equal, oom_label);
}
2440 void CEntryStub::GenerateCore(MacroAssembler* masm,
2441 Label* throw_normal_exception,
2442 Label* throw_termination_exception,
2443 Label* throw_out_of_memory_exception,
                              bool do_gc,
                              bool always_allocate_scope) {
2446 // rax: result parameter for PerformGC, if any.
2447 // rbx: pointer to C function (C callee-saved).
2448 // rbp: frame pointer (restored after C call).
2449 // rsp: stack pointer (restored after C call).
2450 // r14: number of arguments including receiver (C callee-saved).
2451 // r15: pointer to the first argument (C callee-saved).
2452 // This pointer is reused in LeaveExitFrame(), so it is stored in a
2453 // callee-saved register.
2455 // Simple results returned in rax (both AMD64 and Win64 calling conventions).
2456 // Complex results must be written to address passed as first argument.
2457 // AMD64 calling convention: a struct of two pointers in rax+rdx
2459 // Check stack alignment.
2460 if (FLAG_debug_code) {
    __ CheckStackAlignment();
  }
  if (do_gc) {
    // Pass failure code returned from last attempt as first argument to
2466 // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
2467 // stack is known to be aligned. This function takes one argument which is
2468 // passed in register.
2469 __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
2470 __ movp(arg_reg_1, rax);
2471 __ Move(kScratchRegister,
2472 ExternalReference::perform_gc_function(masm->isolate()));
    __ call(kScratchRegister);
  }
2476 ExternalReference scope_depth =
2477 ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
2478 if (always_allocate_scope) {
2479 Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
    __ incl(scope_depth_operand);
  }
#ifdef _WIN64
  // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
2486 // Pass argv and argc as two parameters. The arguments object will
2487 // be created by stubs declared by DECLARE_RUNTIME_FUNCTION().
2488 if (result_size_ < 2) {
2489 // Pass a pointer to the Arguments object as the first argument.
2490 // Return result in single register (rax).
2491 __ movp(rcx, r14); // argc.
2492 __ movp(rdx, r15); // argv.
2493 __ Move(r8, ExternalReference::isolate_address(masm->isolate()));
  } else {
    ASSERT_EQ(2, result_size_);
2496 // Pass a pointer to the result location as the first argument.
2497 __ lea(rcx, StackSpaceOperand(2));
2498 // Pass a pointer to the Arguments object as the second argument.
2499 __ movp(rdx, r14); // argc.
2500 __ movp(r8, r15); // argv.
    __ Move(r9, ExternalReference::isolate_address(masm->isolate()));
  }
#else  // _WIN64
  // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
2506 __ movp(rdi, r14); // argc.
2507 __ movp(rsi, r15); // argv.
  __ Move(rdx, ExternalReference::isolate_address(masm->isolate()));
#endif
  __ call(rbx);
2511 // Result is in rax - do not destroy this register!
2513 if (always_allocate_scope) {
2514 Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
    __ decl(scope_depth_operand);
  }
2518 // Check for failure result.
2519 Label failure_returned;
2520 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
#ifdef _WIN64
  // If return value is on the stack, pop it to registers.
2523 if (result_size_ > 1) {
2524 ASSERT_EQ(2, result_size_);
2525 // Read result values stored on stack. Result is stored
2526 // above the four argument mirror slots and the two
2527 // Arguments object slots.
2528 __ movq(rax, Operand(rsp, 6 * kRegisterSize));
    __ movq(rdx, Operand(rsp, 7 * kRegisterSize));
  }
#endif
2532 __ lea(rcx, Operand(rax, 1));
2533 // Lower 2 bits of rcx are 0 iff rax has failure tag.
2534 __ testl(rcx, Immediate(kFailureTagMask));
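  // Example: a failure value has low bits 0b11 (kFailureTag == 3); adding 1
  // clears them to 0b00, so the test is zero only for failure values.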
2535 __ j(zero, &failure_returned);
2537 // Exit the JavaScript to C++ exit frame.
2538 __ LeaveExitFrame(save_doubles_);
2541 // Handling of failure.
2542 __ bind(&failure_returned);
  Label retry;
  // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
2546 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
2547 __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
2548 __ j(zero, &retry, Label::kNear);
2550 // Special handling of out of memory exceptions.
2551 JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
2553 // Retrieve the pending exception.
2554 ExternalReference pending_exception_address(
2555 Isolate::kPendingExceptionAddress, masm->isolate());
2556 Operand pending_exception_operand =
2557 masm->ExternalOperand(pending_exception_address);
2558 __ movp(rax, pending_exception_operand);
2560 // See if we just retrieved an OOM exception.
2561 JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
2563 // Clear the pending exception.
2564 pending_exception_operand =
2565 masm->ExternalOperand(pending_exception_address);
2566 __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
2567 __ movp(pending_exception_operand, rdx);
2569 // Special handling of termination exceptions which are uncatchable
2570 // by javascript code.
2571 __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
2572 __ j(equal, throw_termination_exception);
2574 // Handle normal exception.
  __ jmp(throw_normal_exception);

  __ bind(&retry);
}
2582 void CEntryStub::Generate(MacroAssembler* masm) {
2583 // rax: number of arguments including receiver
2584 // rbx: pointer to C function (C callee-saved)
2585 // rbp: frame pointer of calling JS frame (restored after C call)
2586 // rsp: stack pointer (restored after C call)
2587 // rsi: current context (restored)
2589 // NOTE: Invocations of builtins may return failure objects
2590 // instead of a proper result. The builtin entry handles
  // this by performing a garbage collection and retrying the
  // builtin once.
2594 ProfileEntryHookStub::MaybeCallEntryHook(masm);
2596 // Enter the exit frame that transitions from JavaScript to C++.
#ifdef _WIN64
  int arg_stack_space = (result_size_ < 2 ? 2 : 4);
#else
  int arg_stack_space = 0;
#endif
2602 __ EnterExitFrame(arg_stack_space, save_doubles_);
2604 // rax: Holds the context at this point, but should not be used.
2605 // On entry to code generated by GenerateCore, it must hold
2606 // a failure result if the collect_garbage argument to GenerateCore
2607 // is true. This failure result can be the result of code
2608 // generated by a previous call to GenerateCore. The value
2609 // of rax is then passed to Runtime::PerformGC.
2610 // rbx: pointer to builtin function (C callee-saved).
2611 // rbp: frame pointer of exit frame (restored after C call).
2612 // rsp: stack pointer (restored after C call).
2613 // r14: number of arguments including receiver (C callee-saved).
2614 // r15: argv pointer (C callee-saved).
2616 Label throw_normal_exception;
2617 Label throw_termination_exception;
2618 Label throw_out_of_memory_exception;
2620 // Call into the runtime system.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               false,
               false);
2628 // Do space-specific GC and retry runtime call.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               false);
2636 // Do full GC and retry runtime call one final time.
2637 Failure* failure = Failure::InternalError();
2638 __ Move(rax, failure, Assembler::RelocInfoNone());
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               true);
2646 __ bind(&throw_out_of_memory_exception);
2647 // Set external caught exception to false.
2648 Isolate* isolate = masm->isolate();
  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
                                    isolate);
2651 __ Set(rax, static_cast<int64_t>(false));
2652 __ Store(external_caught, rax);
2654 // Set pending exception and rax to out of memory exception.
  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                      isolate);
2657 Label already_have_failure;
2658 JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure);
2659 __ Move(rax, Failure::OutOfMemoryException(0x1), Assembler::RelocInfoNone());
2660 __ bind(&already_have_failure);
2661 __ Store(pending_exception, rax);
2662 // Fall through to the next label.
2664 __ bind(&throw_termination_exception);
2665 __ ThrowUncatchable(rax);
  __ bind(&throw_normal_exception);
  __ Throw(rax);
}
2672 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
2673 Label invoke, handler_entry, exit;
2674 Label not_outermost_js, not_outermost_js_2;
2676 ProfileEntryHookStub::MaybeCallEntryHook(masm);
2678 { // NOLINT. Scope block confuses linter.
2679 MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
2684 // Push the stack frame type marker twice.
2685 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
2686 // Scratch register is neither callee-save, nor an argument register on any
2687 // platform. It's free to use at this point.
2688 // Cannot use smi-register for loading yet.
2689 __ Move(kScratchRegister, Smi::FromInt(marker), Assembler::RelocInfoNone());
2690 __ push(kScratchRegister); // context slot
2691 __ push(kScratchRegister); // function slot
    // Save callee-saved registers (X64/Win64 calling conventions).
    __ push(r12);
    __ push(r13);
    __ push(r14);
    __ push(r15);
#ifdef _WIN64
    __ push(rdi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
    __ push(rsi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
    __ push(rbx);
#ifdef _WIN64
    // On Win64 XMM6-XMM15 are callee-save.
2705 __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
2706 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
2707 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
2708 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
2709 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3), xmm9);
2710 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4), xmm10);
2711 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5), xmm11);
2712 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6), xmm12);
2713 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7), xmm13);
2714 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8), xmm14);
    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9), xmm15);
#endif
2718 // Set up the roots and smi constant registers.
2719 // Needs to be done before any further smi loads.
2720 __ InitializeSmiConstantRegister();
2721 __ InitializeRootRegister();
2724 Isolate* isolate = masm->isolate();
2726 // Save copies of the top frame descriptor on the stack.
2727 ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
2729 Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
2730 __ push(c_entry_fp_operand);
2733 // If this is the outermost JS call, set js_entry_sp value.
2734 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
  __ Load(rax, js_entry_sp);
  __ testq(rax, rax);
  __ j(not_zero, &not_outermost_js);
2738 __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ movp(rax, rbp);
  __ Store(js_entry_sp, rax);
  Label cont;
  __ jmp(&cont);
  __ bind(&not_outermost_js);
  __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
  __ bind(&cont);
2747 // Jump to a faked try block that does the invoke, with a faked catch
2748 // block that sets the pending exception.
  __ jmp(&invoke);
  __ bind(&handler_entry);
2751 handler_offset_ = handler_entry.pos();
2752 // Caught exception: Store result (exception) in the pending exception
2753 // field in the JSEnv and return a failure sentinel.
2754 ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
2756 __ Store(pending_exception, rax);
  __ Move(rax, Failure::Exception(), Assembler::RelocInfoNone());
  __ ret(0);
2760 // Invoke: Link this frame into the handler chain. There's only one
2761 // handler block in this code object, so its index is 0.
  __ bind(&invoke);
  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
2765 // Clear any pending exceptions.
2766 __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
2767 __ Store(pending_exception, rax);
2769 // Fake a receiver (NULL).
2770 __ push(Immediate(0)); // receiver
2772 // Invoke the function by calling through JS entry trampoline builtin and
2773 // pop the faked function when we return. We load the address from an
2774 // external reference instead of inlining the call target address directly
2775 // in the code, because the builtin stubs may not have been generated yet
2776 // at the time this code is generated.
2778 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
2780 __ Load(rax, construct_entry);
2782 ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
2783 __ Load(rax, entry);
2785 __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
2786 __ call(kScratchRegister);
  // Unlink this frame from the handler chain.
  __ PopTryHandler();

  __ bind(&exit);
2792 // Check if the current stack frame is marked as the outermost JS frame.
  __ pop(rbx);
  __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ j(not_equal, &not_outermost_js_2);
2796 __ Move(kScratchRegister, js_entry_sp);
2797 __ movp(Operand(kScratchRegister, 0), Immediate(0));
  __ bind(&not_outermost_js_2);
2800 // Restore the top frame descriptor from the stack.
2801 { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
2802 __ pop(c_entry_fp_operand);
2805 // Restore callee-saved registers (X64 conventions).
#ifdef _WIN64
  // On Win64 XMM6-XMM15 are callee-save.
2808 __ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0));
2809 __ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1));
2810 __ movdqu(xmm8, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2));
2811 __ movdqu(xmm9, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3));
2812 __ movdqu(xmm10, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4));
2813 __ movdqu(xmm11, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5));
2814 __ movdqu(xmm12, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6));
2815 __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
2816 __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
2817 __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
  __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
#endif
  __ pop(rbx);
#ifdef _WIN64
  // Callee save in Win64 ABI, arguments/volatile in AMD64 ABI.
  __ pop(rsi);
  __ pop(rdi);
#endif
  __ pop(r15);
  __ pop(r14);
  __ pop(r13);
  __ pop(r12);
2831 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
  // Restore frame pointer and return.
  __ pop(rbp);
  __ ret(0);
}
2839 void InstanceofStub::Generate(MacroAssembler* masm) {
2840 // Implements "value instanceof function" operator.
2841 // Expected input state with no inline cache:
2842 // rsp[0] : return address
2843 // rsp[8] : function pointer
2845 // Expected input state with an inline one-element cache:
2846 // rsp[0] : return address
2847 // rsp[8] : offset from return address to location of inline cache
2848 // rsp[16] : function pointer
2850 // Returns a bitwise zero to indicate that the value
  // is an instance of the function and anything else to
2852 // indicate that the value is not an instance.
2854 static const int kOffsetToMapCheckValue = 2;
2855 static const int kOffsetToResultValue = 18;
2856 // The last 4 bytes of the instruction sequence
2857 // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
2858 // Move(kScratchRegister, Factory::the_hole_value())
2859 // in front of the hole value address.
2860 static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
2861 // The last 4 bytes of the instruction sequence
2862 // __ j(not_equal, &cache_miss);
2863 // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2864 // before the offset of the hole value in the root array.
2865 static const unsigned int kWordBeforeResultValue = 0x458B4906;
2866 // Only the inline check flag is supported on X64.
2867 ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
2868 int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
2870 // Get the object - go slow case if it's a smi.
  Label slow;
  StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
2873 ARGUMENTS_DONT_CONTAIN_RECEIVER);
2874 __ movp(rax, args.GetArgumentOperand(0));
2875 __ JumpIfSmi(rax, &slow);
2877 // Check that the left hand is a JS object. Leave its map in rax.
2878 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
  __ j(below, &slow);
  __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
  __ j(above, &slow);
2883 // Get the prototype of the function.
2884 __ movp(rdx, args.GetArgumentOperand(1));
2885 // rdx is function, rax is map.
2887 // If there is a call site cache don't look in the global cache, but do the
2888 // real lookup and update the call site cache.
2889 if (!HasCallSiteInlineCheck()) {
2890 // Look up the function and the map in the instanceof cache.
    Label miss;
    __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
2893 __ j(not_equal, &miss, Label::kNear);
2894 __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
2895 __ j(not_equal, &miss, Label::kNear);
2896 __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
    __ ret(2 * kPointerSize);
    __ bind(&miss);
  }
2901 __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
2903 // Check that the function prototype is a JS object.
2904 __ JumpIfSmi(rbx, &slow);
2905 __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
  __ j(below, &slow);
  __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
  __ j(above, &slow);
2910 // Register mapping:
  // rax is object map.
  // rdx is function.
2913 // rbx is function prototype.
2914 if (!HasCallSiteInlineCheck()) {
2915 __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
2916 __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
  } else {
    // Get return address and delta to inlined map check.
2919 __ movq(kScratchRegister, StackOperandForReturnAddress(0));
2920 __ subq(kScratchRegister, args.GetArgumentOperand(2));
2921 if (FLAG_debug_code) {
2922 __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
2923 __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
    }
2926 __ movp(kScratchRegister,
2927 Operand(kScratchRegister, kOffsetToMapCheckValue));
    __ movp(Operand(kScratchRegister, 0), rax);
  }
2931 __ movp(rcx, FieldOperand(rax, Map::kPrototypeOffset));
2933 // Loop through the prototype chain looking for the function prototype.
2934 Label loop, is_instance, is_not_instance;
2935 __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
  __ bind(&loop);
  __ cmpq(rcx, rbx);
  __ j(equal, &is_instance, Label::kNear);
2939 __ cmpq(rcx, kScratchRegister);
2940 // The code at is_not_instance assumes that kScratchRegister contains a
2941 // non-zero GCable value (the null object in this case).
2942 __ j(equal, &is_not_instance, Label::kNear);
2943 __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
  __ movp(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
  __ jmp(&loop);
2947 __ bind(&is_instance);
2948 if (!HasCallSiteInlineCheck()) {
    __ xorl(rax, rax);
    // Store bitwise zero in the cache. This is a Smi in GC terms.
2951 STATIC_ASSERT(kSmiTag == 0);
2952 __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
  } else {
    // Store offset of true in the root array at the inline check site.
2955 int true_offset = 0x100 +
2956 (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
2957 // Assert it is a 1-byte signed value.
2958 ASSERT(true_offset >= 0 && true_offset < 0x100);
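    // Only the low byte of rax is written by the movb below, so the 0x100
    // bias does not change the patched byte; it merely keeps the value in
    // the asserted range when the root-relative offset is negative.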
2959 __ movl(rax, Immediate(true_offset));
2960 __ movq(kScratchRegister, StackOperandForReturnAddress(0));
2961 __ subq(kScratchRegister, args.GetArgumentOperand(2));
2962 __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
2963 if (FLAG_debug_code) {
2964 __ movl(rax, Immediate(kWordBeforeResultValue));
2965 __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
    }
    __ Set(rax, 0);
  }
2970 __ ret((2 + extra_argument_offset) * kPointerSize);
2972 __ bind(&is_not_instance);
2973 if (!HasCallSiteInlineCheck()) {
2974 // We have to store a non-zero value in the cache.
2975 __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
  } else {
    // Store offset of false in the root array at the inline check site.
2978 int false_offset = 0x100 +
2979 (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
2980 // Assert it is a 1-byte signed value.
2981 ASSERT(false_offset >= 0 && false_offset < 0x100);
2982 __ movl(rax, Immediate(false_offset));
2983 __ movq(kScratchRegister, StackOperandForReturnAddress(0));
2984 __ subq(kScratchRegister, args.GetArgumentOperand(2));
2985 __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
2986 if (FLAG_debug_code) {
2987 __ movl(rax, Immediate(kWordBeforeResultValue));
2988 __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
    }
  }
2992 __ ret((2 + extra_argument_offset) * kPointerSize);
  // Slow-case: Go through the JavaScript implementation.
  __ bind(&slow);
2996 if (HasCallSiteInlineCheck()) {
2997 // Remove extra value from the stack.
2998 __ PopReturnAddressTo(rcx);
    __ pop(rax);
    __ PushReturnAddressFrom(rcx);
  }
3002 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
3006 // Passing arguments in registers is not supported.
3007 Register InstanceofStub::left() { return no_reg; }
3010 Register InstanceofStub::right() { return no_reg; }
3013 // -------------------------------------------------------------------------
3014 // StringCharCodeAtGenerator
3016 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3019 Label got_char_code;
3020 Label sliced_string;
3022 // If the receiver is a smi trigger the non-string case.
3023 __ JumpIfSmi(object_, receiver_not_string_);
3025 // Fetch the instance type of the receiver into result register.
3026 __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
3027 __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
3028 // If the receiver is not a string trigger the non-string case.
3029 __ testb(result_, Immediate(kIsNotStringMask));
3030 __ j(not_zero, receiver_not_string_);
3032 // If the index is non-smi trigger the non-smi case.
3033 __ JumpIfNotSmi(index_, &index_not_smi_);
3034 __ bind(&got_smi_index_);
3036 // Check for index out of range.
3037 __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
3038 __ j(above_equal, index_out_of_range_);
3040 __ SmiToInteger32(index_, index_);
3042 StringCharLoadGenerator::Generate(
3043 masm, object_, index_, result_, &call_runtime_);
  __ Integer32ToSmi(result_, result_);
  __ bind(&exit_);
}
3050 void StringCharCodeAtGenerator::GenerateSlow(
3051 MacroAssembler* masm,
3052 const RuntimeCallHelper& call_helper) {
3053 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3055 Factory* factory = masm->isolate()->factory();
3056 // Index is not a smi.
3057 __ bind(&index_not_smi_);
3058 // If index is a heap number, try converting it to an integer.
  __ CheckMap(index_,
              factory->heap_number_map(),
              index_not_number_,
              DONT_DO_SMI_CHECK);
3063 call_helper.BeforeCall(masm);
  __ push(object_);
  __ push(index_);  // Consumed by runtime conversion function.
3066 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3067 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3069 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3070 // NumberToSmi discards numbers that are not exact integers.
    __ CallRuntime(Runtime::kNumberToSmi, 1);
  }
3073 if (!index_.is(rax)) {
3074 // Save the conversion result before the pop instructions below
3075 // have a chance to overwrite it.
    __ movp(index_, rax);
  }
  __ pop(object_);
3079 // Reload the instance type.
3080 __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
3081 __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
3082 call_helper.AfterCall(masm);
3083 // If index is still not a smi, it must be out of range.
3084 __ JumpIfNotSmi(index_, index_out_of_range_);
3085 // Otherwise, return to the fast path.
3086 __ jmp(&got_smi_index_);
3088 // Call runtime. We get here when the receiver is a string and the
3089 // index is a number, but the code of getting the actual character
3090 // is too complex (e.g., when the string needs to be flattened).
3091 __ bind(&call_runtime_);
3092 call_helper.BeforeCall(masm);
  __ push(object_);
  __ Integer32ToSmi(index_, index_);
  __ push(index_);
3096 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
3097 if (!result_.is(rax)) {
    __ movp(result_, rax);
  }
  call_helper.AfterCall(masm);
  __ jmp(&exit_);
3103 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3107 // -------------------------------------------------------------------------
3108 // StringCharFromCodeGenerator
3110 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3111 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3112 __ JumpIfNotSmi(code_, &slow_case_);
3113 __ SmiCompare(code_, Smi::FromInt(String::kMaxOneByteCharCode));
3114 __ j(above, &slow_case_);
3116 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3117 SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
3118 __ movp(result_, FieldOperand(result_, index.reg, index.scale,
3119 FixedArray::kHeaderSize));
3120 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
  __ j(equal, &slow_case_);

  __ bind(&exit_);
}
3126 void StringCharFromCodeGenerator::GenerateSlow(
3127 MacroAssembler* masm,
3128 const RuntimeCallHelper& call_helper) {
3129 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3131 __ bind(&slow_case_);
3132 call_helper.BeforeCall(masm);
  __ push(code_);
  __ CallRuntime(Runtime::kCharFromCode, 1);
3135 if (!result_.is(rax)) {
    __ movp(result_, rax);
  }
  call_helper.AfterCall(masm);
  __ jmp(&exit_);
3141 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
                                             Register dest,
                                             Register src,
                                             Register count,
                                             bool ascii) {
3150 // Copy characters using rep movs of doublewords. Align destination on 4 byte
3151 // boundary before starting rep movs. Copy remaining characters after running
3153 // Count is positive int32, dest and src are character pointers.
3154 ASSERT(dest.is(rdi)); // rep movs destination
3155 ASSERT(src.is(rsi)); // rep movs source
3156 ASSERT(count.is(rcx)); // rep movs count
3158 // Nothing to do for zero characters.
  Label done;
  __ testl(count, count);
3161 __ j(zero, &done, Label::kNear);
3163 // Make count the number of bytes to copy.
  if (!ascii) {
    STATIC_ASSERT(2 == sizeof(uc16));
    __ addl(count, count);
  }
  // Don't enter the rep movs if there are fewer than kPointerSize bytes to
  // copy.
  Label last_bytes;
  __ testl(count, Immediate(~(kPointerSize - 1)));
3172 __ j(zero, &last_bytes, Label::kNear);
3174 // Copy from edi to esi using rep movs instruction.
3175 __ movl(kScratchRegister, count);
  __ shr(count, Immediate(kPointerSizeLog2));  // Number of doublewords to copy.
  __ repmovsq();
3179 // Find number of bytes left.
3180 __ movl(count, kScratchRegister);
3181 __ and_(count, Immediate(kPointerSize - 1));
3183 // Check if there are more bytes to copy.
3184 __ bind(&last_bytes);
3185 __ testl(count, count);
3186 __ j(zero, &done, Label::kNear);
3188 // Copy remaining characters.
  Label loop;
  __ bind(&loop);
  __ movb(kScratchRegister, Operand(src, 0));
3192 __ movb(Operand(dest, 0), kScratchRegister);
  __ incq(src);
  __ incq(dest);
  __ decl(count);
  __ j(not_zero, &loop);

  __ bind(&done);
}
void StringHelper::GenerateHashInit(MacroAssembler* masm,
                                    Register hash,
                                    Register character,
                                    Register scratch) {
3206 // hash = (seed + character) + ((seed + character) << 10);
3207 __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
3208 __ SmiToInteger32(scratch, scratch);
3209 __ addl(scratch, character);
3210 __ movl(hash, scratch);
3211 __ shll(scratch, Immediate(10));
3212 __ addl(hash, scratch);
3213 // hash ^= hash >> 6;
3214 __ movl(scratch, hash);
3215 __ shrl(scratch, Immediate(6));
  __ xorl(hash, scratch);
}
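// Roughly equivalent C for GenerateHashInit above (a seeded, Jenkins-style
// one-at-a-time first step; "seed" is the heap's hash seed, "c" the first
// character):
//   uint32_t hash = seed + c;
//   hash += hash << 10;
//   hash ^= hash >> 6;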
void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
                                            Register hash,
                                            Register character,
                                            Register scratch) {
3224 // hash += character;
3225 __ addl(hash, character);
3226 // hash += hash << 10;
3227 __ movl(scratch, hash);
3228 __ shll(scratch, Immediate(10));
3229 __ addl(hash, scratch);
3230 // hash ^= hash >> 6;
3231 __ movl(scratch, hash);
3232 __ shrl(scratch, Immediate(6));
  __ xorl(hash, scratch);
}
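// Roughly equivalent C for GenerateHashAddCharacter above:
//   hash += c;
//   hash += hash << 10;
//   hash ^= hash >> 6;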
void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
                                       Register hash,
                                       Register scratch) {
3240 // hash += hash << 3;
3241 __ leal(hash, Operand(hash, hash, times_8, 0));
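  // (The leal computes hash + hash*8, i.e. hash += hash << 3, in a single
  // instruction.)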
3242 // hash ^= hash >> 11;
3243 __ movl(scratch, hash);
3244 __ shrl(scratch, Immediate(11));
3245 __ xorl(hash, scratch);
3246 // hash += hash << 15;
3247 __ movl(scratch, hash);
3248 __ shll(scratch, Immediate(15));
3249 __ addl(hash, scratch);
3251 __ andl(hash, Immediate(String::kHashBitMask));
3253 // if (hash == 0) hash = 27;
3254 Label hash_not_zero;
3255 __ j(not_zero, &hash_not_zero);
3256 __ Set(hash, StringHasher::kZeroHash);
  __ bind(&hash_not_zero);
}
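// Roughly equivalent C for the GenerateHashGetHash finalization above:
//   hash += hash << 3;
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   hash &= String::kHashBitMask;
//   if (hash == 0) hash = StringHasher::kZeroHash;  // i.e. 27, see above.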
void SubStringStub::Generate(MacroAssembler* masm) {
  Label runtime;
3264 // Stack frame on entry.
  //  rsp[0]  : return address
  //  rsp[8]  : to
  //  rsp[16] : from
  //  rsp[24] : string
3270 enum SubStringStubArgumentIndices {
3271 STRING_ARGUMENT_INDEX,
3272 FROM_ARGUMENT_INDEX,
    TO_ARGUMENT_INDEX,
    SUB_STRING_ARGUMENT_COUNT
  };
3277 StackArgumentsAccessor args(rsp, SUB_STRING_ARGUMENT_COUNT,
3278 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3280 // Make sure first argument is a string.
3281 __ movp(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
3282 STATIC_ASSERT(kSmiTag == 0);
3283 __ testl(rax, Immediate(kSmiTagMask));
3284 __ j(zero, &runtime);
3285 Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
3286 __ j(NegateCondition(is_string), &runtime);
3289 // rbx: instance type
3290 // Calculate length of sub string using the smi values.
3291 __ movp(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
3292 __ movp(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
3293 __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
3295 __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
3296 __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
3297 Label not_original_string;
3298 // Shorter than original string's length: an actual substring.
  __ j(below, &not_original_string, Label::kNear);
3300 // Longer than original string's length or negative: unsafe arguments.
3301 __ j(above, &runtime);
3302 // Return original string.
3303 Counters* counters = masm->isolate()->counters();
3304 __ IncrementCounter(counters->sub_string_native(), 1);
3305 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
  __ bind(&not_original_string);
  Label single_char;
  __ SmiCompare(rcx, Smi::FromInt(1));
3310 __ j(equal, &single_char);
3312 __ SmiToInteger32(rcx, rcx);
3315 // rbx: instance type
3316 // rcx: sub string length
3317 // rdx: from index (smi)
3318 // Deal with different string types: update the index if necessary
3319 // and put the underlying string into edi.
3320 Label underlying_unpacked, sliced_string, seq_or_external_string;
3321 // If the string is not indirect, it can only be sequential or external.
3322 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3323 STATIC_ASSERT(kIsIndirectStringMask != 0);
3324 __ testb(rbx, Immediate(kIsIndirectStringMask));
3325 __ j(zero, &seq_or_external_string, Label::kNear);
3327 __ testb(rbx, Immediate(kSlicedNotConsMask));
3328 __ j(not_zero, &sliced_string, Label::kNear);
3329 // Cons string. Check whether it is flat, then fetch first part.
3330 // Flat cons strings have an empty second part.
3331 __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
3332 Heap::kempty_stringRootIndex);
3333 __ j(not_equal, &runtime);
3334 __ movp(rdi, FieldOperand(rax, ConsString::kFirstOffset));
3335 // Update instance type.
3336 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
3337 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
3338 __ jmp(&underlying_unpacked, Label::kNear);
3340 __ bind(&sliced_string);
3341 // Sliced string. Fetch parent and correct start index by offset.
3342 __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
3343 __ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
3344 // Update instance type.
3345 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
3346 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
3347 __ jmp(&underlying_unpacked, Label::kNear);
3349 __ bind(&seq_or_external_string);
3350 // Sequential or external string. Just move string to the correct register.
3351 __ movp(rdi, rax);
3353 __ bind(&underlying_unpacked);
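// At &underlying_unpacked, rdi holds the direct (sequential or external)
// string and rdx the adjusted start index. The unpacking above is roughly
// the following C++ (illustrative sketch only):
//
//   String* underlying = string;
//   int from = start;
//   if (underlying->IsConsString()) {
//     // Only flat cons strings (empty second part) reach this path.
//     underlying = ConsString::cast(underlying)->first();
//   } else if (underlying->IsSlicedString()) {
//     from += SlicedString::cast(underlying)->offset();
//     underlying = SlicedString::cast(underlying)->parent();
//   }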
3355 if (FLAG_string_slices) {
3356 Label copy_routine;
3357 // rdi: underlying subject string
3358 // rbx: instance type of underlying subject string
3359 // rdx: adjusted start index (smi)
3361 // If coming from the make_two_character_string path, the string
3362 // is too short to be sliced anyways.
3363 __ cmpq(rcx, Immediate(SlicedString::kMinLength));
3364 // Short slice. Copy instead of slicing.
3365 __ j(less, &copy_routine);
3366 // Allocate new sliced string. At this point we do not reload the instance
3367 // type including the string encoding because we simply rely on the info
3368 // provided by the original string. It does not matter if the original
3369 // string's encoding is wrong because we always have to recheck encoding of
3370 // the newly created string's parent anyways due to externalized strings.
3371 Label two_byte_slice, set_slice_header;
3372 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3373 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3374 __ testb(rbx, Immediate(kStringEncodingMask));
3375 __ j(zero, &two_byte_slice, Label::kNear);
3376 __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
3377 __ jmp(&set_slice_header, Label::kNear);
3378 __ bind(&two_byte_slice);
3379 __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
3380 __ bind(&set_slice_header);
3381 __ Integer32ToSmi(rcx, rcx);
3382 __ movp(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
3383 __ movp(FieldOperand(rax, SlicedString::kHashFieldOffset),
3384 Immediate(String::kEmptyHashField));
3385 __ movp(FieldOperand(rax, SlicedString::kParentOffset), rdi);
3386 __ movp(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
3387 __ IncrementCounter(counters->sub_string_native(), 1);
3387 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
3390 __ bind(&copy_routine);
3391 }
3393 // rdi: underlying subject string
3394 // rbx: instance type of underlying subject string
3395 // rdx: adjusted start index (smi)
3397 // The subject string can only be external or sequential string of either
3398 // encoding at this point.
3399 Label two_byte_sequential, sequential_string;
3400 STATIC_ASSERT(kExternalStringTag != 0);
3401 STATIC_ASSERT(kSeqStringTag == 0);
3402 __ testb(rbx, Immediate(kExternalStringTag));
3403 __ j(zero, &sequential_string);
3405 // Handle external string.
3406 // Rule out short external strings.
3407 STATIC_CHECK(kShortExternalStringTag != 0);
3408 __ testb(rbx, Immediate(kShortExternalStringMask));
3409 __ j(not_zero, &runtime);
3410 __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
3411 // Move the pointer so that offset-wise, it looks like a sequential string.
3412 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3413 __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3415 __ bind(&sequential_string);
3416 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3417 __ testb(rbx, Immediate(kStringEncodingMask));
3418 __ j(zero, &two_byte_sequential);
3420 // Allocate the result.
3421 __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
3423 // rax: result string
3424 // rcx: result string length
3425 __ movp(r14, rsi); // rsi used by following code.
3426 { // Locate character of sub string start.
3427 SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
3428 __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
3429 SeqOneByteString::kHeaderSize - kHeapObjectTag));
3431 // Locate first character of result.
3432 __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
3434 // rax: result string
3435 // rcx: result length
3436 // rdi: first character of result
3437 // rsi: character of sub string start
3438 // r14: original value of rsi
3439 StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
3440 __ movp(rsi, r14); // Restore rsi.
3441 __ IncrementCounter(counters->sub_string_native(), 1);
3442 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
3444 __ bind(&two_byte_sequential);
3445 // Allocate the result.
3446 __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
3448 // rax: result string
3449 // rcx: result string length
3450 __ movp(r14, rsi); // rsi used by following code.
3451 { // Locate character of sub string start.
3452 SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
3453 __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
3454 SeqOneByteString::kHeaderSize - kHeapObjectTag));
3456 // Locate first character of result.
3457 __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
3459 // rax: result string
3460 // rcx: result length
3461 // rdi: first character of result
3462 // rsi: character of sub string start
3463 // r14: original value of rsi
3464 StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
3465 __ movp(rsi, r14); // Restore rsi.
3466 __ IncrementCounter(counters->sub_string_native(), 1);
3467 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
3469 // Just jump to runtime to create the sub string.
3470 __ bind(&runtime);
3471 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3473 __ bind(&single_char);
3475 // rbx: instance type
3476 // rcx: sub string length (smi)
3477 // rdx: from index (smi)
3478 StringCharAtGenerator generator(
3479 rax, rdx, rcx, rax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3480 generator.GenerateFast(masm);
3481 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
3482 generator.SkipSlow(masm, &runtime);
3486 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
3487 Register left,
3488 Register right,
3489 Register scratch1,
3490 Register scratch2) {
3491 Register length = scratch1;
3494 Label check_zero_length;
3495 __ movp(length, FieldOperand(left, String::kLengthOffset));
3496 __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
3497 __ j(equal, &check_zero_length, Label::kNear);
3498 __ Move(rax, Smi::FromInt(NOT_EQUAL));
3499 __ ret(0);
3501 // Check if the length is zero.
3502 Label compare_chars;
3503 __ bind(&check_zero_length);
3504 STATIC_ASSERT(kSmiTag == 0);
3505 __ SmiTest(length);
3506 __ j(not_zero, &compare_chars, Label::kNear);
3507 __ Move(rax, Smi::FromInt(EQUAL));
3508 __ ret(0);
3510 // Compare characters.
3511 __ bind(&compare_chars);
3512 Label strings_not_equal;
3513 GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
3514 &strings_not_equal, Label::kNear);
3516 // Characters are equal.
3517 __ Move(rax, Smi::FromInt(EQUAL));
3518 __ ret(0);
3520 // Characters are not equal.
3521 __ bind(&strings_not_equal);
3522 __ Move(rax, Smi::FromInt(NOT_EQUAL));
3523 __ ret(0);
3524 }
3527 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
3528 Register left,
3529 Register right,
3530 Register scratch1,
3531 Register scratch2,
3532 Register scratch3,
3533 Register scratch4) {
3534 // Ensure that you can always subtract a string length from a non-negative
3535 // number (e.g. another length).
3536 STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
3538 // Find minimum length and length difference.
3539 __ movp(scratch1, FieldOperand(left, String::kLengthOffset));
3540 __ movp(scratch4, scratch1);
3541 __ SmiSub(scratch4,
3542 scratch4,
3543 FieldOperand(right, String::kLengthOffset));
3544 // Register scratch4 now holds left.length - right.length.
3545 const Register length_difference = scratch4;
3546 Label left_shorter;
3547 __ j(less, &left_shorter, Label::kNear);
3548 // The right string isn't longer than the left one.
3549 // Get the right string's length by subtracting the (non-negative) difference
3550 // from the left string's length.
3551 __ SmiSub(scratch1, scratch1, length_difference);
3552 __ bind(&left_shorter);
3553 // Register scratch1 now holds Min(left.length, right.length).
3554 const Register min_length = scratch1;
3556 Label compare_lengths;
3557 // If min-length is zero, go directly to comparing lengths.
3558 __ SmiTest(min_length);
3559 __ j(zero, &compare_lengths, Label::kNear);
3562 Label result_not_equal;
3563 GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
3564 &result_not_equal,
3565 // In debug-code mode, SmiTest below might push
3566 // the target label outside the near range.
3567 Label::kFar);
3569 // Completed loop without finding different characters.
3570 // Compare lengths (precomputed).
3571 __ bind(&compare_lengths);
3572 __ SmiTest(length_difference);
3573 Label length_not_equal;
3574 __ j(not_zero, &length_not_equal, Label::kNear);
3576 // Result is EQUAL.
3577 __ Move(rax, Smi::FromInt(EQUAL));
3578 __ ret(0);
3580 Label result_greater;
3581 Label result_less;
3582 __ bind(&length_not_equal);
3583 __ j(greater, &result_greater, Label::kNear);
3584 __ jmp(&result_less, Label::kNear);
3585 __ bind(&result_not_equal);
3586 // Unequal comparison of left to right, either character or length.
3587 __ j(above, &result_greater, Label::kNear);
3588 __ bind(&result_less);
3590 // Result is LESS.
3591 __ Move(rax, Smi::FromInt(LESS));
3592 __ ret(0);
3594 // Result is GREATER.
3595 __ bind(&result_greater);
3596 __ Move(rax, Smi::FromInt(GREATER));
3597 __ ret(0);
3598 }
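// The helper implements an ordinary lexicographic comparison: compare the
// common prefix, then break ties on length. Equivalent C++ (illustrative
// sketch only):
//
//   int CompareFlat(const uint8_t* l, int llen, const uint8_t* r, int rlen) {
//     int min_length = llen < rlen ? llen : rlen;
//     for (int i = 0; i < min_length; i++) {
//       if (l[i] != r[i]) return l[i] < r[i] ? LESS : GREATER;
//     }
//     if (llen == rlen) return EQUAL;   // precomputed length difference
//     return llen < rlen ? LESS : GREATER;
//   }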
3601 void StringCompareStub::GenerateAsciiCharsCompareLoop(
3602 MacroAssembler* masm,
3603 Register left,
3604 Register right,
3605 Register length,
3606 Register scratch,
3607 Label* chars_not_equal,
3608 Label::Distance near_jump) {
3609 // Change index to run from -length to -1 by adding length to string
3610 // start. This means that loop ends when index reaches zero, which
3611 // doesn't need an additional compare.
3612 __ SmiToInteger32(length, length);
3613 __ lea(left,
3614 FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
3615 __ lea(right,
3616 FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
3617 __ negq(length);
3618 Register index = length; // index = -length;
3620 // Compare loop.
3621 Label loop;
3622 __ bind(&loop);
3623 __ movb(scratch, Operand(left, index, times_1, 0));
3624 __ cmpb(scratch, Operand(right, index, times_1, 0));
3625 __ j(not_equal, chars_not_equal, near_jump);
3626 __ incq(index);
3627 __ j(not_zero, &loop);
3628 }
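// The loop runs the index from -length up to 0, so the increment's zero flag
// doubles as the termination test and no extra compare is needed.
// Equivalent C++ (illustrative sketch only):
//
//   bool SameChars(const uint8_t* left, const uint8_t* right, int length) {
//     left += length;                        // point just past the last byte
//     right += length;
//     for (int index = -length; index != 0; index++) {
//       if (left[index] != right[index]) return false;
//     }
//     return true;
//   }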
3631 void StringCompareStub::Generate(MacroAssembler* masm) {
3632 Label runtime;
3634 // Stack frame on entry.
3635 // rsp[0] : return address
3636 // rsp[8] : right string
3637 // rsp[16] : left string
3639 StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
3640 __ movp(rdx, args.GetArgumentOperand(0)); // left
3641 __ movp(rax, args.GetArgumentOperand(1)); // right
3643 // Check for identity.
3644 Label not_same;
3645 __ cmpq(rdx, rax);
3646 __ j(not_equal, &not_same, Label::kNear);
3647 __ Move(rax, Smi::FromInt(EQUAL));
3648 Counters* counters = masm->isolate()->counters();
3649 __ IncrementCounter(counters->string_compare_native(), 1);
3650 __ ret(2 * kPointerSize);
3653 __ bind(&not_same);
3654 // Check that both are sequential ASCII strings.
3655 __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
3657 // Inline comparison of ASCII strings.
3658 __ IncrementCounter(counters->string_compare_native(), 1);
3659 // Drop arguments from the stack
3660 __ PopReturnAddressTo(rcx);
3661 __ addq(rsp, Immediate(2 * kPointerSize));
3662 __ PushReturnAddressFrom(rcx);
3663 GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
3665 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
3666 // tagged as a small integer.
3667 __ bind(&runtime);
3668 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3669 }
3672 void ArrayPushStub::Generate(MacroAssembler* masm) {
3673 int argc = arguments_count();
3675 StackArgumentsAccessor args(rsp, argc);
3676 if (argc == 0) {
3677 // Noop, return the length.
3678 __ movp(rax, FieldOperand(rdx, JSArray::kLengthOffset));
3679 __ ret((argc + 1) * kPointerSize);
3680 }
3683 Isolate* isolate = masm->isolate();
3685 if (elements_kind() == DICTIONARY_ELEMENTS) {
3686 __ TailCallExternalReference(
3687 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
3688 return;
3689 }
3691 Label call_builtin, attempt_to_grow_elements, with_write_barrier;
3693 // Get the elements array of the object.
3694 __ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
3696 if (IsFastSmiOrObjectElementsKind(elements_kind())) {
3697 // Check that the elements are in fast mode and writable.
3698 __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
3699 isolate->factory()->fixed_array_map());
3700 __ j(not_equal, &call_builtin);
3703 // Get the array's length into rax and calculate new length.
3704 __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
3705 STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
3706 __ addl(rax, Immediate(argc));
3708 // Get the elements' length into rcx.
3709 __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
3711 // Check if we could survive without allocation.
3712 __ cmpl(rax, rcx);
3714 if (IsFastSmiOrObjectElementsKind(elements_kind())) {
3715 __ j(greater, &attempt_to_grow_elements);
3717 // Check if value is a smi.
3718 __ movp(rcx, args.GetArgumentOperand(1));
3719 __ JumpIfNotSmi(rcx, &with_write_barrier);
3721 // Store the value.
3722 __ movp(FieldOperand(rdi,
3723 rax,
3724 times_pointer_size,
3725 FixedArray::kHeaderSize - argc * kPointerSize),
3726 rcx);
3727 } else {
3728 __ j(greater, &call_builtin);
3730 __ movp(rcx, args.GetArgumentOperand(1));
3731 __ StoreNumberToDoubleElements(
3732 rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
3736 __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
3738 __ Integer32ToSmi(rax, rax); // Return new length as smi.
3739 __ ret((argc + 1) * kPointerSize);
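// In C++ terms this fast path is (illustrative sketch only; argc is 1 for a
// plain a.push(v)):
//
//   int new_length = array->length() + argc;
//   if (new_length <= elements_capacity) {
//     store(elements, new_length - 1, value);  // smi/object or double store
//     array->set_length(new_length);
//     return new_length;                       // smi-tagged in rax
//   }
//   // otherwise: grow the backing store, transition, or call the builtin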
3741 if (IsFastDoubleElementsKind(elements_kind())) {
3742 __ bind(&call_builtin);
3743 __ TailCallExternalReference(
3744 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
3748 __ bind(&with_write_barrier);
3750 if (IsFastSmiElementsKind(elements_kind())) {
3751 if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
3753 __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
3754 isolate->factory()->heap_number_map());
3755 __ j(equal, &call_builtin);
3757 ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
3758 ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
3759 __ movp(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
3760 __ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
3761 __ movp(rbx, ContextOperand(rbx, Context::JS_ARRAY_MAPS_INDEX));
3762 const int header_size = FixedArrayBase::kHeaderSize;
3763 // Verify that the object can be transitioned in place.
3764 const int origin_offset = header_size + elements_kind() * kPointerSize;
3765 __ movp(rdi, FieldOperand(rbx, origin_offset));
3766 __ cmpq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
3767 __ j(not_equal, &call_builtin);
3769 const int target_offset = header_size + target_kind * kPointerSize;
3770 __ movp(rbx, FieldOperand(rbx, target_offset));
3771 ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
3772 masm, DONT_TRACK_ALLOCATION_SITE, NULL);
3773 __ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
3777 __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
3780 __ lea(rdx, FieldOperand(rdi,
3781 rax, times_pointer_size,
3782 FixedArray::kHeaderSize - argc * kPointerSize));
3783 __ movp(Operand(rdx, 0), rcx);
3785 __ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
3786 OMIT_SMI_CHECK);
3788 __ Integer32ToSmi(rax, rax); // Return new length as smi.
3789 __ ret((argc + 1) * kPointerSize);
3791 __ bind(&attempt_to_grow_elements);
3792 if (!FLAG_inline_new) {
3793 __ bind(&call_builtin);
3794 __ TailCallExternalReference(
3795 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
3799 __ movp(rbx, args.GetArgumentOperand(1));
3800 // Growing elements that are SMI-only requires special handling in case the
3801 // new element is non-Smi. For now, delegate to the builtin.
3802 Label no_fast_elements_check;
3803 __ JumpIfSmi(rbx, &no_fast_elements_check);
3804 __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
3805 __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
3806 __ bind(&no_fast_elements_check);
3808 ExternalReference new_space_allocation_top =
3809 ExternalReference::new_space_allocation_top_address(isolate);
3810 ExternalReference new_space_allocation_limit =
3811 ExternalReference::new_space_allocation_limit_address(isolate);
3813 const int kAllocationDelta = 4;
3814 ASSERT(kAllocationDelta >= argc);
3816 __ Load(rcx, new_space_allocation_top);
3818 // Check if it's the end of elements.
3819 __ lea(rdx, FieldOperand(rdi,
3820 rax, times_pointer_size,
3821 FixedArray::kHeaderSize - argc * kPointerSize));
3822 __ cmpq(rdx, rcx);
3823 __ j(not_equal, &call_builtin);
3824 __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
3825 Operand limit_operand = masm->ExternalOperand(new_space_allocation_limit);
3826 __ cmpq(rcx, limit_operand);
3827 __ j(above, &call_builtin);
3829 // We fit and could grow elements.
3830 __ Store(new_space_allocation_top, rcx);
3832 // Push the argument...
3833 __ movp(Operand(rdx, 0), rbx);
3834 // ... and fill the rest with holes.
3835 __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
3836 for (int i = 1; i < kAllocationDelta; i++) {
3837 __ movp(Operand(rdx, i * kPointerSize), kScratchRegister);
3838 }
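// Growing by kAllocationDelta slots amortizes the new-space bump check over
// several pushes; the extra slots are pre-filled with the hole. Equivalent
// C++ (illustrative sketch only):
//
//   if (end_of_elements == allocation_top &&
//       allocation_top + kAllocationDelta * kPointerSize <= allocation_limit) {
//     allocation_top += kAllocationDelta * kPointerSize;  // bump-allocate
//     elements[length] = value;                           // pushed element
//     for (int i = 1; i < kAllocationDelta; i++) {
//       elements[length + i] = the_hole;                  // padding slots
//     }
//   }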
3840 if (IsFastObjectElementsKind(elements_kind())) {
3841 // We know the elements array is in new space so we don't need the
3842 // remembered set, but we just pushed a value onto it so we may have to tell
3843 // the incremental marker to rescan the object that we just grew. We don't
3844 // need to worry about the holes because they are in old space and already
3845 // marked black.
3846 __ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
3847 }
3849 // Restore receiver to rdx as finish sequence assumes it's here.
3850 __ movp(rdx, args.GetReceiverOperand());
3852 // Increment element's and array's sizes.
3853 __ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
3854 Smi::FromInt(kAllocationDelta));
3856 // Make new length a smi before returning it.
3857 __ Integer32ToSmi(rax, rax);
3858 __ movp(FieldOperand(rdx, JSArray::kLengthOffset), rax);
3860 __ ret((argc + 1) * kPointerSize);
3862 __ bind(&call_builtin);
3863 __ TailCallExternalReference(
3864 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
3868 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3869 // ----------- S t a t e -------------
3870 // -- rdx : left
3871 // -- rax : right
3872 // -- rsp[0] : return address
3873 // -----------------------------------
3874 Isolate* isolate = masm->isolate();
3876 // Load rcx with the allocation site. We stick an undefined dummy value here
3877 // and replace it with the real allocation site later when we instantiate this
3878 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3879 __ Move(rcx, handle(isolate->heap()->undefined_value()));
3881 // Make sure that we actually patched the allocation site.
3882 if (FLAG_debug_code) {
3883 __ testb(rcx, Immediate(kSmiTagMask));
3884 __ Assert(not_equal, kExpectedAllocationSite);
3885 __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
3886 isolate->factory()->allocation_site_map());
3887 __ Assert(equal, kExpectedAllocationSite);
3890 // Tail call into the stub that handles binary operations with allocation
3891 // sites.
3892 BinaryOpWithAllocationSiteStub stub(state_);
3893 __ TailCallStub(&stub);
3897 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
3898 ASSERT(state_ == CompareIC::SMI);
3899 Label miss;
3900 __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
3902 if (GetCondition() == equal) {
3903 // For equality we do not care about the sign of the result.
3904 __ subq(rax, rdx);
3905 } else {
3906 Label done;
3907 __ subq(rdx, rax);
3908 __ j(no_overflow, &done, Label::kNear);
3909 // Correct sign of result in case of overflow.
3910 __ notq(rdx);
3911 __ bind(&done);
3912 __ movp(rax, rdx);
3913 }
3914 __ ret(0);
3916 __ bind(&miss);
3917 GenerateMiss(masm);
3918 }
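// Smis are compared by subtracting the tagged values; if the subtraction
// overflows, the sign of the difference is wrong and notq flips it back.
// Equivalent C++ (illustrative sketch only):
//
//   int64_t diff;
//   bool overflow = __builtin_sub_overflow(left, right, &diff);
//   if (overflow) diff = ~diff;   // restores the correct sign bit
//   // diff < 0, == 0, > 0 now mirrors left <=> right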
3921 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
3922 ASSERT(state_ == CompareIC::NUMBER);
3924 Label generic_stub;
3925 Label unordered, maybe_undefined1, maybe_undefined2;
3926 Label miss;
3928 if (left_ == CompareIC::SMI) {
3929 __ JumpIfNotSmi(rdx, &miss);
3930 }
3931 if (right_ == CompareIC::SMI) {
3932 __ JumpIfNotSmi(rax, &miss);
3933 }
3935 // Load left and right operand.
3936 Label done, left, left_smi, right_smi;
3937 __ JumpIfSmi(rax, &right_smi, Label::kNear);
3938 __ CompareMap(rax, masm->isolate()->factory()->heap_number_map());
3939 __ j(not_equal, &maybe_undefined1, Label::kNear);
3940 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
3941 __ jmp(&left, Label::kNear);
3942 __ bind(&right_smi);
3943 __ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
3944 __ Cvtlsi2sd(xmm1, rcx);
3946 __ bind(&left);
3947 __ JumpIfSmi(rdx, &left_smi, Label::kNear);
3948 __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map());
3949 __ j(not_equal, &maybe_undefined2, Label::kNear);
3950 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
3951 __ jmp(&done, Label::kNear);
3952 __ bind(&left_smi);
3953 __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
3954 __ Cvtlsi2sd(xmm0, rcx);
3956 __ bind(&done);
3958 __ ucomisd(xmm0, xmm1);
3960 // Don't base result on EFLAGS when a NaN is involved.
3961 __ j(parity_even, &unordered, Label::kNear);
3963 // Return a result of -1, 0, or 1, based on EFLAGS.
3964 // Performing mov, because xor would destroy the flag register.
3965 __ movl(rax, Immediate(0));
3966 __ movl(rcx, Immediate(0));
3967 __ setcc(above, rax); // Add one to zero if carry clear and not equal.
3968 __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
3969 __ ret(0);
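// The setcc/sbb pair converts the EFLAGS left by ucomisd into -1/0/+1 without
// branching: setcc(above) yields 1 only for 'greater', and sbb subtracts the
// carry flag, which ucomisd sets only for 'less' (NaN was excluded by the
// parity check above). Equivalent C++ (illustrative sketch only):
//
//   int result = (left > right) ? 1 : 0;   // setcc(above, rax)
//   result -= (left < right) ? 1 : 0;      // sbbq subtracts the carry flag
//   // result: -1 (less), 0 (equal), +1 (greater)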
3971 __ bind(&unordered);
3972 __ bind(&generic_stub);
3973 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
3974 CompareIC::GENERIC);
3975 __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
3977 __ bind(&maybe_undefined1);
3978 if (Token::IsOrderedRelationalCompareOp(op_)) {
3979 __ Cmp(rax, masm->isolate()->factory()->undefined_value());
3980 __ j(not_equal, &miss);
3981 __ JumpIfSmi(rdx, &unordered);
3982 __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
3983 __ j(not_equal, &maybe_undefined2, Label::kNear);
3987 __ bind(&maybe_undefined2);
3988 if (Token::IsOrderedRelationalCompareOp(op_)) {
3989 __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
3990 __ j(equal, &unordered);
3991 }
3993 __ bind(&miss);
3994 GenerateMiss(masm);
3995 }
3998 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3999 ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
4000 ASSERT(GetCondition() == equal);
4002 // Registers containing left and right operands respectively.
4003 Register left = rdx;
4004 Register right = rax;
4005 Register tmp1 = rcx;
4006 Register tmp2 = rbx;
4008 // Check that both operands are heap objects.
4009 Label miss;
4010 Condition cond = masm->CheckEitherSmi(left, right, tmp1);
4011 __ j(cond, &miss, Label::kNear);
4013 // Check that both operands are internalized strings.
4014 __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
4015 __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
4016 __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
4017 __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
4018 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4020 __ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
4021 __ j(not_zero, &miss, Label::kNear);
4023 // Internalized strings are compared by identity.
4024 Label done;
4025 __ cmpq(left, right);
4026 // Make sure rax is non-zero. At this point input operands are
4027 // guaranteed to be non-zero.
4028 ASSERT(right.is(rax));
4029 __ j(not_equal, &done, Label::kNear);
4030 STATIC_ASSERT(EQUAL == 0);
4031 STATIC_ASSERT(kSmiTag == 0);
4032 __ Move(rax, Smi::FromInt(EQUAL));
4033 __ bind(&done);
4034 __ ret(0);
4036 __ bind(&miss);
4037 GenerateMiss(masm);
4038 }
4041 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
4042 ASSERT(state_ == CompareIC::UNIQUE_NAME);
4043 ASSERT(GetCondition() == equal);
4045 // Registers containing left and right operands respectively.
4046 Register left = rdx;
4047 Register right = rax;
4048 Register tmp1 = rcx;
4049 Register tmp2 = rbx;
4051 // Check that both operands are heap objects.
4052 Label miss;
4053 Condition cond = masm->CheckEitherSmi(left, right, tmp1);
4054 __ j(cond, &miss, Label::kNear);
4056 // Check that both operands are unique names. This leaves the instance
4057 // types loaded in tmp1 and tmp2.
4058 __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
4059 __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
4060 __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
4061 __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
4063 __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
4064 __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
4066 // Unique names are compared by identity.
4067 Label done;
4068 __ cmpq(left, right);
4069 // Make sure rax is non-zero. At this point input operands are
4070 // guaranteed to be non-zero.
4071 ASSERT(right.is(rax));
4072 __ j(not_equal, &done, Label::kNear);
4073 STATIC_ASSERT(EQUAL == 0);
4074 STATIC_ASSERT(kSmiTag == 0);
4075 __ Move(rax, Smi::FromInt(EQUAL));
4076 __ bind(&done);
4077 __ ret(0);
4079 __ bind(&miss);
4080 GenerateMiss(masm);
4081 }
4084 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
4085 ASSERT(state_ == CompareIC::STRING);
4086 Label miss;
4088 bool equality = Token::IsEqualityOp(op_);
4090 // Registers containing left and right operands respectively.
4091 Register left = rdx;
4092 Register right = rax;
4093 Register tmp1 = rcx;
4094 Register tmp2 = rbx;
4095 Register tmp3 = rdi;
4097 // Check that both operands are heap objects.
4098 Condition cond = masm->CheckEitherSmi(left, right, tmp1);
4099 __ j(cond, &miss);
4101 // Check that both operands are strings. This leaves the instance
4102 // types loaded in tmp1 and tmp2.
4103 __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
4104 __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
4105 __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
4106 __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
4107 __ movp(tmp3, tmp1);
4108 STATIC_ASSERT(kNotStringTag != 0);
4109 __ and_(tmp3, tmp2);
4110 __ testb(tmp3, Immediate(kIsNotStringMask));
4111 __ j(not_zero, &miss);
4113 // Fast check for identical strings.
4114 Label not_same;
4115 __ cmpq(left, right);
4116 __ j(not_equal, &not_same, Label::kNear);
4117 STATIC_ASSERT(EQUAL == 0);
4118 STATIC_ASSERT(kSmiTag == 0);
4119 __ Move(rax, Smi::FromInt(EQUAL));
4120 __ ret(0);
4122 // Handle not identical strings.
4123 __ bind(&not_same);
4125 // Check that both strings are internalized strings. If they are, we're done
4126 // because we already know they are not identical. We also know they are both
4127 // strings.
4128 if (equality) {
4129 Label do_compare;
4130 STATIC_ASSERT(kInternalizedTag == 0);
4131 __ or_(tmp1, tmp2);
4132 __ testb(tmp1, Immediate(kIsNotInternalizedMask));
4133 __ j(not_zero, &do_compare, Label::kNear);
4134 // Make sure rax is non-zero. At this point input operands are
4135 // guaranteed to be non-zero.
4136 ASSERT(right.is(rax));
4137 __ ret(0);
4138 __ bind(&do_compare);
4139 }
4141 // Check that both strings are sequential ASCII.
4142 Label runtime;
4143 __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
4145 // Compare flat ASCII strings. Returns when done.
4146 if (equality) {
4147 StringCompareStub::GenerateFlatAsciiStringEquals(
4148 masm, left, right, tmp1, tmp2);
4149 } else {
4150 StringCompareStub::GenerateCompareFlatAsciiStrings(
4151 masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
4152 }
4154 // Handle more complex cases in runtime.
4155 __ bind(&runtime);
4156 __ PopReturnAddressTo(tmp1);
4157 __ push(left);
4158 __ push(right);
4159 __ PushReturnAddressFrom(tmp1);
4160 if (equality) {
4161 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
4162 } else {
4163 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
4164 }
4166 __ bind(&miss);
4167 GenerateMiss(masm);
4168 }
4171 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
4172 ASSERT(state_ == CompareIC::OBJECT);
4173 Label miss;
4174 Condition either_smi = masm->CheckEitherSmi(rdx, rax);
4175 __ j(either_smi, &miss, Label::kNear);
4177 __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
4178 __ j(not_equal, &miss, Label::kNear);
4179 __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
4180 __ j(not_equal, &miss, Label::kNear);
4182 ASSERT(GetCondition() == equal);
4183 __ subq(rax, rdx);
4184 __ ret(0);
4186 __ bind(&miss);
4187 GenerateMiss(masm);
4188 }
4191 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
4192 Label miss;
4193 Condition either_smi = masm->CheckEitherSmi(rdx, rax);
4194 __ j(either_smi, &miss, Label::kNear);
4196 __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
4197 __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
4198 __ Cmp(rcx, known_map_);
4199 __ j(not_equal, &miss, Label::kNear);
4200 __ Cmp(rbx, known_map_);
4201 __ j(not_equal, &miss, Label::kNear);
4203 __ subq(rax, rdx);
4204 __ ret(0);
4206 __ bind(&miss);
4207 GenerateMiss(masm);
4208 }
4211 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
4212 {
4213 // Call the runtime system in a fresh internal frame.
4214 ExternalReference miss =
4215 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
4217 FrameScope scope(masm, StackFrame::INTERNAL);
4218 __ push(rdx);
4219 __ push(rax);
4220 __ push(rdx);
4221 __ push(rax);
4222 __ Push(Smi::FromInt(op_));
4223 __ CallExternalReference(miss, 3);
4225 // Compute the entry point of the rewritten stub.
4226 __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
4227 __ pop(rax);
4228 __ pop(rdx);
4229 }
4231 // Do a tail call to the rewritten stub.
4232 __ jmp(rdi);
4233 }
4236 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4237 Label* miss,
4238 Label* done,
4239 Register properties,
4240 Handle<Name> name,
4241 Register r0) {
4242 ASSERT(name->IsUniqueName());
4243 // If names of slots in range from 1 to kProbes - 1 for the hash value are
4244 // not equal to the name and kProbes-th slot is not used (its name is the
4245 // undefined value), it guarantees the hash table doesn't contain the
4246 // property. It's true even if some slots represent deleted properties
4247 // (their names are the hole value).
4248 for (int i = 0; i < kInlinedProbes; i++) {
4249 // r0 points to properties hash.
4250 // Compute the masked index: (hash + i + i * i) & mask.
4251 Register index = r0;
4252 // Capacity is smi 2^n.
4253 __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
4254 __ decl(index);
4255 __ andl(index,
4256 Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
4258 // Scale the index by multiplying by the entry size.
4259 ASSERT(NameDictionary::kEntrySize == 3);
4260 __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
4262 Register entity_name = r0;
4263 // Having undefined at this place means the name is not contained.
4264 ASSERT_EQ(kSmiTagSize, 1);
4265 __ movp(entity_name, Operand(properties,
4266 index,
4267 times_pointer_size,
4268 kElementsStartOffset - kHeapObjectTag));
4269 __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
4270 __ j(equal, done);
4272 // Stop if found the property.
4273 __ Cmp(entity_name, Handle<Name>(name));
4274 __ j(equal, miss);
4276 Label good;
4277 // Check for the hole and skip.
4278 __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
4279 __ j(equal, &good, Label::kNear);
4281 // Check if the entry name is not a unique name.
4282 __ movp(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
4283 __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
4284 miss);
4286 __ bind(&good);
4287 }
4288 NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP);
4289 __ Push(Handle<Object>(name));
4290 __ push(Immediate(name->Hash()));
4291 __ CallStub(&stub);
4292 __ testq(r0, r0);
4293 __ j(not_zero, miss);
4294 __ jmp(done);
4295 }
4298 // Probe the name dictionary in the |elements| register. Jump to the
4299 // |done| label if a property with the given name is found leaving the
4300 // index into the dictionary in |r1|. Jump to the |miss| label
4302 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4303 Label* miss,
4304 Label* done,
4305 Register elements,
4306 Register name,
4307 Register r0,
4308 Register r1) {
4309 ASSERT(!elements.is(r0));
4310 ASSERT(!elements.is(r1));
4311 ASSERT(!name.is(r0));
4312 ASSERT(!name.is(r1));
4314 __ AssertName(name);
4316 __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
4317 __ decl(r0);
4319 for (int i = 0; i < kInlinedProbes; i++) {
4320 // Compute the masked index: (hash + i + i * i) & mask.
4321 __ movl(r1, FieldOperand(name, Name::kHashFieldOffset));
4322 __ shrl(r1, Immediate(Name::kHashShift));
4324 __ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
4328 // Scale the index by multiplying by the entry size.
4329 ASSERT(NameDictionary::kEntrySize == 3);
4330 __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
4332 // Check if the key is identical to the name.
4333 __ cmpq(name, Operand(elements, r1, times_pointer_size,
4334 kElementsStartOffset - kHeapObjectTag));
4335 __ j(equal, done);
4336 }
4338 NameDictionaryLookupStub stub(elements, r0, r1, POSITIVE_LOOKUP);
4339 __ push(name);
4340 __ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
4341 __ shrl(r0, Immediate(Name::kHashShift));
4342 __ push(r0);
4343 __ CallStub(&stub);
4345 __ testq(r0, r0);
4346 __ j(zero, miss);
4347 __ jmp(done);
4348 }
4351 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4352 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4353 // we cannot call anything that could cause a GC from this stub.
4354 // Stack frame on entry:
4355 // rsp[0 * kPointerSize] : return address.
4356 // rsp[1 * kPointerSize] : key's hash.
4357 // rsp[2 * kPointerSize] : key.
4359 // dictionary_: NameDictionary to probe.
4360 // result_: used as scratch.
4361 // index_: will hold an index of entry if lookup is successful.
4362 // might alias with result_.
4364 // result_ is zero if lookup failed, non zero otherwise.
4366 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4368 Register scratch = result_;
4370 __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
4371 __ decl(scratch);
4372 __ push(scratch);
4374 // If names of slots in range from 1 to kProbes - 1 for the hash value are
4375 // not equal to the name and kProbes-th slot is not used (its name is the
4376 // undefined value), it guarantees the hash table doesn't contain the
4377 // property. It's true even if some slots represent deleted properties
4378 // (their names are the null value).
4379 StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER,
4380 kPointerSize);
4381 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4382 // Compute the masked index: (hash + i + i * i) & mask.
4383 __ movp(scratch, args.GetArgumentOperand(1));
4385 __ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
4387 __ and_(scratch, Operand(rsp, 0));
4389 // Scale the index by multiplying by the entry size.
4390 ASSERT(NameDictionary::kEntrySize == 3);
4391 __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
4393 // Having undefined at this place means the name is not contained.
4394 __ movp(scratch, Operand(dictionary_,
4395 index_,
4396 times_pointer_size,
4397 kElementsStartOffset - kHeapObjectTag));
4399 __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
4400 __ j(equal, &not_in_dictionary);
4402 // Stop if found the property.
4403 __ cmpq(scratch, args.GetArgumentOperand(0));
4404 __ j(equal, &in_dictionary);
4406 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
4407 // If we hit a key that is not a unique name during negative
4408 // lookup we have to bailout as this key might be equal to the
4409 // key we are looking for.
4411 // Check if the entry name is not a unique name.
4412 __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
4413 __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
4414 &maybe_in_dictionary);
4418 __ bind(&maybe_in_dictionary);
4419 // If we are doing negative lookup then probing failure should be
4420 // treated as a lookup success. For positive lookup probing failure
4421 // should be treated as lookup failure.
4422 if (mode_ == POSITIVE_LOOKUP) {
4423 __ movp(scratch, Immediate(0));
4424 __ Drop(1);
4425 __ ret(2 * kPointerSize);
4426 }
4428 __ bind(&in_dictionary);
4429 __ movp(scratch, Immediate(1));
4430 __ Drop(1);
4431 __ ret(2 * kPointerSize);
4433 __ bind(&not_in_dictionary);
4434 __ movp(scratch, Immediate(0));
4435 __ Drop(1);
4436 __ ret(2 * kPointerSize);
4437 }
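// Both the inlined probes and this stub walk the standard NameDictionary
// open-addressing probe sequence. Equivalent C++ (illustrative sketch only;
// each entry occupies kEntrySize words: key, value, details):
//
//   int Probe(Object** elements, uint32_t capacity, Object* key,
//             uint32_t hash) {
//     uint32_t mask = capacity - 1;                  // capacity is 2^n
//     for (uint32_t i = 0; i < capacity; i++) {
//       uint32_t index = (hash + i + i * i) & mask;  // quadratic probing
//       Object* candidate = elements[index * NameDictionary::kEntrySize];
//       if (candidate == undefined_value) return -1; // definitely absent
//       if (candidate == key) return index;          // found
//     }
//     return -1;
//   }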
4440 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4441 Isolate* isolate) {
4442 StoreBufferOverflowStub stub1(kDontSaveFPRegs);
4443 stub1.GetCode(isolate);
4444 StoreBufferOverflowStub stub2(kSaveFPRegs);
4445 stub2.GetCode(isolate);
4449 bool CodeStub::CanUseFPRegisters() {
4450 return true; // Always have SSE2 on x64.
4454 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
4455 // the value has just been written into the object, now this stub makes sure
4456 // we keep the GC informed. The word in the object where the value has been
4457 // written is in the address register.
4458 void RecordWriteStub::Generate(MacroAssembler* masm) {
4459 Label skip_to_incremental_noncompacting;
4460 Label skip_to_incremental_compacting;
4462 // The first two instructions are generated with labels so as to get the
4463 // offset fixed up correctly by the bind(Label*) call. We patch it back and
4464 // forth between a compare instructions (a nop in this position) and the
4465 // real branch when we start and stop incremental heap marking.
4466 // See RecordWriteStub::Patch for details.
4467 __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
4468 __ jmp(&skip_to_incremental_compacting, Label::kFar);
4470 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4471 __ RememberedSetHelper(object_,
4472 address_,
4473 value_,
4474 save_fp_regs_mode_,
4475 MacroAssembler::kReturnAtEnd);
4476 } else {
4477 __ ret(0);
4478 }
4480 __ bind(&skip_to_incremental_noncompacting);
4481 GenerateIncremental(masm, INCREMENTAL);
4483 __ bind(&skip_to_incremental_compacting);
4484 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4486 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4487 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4488 masm->set_byte_at(0, kTwoByteNopInstruction);
4489 masm->set_byte_at(2, kFiveByteNopInstruction);
4490 }
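// RecordWriteStub::Patch toggles these first seven bytes between nops and
// jumps when incremental marking starts or stops; roughly (illustrative
// sketch of the patching logic only):
//
//   switch (mode) {
//     case STORE_BUFFER_ONLY:       // both jumps disabled
//       set_byte_at(0, kTwoByteNopInstruction);
//       set_byte_at(2, kFiveByteNopInstruction);
//       break;
//     case INCREMENTAL:             // first jump enabled
//       set_byte_at(0, kTwoByteJumpInstruction);
//       break;
//     case INCREMENTAL_COMPACTION:  // second jump enabled
//       set_byte_at(0, kTwoByteNopInstruction);
//       set_byte_at(2, kFiveByteJumpInstruction);
//       break;
//   }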
4493 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4496 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4497 Label dont_need_remembered_set;
4499 __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
4500 __ JumpIfNotInNewSpace(regs_.scratch0(),
4501 regs_.scratch0(),
4502 &dont_need_remembered_set);
4504 __ CheckPageFlag(regs_.object(),
4505 regs_.scratch0(),
4506 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4507 not_zero,
4508 &dont_need_remembered_set);
4510 // First notify the incremental marker if necessary, then update the
4512 CheckNeedsToInformIncrementalMarker(
4513 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4514 InformIncrementalMarker(masm, mode);
4515 regs_.Restore(masm);
4516 __ RememberedSetHelper(object_,
4517 address_,
4518 value_,
4519 save_fp_regs_mode_,
4520 MacroAssembler::kReturnAtEnd);
4522 __ bind(&dont_need_remembered_set);
4525 CheckNeedsToInformIncrementalMarker(
4526 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4527 InformIncrementalMarker(masm, mode);
4528 regs_.Restore(masm);
4533 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
4534 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
4535 Register address =
4536 arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
4537 ASSERT(!address.is(regs_.object()));
4538 ASSERT(!address.is(arg_reg_1));
4539 __ Move(address, regs_.address());
4540 __ Move(arg_reg_1, regs_.object());
4541 // TODO(gc) Can we just set address arg2 in the beginning?
4542 __ Move(arg_reg_2, address);
4543 __ LoadAddress(arg_reg_3,
4544 ExternalReference::isolate_address(masm->isolate()));
4545 int argument_count = 3;
4547 AllowExternalCallThatCantCauseGC scope(masm);
4548 __ PrepareCallCFunction(argument_count);
4549 if (mode == INCREMENTAL_COMPACTION) {
4550 __ CallCFunction(
4551 ExternalReference::incremental_evacuation_record_write_function(
4552 masm->isolate()),
4553 argument_count);
4554 } else {
4555 ASSERT(mode == INCREMENTAL);
4556 __ CallCFunction(
4557 ExternalReference::incremental_marking_record_write_function(
4558 masm->isolate()),
4559 argument_count);
4560 }
4561 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
4562 }
4565 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4566 MacroAssembler* masm,
4567 OnNoNeedToInformIncrementalMarker on_no_need,
4568 Mode mode) {
4570 Label need_incremental;
4571 Label need_incremental_pop_object;
4573 __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
4574 __ and_(regs_.scratch0(), regs_.object());
4575 __ movp(regs_.scratch1(),
4576 Operand(regs_.scratch0(),
4577 MemoryChunk::kWriteBarrierCounterOffset));
4578 __ subq(regs_.scratch1(), Immediate(1));
4579 __ movp(Operand(regs_.scratch0(),
4580 MemoryChunk::kWriteBarrierCounterOffset),
4581 regs_.scratch1());
4582 __ j(negative, &need_incremental);
4583 Label on_black;
4584 // Let's look at the color of the object: If it is not black we don't have
4585 // to inform the incremental marker.
4586 __ JumpIfBlack(regs_.object(),
4587 regs_.scratch0(),
4588 regs_.scratch1(),
4589 &on_black,
4590 Label::kNear);
4592 regs_.Restore(masm);
4593 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4594 __ RememberedSetHelper(object_,
4595 address_,
4596 value_,
4597 save_fp_regs_mode_,
4598 MacroAssembler::kReturnAtEnd);
4599 } else {
4600 __ ret(0);
4601 }
4603 __ bind(&on_black);
4605 // Get the value from the slot.
4606 __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
4608 if (mode == INCREMENTAL_COMPACTION) {
4609 Label ensure_not_white;
4611 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4612 regs_.scratch1(), // Scratch.
4613 MemoryChunk::kEvacuationCandidateMask,
4614 zero,
4615 &ensure_not_white,
4616 Label::kNear);
4618 __ CheckPageFlag(regs_.object(),
4619 regs_.scratch1(), // Scratch.
4620 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4621 zero,
4622 &need_incremental_pop_object,
4623 Label::kNear);
4624 __ bind(&ensure_not_white);
4627 // We need an extra register for this, so we push the object register
4629 __ push(regs_.object());
4630 __ EnsureNotWhite(regs_.scratch0(), // The value.
4631 regs_.scratch1(), // Scratch.
4632 regs_.object(), // Scratch.
4633 &need_incremental_pop_object,
4634 Label::kNear);
4635 __ pop(regs_.object());
4637 regs_.Restore(masm);
4638 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4639 __ RememberedSetHelper(object_,
4640 address_,
4641 value_,
4642 save_fp_regs_mode_,
4643 MacroAssembler::kReturnAtEnd);
4644 } else {
4645 __ ret(0);
4646 }
4648 __ bind(&need_incremental_pop_object);
4649 __ pop(regs_.object());
4651 __ bind(&need_incremental);
4653 // Fall through when we need to inform the incremental marker.
4657 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4658 // ----------- S t a t e -------------
4659 // -- rax : element value to store
4660 // -- rcx : element index as smi
4661 // -- rsp[0] : return address
4662 // -- rsp[8] : array literal index in function
4663 // -- rsp[16] : array literal
4664 // clobbers rbx, rdx, rdi
4665 // -----------------------------------
4668 Label double_elements;
4669 Label smi_element;
4670 Label slow_elements;
4671 Label fast_elements;
4673 // Get array literal index, array literal and its map.
4674 StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
4675 __ movp(rdx, args.GetArgumentOperand(1));
4676 __ movp(rbx, args.GetArgumentOperand(0));
4677 __ movp(rdi, FieldOperand(rbx, JSObject::kMapOffset));
4679 __ CheckFastElements(rdi, &double_elements);
4681 // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
4682 __ JumpIfSmi(rax, &smi_element);
4683 __ CheckFastSmiElements(rdi, &fast_elements);
4685 // Store into the array literal requires an elements transition. Call into
4686 // the runtime.
4688 __ bind(&slow_elements);
4689 __ PopReturnAddressTo(rdi);
4690 __ push(rbx);
4691 __ push(rcx);
4692 __ push(rax);
4693 __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
4694 __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
4695 __ push(rdx);
4696 __ PushReturnAddressFrom(rdi);
4697 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4699 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4700 __ bind(&fast_elements);
4701 __ SmiToInteger32(kScratchRegister, rcx);
4702 __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
4703 __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
4704 FixedArrayBase::kHeaderSize));
4705 __ movp(Operand(rcx, 0), rax);
4706 // Update the write barrier for the array store.
4707 __ RecordWrite(rbx, rcx, rax,
4708 kDontSaveFPRegs,
4709 EMIT_REMEMBERED_SET,
4710 OMIT_SMI_CHECK);
4711 __ ret(0);
4713 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
4714 // FAST_*_ELEMENTS, and value is Smi.
4715 __ bind(&smi_element);
4716 __ SmiToInteger32(kScratchRegister, rcx);
4717 __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
4718 __ movp(FieldOperand(rbx, kScratchRegister, times_pointer_size,
4719 FixedArrayBase::kHeaderSize), rax);
4720 __ ret(0);
4722 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
4723 __ bind(&double_elements);
4725 __ movp(r9, FieldOperand(rbx, JSObject::kElementsOffset));
4726 __ SmiToInteger32(r11, rcx);
4727 __ StoreNumberToDoubleElements(rax,
4728 r9,
4729 r11,
4730 xmm0,
4731 &slow_elements);
4732 __ ret(0);
4733 }
4736 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4737 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
4738 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
4739 int parameter_count_offset =
4740 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4741 __ movp(rbx, MemOperand(rbp, parameter_count_offset));
4742 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4743 __ PopReturnAddressTo(rcx);
4744 int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
4745 ? kPointerSize
4746 : 0;
4747 __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
4748 __ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
4752 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4753 if (masm->isolate()->function_entry_hook() != NULL) {
4754 ProfileEntryHookStub stub;
4755 masm->CallStub(&stub);
4760 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4761 // This stub can be called from essentially anywhere, so it needs to save
4762 // all volatile and callee-save registers.
4763 const size_t kNumSavedRegisters = 2;
4765 __ push(arg_reg_1);
4766 __ push(arg_reg_2);
4767 // Calculate the original stack pointer and store it in the second arg.
4768 __ lea(arg_reg_2,
4769 Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize));
4771 // Calculate the function address to the first arg.
4772 __ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
4773 __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
4775 // Save the remainder of the volatile registers.
4776 masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
4778 // Call the entry hook function.
4779 __ Move(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
4780 Assembler::RelocInfoNone());
4782 AllowExternalCallThatCantCauseGC scope(masm);
4784 const int kArgumentCount = 2;
4785 __ PrepareCallCFunction(kArgumentCount);
4786 __ CallCFunction(rax, kArgumentCount);
4788 // Restore volatile regs.
4789 masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
4790 __ pop(arg_reg_2);
4791 __ pop(arg_reg_1);
4793 __ Ret();
4794 }
4797 template<class T>
4798 static void CreateArrayDispatch(MacroAssembler* masm,
4799 AllocationSiteOverrideMode mode) {
4800 if (mode == DISABLE_ALLOCATION_SITES) {
4801 T stub(GetInitialFastElementsKind(), mode);
4802 __ TailCallStub(&stub);
4803 } else if (mode == DONT_OVERRIDE) {
4804 int last_index = GetSequenceIndexFromFastElementsKind(
4805 TERMINAL_FAST_ELEMENTS_KIND);
4806 for (int i = 0; i <= last_index; ++i) {
4807 Label next;
4808 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4809 __ cmpl(rdx, Immediate(kind));
4810 __ j(not_equal, &next);
4811 T stub(kind);
4812 __ TailCallStub(&stub);
4813 __ bind(&next);
4814 }
4816 // If we reached this point there is a problem.
4817 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4818 } else {
4819 UNREACHABLE();
4820 }
4821 }
4824 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4825 AllocationSiteOverrideMode mode) {
4826 // rbx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4827 // rdx - kind (if mode != DISABLE_ALLOCATION_SITES)
4828 // rax - number of arguments
4829 // rdi - constructor?
4830 // rsp[0] - return address
4831 // rsp[8] - last argument
4832 Handle<Object> undefined_sentinel(
4833 masm->isolate()->heap()->undefined_value(),
4834 masm->isolate());
4836 Label normal_sequence;
4837 if (mode == DONT_OVERRIDE) {
4838 ASSERT(FAST_SMI_ELEMENTS == 0);
4839 ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4840 ASSERT(FAST_ELEMENTS == 2);
4841 ASSERT(FAST_HOLEY_ELEMENTS == 3);
4842 ASSERT(FAST_DOUBLE_ELEMENTS == 4);
4843 ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4845 // is the low bit set? If so, we are holey and that is good.
4846 __ testb(rdx, Immediate(1));
4847 __ j(not_zero, &normal_sequence);
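// Packed and holey variants are interleaved in ElementsKind, so for the fast
// kinds asserted above the low bit of the kind distinguishes them.
// Equivalent C++ (illustrative sketch only):
//
//   bool is_holey = (kind & 1) != 0;   // FAST_HOLEY_* kinds are odd
//   if (!is_holey) {
//     kind = static_cast<ElementsKind>(kind + 1);  // holey variant
//   }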
4850 // look at the first argument
4851 StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
4852 __ movp(rcx, args.GetArgumentOperand(0));
4853 __ testq(rcx, rcx);
4854 __ j(zero, &normal_sequence);
4856 if (mode == DISABLE_ALLOCATION_SITES) {
4857 ElementsKind initial = GetInitialFastElementsKind();
4858 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4860 ArraySingleArgumentConstructorStub stub_holey(holey_initial,
4861 DISABLE_ALLOCATION_SITES);
4862 __ TailCallStub(&stub_holey);
4864 __ bind(&normal_sequence);
4865 ArraySingleArgumentConstructorStub stub(initial,
4866 DISABLE_ALLOCATION_SITES);
4867 __ TailCallStub(&stub);
4868 } else if (mode == DONT_OVERRIDE) {
4869 // We are going to create a holey array, but our kind is non-holey.
4870 // Fix kind and retry (only if we have an allocation site in the cell).
4871 __ incl(rdx);
4872 __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
4873 if (FLAG_debug_code) {
4874 Handle<Map> allocation_site_map =
4875 masm->isolate()->factory()->allocation_site_map();
4876 __ Cmp(FieldOperand(rbx, 0), allocation_site_map);
4877 __ Assert(equal, kExpectedAllocationSite);
4880 // Save the resulting elements kind in type info. We can't just store rdx
4881 // in the AllocationSite::transition_info field because elements kind is
4882 // restricted to a portion of the field...upper bits need to be left alone.
4883 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4884 __ SmiAddConstant(FieldOperand(rbx, AllocationSite::kTransitionInfoOffset),
4885 Smi::FromInt(kFastElementsKindPackedToHoley));
4887 __ bind(&normal_sequence);
4888 int last_index = GetSequenceIndexFromFastElementsKind(
4889 TERMINAL_FAST_ELEMENTS_KIND);
4890 for (int i = 0; i <= last_index; ++i) {
4891 Label next;
4892 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4893 __ cmpl(rdx, Immediate(kind));
4894 __ j(not_equal, &next);
4895 ArraySingleArgumentConstructorStub stub(kind);
4896 __ TailCallStub(&stub);
4897 __ bind(&next);
4898 }
4900 // If we reached this point there is a problem.
4901 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4902 } else {
4903 UNREACHABLE();
4904 }
4905 }
4908 template<class T>
4909 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4910 int to_index = GetSequenceIndexFromFastElementsKind(
4911 TERMINAL_FAST_ELEMENTS_KIND);
4912 for (int i = 0; i <= to_index; ++i) {
4913 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4914 T stub(kind);
4915 stub.GetCode(isolate);
4916 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4917 T stub1(kind, DISABLE_ALLOCATION_SITES);
4918 stub1.GetCode(isolate);
4924 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4925 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4926 isolate);
4927 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4928 isolate);
4929 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4930 isolate);
4931 }
4934 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4935 Isolate* isolate) {
4936 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4937 for (int i = 0; i < 2; i++) {
4938 // For internal arrays we only need a few things
4939 InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
4940 stubh1.GetCode(isolate);
4941 InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
4942 stubh2.GetCode(isolate);
4943 InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
4944 stubh3.GetCode(isolate);
4949 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4950 MacroAssembler* masm,
4951 AllocationSiteOverrideMode mode) {
4952 if (argument_count_ == ANY) {
4953 Label not_zero_case, not_one_case;
4954 __ testq(rax, rax);
4955 __ j(not_zero, &not_zero_case);
4956 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4958 __ bind(&not_zero_case);
4959 __ cmpl(rax, Immediate(1));
4960 __ j(greater, &not_one_case);
4961 CreateArrayDispatchOneArgument(masm, mode);
4963 __ bind(&not_one_case);
4964 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4965 } else if (argument_count_ == NONE) {
4966 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4967 } else if (argument_count_ == ONE) {
4968 CreateArrayDispatchOneArgument(masm, mode);
4969 } else if (argument_count_ == MORE_THAN_ONE) {
4970 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4971 } else {
4972 UNREACHABLE();
4973 }
4974 }
4977 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4978 // ----------- S t a t e -------------
4979 // -- rax : argc
4980 // -- rbx : type info cell
4981 // -- rdi : constructor
4982 // -- rsp[0] : return address
4983 // -- rsp[8] : last argument
4984 // -----------------------------------
4985 Handle<Object> undefined_sentinel(
4986 masm->isolate()->heap()->undefined_value(),
4987 masm->isolate());
4989 if (FLAG_debug_code) {
4990 // The array construct code is only set for the global and natives
4991 // builtin Array functions which always have maps.
4993 // Initial map for the builtin Array function should be a map.
4994 __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
4995 // Will both indicate a NULL and a Smi.
4996 STATIC_ASSERT(kSmiTag == 0);
4997 Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
4998 __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
4999 __ CmpObjectType(rcx, MAP_TYPE, rcx);
5000 __ Check(equal, kUnexpectedInitialMapForArrayFunction);
5002 // We should either have undefined in rbx or a valid cell
5003 Label okay_here;
5004 Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
5005 __ Cmp(rbx, undefined_sentinel);
5006 __ j(equal, &okay_here);
5007 __ Cmp(FieldOperand(rbx, 0), cell_map);
5008 __ Assert(equal, kExpectedPropertyCellInRegisterRbx);
5009 __ bind(&okay_here);
5010 }
5012 Label no_info;
5013 // If the type cell is undefined, or contains anything other than an
5014 // AllocationSite, call an array constructor that doesn't use AllocationSites.
5015 __ Cmp(rbx, undefined_sentinel);
5016 __ j(equal, &no_info);
5017 __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
5018 __ Cmp(FieldOperand(rbx, 0),
5019 masm->isolate()->factory()->allocation_site_map());
5020 __ j(not_equal, &no_info);
5022 // Only look at the lower 16 bits of the transition info.
5023 __ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset));
5024 __ SmiToInteger32(rdx, rdx);
5025 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5026 __ and_(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
5027 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5029 __ bind(&no_info);
5030 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5031 }
5034 void InternalArrayConstructorStub::GenerateCase(
5035 MacroAssembler* masm, ElementsKind kind) {
5036 Label not_zero_case, not_one_case;
5037 Label normal_sequence;
5039 __ testq(rax, rax);
5040 __ j(not_zero, &not_zero_case);
5041 InternalArrayNoArgumentConstructorStub stub0(kind);
5042 __ TailCallStub(&stub0);
5044 __ bind(&not_zero_case);
5045 __ cmpl(rax, Immediate(1));
5046 __ j(greater, &not_one_case);
5048 if (IsFastPackedElementsKind(kind)) {
5049 // We might need to create a holey array
5050 // look at the first argument
5051 StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
5052 __ movp(rcx, args.GetArgumentOperand(0));
5053 __ testq(rcx, rcx);
5054 __ j(zero, &normal_sequence);
5056 InternalArraySingleArgumentConstructorStub
5057 stub1_holey(GetHoleyElementsKind(kind));
5058 __ TailCallStub(&stub1_holey);
5059 }
5061 __ bind(&normal_sequence);
5062 InternalArraySingleArgumentConstructorStub stub1(kind);
5063 __ TailCallStub(&stub1);
5065 __ bind(&not_one_case);
5066 InternalArrayNArgumentsConstructorStub stubN(kind);
5067 __ TailCallStub(&stubN);
5071 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5072 // ----------- S t a t e -------------
5073 // -- rax : argc
5074 // -- rbx : type info cell
5075 // -- rdi : constructor
5076 // -- rsp[0] : return address
5077 // -- rsp[8] : last argument
5078 // -----------------------------------
5080 if (FLAG_debug_code) {
5081 // The array construct code is only set for the global and natives
5082 // builtin Array functions which always have maps.
5084 // Initial map for the builtin Array function should be a map.
5085 __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
5086 // Will both indicate a NULL and a Smi.
5087 STATIC_ASSERT(kSmiTag == 0);
5088 Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
5089 __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
5090 __ CmpObjectType(rcx, MAP_TYPE, rcx);
5091 __ Check(equal, kUnexpectedInitialMapForArrayFunction);
5094 // Figure out the right elements kind
5095 __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
5097 // Load the map's "bit field 2" into |result|. We only need the first byte,
5098 // but the following masking takes care of that anyway.
5099 __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
5100 // Retrieve elements_kind from bit field 2.
5101 __ and_(rcx, Immediate(Map::kElementsKindMask));
5102 __ shr(rcx, Immediate(Map::kElementsKindShift));
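// Equivalent C++ for the bit-field decode above (illustrative sketch only):
//
//   int bit_field2 = map->bit_field2();
//   ElementsKind kind = static_cast<ElementsKind>(
//       (bit_field2 & Map::kElementsKindMask) >> Map::kElementsKindShift);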
5104 if (FLAG_debug_code) {
5105 Label done;
5106 __ cmpl(rcx, Immediate(FAST_ELEMENTS));
5107 __ j(equal, &done);
5108 __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
5109 __ Assert(equal,
5110 kInvalidElementsKindForInternalArrayOrInternalPackedArray);
5111 __ bind(&done);
5112 }
5114 Label fast_elements_case;
5115 __ cmpl(rcx, Immediate(FAST_ELEMENTS));
5116 __ j(equal, &fast_elements_case);
5117 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
5119 __ bind(&fast_elements_case);
5120 GenerateCase(masm, FAST_ELEMENTS);
5124 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5125 // ----------- S t a t e -------------
5126 // -- rax : callee
5127 // -- rbx : call_data
5128 // -- rcx : holder
5129 // -- rdx : api_function_address
5130 // -- rsi : context
5132 // -- rsp[0] : return address
5133 // -- rsp[8] : last argument
5134 // -- ...
5135 // -- rsp[argc * 8] : first argument
5136 // -- rsp[(argc + 1) * 8] : receiver
5137 // -----------------------------------
  Register callee = rax;
  Register call_data = rbx;
  Register holder = rcx;
  Register api_function_address = rdx;
  Register return_address = rdi;
  Register context = rsi;

  int argc = ArgumentBits::decode(bit_field_);
  bool restore_context = RestoreContextBits::decode(bit_field_);
  bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kArgsLength == 7);
  __ PopReturnAddressTo(return_address);

  // context save
  __ Push(context);
  // load context from callee
  __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));

  // callee
  __ Push(callee);

  // call data
  __ Push(call_data);

  Register scratch = call_data;
  if (!call_data_undefined) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // return value
  __ Push(scratch);
  // return value default
  __ Push(scratch);
  // isolate
  __ Move(scratch,
          ExternalReference::isolate_address(masm->isolate()));
  __ Push(scratch);
  // holder
  __ Push(holder);

  __ movp(scratch, rsp);
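  // |scratch| (== rsp) now points at the freshly pushed FCA block:
  //   rsp[0 * 8] : holder         (FCA::kHolderIndex == 0)
  //   rsp[1 * 8] : isolate
  //   rsp[2 * 8] : return value default
  //   rsp[3 * 8] : return value
  //   rsp[4 * 8] : call data
  //   rsp[5 * 8] : callee
  //   rsp[6 * 8] : saved context  (FCA::kContextSaveIndex == 6)
  // with the JS arguments and the receiver at higher addresses still,
  // matching the STATIC_ASSERTed FunctionCallbackArguments indices.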
  // Push return address back on stack.
  __ PushReturnAddressFrom(return_address);
  // Allocate the FunctionCallbackInfo structure in the arguments' space,
  // since it's not controlled by the GC.
  const int kApiStackSpace = 4;

  __ PrepareCallApiFunction(kApiStackSpace);

  // FunctionCallbackInfo::implicit_args_.
  __ movp(StackSpaceOperand(0), scratch);
  __ addq(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
  __ movp(StackSpaceOperand(1), scratch);  // FunctionCallbackInfo::values_.
  __ Set(StackSpaceOperand(2), argc);  // FunctionCallbackInfo::length_.
  // FunctionCallbackInfo::is_construct_call_.
  __ Set(StackSpaceOperand(3), 0);
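  // values_ must point at the first JS argument, which lives
  // (argc + FCA::kArgsLength - 1) slots above implicit_args_; e.g. with
  // argc == 2 that is (2 + 7 - 1) * 8 == 64 bytes, stepping over the seven
  // FCA slots and the second argument (callbacks index arguments downward).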
#if defined(__MINGW64__) || defined(_WIN64)
  Register arguments_arg = rcx;
  Register callback_arg = rdx;
#else
  Register arguments_arg = rdi;
  Register callback_arg = rsi;
#endif
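  // The Windows x64 ABI passes the first two integer arguments in rcx and
  // rdx, while the System V AMD64 ABI used elsewhere passes them in rdi and
  // rsi, hence the two register assignments above.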
  // It's okay if api_function_address == callback_arg, but not if it equals
  // arguments_arg.
  ASSERT(!api_function_address.is(arguments_arg));
  // v8::InvocationCallback's argument.
  __ lea(arguments_arg, StackSpaceOperand(0));

  Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
  StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength,
                                       ARGUMENTS_DONT_CONTAIN_RECEIVER);
  Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
      FCA::kArgsLength - 1 - FCA::kContextSaveIndex);
  Operand return_value_operand = args_from_rbp.GetArgumentOperand(
      FCA::kArgsLength - 1 - FCA::kReturnValueOffset);
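  // GetArgumentOperand(0) is the slot farthest above rbp, so the index
  // arithmetic flips FCA indices: 7 - 1 - 6 == 0 lands on the saved context
  // and 7 - 1 - 3 == 3 on the return value. The slot count passed below,
  // argc + FCA::kArgsLength + 1, covers the JS arguments, the seven FCA
  // words, and the receiver.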
  __ CallApiFunctionAndReturn(
      api_function_address,
      thunk_address,
      callback_arg,
      argc + FCA::kArgsLength + 1,
      return_value_operand,
      restore_context ? &context_restore_operand : NULL);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rsp[0]                  : return address
  //  -- rsp[8]                  : name
  //  -- rsp[16 - kArgsLength*8] : PropertyCallbackArguments object
  //  -- ...
  //  -- r8                      : api_function_address
  // -----------------------------------
#if defined(__MINGW64__) || defined(_WIN64)
  Register getter_arg = r8;
  Register accessor_info_arg = rdx;
  Register name_arg = rcx;
#else
  Register getter_arg = rdx;
  Register accessor_info_arg = rsi;
  Register name_arg = rdi;
#endif
  Register api_function_address = r8;
  Register scratch = rax;
  // PropertyCallbackArguments::values_ and the handle for the name.
  const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
  // Allocate v8::AccessorInfo in non-GCed stack space.
  const int kArgStackSpace = 1;
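  // Note the two sizes serve different purposes: kStackSpace is what gets
  // torn down on return (the PropertyCallbackArguments slots plus the name
  // handle below them), while kArgStackSpace merely reserves one outgoing
  // word for the AccessorInfo pointer stored via StackSpaceOperand(0) below.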
  __ lea(name_arg, Operand(rsp, kPCOnStackSize));

  __ PrepareCallApiFunction(kArgStackSpace);
  __ lea(scratch, Operand(name_arg, 1 * kPointerSize));

  // v8::PropertyAccessorInfo::args_.
  __ movp(StackSpaceOperand(0), scratch);
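  // Pointer arithmetic recap: name_arg holds the address of the name handle
  // (rsp[8] at entry, reached by skipping the return address via
  // kPCOnStackSize), and scratch is one word above that, i.e. the start of
  // the PropertyCallbackArguments block that args_ must point at.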
  // The context register (rsi) has been saved in PrepareCallApiFunction and
  // could be used to pass arguments.
  __ lea(accessor_info_arg, StackSpaceOperand(0));
  Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
  // It's okay if api_function_address == getter_arg, but not if it equals
  // accessor_info_arg or name_arg.
  ASSERT(!api_function_address.is(accessor_info_arg) &&
         !api_function_address.is(name_arg));
  // The name handle is counted as an argument.
  StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
  Operand return_value_operand = args.GetArgumentOperand(
      PropertyCallbackArguments::kArgsLength - 1 -
      PropertyCallbackArguments::kReturnValueOffset);
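  // As in CallApiFunctionStub above, argument 0 is the slot farthest above
  // rbp, so kArgsLength - 1 - kReturnValueOffset indexes down to the
  // return-value slot of the PropertyCallbackArguments block.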
  __ CallApiFunctionAndReturn(api_function_address,
                              thunk_address,
                              getter_arg,
                              kStackSpace,
                              return_value_operand,
                              NULL);
}
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64