1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "src/bootstrapper.h"
10 #include "src/code-stubs.h"
11 #include "src/codegen.h"
12 #include "src/ic/handler-compiler.h"
13 #include "src/ic/ic.h"
14 #include "src/ic/stub-cache.h"
15 #include "src/isolate.h"
16 #include "src/jsregexp.h"
17 #include "src/regexp-macro-assembler.h"
18 #include "src/runtime/runtime.h"
24 static void InitializeArrayConstructorDescriptor(
25 Isolate* isolate, CodeStubDescriptor* descriptor,
26 int constant_stack_parameter_count) {
27 Address deopt_handler = Runtime::FunctionForId(
28 Runtime::kArrayConstructor)->entry;
30 if (constant_stack_parameter_count == 0) {
31 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
32 JS_FUNCTION_STUB_MODE);
34 descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
35 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
40 static void InitializeInternalArrayConstructorDescriptor(
41 Isolate* isolate, CodeStubDescriptor* descriptor,
42 int constant_stack_parameter_count) {
43 Address deopt_handler = Runtime::FunctionForId(
44 Runtime::kInternalArrayConstructor)->entry;
46 if (constant_stack_parameter_count == 0) {
47 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
48 JS_FUNCTION_STUB_MODE);
50 descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
51 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
56 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
57 CodeStubDescriptor* descriptor) {
58 InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
62 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
63 CodeStubDescriptor* descriptor) {
64 InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
68 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
69 CodeStubDescriptor* descriptor) {
70 InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
74 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
75 CodeStubDescriptor* descriptor) {
76 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
80 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
81 CodeStubDescriptor* descriptor) {
82 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
86 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
87 CodeStubDescriptor* descriptor) {
88 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
92 #define __ ACCESS_MASM(masm)
95 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
96 ExternalReference miss) {
97 // Update the static counter each time a new code stub is generated.
98 isolate()->counters()->code_stubs()->Increment();
100 CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
101 int param_count = descriptor.GetEnvironmentParameterCount();
103 // Call the runtime system in a fresh internal frame.
104 FrameScope scope(masm, StackFrame::INTERNAL);
105 DCHECK(param_count == 0 ||
106 rax.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
108 for (int i = 0; i < param_count; ++i) {
109 __ Push(descriptor.GetEnvironmentParameterRegister(i));
111 __ CallExternalReference(miss, param_count);
118 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
119 __ PushCallerSaved(save_doubles() ? kSaveFPRegs : kDontSaveFPRegs);
120 const int argument_count = 1;
121 __ PrepareCallCFunction(argument_count);
122 __ LoadAddress(arg_reg_1,
123 ExternalReference::isolate_address(isolate()));
125 AllowExternalCallThatCantCauseGC scope(masm);
127 ExternalReference::store_buffer_overflow_function(isolate()),
129 __ PopCallerSaved(save_doubles() ? kSaveFPRegs : kDontSaveFPRegs);
134 class FloatingPointHelper : public AllStatic {
136 enum ConvertUndefined {
137 CONVERT_UNDEFINED_TO_ZERO,
140 // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
141 // If the operands are not both numbers, jump to not_numbers.
142 // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
143 // NumberOperands assumes both are smis or heap numbers.
144 static void LoadSSE2UnknownOperands(MacroAssembler* masm,
149 void DoubleToIStub::Generate(MacroAssembler* masm) {
150 Register input_reg = this->source();
151 Register final_result_reg = this->destination();
152 DCHECK(is_truncating());
154 Label check_negative, process_64_bits, done;
156 int double_offset = offset();
158 // Account for return address and saved regs if input is rsp.
159 if (input_reg.is(rsp)) double_offset += 3 * kRegisterSize;
161 MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
162 MemOperand exponent_operand(MemOperand(input_reg,
163 double_offset + kDoubleSize / 2));
166 Register scratch_candidates[3] = { rbx, rdx, rdi };
167 for (int i = 0; i < 3; i++) {
168 scratch1 = scratch_candidates[i];
169 if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
172 // Since we must use rcx for shifts below, use some other register (rax)
173 // to calculate the result if ecx is the requested return register.
174 Register result_reg = final_result_reg.is(rcx) ? rax : final_result_reg;
175 // Save ecx if it isn't the return register and therefore volatile, or if it
176 // is the return register, then save the temp register we use in its stead
178 Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
182 bool stash_exponent_copy = !input_reg.is(rsp);
183 __ movl(scratch1, mantissa_operand);
184 __ movsd(xmm0, mantissa_operand);
185 __ movl(rcx, exponent_operand);
186 if (stash_exponent_copy) __ pushq(rcx);
188 __ andl(rcx, Immediate(HeapNumber::kExponentMask));
189 __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
190 __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
191 __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
192 __ j(below, &process_64_bits);
194 // Result is entirely in lower 32-bits of mantissa
195 int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
196 __ subl(rcx, Immediate(delta));
197 __ xorl(result_reg, result_reg);
198 __ cmpl(rcx, Immediate(31));
200 __ shll_cl(scratch1);
201 __ jmp(&check_negative);
203 __ bind(&process_64_bits);
204 __ cvttsd2siq(result_reg, xmm0);
205 __ jmp(&done, Label::kNear);
207 // If the double was negative, negate the integer result.
208 __ bind(&check_negative);
209 __ movl(result_reg, scratch1);
211 if (stash_exponent_copy) {
212 __ cmpl(MemOperand(rsp, 0), Immediate(0));
214 __ cmpl(exponent_operand, Immediate(0));
216 __ cmovl(greater, result_reg, scratch1);
220 if (stash_exponent_copy) {
221 __ addp(rsp, Immediate(kDoubleSize));
223 if (!final_result_reg.is(result_reg)) {
224 DCHECK(final_result_reg.is(rcx));
225 __ movl(final_result_reg, result_reg);
233 void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
234 Label* not_numbers) {
235 Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
236 // Load operand in rdx into xmm0, or branch to not_numbers.
237 __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
238 __ JumpIfSmi(rdx, &load_smi_rdx);
239 __ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
240 __ j(not_equal, not_numbers); // Argument in rdx is not a number.
241 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
242 // Load operand in rax into xmm1, or branch to not_numbers.
243 __ JumpIfSmi(rax, &load_smi_rax);
245 __ bind(&load_nonsmi_rax);
246 __ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
247 __ j(not_equal, not_numbers);
248 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
251 __ bind(&load_smi_rdx);
252 __ SmiToInteger32(kScratchRegister, rdx);
253 __ Cvtlsi2sd(xmm0, kScratchRegister);
254 __ JumpIfNotSmi(rax, &load_nonsmi_rax);
256 __ bind(&load_smi_rax);
257 __ SmiToInteger32(kScratchRegister, rax);
258 __ Cvtlsi2sd(xmm1, kScratchRegister);
263 void MathPowStub::Generate(MacroAssembler* masm) {
264 const Register exponent = MathPowTaggedDescriptor::exponent();
265 DCHECK(exponent.is(rdx));
266 const Register base = rax;
267 const Register scratch = rcx;
268 const XMMRegister double_result = xmm3;
269 const XMMRegister double_base = xmm2;
270 const XMMRegister double_exponent = xmm1;
271 const XMMRegister double_scratch = xmm4;
273 Label call_runtime, done, exponent_not_smi, int_exponent;
275 // Save 1 in double_result - we need this several times later on.
276 __ movp(scratch, Immediate(1));
277 __ Cvtlsi2sd(double_result, scratch);
279 if (exponent_type() == ON_STACK) {
280 Label base_is_smi, unpack_exponent;
281 // The exponent and base are supplied as arguments on the stack.
282 // This can only happen if the stub is called from non-optimized code.
283 // Load input parameters from stack.
284 StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
285 __ movp(base, args.GetArgumentOperand(0));
286 __ movp(exponent, args.GetArgumentOperand(1));
287 __ JumpIfSmi(base, &base_is_smi, Label::kNear);
288 __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
289 Heap::kHeapNumberMapRootIndex);
290 __ j(not_equal, &call_runtime);
292 __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
293 __ jmp(&unpack_exponent, Label::kNear);
295 __ bind(&base_is_smi);
296 __ SmiToInteger32(base, base);
297 __ Cvtlsi2sd(double_base, base);
298 __ bind(&unpack_exponent);
300 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
301 __ SmiToInteger32(exponent, exponent);
302 __ jmp(&int_exponent);
304 __ bind(&exponent_not_smi);
305 __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
306 Heap::kHeapNumberMapRootIndex);
307 __ j(not_equal, &call_runtime);
308 __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
309 } else if (exponent_type() == TAGGED) {
310 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
311 __ SmiToInteger32(exponent, exponent);
312 __ jmp(&int_exponent);
314 __ bind(&exponent_not_smi);
315 __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
318 if (exponent_type() != INTEGER) {
319 Label fast_power, try_arithmetic_simplification;
320 // Detect integer exponents stored as double.
321 __ DoubleToI(exponent, double_exponent, double_scratch,
322 TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification,
323 &try_arithmetic_simplification,
324 &try_arithmetic_simplification);
325 __ jmp(&int_exponent);
327 __ bind(&try_arithmetic_simplification);
328 __ cvttsd2si(exponent, double_exponent);
329 // Skip to runtime if possibly NaN (indicated by the indefinite integer).
330 __ cmpl(exponent, Immediate(0x1));
331 __ j(overflow, &call_runtime);
333 if (exponent_type() == ON_STACK) {
334 // Detect square root case. Crankshaft detects constant +/-0.5 at
335 // compile time and uses DoMathPowHalf instead. We then skip this check
336 // for non-constant cases of +/-0.5 as these hardly occur.
337 Label continue_sqrt, continue_rsqrt, not_plus_half;
339 // Load double_scratch with 0.5.
340 __ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
341 __ movq(double_scratch, scratch);
342 // Already ruled out NaNs for exponent.
343 __ ucomisd(double_scratch, double_exponent);
344 __ j(not_equal, ¬_plus_half, Label::kNear);
346 // Calculates square root of base. Check for the special case of
347 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
348 // According to IEEE-754, double-precision -Infinity has the highest
349 // 12 bits set and the lowest 52 bits cleared.
350 __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
351 __ movq(double_scratch, scratch);
352 __ ucomisd(double_scratch, double_base);
353 // Comparing -Infinity with NaN results in "unordered", which sets the
354 // zero flag as if both were equal. However, it also sets the carry flag.
355 __ j(not_equal, &continue_sqrt, Label::kNear);
356 __ j(carry, &continue_sqrt, Label::kNear);
358 // Set result to Infinity in the special case.
359 __ xorps(double_result, double_result);
360 __ subsd(double_result, double_scratch);
363 __ bind(&continue_sqrt);
364 // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
365 __ xorps(double_scratch, double_scratch);
366 __ addsd(double_scratch, double_base); // Convert -0 to 0.
367 __ sqrtsd(double_result, double_scratch);
371 __ bind(¬_plus_half);
372 // Load double_scratch with -0.5 by substracting 1.
373 __ subsd(double_scratch, double_result);
374 // Already ruled out NaNs for exponent.
375 __ ucomisd(double_scratch, double_exponent);
376 __ j(not_equal, &fast_power, Label::kNear);
378 // Calculates reciprocal of square root of base. Check for the special
379 // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
380 // According to IEEE-754, double-precision -Infinity has the highest
381 // 12 bits set and the lowest 52 bits cleared.
382 __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
383 __ movq(double_scratch, scratch);
384 __ ucomisd(double_scratch, double_base);
385 // Comparing -Infinity with NaN results in "unordered", which sets the
386 // zero flag as if both were equal. However, it also sets the carry flag.
387 __ j(not_equal, &continue_rsqrt, Label::kNear);
388 __ j(carry, &continue_rsqrt, Label::kNear);
390 // Set result to 0 in the special case.
391 __ xorps(double_result, double_result);
394 __ bind(&continue_rsqrt);
395 // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
396 __ xorps(double_exponent, double_exponent);
397 __ addsd(double_exponent, double_base); // Convert -0 to +0.
398 __ sqrtsd(double_exponent, double_exponent);
399 __ divsd(double_result, double_exponent);
403 // Using FPU instructions to calculate power.
404 Label fast_power_failed;
405 __ bind(&fast_power);
406 __ fnclex(); // Clear flags to catch exceptions later.
407 // Transfer (B)ase and (E)xponent onto the FPU register stack.
408 __ subp(rsp, Immediate(kDoubleSize));
409 __ movsd(Operand(rsp, 0), double_exponent);
410 __ fld_d(Operand(rsp, 0)); // E
411 __ movsd(Operand(rsp, 0), double_base);
412 __ fld_d(Operand(rsp, 0)); // B, E
414 // Exponent is in st(1) and base is in st(0)
415 // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
416 // FYL2X calculates st(1) * log2(st(0))
419 __ frndint(); // rnd(X), X
420 __ fsub(1); // rnd(X), X-rnd(X)
421 __ fxch(1); // X - rnd(X), rnd(X)
422 // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
423 __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
424 __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
425 __ faddp(1); // 2^(X-rnd(X)), rnd(X)
426 // FSCALE calculates st(0) * 2^st(1)
427 __ fscale(); // 2^X, rnd(X)
429 // Bail out to runtime in case of exceptions in the status word.
431 __ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
432 __ j(not_zero, &fast_power_failed, Label::kNear);
433 __ fstp_d(Operand(rsp, 0));
434 __ movsd(double_result, Operand(rsp, 0));
435 __ addp(rsp, Immediate(kDoubleSize));
438 __ bind(&fast_power_failed);
440 __ addp(rsp, Immediate(kDoubleSize));
441 __ jmp(&call_runtime);
444 // Calculate power with integer exponent.
445 __ bind(&int_exponent);
446 const XMMRegister double_scratch2 = double_exponent;
447 // Back up exponent as we need to check if exponent is negative later.
448 __ movp(scratch, exponent); // Back up exponent.
449 __ movsd(double_scratch, double_base); // Back up base.
450 __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
452 // Get absolute value of exponent.
453 Label no_neg, while_true, while_false;
454 __ testl(scratch, scratch);
455 __ j(positive, &no_neg, Label::kNear);
459 __ j(zero, &while_false, Label::kNear);
460 __ shrl(scratch, Immediate(1));
461 // Above condition means CF==0 && ZF==0. This means that the
462 // bit that has been shifted out is 0 and the result is not 0.
463 __ j(above, &while_true, Label::kNear);
464 __ movsd(double_result, double_scratch);
465 __ j(zero, &while_false, Label::kNear);
467 __ bind(&while_true);
468 __ shrl(scratch, Immediate(1));
469 __ mulsd(double_scratch, double_scratch);
470 __ j(above, &while_true, Label::kNear);
471 __ mulsd(double_result, double_scratch);
472 __ j(not_zero, &while_true);
474 __ bind(&while_false);
475 // If the exponent is negative, return 1/result.
476 __ testl(exponent, exponent);
477 __ j(greater, &done);
478 __ divsd(double_scratch2, double_result);
479 __ movsd(double_result, double_scratch2);
480 // Test whether result is zero. Bail out to check for subnormal result.
481 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
482 __ xorps(double_scratch2, double_scratch2);
483 __ ucomisd(double_scratch2, double_result);
484 // double_exponent aliased as double_scratch2 has already been overwritten
485 // and may not have contained the exponent value in the first place when the
486 // input was a smi. We reset it with exponent value before bailing out.
487 __ j(not_equal, &done);
488 __ Cvtlsi2sd(double_exponent, exponent);
490 // Returning or bailing out.
491 Counters* counters = isolate()->counters();
492 if (exponent_type() == ON_STACK) {
493 // The arguments are still on the stack.
494 __ bind(&call_runtime);
495 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
497 // The stub is called from non-optimized code, which expects the result
498 // as heap number in rax.
500 __ AllocateHeapNumber(rax, rcx, &call_runtime);
501 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
502 __ IncrementCounter(counters->math_pow(), 1);
503 __ ret(2 * kPointerSize);
505 __ bind(&call_runtime);
506 // Move base to the correct argument register. Exponent is already in xmm1.
507 __ movsd(xmm0, double_base);
508 DCHECK(double_exponent.is(xmm1));
510 AllowExternalCallThatCantCauseGC scope(masm);
511 __ PrepareCallCFunction(2);
513 ExternalReference::power_double_double_function(isolate()), 2);
515 // Return value is in xmm0.
516 __ movsd(double_result, xmm0);
519 __ IncrementCounter(counters->math_pow(), 1);
525 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
527 Register receiver = LoadDescriptor::ReceiverRegister();
528 // Ensure that the vector and slot registers won't be clobbered before
529 // calling the miss handler.
530 DCHECK(!FLAG_vector_ics ||
531 !AreAliased(r8, r9, VectorLoadICDescriptor::VectorRegister(),
532 VectorLoadICDescriptor::SlotRegister()));
534 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8,
537 PropertyAccessCompiler::TailCallBuiltin(
538 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
542 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
543 CHECK(!has_new_target());
544 // The key is in rdx and the parameter count is in rax.
545 DCHECK(rdx.is(ArgumentsAccessReadDescriptor::index()));
546 DCHECK(rax.is(ArgumentsAccessReadDescriptor::parameter_count()));
548 // Check that the key is a smi.
550 __ JumpIfNotSmi(rdx, &slow);
552 // Check if the calling frame is an arguments adaptor frame. We look at the
553 // context offset, and if the frame is not a regular one, then we find a
554 // Smi instead of the context. We can't use SmiCompare here, because that
555 // only works for comparing two smis.
557 __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
558 __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
559 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
560 __ j(equal, &adaptor);
562 // Check index against formal parameters count limit passed in
563 // through register rax. Use unsigned comparison to get negative
566 __ j(above_equal, &slow);
568 // Read the argument from the stack and return it.
569 __ SmiSub(rax, rax, rdx);
570 __ SmiToInteger32(rax, rax);
571 StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
572 __ movp(rax, args.GetArgumentOperand(0));
575 // Arguments adaptor case: Check index against actual arguments
576 // limit found in the arguments adaptor frame. Use unsigned
577 // comparison to get negative check for free.
579 __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
581 __ j(above_equal, &slow);
583 // Read the argument from the stack and return it.
584 __ SmiSub(rcx, rcx, rdx);
585 __ SmiToInteger32(rcx, rcx);
586 StackArgumentsAccessor adaptor_args(rbx, rcx,
587 ARGUMENTS_DONT_CONTAIN_RECEIVER);
588 __ movp(rax, adaptor_args.GetArgumentOperand(0));
591 // Slow-case: Handle non-smi or out-of-bounds access to arguments
592 // by calling the runtime system.
594 __ PopReturnAddressTo(rbx);
596 __ PushReturnAddressFrom(rbx);
597 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
601 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
603 // rsp[0] : return address
604 // rsp[8] : number of parameters (tagged)
605 // rsp[16] : receiver displacement
606 // rsp[24] : function
607 // Registers used over the whole function:
608 // rbx: the mapped parameter count (untagged)
609 // rax: the allocated object (tagged).
611 CHECK(!has_new_target());
613 Factory* factory = isolate()->factory();
615 StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
616 __ SmiToInteger64(rbx, args.GetArgumentOperand(2));
617 // rbx = parameter count (untagged)
619 // Check if the calling frame is an arguments adaptor frame.
621 Label adaptor_frame, try_allocate;
622 __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
623 __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
624 __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
625 __ j(equal, &adaptor_frame);
627 // No adaptor, parameter count = argument count.
629 __ jmp(&try_allocate, Label::kNear);
631 // We have an adaptor frame. Patch the parameters pointer.
632 __ bind(&adaptor_frame);
633 __ SmiToInteger64(rcx,
635 ArgumentsAdaptorFrameConstants::kLengthOffset));
636 __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
637 StandardFrameConstants::kCallerSPOffset));
638 __ movp(args.GetArgumentOperand(1), rdx);
640 // rbx = parameter count (untagged)
641 // rcx = argument count (untagged)
642 // Compute the mapped parameter count = min(rbx, rcx) in rbx.
644 __ j(less_equal, &try_allocate, Label::kNear);
647 __ bind(&try_allocate);
649 // Compute the sizes of backing store, parameter map, and arguments object.
650 // 1. Parameter map, has 2 extra words containing context and backing store.
651 const int kParameterMapHeaderSize =
652 FixedArray::kHeaderSize + 2 * kPointerSize;
653 Label no_parameter_map;
656 __ j(zero, &no_parameter_map, Label::kNear);
657 __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
658 __ bind(&no_parameter_map);
661 __ leap(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
663 // 3. Arguments object.
664 __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize));
666 // Do the allocation of all three objects in one go.
667 __ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
669 // rax = address of new object(s) (tagged)
670 // rcx = argument count (untagged)
671 // Get the arguments map from the current native context into rdi.
672 Label has_mapped_parameters, instantiate;
673 __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
674 __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
676 __ j(not_zero, &has_mapped_parameters, Label::kNear);
678 const int kIndex = Context::SLOPPY_ARGUMENTS_MAP_INDEX;
679 __ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
680 __ jmp(&instantiate, Label::kNear);
682 const int kAliasedIndex = Context::ALIASED_ARGUMENTS_MAP_INDEX;
683 __ bind(&has_mapped_parameters);
684 __ movp(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
685 __ bind(&instantiate);
687 // rax = address of new object (tagged)
688 // rbx = mapped parameter count (untagged)
689 // rcx = argument count (untagged)
690 // rdi = address of arguments map (tagged)
691 __ movp(FieldOperand(rax, JSObject::kMapOffset), rdi);
692 __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
693 __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
694 __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
696 // Set up the callee in-object property.
697 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
698 __ movp(rdx, args.GetArgumentOperand(0));
699 __ AssertNotSmi(rdx);
700 __ movp(FieldOperand(rax, JSObject::kHeaderSize +
701 Heap::kArgumentsCalleeIndex * kPointerSize),
704 // Use the length (smi tagged) and set that as an in-object property too.
705 // Note: rcx is tagged from here on.
706 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
707 __ Integer32ToSmi(rcx, rcx);
708 __ movp(FieldOperand(rax, JSObject::kHeaderSize +
709 Heap::kArgumentsLengthIndex * kPointerSize),
712 // Set up the elements pointer in the allocated arguments object.
713 // If we allocated a parameter map, edi will point there, otherwise to the
715 __ leap(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize));
716 __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
718 // rax = address of new object (tagged)
719 // rbx = mapped parameter count (untagged)
720 // rcx = argument count (tagged)
721 // rdi = address of parameter map or backing store (tagged)
723 // Initialize parameter map. If there are no mapped arguments, we're done.
724 Label skip_parameter_map;
726 __ j(zero, &skip_parameter_map);
728 __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
729 // rbx contains the untagged argument count. Add 2 and tag to write.
730 __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
731 __ Integer64PlusConstantToSmi(r9, rbx, 2);
732 __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
733 __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
734 __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
735 __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
737 // Copy the parameter slots and the holes in the arguments.
738 // We need to fill in mapped_parameter_count slots. They index the context,
739 // where parameters are stored in reverse order, at
740 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
741 // The mapped parameter thus need to get indices
742 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
743 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
744 // We loop from right to left.
745 Label parameters_loop, parameters_test;
747 // Load tagged parameter count into r9.
748 __ Integer32ToSmi(r9, rbx);
749 __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
750 __ addp(r8, args.GetArgumentOperand(2));
752 __ Move(r11, factory->the_hole_value());
754 __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
755 // r9 = loop variable (tagged)
756 // r8 = mapping index (tagged)
757 // r11 = the hole value
758 // rdx = address of parameter map (tagged)
759 // rdi = address of backing store (tagged)
760 __ jmp(¶meters_test, Label::kNear);
762 __ bind(¶meters_loop);
763 __ SmiSubConstant(r9, r9, Smi::FromInt(1));
764 __ SmiToInteger64(kScratchRegister, r9);
765 __ movp(FieldOperand(rdx, kScratchRegister,
767 kParameterMapHeaderSize),
769 __ movp(FieldOperand(rdi, kScratchRegister,
771 FixedArray::kHeaderSize),
773 __ SmiAddConstant(r8, r8, Smi::FromInt(1));
774 __ bind(¶meters_test);
776 __ j(not_zero, ¶meters_loop, Label::kNear);
778 __ bind(&skip_parameter_map);
780 // rcx = argument count (tagged)
781 // rdi = address of backing store (tagged)
782 // Copy arguments header and remaining slots (if there are any).
783 __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
784 factory->fixed_array_map());
785 __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
787 Label arguments_loop, arguments_test;
789 __ movp(rdx, args.GetArgumentOperand(1));
790 // Untag rcx for the loop below.
791 __ SmiToInteger64(rcx, rcx);
792 __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
793 __ subp(rdx, kScratchRegister);
794 __ jmp(&arguments_test, Label::kNear);
796 __ bind(&arguments_loop);
797 __ subp(rdx, Immediate(kPointerSize));
798 __ movp(r9, Operand(rdx, 0));
799 __ movp(FieldOperand(rdi, r8,
801 FixedArray::kHeaderSize),
803 __ addp(r8, Immediate(1));
805 __ bind(&arguments_test);
807 __ j(less, &arguments_loop, Label::kNear);
809 // Return and remove the on-stack parameters.
810 __ ret(3 * kPointerSize);
812 // Do the runtime call to allocate the arguments object.
813 // rcx = argument count (untagged)
815 __ Integer32ToSmi(rcx, rcx);
816 __ movp(args.GetArgumentOperand(2), rcx); // Patch argument count.
817 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
821 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
822 // rsp[0] : return address
823 // rsp[8] : number of parameters
824 // rsp[16] : receiver displacement
825 // rsp[24] : function
826 CHECK(!has_new_target());
828 // Check if the calling frame is an arguments adaptor frame.
830 __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
831 __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
832 __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
833 __ j(not_equal, &runtime);
835 // Patch the arguments.length and the parameters pointer.
836 StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
837 __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
838 __ movp(args.GetArgumentOperand(2), rcx);
839 __ SmiToInteger64(rcx, rcx);
840 __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
841 StandardFrameConstants::kCallerSPOffset));
842 __ movp(args.GetArgumentOperand(1), rdx);
845 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
849 void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
850 // rsp[0] : return address
851 // rsp[8] : index of rest parameter
852 // rsp[16] : number of parameters
853 // rsp[24] : receiver displacement
855 // Check if the calling frame is an arguments adaptor frame.
857 __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
858 __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
859 __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
860 __ j(not_equal, &runtime);
862 // Patch the arguments.length and the parameters pointer.
863 StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
864 __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
865 __ movp(args.GetArgumentOperand(1), rcx);
866 __ SmiToInteger64(rcx, rcx);
867 __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
868 StandardFrameConstants::kCallerSPOffset));
869 __ movp(args.GetArgumentOperand(0), rdx);
872 __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
876 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
877 // Return address is on the stack.
880 Register receiver = LoadDescriptor::ReceiverRegister();
881 Register key = LoadDescriptor::NameRegister();
882 Register scratch = rax;
883 DCHECK(!scratch.is(receiver) && !scratch.is(key));
885 // Check that the key is an array index, that is Uint32.
886 STATIC_ASSERT(kSmiValueSize <= 32);
887 __ JumpUnlessNonNegativeSmi(key, &slow);
889 // Everything is fine, call runtime.
890 __ PopReturnAddressTo(scratch);
891 __ Push(receiver); // receiver
893 __ PushReturnAddressFrom(scratch);
895 // Perform tail call to the entry.
896 __ TailCallExternalReference(
897 ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
902 PropertyAccessCompiler::TailCallBuiltin(
903 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
907 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
908 // Return address is on the stack.
911 Register receiver = LoadDescriptor::ReceiverRegister();
912 Register index = LoadDescriptor::NameRegister();
913 Register scratch = rdi;
914 Register result = rax;
915 DCHECK(!scratch.is(receiver) && !scratch.is(index));
916 DCHECK(!FLAG_vector_ics ||
917 (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
918 result.is(VectorLoadICDescriptor::SlotRegister())));
920 // StringCharAtGenerator doesn't use the result register until it's passed
921 // the different miss possibilities. If it did, we would have a conflict
922 // when FLAG_vector_ics is true.
923 StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
924 &miss, // When not a string.
925 &miss, // When not a number.
926 &miss, // When index out of range.
927 STRING_INDEX_IS_ARRAY_INDEX,
929 char_at_generator.GenerateFast(masm);
932 StubRuntimeCallHelper call_helper;
933 char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
936 PropertyAccessCompiler::TailCallBuiltin(
937 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
941 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
942 // rsp[0] : return address
943 // rsp[8] : number of parameters
944 // rsp[16] : receiver displacement
945 // rsp[24] : function
947 // Check if the calling frame is an arguments adaptor frame.
948 Label adaptor_frame, try_allocate, runtime;
949 __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
950 __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
951 __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
952 __ j(equal, &adaptor_frame);
954 // Get the length from the frame.
955 StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
956 __ movp(rcx, args.GetArgumentOperand(2));
957 __ SmiToInteger64(rcx, rcx);
958 __ jmp(&try_allocate);
960 // Patch the arguments.length and the parameters pointer.
961 __ bind(&adaptor_frame);
962 __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
964 if (has_new_target()) {
965 // If the constructor was [[Call]]ed, the call will not push a new.target
966 // onto the stack. In that case the arguments array we construct is bogus,
967 // bu we do not care as the constructor throws immediately.
968 __ Cmp(rcx, Smi::FromInt(0));
969 Label skip_decrement;
970 __ j(equal, &skip_decrement);
971 // Subtract 1 from smi-tagged arguments count.
972 __ SmiToInteger32(rcx, rcx);
974 __ Integer32ToSmi(rcx, rcx);
975 __ bind(&skip_decrement);
977 __ movp(args.GetArgumentOperand(2), rcx);
978 __ SmiToInteger64(rcx, rcx);
979 __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
980 StandardFrameConstants::kCallerSPOffset));
981 __ movp(args.GetArgumentOperand(1), rdx);
983 // Try the new space allocation. Start out with computing the size of
984 // the arguments object and the elements array.
985 Label add_arguments_object;
986 __ bind(&try_allocate);
988 __ j(zero, &add_arguments_object, Label::kNear);
989 __ leap(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
990 __ bind(&add_arguments_object);
991 __ addp(rcx, Immediate(Heap::kStrictArgumentsObjectSize));
993 // Do the allocation of both objects in one go.
994 __ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
996 // Get the arguments map from the current native context.
997 __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
998 __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
999 const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
1000 __ movp(rdi, Operand(rdi, offset));
1002 __ movp(FieldOperand(rax, JSObject::kMapOffset), rdi);
1003 __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
1004 __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
1005 __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
1007 // Get the length (smi tagged) and set that as an in-object property too.
1008 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1009 __ movp(rcx, args.GetArgumentOperand(2));
1010 __ movp(FieldOperand(rax, JSObject::kHeaderSize +
1011 Heap::kArgumentsLengthIndex * kPointerSize),
1014 // If there are no actual arguments, we're done.
1019 // Get the parameters pointer from the stack.
1020 __ movp(rdx, args.GetArgumentOperand(1));
1022 // Set up the elements pointer in the allocated arguments object and
1023 // initialize the header in the elements fixed array.
1024 __ leap(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize));
1025 __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
1026 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
1027 __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
1030 __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
1031 // Untag the length for the loop below.
1032 __ SmiToInteger64(rcx, rcx);
1034 // Copy the fixed array slots.
1037 __ movp(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
1038 __ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
1039 __ addp(rdi, Immediate(kPointerSize));
1040 __ subp(rdx, Immediate(kPointerSize));
1042 __ j(not_zero, &loop);
1044 // Return and remove the on-stack parameters.
1046 __ ret(3 * kPointerSize);
1048 // Do the runtime call to allocate the arguments object.
1050 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
1054 void RegExpExecStub::Generate(MacroAssembler* masm) {
1055 // Just jump directly to runtime if native RegExp is not selected at compile
1056 // time or if regexp entry in generated code is turned off runtime switch or
1058 #ifdef V8_INTERPRETED_REGEXP
1059 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
1060 #else // V8_INTERPRETED_REGEXP
1062 // Stack frame on entry.
1063 // rsp[0] : return address
1064 // rsp[8] : last_match_info (expected JSArray)
1065 // rsp[16] : previous index
1066 // rsp[24] : subject string
1067 // rsp[32] : JSRegExp object
1069 enum RegExpExecStubArgumentIndices {
1070 JS_REG_EXP_OBJECT_ARGUMENT_INDEX,
1071 SUBJECT_STRING_ARGUMENT_INDEX,
1072 PREVIOUS_INDEX_ARGUMENT_INDEX,
1073 LAST_MATCH_INFO_ARGUMENT_INDEX,
1074 REG_EXP_EXEC_ARGUMENT_COUNT
1077 StackArgumentsAccessor args(rsp, REG_EXP_EXEC_ARGUMENT_COUNT,
1078 ARGUMENTS_DONT_CONTAIN_RECEIVER);
1080 // Ensure that a RegExp stack is allocated.
1081 ExternalReference address_of_regexp_stack_memory_address =
1082 ExternalReference::address_of_regexp_stack_memory_address(isolate());
1083 ExternalReference address_of_regexp_stack_memory_size =
1084 ExternalReference::address_of_regexp_stack_memory_size(isolate());
1085 __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
1086 __ testp(kScratchRegister, kScratchRegister);
1087 __ j(zero, &runtime);
1089 // Check that the first argument is a JSRegExp object.
1090 __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
1091 __ JumpIfSmi(rax, &runtime);
1092 __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
1093 __ j(not_equal, &runtime);
1095 // Check that the RegExp has been compiled (data contains a fixed array).
1096 __ movp(rax, FieldOperand(rax, JSRegExp::kDataOffset));
1097 if (FLAG_debug_code) {
1098 Condition is_smi = masm->CheckSmi(rax);
1099 __ Check(NegateCondition(is_smi),
1100 kUnexpectedTypeForRegExpDataFixedArrayExpected);
1101 __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
1102 __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1105 // rax: RegExp data (FixedArray)
1106 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1107 __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
1108 __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
1109 __ j(not_equal, &runtime);
1111 // rax: RegExp data (FixedArray)
1112 // Check that the number of captures fit in the static offsets vector buffer.
1113 __ SmiToInteger32(rdx,
1114 FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
1115 // Check (number_of_captures + 1) * 2 <= offsets vector size
1116 // Or number_of_captures <= offsets vector size / 2 - 1
1117 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
1118 __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1));
1119 __ j(above, &runtime);
1121 // Reset offset for possibly sliced string.
1123 __ movp(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
1124 __ JumpIfSmi(rdi, &runtime);
1125 __ movp(r15, rdi); // Make a copy of the original subject string.
1126 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
1127 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
1128 // rax: RegExp data (FixedArray)
1129 // rdi: subject string
1130 // r15: subject string
1131 // Handle subject string according to its encoding and representation:
1132 // (1) Sequential two byte? If yes, go to (9).
1133 // (2) Sequential one byte? If yes, go to (6).
1134 // (3) Anything but sequential or cons? If yes, go to (7).
1135 // (4) Cons string. If the string is flat, replace subject with first string.
1136 // Otherwise bailout.
1137 // (5a) Is subject sequential two byte? If yes, go to (9).
1138 // (5b) Is subject external? If yes, go to (8).
1139 // (6) One byte sequential. Load regexp code for one byte.
1143 // Deferred code at the end of the stub:
1144 // (7) Not a long external string? If yes, go to (10).
1145 // (8) External string. Make it, offset-wise, look like a sequential string.
1146 // (8a) Is the external string one byte? If yes, go to (6).
1147 // (9) Two byte sequential. Load regexp code for one byte. Go to (E).
1148 // (10) Short external string or not a string? If yes, bail out to runtime.
1149 // (11) Sliced string. Replace subject with parent. Go to (5a).
1151 Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
1152 external_string /* 8 */, check_underlying /* 5a */,
1153 not_seq_nor_cons /* 7 */, check_code /* E */,
1154 not_long_external /* 10 */;
1156 // (1) Sequential two byte? If yes, go to (9).
1157 __ andb(rbx, Immediate(kIsNotStringMask |
1158 kStringRepresentationMask |
1159 kStringEncodingMask |
1160 kShortExternalStringMask));
1161 STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
1162 __ j(zero, &seq_two_byte_string); // Go to (9).
1164 // (2) Sequential one byte? If yes, go to (6).
1165 // Any other sequential string must be one byte.
1166 __ andb(rbx, Immediate(kIsNotStringMask |
1167 kStringRepresentationMask |
1168 kShortExternalStringMask));
1169 __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6).
1171 // (3) Anything but sequential or cons? If yes, go to (7).
1172 // We check whether the subject string is a cons, since sequential strings
1173 // have already been covered.
1174 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
1175 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
1176 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
1177 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
1178 __ cmpp(rbx, Immediate(kExternalStringTag));
1179 __ j(greater_equal, ¬_seq_nor_cons); // Go to (7).
1181 // (4) Cons string. Check that it's flat.
1182 // Replace subject with first string and reload instance type.
1183 __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
1184 Heap::kempty_stringRootIndex);
1185 __ j(not_equal, &runtime);
1186 __ movp(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
1187 __ bind(&check_underlying);
1188 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
1189 __ movp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
1191 // (5a) Is subject sequential two byte? If yes, go to (9).
1192 __ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
1193 STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
1194 __ j(zero, &seq_two_byte_string); // Go to (9).
1195 // (5b) Is subject external? If yes, go to (8).
1196 __ testb(rbx, Immediate(kStringRepresentationMask));
1197 // The underlying external string is never a short external string.
1198 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
1199 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
1200 __ j(not_zero, &external_string); // Go to (8)
1202 // (6) One byte sequential. Load regexp code for one byte.
1203 __ bind(&seq_one_byte_string);
1204 // rax: RegExp data (FixedArray)
1205 __ movp(r11, FieldOperand(rax, JSRegExp::kDataOneByteCodeOffset));
1206 __ Set(rcx, 1); // Type is one byte.
1208 // (E) Carry on. String handling is done.
1209 __ bind(&check_code);
1210 // r11: irregexp code
1211 // Check that the irregexp code has been generated for the actual string
1212 // encoding. If it has, the field contains a code object otherwise it contains
1213 // smi (code flushing support)
1214 __ JumpIfSmi(r11, &runtime);
1216 // rdi: sequential subject string (or look-alike, external string)
1217 // r15: original subject string
1218 // rcx: encoding of subject string (1 if one_byte, 0 if two_byte);
1220 // Load used arguments before starting to push arguments for call to native
1221 // RegExp code to avoid handling changing stack height.
1222 // We have to use r15 instead of rdi to load the length because rdi might
1223 // have been only made to look like a sequential string when it actually
1224 // is an external string.
1225 __ movp(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
1226 __ JumpIfNotSmi(rbx, &runtime);
1227 __ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
1228 __ j(above_equal, &runtime);
1229 __ SmiToInteger64(rbx, rbx);
1231 // rdi: subject string
1232 // rbx: previous index
1233 // rcx: encoding of subject string (1 if one_byte 0 if two_byte);
1235 // All checks done. Now push arguments for native regexp code.
1236 Counters* counters = isolate()->counters();
1237 __ IncrementCounter(counters->regexp_entry_native(), 1);
1239 // Isolates: note we add an additional parameter here (isolate pointer).
1240 static const int kRegExpExecuteArguments = 9;
1241 int argument_slots_on_stack =
1242 masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
1243 __ EnterApiExitFrame(argument_slots_on_stack);
1245 // Argument 9: Pass current isolate address.
1246 __ LoadAddress(kScratchRegister,
1247 ExternalReference::isolate_address(isolate()));
1248 __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kRegisterSize),
1251 // Argument 8: Indicate that this is a direct call from JavaScript.
1252 __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kRegisterSize),
1255 // Argument 7: Start (high end) of backtracking stack memory area.
1256 __ Move(kScratchRegister, address_of_regexp_stack_memory_address);
1257 __ movp(r9, Operand(kScratchRegister, 0));
1258 __ Move(kScratchRegister, address_of_regexp_stack_memory_size);
1259 __ addp(r9, Operand(kScratchRegister, 0));
1260 __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r9);
1262 // Argument 6: Set the number of capture registers to zero to force global
1263 // regexps to behave as non-global. This does not affect non-global regexps.
1264 // Argument 6 is passed in r9 on Linux and on the stack on Windows.
1266 __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kRegisterSize),
1272 // Argument 5: static offsets vector buffer.
1274 r8, ExternalReference::address_of_static_offsets_vector(isolate()));
1275 // Argument 5 passed in r8 on Linux and on the stack on Windows.
1277 __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kRegisterSize), r8);
1280 // rdi: subject string
1281 // rbx: previous index
1282 // rcx: encoding of subject string (1 if one_byte 0 if two_byte);
1284 // r14: slice offset
1285 // r15: original subject string
1287 // Argument 2: Previous index.
1288 __ movp(arg_reg_2, rbx);
1290 // Argument 4: End of string data
1291 // Argument 3: Start of string data
1292 Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
1293 // Prepare start and end index of the input.
1294 // Load the length from the original sliced string if that is the case.
1296 __ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset));
1297 __ addp(r14, arg_reg_3); // Using arg3 as scratch.
1299 // rbx: start index of the input
1300 // r14: end index of the input
1301 // r15: original subject string
1302 __ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
1303 __ j(zero, &setup_two_byte, Label::kNear);
1305 FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
1307 FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
1308 __ jmp(&setup_rest, Label::kNear);
1309 __ bind(&setup_two_byte);
1311 FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
1313 FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
1314 __ bind(&setup_rest);
1316 // Argument 1: Original subject string.
1317 // The original subject is in the previous stack frame. Therefore we have to
1318 // use rbp, which points exactly to one pointer size below the previous rsp.
1319 // (Because creating a new stack frame pushes the previous rbp onto the stack
1320 // and thereby moves up rsp by one kPointerSize.)
1321 __ movp(arg_reg_1, r15);
1323 // Locate the code entry and call it.
1324 __ addp(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
1327 __ LeaveApiExitFrame(true);
1329 // Check the result.
1332 __ cmpl(rax, Immediate(1));
1333 // We expect exactly one result since we force the called regexp to behave
1335 __ j(equal, &success, Label::kNear);
1336 __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
1337 __ j(equal, &exception);
1338 __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
1339 // If none of the above, it can only be retry.
1340 // Handle that in the runtime system.
1341 __ j(not_equal, &runtime);
1343 // For failure return null.
1344 __ LoadRoot(rax, Heap::kNullValueRootIndex);
1345 __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
1347 // Load RegExp data.
1349 __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
1350 __ movp(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
1351 __ SmiToInteger32(rax,
1352 FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
1353 // Calculate number of capture registers (number_of_captures + 1) * 2.
1354 __ leal(rdx, Operand(rax, rax, times_1, 2));
1356 // rdx: Number of capture registers
1357 // Check that the fourth object is a JSArray object.
1358 __ movp(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
1359 __ JumpIfSmi(r15, &runtime);
1360 __ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
1361 __ j(not_equal, &runtime);
1362 // Check that the JSArray is in fast case.
1363 __ movp(rbx, FieldOperand(r15, JSArray::kElementsOffset));
1364 __ movp(rax, FieldOperand(rbx, HeapObject::kMapOffset));
1365 __ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
1366 __ j(not_equal, &runtime);
1367 // Check that the last match info has space for the capture registers and the
1368 // additional information. Ensure no overflow in add.
1369 STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
1370 __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
1371 __ subl(rax, Immediate(RegExpImpl::kLastMatchOverhead));
1373 __ j(greater, &runtime);
1375 // rbx: last_match_info backing store (FixedArray)
1376 // rdx: number of capture registers
1377 // Store the capture count.
1378 __ Integer32ToSmi(kScratchRegister, rdx);
1379 __ movp(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
1381 // Store last subject and last input.
1382 __ movp(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
1383 __ movp(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
1385 __ RecordWriteField(rbx,
1386 RegExpImpl::kLastSubjectOffset,
1391 __ movp(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
1392 __ RecordWriteField(rbx,
1393 RegExpImpl::kLastInputOffset,
1398 // Get the static offsets vector filled by the native regexp code.
1400 rcx, ExternalReference::address_of_static_offsets_vector(isolate()));
1402 // rbx: last_match_info backing store (FixedArray)
1403 // rcx: offsets vector
1404 // rdx: number of capture registers
1405 Label next_capture, done;
1406 // Capture register counter starts from number of capture registers and
1407 // counts down until wraping after zero.
1408 __ bind(&next_capture);
1409 __ subp(rdx, Immediate(1));
1410 __ j(negative, &done, Label::kNear);
1411 // Read the value from the static offsets vector buffer and make it a smi.
1412 __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
1413 __ Integer32ToSmi(rdi, rdi);
1414 // Store the smi value in the last match info.
1415 __ movp(FieldOperand(rbx,
1418 RegExpImpl::kFirstCaptureOffset),
1420 __ jmp(&next_capture);
1423 // Return last match info.
1425 __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
1427 __ bind(&exception);
1428 // Result must now be exception. If there is no pending exception already a
1429 // stack overflow (on the backtrack stack) was detected in RegExp code but
1430 // haven't created the exception yet. Handle that in the runtime system.
1431 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1432 ExternalReference pending_exception_address(
1433 Isolate::kPendingExceptionAddress, isolate());
1434 Operand pending_exception_operand =
1435 masm->ExternalOperand(pending_exception_address, rbx);
1436 __ movp(rax, pending_exception_operand);
1437 __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
1439 __ j(equal, &runtime);
1441 // For exception, throw the exception again.
1442 __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
1444 // Do the runtime call to execute the regexp.
1446 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
1448 // Deferred code for string handling.
1449 // (7) Not a long external string? If yes, go to (10).
1450 __ bind(¬_seq_nor_cons);
1451 // Compare flags are still set from (3).
1452 __ j(greater, ¬_long_external, Label::kNear); // Go to (10).
1454 // (8) External string. Short external strings have been ruled out.
1455 __ bind(&external_string);
1456 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
1457 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
1458 if (FLAG_debug_code) {
1459 // Assert that we do not have a cons or slice (indirect strings) here.
1460 // Sequential strings have already been ruled out.
1461 __ testb(rbx, Immediate(kIsIndirectStringMask));
1462 __ Assert(zero, kExternalStringExpectedButNotFound);
1464 __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
1465 // Move the pointer so that offset-wise, it looks like a sequential string.
1466 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1467 __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
1468 STATIC_ASSERT(kTwoByteStringTag == 0);
1469 // (8a) Is the external string one byte? If yes, go to (6).
1470 __ testb(rbx, Immediate(kStringEncodingMask));
1471 __ j(not_zero, &seq_one_byte_string); // Goto (6).
1473 // rdi: subject string (flat two-byte)
1474 // rax: RegExp data (FixedArray)
1475 // (9) Two byte sequential. Load regexp code for one byte. Go to (E).
1476 __ bind(&seq_two_byte_string);
1477 __ movp(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
1478 __ Set(rcx, 0); // Type is two byte.
1479 __ jmp(&check_code); // Go to (E).
1481 // (10) Not a string or a short external string? If yes, bail out to runtime.
1482 __ bind(¬_long_external);
1483 // Catch non-string subject or short external string.
1484 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
1485 __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
1486 __ j(not_zero, &runtime);
1488 // (11) Sliced string. Replace subject with parent. Go to (5a).
1489 // Load offset into r14 and replace subject string with parent.
1490 __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
1491 __ movp(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
1492 __ jmp(&check_underlying);
1493 #endif // V8_INTERPRETED_REGEXP
1497 static int NegativeComparisonResult(Condition cc) {
1498 DCHECK(cc != equal);
1499 DCHECK((cc == less) || (cc == less_equal)
1500 || (cc == greater) || (cc == greater_equal));
1501 return (cc == greater || cc == greater_equal) ? LESS : GREATER;
1505 static void CheckInputType(MacroAssembler* masm, Register input,
1506 CompareICState::State expected, Label* fail) {
1508 if (expected == CompareICState::SMI) {
1509 __ JumpIfNotSmi(input, fail);
1510 } else if (expected == CompareICState::NUMBER) {
1511 __ JumpIfSmi(input, &ok);
1512 __ CompareMap(input, masm->isolate()->factory()->heap_number_map());
1513 __ j(not_equal, fail);
1515 // We could be strict about internalized/non-internalized here, but as long as
1516 // hydrogen doesn't care, the stub doesn't have to care either.
1521 static void BranchIfNotInternalizedString(MacroAssembler* masm,
1525 __ JumpIfSmi(object, label);
1526 __ movp(scratch, FieldOperand(object, HeapObject::kMapOffset));
1528 FieldOperand(scratch, Map::kInstanceTypeOffset));
1529 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
1530 __ testb(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
1531 __ j(not_zero, label);
1535 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
1536 Label check_unequal_objects, done;
1537 Condition cc = GetCondition();
1538 Factory* factory = isolate()->factory();
1541 CheckInputType(masm, rdx, left(), &miss);
1542 CheckInputType(masm, rax, right(), &miss);
1544 // Compare two smis.
1545 Label non_smi, smi_done;
1546 __ JumpIfNotBothSmi(rax, rdx, &non_smi);
1548 __ j(no_overflow, &smi_done);
1549 __ notp(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
1555 // The compare stub returns a positive, negative, or zero 64-bit integer
1556 // value in rax, corresponding to result of comparing the two inputs.
1557 // NOTICE! This code is only reached after a smi-fast-case check, so
1558 // it is certain that at least one operand isn't a smi.
1560 // Two identical objects are equal unless they are both NaN or undefined.
1562 Label not_identical;
1564 __ j(not_equal, ¬_identical, Label::kNear);
1567 // Check for undefined. undefined OP undefined is false even though
1568 // undefined == undefined.
1569 Label check_for_nan;
1570 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
1571 __ j(not_equal, &check_for_nan, Label::kNear);
1572 __ Set(rax, NegativeComparisonResult(cc));
1574 __ bind(&check_for_nan);
1577 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
1578 // so we do the second best thing - test it ourselves.
1580 // If it's not a heap number, then return equal for (in)equality operator.
1581 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
1582 factory->heap_number_map());
1583 __ j(equal, &heap_number, Label::kNear);
1585 // Call runtime on identical objects. Otherwise return equal.
1586 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
1587 __ j(above_equal, ¬_identical, Label::kNear);
1592 __ bind(&heap_number);
1593 // It is a heap number, so return equal if it's not NaN.
1594 // For NaN, return 1 for every condition except greater and
1595 // greater-equal. Return -1 for them, so the comparison yields
1596 // false for all conditions except not-equal.
1598 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1599 __ ucomisd(xmm0, xmm0);
1600 __ setcc(parity_even, rax);
1601 // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
1602 if (cc == greater_equal || cc == greater) {
1607 __ bind(¬_identical);
1610 if (cc == equal) { // Both strict and non-strict.
1611 Label slow; // Fallthrough label.
1613 // If we're doing a strict equality comparison, we don't have to do
1614 // type conversion, so we generate code to do fast comparison for objects
1615 // and oddballs. Non-smi numbers and strings still go through the usual
1618 // If either is a Smi (we know that not both are), then they can only
1619 // be equal if the other is a HeapNumber. If so, use the slow case.
1622 __ SelectNonSmi(rbx, rax, rdx, ¬_smis);
1624 // Check if the non-smi operand is a heap number.
1625 __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
1626 factory->heap_number_map());
1627 // If heap number, handle it in the slow case.
1629 // Return non-equal. ebx (the lower half of rbx) is not zero.
1636 // If either operand is a JSObject or an oddball value, then they are not
1637 // equal since their pointers are different
1638 // There is no test for undetectability in strict equality.
1640 // If the first object is a JS object, we have done pointer comparison.
1641 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
1642 Label first_non_object;
1643 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
1644 __ j(below, &first_non_object, Label::kNear);
1645 // Return non-zero (rax (not rax) is not zero)
1646 Label return_not_equal;
1647 STATIC_ASSERT(kHeapObjectTag != 0);
1648 __ bind(&return_not_equal);
1651 __ bind(&first_non_object);
1652 // Check for oddballs: true, false, null, undefined.
1653 __ CmpInstanceType(rcx, ODDBALL_TYPE);
1654 __ j(equal, &return_not_equal);
1656 __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
1657 __ j(above_equal, &return_not_equal);
1659 // Check for oddballs: true, false, null, undefined.
1660 __ CmpInstanceType(rcx, ODDBALL_TYPE);
1661 __ j(equal, &return_not_equal);
1663 // Fall through to the general case.
1668 // Generate the number comparison code.
1669 Label non_number_comparison;
1671 FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
1674 __ ucomisd(xmm0, xmm1);
1676 // Don't base result on EFLAGS when a NaN is involved.
1677 __ j(parity_even, &unordered, Label::kNear);
1678 // Return a result of -1, 0, or 1, based on EFLAGS.
1679 __ setcc(above, rax);
1680 __ setcc(below, rcx);
1684 // If one of the numbers was NaN, then the result is always false.
1685 // The cc is never not-equal.
1686 __ bind(&unordered);
1687 DCHECK(cc != not_equal);
1688 if (cc == less || cc == less_equal) {
1695 // The number comparison code did not provide a valid result.
1696 __ bind(&non_number_comparison);
1698 // Fast negative check for internalized-to-internalized equality.
1699 Label check_for_strings;
1701 BranchIfNotInternalizedString(
1702 masm, &check_for_strings, rax, kScratchRegister);
1703 BranchIfNotInternalizedString(
1704 masm, &check_for_strings, rdx, kScratchRegister);
1706 // We've already checked for object identity, so if both operands are
1707 // internalized strings they aren't equal. Register rax (not rax) already
1708 // holds a non-zero value, which indicates not equal, so just return.
1712 __ bind(&check_for_strings);
1714 __ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx,
1715 &check_unequal_objects);
1717 // Inline comparison of one-byte strings.
1719 StringHelper::GenerateFlatOneByteStringEquals(masm, rdx, rax, rcx, rbx);
1721 StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx,
1726 __ Abort(kUnexpectedFallThroughFromStringComparison);
1729 __ bind(&check_unequal_objects);
1730 if (cc == equal && !strict()) {
1731 // Not strict equality. Objects are unequal if
1732 // they are both JSObjects and not undetectable,
1733 // and their pointers are different.
1734 Label not_both_objects, return_unequal;
1735 // At most one is a smi, so we can test for smi by adding the two.
1736 // A smi plus a heap object has the low bit set, a heap object plus
1737 // a heap object has the low bit clear.
1738 STATIC_ASSERT(kSmiTag == 0);
1739 STATIC_ASSERT(kSmiTagMask == 1);
1740 __ leap(rcx, Operand(rax, rdx, times_1, 0));
1741 __ testb(rcx, Immediate(kSmiTagMask));
1742 __ j(not_zero, ¬_both_objects, Label::kNear);
1743 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
1744 __ j(below, ¬_both_objects, Label::kNear);
1745 __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
1746 __ j(below, ¬_both_objects, Label::kNear);
1747 __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
1748 Immediate(1 << Map::kIsUndetectable));
1749 __ j(zero, &return_unequal, Label::kNear);
1750 __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
1751 Immediate(1 << Map::kIsUndetectable));
1752 __ j(zero, &return_unequal, Label::kNear);
1753 // The objects are both undetectable, so they both compare as the value
1754 // undefined, and are equal.
1756 __ bind(&return_unequal);
1757 // Return non-equal by returning the non-zero object pointer in rax,
1758 // or return equal if we fell through to here.
1760 __ bind(¬_both_objects);
1763 // Push arguments below the return address to prepare jump to builtin.
1764 __ PopReturnAddressTo(rcx);
1768 // Figure out which native to call and setup the arguments.
1769 Builtins::JavaScript builtin;
1771 builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1773 builtin = Builtins::COMPARE;
1774 __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
1777 __ PushReturnAddressFrom(rcx);
1779 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1780 // tagged as a small integer.
1781 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
1788 static void GenerateRecordCallTarget(MacroAssembler* masm) {
1789 // Cache the called function in a feedback vector slot. Cache states
1790 // are uninitialized, monomorphic (indicated by a JSFunction), and
1792 // rax : number of arguments to the construct function
1793 // rbx : Feedback vector
1794 // rdx : slot in feedback vector (Smi)
1795 // rdi : the function to call
1796 Isolate* isolate = masm->isolate();
1797 Label initialize, done, miss, megamorphic, not_array_function,
1798 done_no_smi_convert;
1800 // Load the cache state into rcx.
1801 __ SmiToInteger32(rdx, rdx);
1802 __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
1803 FixedArray::kHeaderSize));
1805 // A monomorphic cache hit or an already megamorphic state: invoke the
1806 // function without changing the state.
1809 __ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
1812 if (!FLAG_pretenuring_call_new) {
1813 // If we came here, we need to see if we are the array function.
1814 // If we didn't have a matching function, and we didn't find the megamorph
1815 // sentinel, then we have in the slot either some other function or an
1816 // AllocationSite. Do a map check on the object in rcx.
1817 Handle<Map> allocation_site_map =
1818 masm->isolate()->factory()->allocation_site_map();
1819 __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
1820 __ j(not_equal, &miss);
1822 // Make sure the function is the Array() function
1823 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
1825 __ j(not_equal, &megamorphic);
1831 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
1833 __ Cmp(rcx, TypeFeedbackVector::UninitializedSentinel(isolate));
1834 __ j(equal, &initialize);
1835 // MegamorphicSentinel is an immortal immovable object (undefined) so no
1836 // write-barrier is needed.
1837 __ bind(&megamorphic);
1838 __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
1839 TypeFeedbackVector::MegamorphicSentinel(isolate));
1842 // An uninitialized cache is patched with the function or sentinel to
1843 // indicate the ElementsKind if function is the Array constructor.
1844 __ bind(&initialize);
1846 if (!FLAG_pretenuring_call_new) {
1847 // Make sure the function is the Array() function
1848 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
1850 __ j(not_equal, ¬_array_function);
1853 FrameScope scope(masm, StackFrame::INTERNAL);
1855 // Arguments register must be smi-tagged to call out.
1856 __ Integer32ToSmi(rax, rax);
1859 __ Integer32ToSmi(rdx, rdx);
1863 CreateAllocationSiteStub create_stub(isolate);
1864 __ CallStub(&create_stub);
1870 __ SmiToInteger32(rax, rax);
1872 __ jmp(&done_no_smi_convert);
1874 __ bind(¬_array_function);
1877 __ movp(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
1880 // We won't need rdx or rbx anymore, just save rdi
1884 __ RecordWriteArray(rbx, rdi, rdx, kDontSaveFPRegs,
1885 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
1891 __ Integer32ToSmi(rdx, rdx);
1893 __ bind(&done_no_smi_convert);
1897 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
1898 // Do not transform the receiver for strict mode functions.
1899 __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
1900 __ testb(FieldOperand(rcx, SharedFunctionInfo::kStrictModeByteOffset),
1901 Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
1902 __ j(not_equal, cont);
1904 // Do not transform the receiver for natives.
1905 // SharedFunctionInfo is already loaded into rcx.
1906 __ testb(FieldOperand(rcx, SharedFunctionInfo::kNativeByteOffset),
1907 Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
1908 __ j(not_equal, cont);
1912 static void EmitSlowCase(Isolate* isolate,
1913 MacroAssembler* masm,
1914 StackArgumentsAccessor* args,
1916 Label* non_function) {
1917 // Check for function proxy.
1918 __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
1919 __ j(not_equal, non_function);
1920 __ PopReturnAddressTo(rcx);
1921 __ Push(rdi); // put proxy as additional argument under return address
1922 __ PushReturnAddressFrom(rcx);
1923 __ Set(rax, argc + 1);
1925 __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
1927 Handle<Code> adaptor =
1928 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
1929 __ jmp(adaptor, RelocInfo::CODE_TARGET);
1932 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
1933 // of the original receiver from the call site).
1934 __ bind(non_function);
1935 __ movp(args->GetReceiverOperand(), rdi);
1938 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
1939 Handle<Code> adaptor =
1940 isolate->builtins()->ArgumentsAdaptorTrampoline();
1941 __ Jump(adaptor, RelocInfo::CODE_TARGET);
1945 static void EmitWrapCase(MacroAssembler* masm,
1946 StackArgumentsAccessor* args,
1948 // Wrap the receiver and patch it back onto the stack.
1949 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
1952 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
1955 __ movp(args->GetReceiverOperand(), rax);
1960 static void CallFunctionNoFeedback(MacroAssembler* masm,
1961 int argc, bool needs_checks,
1962 bool call_as_method) {
1963 // rdi : the function to call
1965 // wrap_and_call can only be true if we are compiling a monomorphic method.
1966 Isolate* isolate = masm->isolate();
1967 Label slow, non_function, wrap, cont;
1968 StackArgumentsAccessor args(rsp, argc);
1971 // Check that the function really is a JavaScript function.
1972 __ JumpIfSmi(rdi, &non_function);
1974 // Goto slow case if we do not have a function.
1975 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
1976 __ j(not_equal, &slow);
1979 // Fast-case: Just invoke the function.
1980 ParameterCount actual(argc);
1982 if (call_as_method) {
1984 EmitContinueIfStrictOrNative(masm, &cont);
1987 // Load the receiver from the stack.
1988 __ movp(rax, args.GetReceiverOperand());
1991 __ JumpIfSmi(rax, &wrap);
1993 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
2002 __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
2005 // Slow-case: Non-function called.
2007 EmitSlowCase(isolate, masm, &args, argc, &non_function);
2010 if (call_as_method) {
2012 EmitWrapCase(masm, &args, &cont);
2017 void CallFunctionStub::Generate(MacroAssembler* masm) {
2018 CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2022 void CallConstructStub::Generate(MacroAssembler* masm) {
2023 // rax : number of arguments
2024 // rbx : feedback vector
2025 // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
2027 // rdi : constructor function
2028 Label slow, non_function_call;
2030 // Check that function is not a smi.
2031 __ JumpIfSmi(rdi, &non_function_call);
2032 // Check that function is a JSFunction.
2033 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
2034 __ j(not_equal, &slow);
2036 if (RecordCallTarget()) {
2037 GenerateRecordCallTarget(masm);
2039 __ SmiToInteger32(rdx, rdx);
2040 if (FLAG_pretenuring_call_new) {
2041 // Put the AllocationSite from the feedback vector into ebx.
2042 // By adding kPointerSize we encode that we know the AllocationSite
2043 // entry is at the feedback vector slot given by rdx + 1.
2044 __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
2045 FixedArray::kHeaderSize + kPointerSize));
2047 Label feedback_register_initialized;
2048 // Put the AllocationSite from the feedback vector into rbx, or undefined.
2049 __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
2050 FixedArray::kHeaderSize));
2051 __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
2052 __ j(equal, &feedback_register_initialized);
2053 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
2054 __ bind(&feedback_register_initialized);
2057 __ AssertUndefinedOrAllocationSite(rbx);
2060 // Pass original constructor to construct stub.
2061 if (IsSuperConstructorCall()) {
2062 __ movp(rdx, Operand(rsp, rax, times_pointer_size, 2 * kPointerSize));
2067 // Jump to the function-specific construct stub.
2068 Register jmp_reg = rcx;
2069 __ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
2070 __ movp(jmp_reg, FieldOperand(jmp_reg,
2071 SharedFunctionInfo::kConstructStubOffset));
2072 __ leap(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
2075 // rdi: called object
2076 // rax: number of arguments
2080 __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
2081 __ j(not_equal, &non_function_call);
2082 __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2085 __ bind(&non_function_call);
2086 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2088 // Set expected number of arguments to zero (not changing rax).
2090 __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2091 RelocInfo::CODE_TARGET);
2095 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2096 __ movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
2097 __ movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
2098 __ movp(vector, FieldOperand(vector,
2099 SharedFunctionInfo::kFeedbackVectorOffset));
2103 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2105 // rdx - slot id (as integer)
2108 int argc = arg_count();
2109 ParameterCount actual(argc);
2111 __ SmiToInteger32(rdx, rdx);
2113 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
2115 __ j(not_equal, &miss);
2117 __ movp(rax, Immediate(arg_count()));
2118 __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
2119 FixedArray::kHeaderSize));
2120 // Verify that ecx contains an AllocationSite
2121 Factory* factory = masm->isolate()->factory();
2122 __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
2123 factory->allocation_site_map());
2124 __ j(not_equal, &miss);
2128 ArrayConstructorStub stub(masm->isolate(), arg_count());
2129 __ TailCallStub(&stub);
2134 // The slow case, we need this no matter what to complete a call after a miss.
2135 CallFunctionNoFeedback(masm,
2145 void CallICStub::Generate(MacroAssembler* masm) {
2149 Isolate* isolate = masm->isolate();
2150 const int with_types_offset =
2151 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
2152 const int generic_offset =
2153 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
2154 Label extra_checks_or_miss, slow_start;
2155 Label slow, non_function, wrap, cont;
2156 Label have_js_function;
2157 int argc = arg_count();
2158 StackArgumentsAccessor args(rsp, argc);
2159 ParameterCount actual(argc);
2161 // The checks. First, does rdi match the recorded monomorphic target?
2162 __ SmiToInteger32(rdx, rdx);
2164 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
2166 // We don't know that we have a weak cell. We might have a private symbol
2167 // or an AllocationSite, but the memory is safe to examine.
2168 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
2170 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
2171 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
2172 // computed, meaning that it can't appear to be a pointer. If the low bit is
2173 // 0, then hash is computed, but the 0 bit prevents the field from appearing
2175 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
2176 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
2177 WeakCell::kValueOffset &&
2178 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
2180 __ cmpp(rdi, FieldOperand(rcx, WeakCell::kValueOffset));
2181 __ j(not_equal, &extra_checks_or_miss);
2183 // The compare above could have been a SMI/SMI comparison. Guard against this
2184 // convincing us that we have a monomorphic JSFunction.
2185 __ JumpIfSmi(rdi, &extra_checks_or_miss);
2187 __ bind(&have_js_function);
2188 if (CallAsMethod()) {
2189 EmitContinueIfStrictOrNative(masm, &cont);
2191 // Load the receiver from the stack.
2192 __ movp(rax, args.GetReceiverOperand());
2194 __ JumpIfSmi(rax, &wrap);
2196 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
2202 __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
2205 EmitSlowCase(isolate, masm, &args, argc, &non_function);
2207 if (CallAsMethod()) {
2209 EmitWrapCase(masm, &args, &cont);
2212 __ bind(&extra_checks_or_miss);
2213 Label uninitialized, miss;
2215 __ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
2216 __ j(equal, &slow_start);
2218 // The following cases attempt to handle MISS cases without going to the
2220 if (FLAG_trace_ic) {
2224 __ Cmp(rcx, TypeFeedbackVector::UninitializedSentinel(isolate));
2225 __ j(equal, &uninitialized);
2227 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2228 // to handle it here. More complex cases are dealt with in the runtime.
2229 __ AssertNotSmi(rcx);
2230 __ CmpObjectType(rcx, JS_FUNCTION_TYPE, rcx);
2231 __ j(not_equal, &miss);
2232 __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
2233 TypeFeedbackVector::MegamorphicSentinel(isolate));
2234 // We have to update statistics for runtime profiling.
2235 __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(-1));
2236 __ SmiAddConstant(FieldOperand(rbx, generic_offset), Smi::FromInt(1));
2237 __ jmp(&slow_start);
2239 __ bind(&uninitialized);
2241 // We are going monomorphic, provided we actually have a JSFunction.
2242 __ JumpIfSmi(rdi, &miss);
2244 // Goto miss case if we do not have a function.
2245 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
2246 __ j(not_equal, &miss);
2248 // Make sure the function is not the Array() function, which requires special
2249 // behavior on MISS.
2250 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
2255 __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(1));
2257 // Store the function. Use a stub since we need a frame for allocation.
2259 // rdx - slot (needs to be in smi form)
2262 FrameScope scope(masm, StackFrame::INTERNAL);
2263 CreateWeakCellStub create_stub(isolate);
2265 __ Integer32ToSmi(rdx, rdx);
2267 __ CallStub(&create_stub);
2271 __ jmp(&have_js_function);
2273 // We are here because tracing is on or we encountered a MISS case we can't
2279 __ bind(&slow_start);
2280 // Check that function is not a smi.
2281 __ JumpIfSmi(rdi, &non_function);
2282 // Check that function is a JSFunction.
2283 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
2284 __ j(not_equal, &slow);
2285 __ jmp(&have_js_function);
2292 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2293 FrameScope scope(masm, StackFrame::INTERNAL);
2295 // Push the receiver and the function and feedback info.
2298 __ Integer32ToSmi(rdx, rdx);
2302 IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
2303 : IC::kCallIC_Customization_Miss;
2305 ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
2306 __ CallExternalReference(miss, 3);
2308 // Move result to edi and exit the internal frame.
2313 bool CEntryStub::NeedsImmovableCode() {
2318 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
2319 CEntryStub::GenerateAheadOfTime(isolate);
2320 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
2321 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
2322 // It is important that the store buffer overflow stubs are generated first.
2323 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
2324 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
2325 CreateWeakCellStub::GenerateAheadOfTime(isolate);
2326 BinaryOpICStub::GenerateAheadOfTime(isolate);
2327 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
2331 void CodeStub::GenerateFPStubs(Isolate* isolate) {
2335 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
2336 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
2338 CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
2339 save_doubles.GetCode();
2343 void CEntryStub::Generate(MacroAssembler* masm) {
2344 // rax: number of arguments including receiver
2345 // rbx: pointer to C function (C callee-saved)
2346 // rbp: frame pointer of calling JS frame (restored after C call)
2347 // rsp: stack pointer (restored after C call)
2348 // rsi: current context (restored)
2350 ProfileEntryHookStub::MaybeCallEntryHook(masm);
2352 // Enter the exit frame that transitions from JavaScript to C++.
2354 int arg_stack_space = (result_size() < 2 ? 2 : 4);
2356 int arg_stack_space = 0;
2358 __ EnterExitFrame(arg_stack_space, save_doubles());
2360 // rbx: pointer to builtin function (C callee-saved).
2361 // rbp: frame pointer of exit frame (restored after C call).
2362 // rsp: stack pointer (restored after C call).
2363 // r14: number of arguments including receiver (C callee-saved).
2364 // r15: argv pointer (C callee-saved).
2366 // Simple results returned in rax (both AMD64 and Win64 calling conventions).
2367 // Complex results must be written to address passed as first argument.
2368 // AMD64 calling convention: a struct of two pointers in rax+rdx
2370 // Check stack alignment.
2371 if (FLAG_debug_code) {
2372 __ CheckStackAlignment();
2377 // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
2378 // Pass argv and argc as two parameters. The arguments object will
2379 // be created by stubs declared by DECLARE_RUNTIME_FUNCTION().
2380 if (result_size() < 2) {
2381 // Pass a pointer to the Arguments object as the first argument.
2382 // Return result in single register (rax).
2383 __ movp(rcx, r14); // argc.
2384 __ movp(rdx, r15); // argv.
2385 __ Move(r8, ExternalReference::isolate_address(isolate()));
2387 DCHECK_EQ(2, result_size());
2388 // Pass a pointer to the result location as the first argument.
2389 __ leap(rcx, StackSpaceOperand(2));
2390 // Pass a pointer to the Arguments object as the second argument.
2391 __ movp(rdx, r14); // argc.
2392 __ movp(r8, r15); // argv.
2393 __ Move(r9, ExternalReference::isolate_address(isolate()));
2397 // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
2398 __ movp(rdi, r14); // argc.
2399 __ movp(rsi, r15); // argv.
2400 __ Move(rdx, ExternalReference::isolate_address(isolate()));
2403 // Result is in rax - do not destroy this register!
2406 // If return value is on the stack, pop it to registers.
2407 if (result_size() > 1) {
2408 DCHECK_EQ(2, result_size());
2409 // Read result values stored on stack. Result is stored
2410 // above the four argument mirror slots and the two
2411 // Arguments object slots.
2412 __ movq(rax, Operand(rsp, 6 * kRegisterSize));
2413 __ movq(rdx, Operand(rsp, 7 * kRegisterSize));
2417 // Runtime functions should not return 'the hole'. Allowing it to escape may
2418 // lead to crashes in the IC code later.
2419 if (FLAG_debug_code) {
2421 __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
2422 __ j(not_equal, &okay, Label::kNear);
2427 // Check result for exception sentinel.
2428 Label exception_returned;
2429 __ CompareRoot(rax, Heap::kExceptionRootIndex);
2430 __ j(equal, &exception_returned);
2432 // Check that there is no pending exception, otherwise we
2433 // should have returned the exception sentinel.
2434 if (FLAG_debug_code) {
2436 __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
2437 ExternalReference pending_exception_address(
2438 Isolate::kPendingExceptionAddress, isolate());
2439 Operand pending_exception_operand =
2440 masm->ExternalOperand(pending_exception_address);
2441 __ cmpp(r14, pending_exception_operand);
2442 __ j(equal, &okay, Label::kNear);
2447 // Exit the JavaScript to C++ exit frame.
2448 __ LeaveExitFrame(save_doubles());
2451 // Handling of exception.
2452 __ bind(&exception_returned);
2454 ExternalReference pending_handler_context_address(
2455 Isolate::kPendingHandlerContextAddress, isolate());
2456 ExternalReference pending_handler_code_address(
2457 Isolate::kPendingHandlerCodeAddress, isolate());
2458 ExternalReference pending_handler_offset_address(
2459 Isolate::kPendingHandlerOffsetAddress, isolate());
2460 ExternalReference pending_handler_fp_address(
2461 Isolate::kPendingHandlerFPAddress, isolate());
2462 ExternalReference pending_handler_sp_address(
2463 Isolate::kPendingHandlerSPAddress, isolate());
2465 // Ask the runtime for help to determine the handler. This will set rax to
2466 // contain the current pending exception, don't clobber it.
2467 ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
2469 FrameScope scope(masm, StackFrame::MANUAL);
2470 __ movp(arg_reg_1, Immediate(0)); // argc.
2471 __ movp(arg_reg_2, Immediate(0)); // argv.
2472 __ Move(arg_reg_3, ExternalReference::isolate_address(isolate()));
2473 __ PrepareCallCFunction(3);
2474 __ CallCFunction(find_handler, 3);
2477 // Retrieve the handler context, SP and FP.
2478 __ movp(rsi, masm->ExternalOperand(pending_handler_context_address));
2479 __ movp(rsp, masm->ExternalOperand(pending_handler_sp_address));
2480 __ movp(rbp, masm->ExternalOperand(pending_handler_fp_address));
2482 // If the handler is a JS frame, restore the context to the frame. Note that
2483 // the context will be set to (rsi == 0) for non-JS frames.
2486 __ j(zero, &skip, Label::kNear);
2487 __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
2490 // Compute the handler entry address and jump to it.
2491 __ movp(rdi, masm->ExternalOperand(pending_handler_code_address));
2492 __ movp(rdx, masm->ExternalOperand(pending_handler_offset_address));
2493 __ leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
2498 void JSEntryStub::Generate(MacroAssembler* masm) {
2499 Label invoke, handler_entry, exit;
2500 Label not_outermost_js, not_outermost_js_2;
2502 ProfileEntryHookStub::MaybeCallEntryHook(masm);
2504 { // NOLINT. Scope block confuses linter.
2505 MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
2510 // Push the stack frame type marker twice.
2511 int marker = type();
2512 // Scratch register is neither callee-save, nor an argument register on any
2513 // platform. It's free to use at this point.
2514 // Cannot use smi-register for loading yet.
2515 __ Move(kScratchRegister, Smi::FromInt(marker), Assembler::RelocInfoNone());
2516 __ Push(kScratchRegister); // context slot
2517 __ Push(kScratchRegister); // function slot
2518 // Save callee-saved registers (X64/X32/Win64 calling conventions).
2524 __ pushq(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
2525 __ pushq(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
2530 // On Win64 XMM6-XMM15 are callee-save
2531 __ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
2532 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
2533 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
2534 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
2535 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3), xmm9);
2536 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4), xmm10);
2537 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5), xmm11);
2538 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6), xmm12);
2539 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7), xmm13);
2540 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8), xmm14);
2541 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9), xmm15);
2544 // Set up the roots and smi constant registers.
2545 // Needs to be done before any further smi loads.
2546 __ InitializeRootRegister();
2549 // Save copies of the top frame descriptor on the stack.
2550 ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate());
2552 Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
2553 __ Push(c_entry_fp_operand);
2556 // If this is the outermost JS call, set js_entry_sp value.
2557 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
2558 __ Load(rax, js_entry_sp);
2560 __ j(not_zero, ¬_outermost_js);
2561 __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
2563 __ Store(js_entry_sp, rax);
2566 __ bind(¬_outermost_js);
2567 __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
2570 // Jump to a faked try block that does the invoke, with a faked catch
2571 // block that sets the pending exception.
2573 __ bind(&handler_entry);
2574 handler_offset_ = handler_entry.pos();
2575 // Caught exception: Store result (exception) in the pending exception
2576 // field in the JSEnv and return a failure sentinel.
2577 ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
2579 __ Store(pending_exception, rax);
2580 __ LoadRoot(rax, Heap::kExceptionRootIndex);
2583 // Invoke: Link this frame into the handler chain.
2585 __ PushStackHandler();
2587 // Clear any pending exceptions.
2588 __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
2589 __ Store(pending_exception, rax);
2591 // Fake a receiver (NULL).
2592 __ Push(Immediate(0)); // receiver
2594 // Invoke the function by calling through JS entry trampoline builtin and
2595 // pop the faked function when we return. We load the address from an
2596 // external reference instead of inlining the call target address directly
2597 // in the code, because the builtin stubs may not have been generated yet
2598 // at the time this code is generated.
2599 if (type() == StackFrame::ENTRY_CONSTRUCT) {
2600 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
2602 __ Load(rax, construct_entry);
2604 ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
2605 __ Load(rax, entry);
2607 __ leap(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
2608 __ call(kScratchRegister);
2610 // Unlink this frame from the handler chain.
2611 __ PopStackHandler();
2614 // Check if the current stack frame is marked as the outermost JS frame.
2616 __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
2617 __ j(not_equal, ¬_outermost_js_2);
2618 __ Move(kScratchRegister, js_entry_sp);
2619 __ movp(Operand(kScratchRegister, 0), Immediate(0));
2620 __ bind(¬_outermost_js_2);
2622 // Restore the top frame descriptor from the stack.
2623 { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
2624 __ Pop(c_entry_fp_operand);
2627 // Restore callee-saved registers (X64 conventions).
2629 // On Win64 XMM6-XMM15 are callee-save
2630 __ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0));
2631 __ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1));
2632 __ movdqu(xmm8, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2));
2633 __ movdqu(xmm9, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3));
2634 __ movdqu(xmm10, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4));
2635 __ movdqu(xmm11, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5));
2636 __ movdqu(xmm12, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6));
2637 __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
2638 __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
2639 __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
2640 __ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
2645 // Callee save on in Win64 ABI, arguments/volatile in AMD64 ABI.
2653 __ addp(rsp, Immediate(2 * kPointerSize)); // remove markers
2655 // Restore frame pointer and return.
2661 void InstanceofStub::Generate(MacroAssembler* masm) {
2662 // Implements "value instanceof function" operator.
2663 // Expected input state with no inline cache:
2664 // rsp[0] : return address
2665 // rsp[8] : function pointer
2667 // Expected input state with an inline one-element cache:
2668 // rsp[0] : return address
2669 // rsp[8] : offset from return address to location of inline cache
2670 // rsp[16] : function pointer
2672 // Returns a bitwise zero to indicate that the value
2673 // is and instance of the function and anything else to
2674 // indicate that the value is not an instance.
2676 // Fixed register usage throughout the stub.
2677 Register object = rax; // Object (lhs).
2678 Register map = rbx; // Map of the object.
2679 Register function = rdx; // Function (rhs).
2680 Register prototype = rdi; // Prototype of the function.
2681 Register scratch = rcx;
2683 static const int kOffsetToMapCheckValue = 2;
2684 static const int kOffsetToResultValue = kPointerSize == kInt64Size ? 18 : 14;
2685 // The last 4 bytes of the instruction sequence
2686 // movp(rdi, FieldOperand(rax, HeapObject::kMapOffset))
2687 // Move(kScratchRegister, Factory::the_hole_value())
2688 // in front of the hole value address.
2689 static const unsigned int kWordBeforeMapCheckValue =
2690 kPointerSize == kInt64Size ? 0xBA49FF78 : 0xBA41FF78;
2691 // The last 4 bytes of the instruction sequence
2692 // __ j(not_equal, &cache_miss);
2693 // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
2694 // before the offset of the hole value in the root array.
2695 static const unsigned int kWordBeforeResultValue =
2696 kPointerSize == kInt64Size ? 0x458B4906 : 0x458B4106;
2698 int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
2700 DCHECK_EQ(object.code(), InstanceofStub::left().code());
2701 DCHECK_EQ(function.code(), InstanceofStub::right().code());
2703 // Get the object and function - they are always both needed.
2704 // Go slow case if the object is a smi.
2706 StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
2707 ARGUMENTS_DONT_CONTAIN_RECEIVER);
2708 if (!HasArgsInRegisters()) {
2709 __ movp(object, args.GetArgumentOperand(0));
2710 __ movp(function, args.GetArgumentOperand(1));
2712 __ JumpIfSmi(object, &slow);
2714 // Check that the left hand is a JS object. Leave its map in rax.
2715 __ CmpObjectType(object, FIRST_SPEC_OBJECT_TYPE, map);
2717 __ CmpInstanceType(map, LAST_SPEC_OBJECT_TYPE);
2720 // If there is a call site cache don't look in the global cache, but do the
2721 // real lookup and update the call site cache.
2722 if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
2723 // Look up the function and the map in the instanceof cache.
2725 __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2726 __ j(not_equal, &miss, Label::kNear);
2727 __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
2728 __ j(not_equal, &miss, Label::kNear);
2729 __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
2730 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
2734 // Get the prototype of the function.
2735 __ TryGetFunctionPrototype(function, prototype, &slow, true);
2737 // Check that the function prototype is a JS object.
2738 __ JumpIfSmi(prototype, &slow);
2739 __ CmpObjectType(prototype, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
2741 __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
2744 // Update the global instanceof or call site inlined cache with the current
2745 // map and function. The cached answer will be set when it is known below.
2746 if (!HasCallSiteInlineCheck()) {
2747 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2748 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
2750 // The constants for the code patching are based on push instructions
2751 // at the call site.
2752 DCHECK(!HasArgsInRegisters());
2753 // Get return address and delta to inlined map check.
2754 __ movq(kScratchRegister, StackOperandForReturnAddress(0));
2755 __ subp(kScratchRegister, args.GetArgumentOperand(2));
2756 if (FLAG_debug_code) {
2757 __ movl(scratch, Immediate(kWordBeforeMapCheckValue));
2758 __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), scratch);
2759 __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
2761 __ movp(kScratchRegister,
2762 Operand(kScratchRegister, kOffsetToMapCheckValue));
2763 __ movp(Operand(kScratchRegister, 0), map);
2766 // Loop through the prototype chain looking for the function prototype.
2767 __ movp(scratch, FieldOperand(map, Map::kPrototypeOffset));
2768 Label loop, is_instance, is_not_instance;
2769 __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
2771 __ cmpp(scratch, prototype);
2772 __ j(equal, &is_instance, Label::kNear);
2773 __ cmpp(scratch, kScratchRegister);
2774 // The code at is_not_instance assumes that kScratchRegister contains a
2775 // non-zero GCable value (the null object in this case).
2776 __ j(equal, &is_not_instance, Label::kNear);
2777 __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
2778 __ movp(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
2781 __ bind(&is_instance);
2782 if (!HasCallSiteInlineCheck()) {
2784 // Store bitwise zero in the cache. This is a Smi in GC terms.
2785 STATIC_ASSERT(kSmiTag == 0);
2786 __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
2787 if (ReturnTrueFalseObject()) {
2788 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2791 // Store offset of true in the root array at the inline check site.
2792 int true_offset = 0x100 +
2793 (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
2794 // Assert it is a 1-byte signed value.
2795 DCHECK(true_offset >= 0 && true_offset < 0x100);
2796 __ movl(rax, Immediate(true_offset));
2797 __ movq(kScratchRegister, StackOperandForReturnAddress(0));
2798 __ subp(kScratchRegister, args.GetArgumentOperand(2));
2799 __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
2800 if (FLAG_debug_code) {
2801 __ movl(rax, Immediate(kWordBeforeResultValue));
2802 __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
2803 __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
2805 if (!ReturnTrueFalseObject()) {
2809 __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
2812 __ bind(&is_not_instance);
2813 if (!HasCallSiteInlineCheck()) {
2814 // We have to store a non-zero value in the cache.
2815 __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
2816 if (ReturnTrueFalseObject()) {
2817 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2820 // Store offset of false in the root array at the inline check site.
2821 int false_offset = 0x100 +
2822 (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
2823 // Assert it is a 1-byte signed value.
2824 DCHECK(false_offset >= 0 && false_offset < 0x100);
2825 __ movl(rax, Immediate(false_offset));
2826 __ movq(kScratchRegister, StackOperandForReturnAddress(0));
2827 __ subp(kScratchRegister, args.GetArgumentOperand(2));
2828 __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
2829 if (FLAG_debug_code) {
2830 __ movl(rax, Immediate(kWordBeforeResultValue));
2831 __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
2832 __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
2835 __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
2838 // Slow-case: Go through the JavaScript implementation.
2840 if (!ReturnTrueFalseObject()) {
2841 // Tail call the builtin which returns 0 or 1.
2842 DCHECK(!HasArgsInRegisters());
2843 if (HasCallSiteInlineCheck()) {
2844 // Remove extra value from the stack.
2845 __ PopReturnAddressTo(rcx);
2847 __ PushReturnAddressFrom(rcx);
2849 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
2851 // Call the builtin and convert 0/1 to true/false.
2853 FrameScope scope(masm, StackFrame::INTERNAL);
2856 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2858 Label true_value, done;
2860 __ j(zero, &true_value, Label::kNear);
2861 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2862 __ jmp(&done, Label::kNear);
2863 __ bind(&true_value);
2864 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2866 __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
2872 // -------------------------------------------------------------------------
2873 // StringCharCodeAtGenerator
2875 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2876 // If the receiver is a smi trigger the non-string case.
2877 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2878 __ JumpIfSmi(object_, receiver_not_string_);
2880 // Fetch the instance type of the receiver into result register.
2881 __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
2882 __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
2883 // If the receiver is not a string trigger the non-string case.
2884 __ testb(result_, Immediate(kIsNotStringMask));
2885 __ j(not_zero, receiver_not_string_);
2888 // If the index is non-smi trigger the non-smi case.
2889 __ JumpIfNotSmi(index_, &index_not_smi_);
2890 __ bind(&got_smi_index_);
2892 // Check for index out of range.
2893 __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
2894 __ j(above_equal, index_out_of_range_);
2896 __ SmiToInteger32(index_, index_);
2898 StringCharLoadGenerator::Generate(
2899 masm, object_, index_, result_, &call_runtime_);
2901 __ Integer32ToSmi(result_, result_);
2906 void StringCharCodeAtGenerator::GenerateSlow(
2907 MacroAssembler* masm, EmbedMode embed_mode,
2908 const RuntimeCallHelper& call_helper) {
2909 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2911 Factory* factory = masm->isolate()->factory();
2912 // Index is not a smi.
2913 __ bind(&index_not_smi_);
2914 // If index is a heap number, try converting it to an integer.
2916 factory->heap_number_map(),
2919 call_helper.BeforeCall(masm);
2920 if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
2921 __ Push(VectorLoadICDescriptor::VectorRegister());
2922 __ Push(VectorLoadICDescriptor::SlotRegister());
2925 __ Push(index_); // Consumed by runtime conversion function.
2926 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
2927 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
2929 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
2930 // NumberToSmi discards numbers that are not exact integers.
2931 __ CallRuntime(Runtime::kNumberToSmi, 1);
2933 if (!index_.is(rax)) {
2934 // Save the conversion result before the pop instructions below
2935 // have a chance to overwrite it.
2936 __ movp(index_, rax);
2939 if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
2940 __ Pop(VectorLoadICDescriptor::SlotRegister());
2941 __ Pop(VectorLoadICDescriptor::VectorRegister());
2943 // Reload the instance type.
2944 __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
2945 __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
2946 call_helper.AfterCall(masm);
2947 // If index is still not a smi, it must be out of range.
2948 __ JumpIfNotSmi(index_, index_out_of_range_);
2949 // Otherwise, return to the fast path.
2950 __ jmp(&got_smi_index_);
2952 // Call runtime. We get here when the receiver is a string and the
2953 // index is a number, but the code of getting the actual character
2954 // is too complex (e.g., when the string needs to be flattened).
2955 __ bind(&call_runtime_);
2956 call_helper.BeforeCall(masm);
2958 __ Integer32ToSmi(index_, index_);
2960 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
2961 if (!result_.is(rax)) {
2962 __ movp(result_, rax);
2964 call_helper.AfterCall(masm);
2967 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2971 // -------------------------------------------------------------------------
2972 // StringCharFromCodeGenerator
2974 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2975 // Fast case of Heap::LookupSingleCharacterStringFromCode.
2976 __ JumpIfNotSmi(code_, &slow_case_);
2977 __ SmiCompare(code_, Smi::FromInt(String::kMaxOneByteCharCode));
2978 __ j(above, &slow_case_);
2980 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2981 SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
2982 __ movp(result_, FieldOperand(result_, index.reg, index.scale,
2983 FixedArray::kHeaderSize));
2984 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
2985 __ j(equal, &slow_case_);
2990 void StringCharFromCodeGenerator::GenerateSlow(
2991 MacroAssembler* masm,
2992 const RuntimeCallHelper& call_helper) {
2993 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2995 __ bind(&slow_case_);
2996 call_helper.BeforeCall(masm);
2998 __ CallRuntime(Runtime::kCharFromCode, 1);
2999 if (!result_.is(rax)) {
3000 __ movp(result_, rax);
3002 call_helper.AfterCall(masm);
3005 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3009 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3013 String::Encoding encoding) {
3014 // Nothing to do for zero characters.
3016 __ testl(count, count);
3017 __ j(zero, &done, Label::kNear);
3019 // Make count the number of bytes to copy.
3020 if (encoding == String::TWO_BYTE_ENCODING) {
3021 STATIC_ASSERT(2 == sizeof(uc16));
3022 __ addl(count, count);
3025 // Copy remaining characters.
3028 __ movb(kScratchRegister, Operand(src, 0));
3029 __ movb(Operand(dest, 0), kScratchRegister);
3033 __ j(not_zero, &loop);
3039 void SubStringStub::Generate(MacroAssembler* masm) {
3042 // Stack frame on entry.
3043 // rsp[0] : return address
3048 enum SubStringStubArgumentIndices {
3049 STRING_ARGUMENT_INDEX,
3050 FROM_ARGUMENT_INDEX,
3052 SUB_STRING_ARGUMENT_COUNT
3055 StackArgumentsAccessor args(rsp, SUB_STRING_ARGUMENT_COUNT,
3056 ARGUMENTS_DONT_CONTAIN_RECEIVER);
3058 // Make sure first argument is a string.
3059 __ movp(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
3060 STATIC_ASSERT(kSmiTag == 0);
3061 __ testl(rax, Immediate(kSmiTagMask));
3062 __ j(zero, &runtime);
3063 Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
3064 __ j(NegateCondition(is_string), &runtime);
3067 // rbx: instance type
3068 // Calculate length of sub string using the smi values.
3069 __ movp(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
3070 __ movp(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
3071 __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
3073 __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
3074 __ cmpp(rcx, FieldOperand(rax, String::kLengthOffset));
3075 Label not_original_string;
3076 // Shorter than original string's length: an actual substring.
3077 __ j(below, ¬_original_string, Label::kNear);
3078 // Longer than original string's length or negative: unsafe arguments.
3079 __ j(above, &runtime);
3080 // Return original string.
3081 Counters* counters = isolate()->counters();
3082 __ IncrementCounter(counters->sub_string_native(), 1);
3083 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
3084 __ bind(¬_original_string);
3087 __ SmiCompare(rcx, Smi::FromInt(1));
3088 __ j(equal, &single_char);
3090 __ SmiToInteger32(rcx, rcx);
3093 // rbx: instance type
3094 // rcx: sub string length
3095 // rdx: from index (smi)
3096 // Deal with different string types: update the index if necessary
3097 // and put the underlying string into edi.
3098 Label underlying_unpacked, sliced_string, seq_or_external_string;
3099 // If the string is not indirect, it can only be sequential or external.
3100 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3101 STATIC_ASSERT(kIsIndirectStringMask != 0);
3102 __ testb(rbx, Immediate(kIsIndirectStringMask));
3103 __ j(zero, &seq_or_external_string, Label::kNear);
3105 __ testb(rbx, Immediate(kSlicedNotConsMask));
3106 __ j(not_zero, &sliced_string, Label::kNear);
3107 // Cons string. Check whether it is flat, then fetch first part.
3108 // Flat cons strings have an empty second part.
3109 __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
3110 Heap::kempty_stringRootIndex);
3111 __ j(not_equal, &runtime);
3112 __ movp(rdi, FieldOperand(rax, ConsString::kFirstOffset));
3113 // Update instance type.
3114 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
3115 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
3116 __ jmp(&underlying_unpacked, Label::kNear);
3118 __ bind(&sliced_string);
3119 // Sliced string. Fetch parent and correct start index by offset.
3120 __ addp(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
3121 __ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
3122 // Update instance type.
3123 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
3124 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
3125 __ jmp(&underlying_unpacked, Label::kNear);
3127 __ bind(&seq_or_external_string);
3128 // Sequential or external string. Just move string to the correct register.
3131 __ bind(&underlying_unpacked);
3133 if (FLAG_string_slices) {
3135 // rdi: underlying subject string
3136 // rbx: instance type of underlying subject string
3137 // rdx: adjusted start index (smi)
3139 // If coming from the make_two_character_string path, the string
3140 // is too short to be sliced anyways.
3141 __ cmpp(rcx, Immediate(SlicedString::kMinLength));
3142 // Short slice. Copy instead of slicing.
3143 __ j(less, ©_routine);
3144 // Allocate new sliced string. At this point we do not reload the instance
3145 // type including the string encoding because we simply rely on the info
3146 // provided by the original string. It does not matter if the original
3147 // string's encoding is wrong because we always have to recheck encoding of
3148 // the newly created string's parent anyways due to externalized strings.
3149 Label two_byte_slice, set_slice_header;
3150 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3151 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3152 __ testb(rbx, Immediate(kStringEncodingMask));
3153 __ j(zero, &two_byte_slice, Label::kNear);
3154 __ AllocateOneByteSlicedString(rax, rbx, r14, &runtime);
3155 __ jmp(&set_slice_header, Label::kNear);
3156 __ bind(&two_byte_slice);
3157 __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
3158 __ bind(&set_slice_header);
3159 __ Integer32ToSmi(rcx, rcx);
3160 __ movp(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
3161 __ movp(FieldOperand(rax, SlicedString::kHashFieldOffset),
3162 Immediate(String::kEmptyHashField));
3163 __ movp(FieldOperand(rax, SlicedString::kParentOffset), rdi);
3164 __ movp(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
3165 __ IncrementCounter(counters->sub_string_native(), 1);
3166 __ ret(3 * kPointerSize);
3168 __ bind(©_routine);
3171 // rdi: underlying subject string
3172 // rbx: instance type of underlying subject string
3173 // rdx: adjusted start index (smi)
3175 // The subject string can only be external or sequential string of either
3176 // encoding at this point.
3177 Label two_byte_sequential, sequential_string;
3178 STATIC_ASSERT(kExternalStringTag != 0);
3179 STATIC_ASSERT(kSeqStringTag == 0);
3180 __ testb(rbx, Immediate(kExternalStringTag));
3181 __ j(zero, &sequential_string);
3183 // Handle external string.
3184 // Rule out short external strings.
3185 STATIC_ASSERT(kShortExternalStringTag != 0);
3186 __ testb(rbx, Immediate(kShortExternalStringMask));
3187 __ j(not_zero, &runtime);
3188 __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
3189 // Move the pointer so that offset-wise, it looks like a sequential string.
3190 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3191 __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3193 __ bind(&sequential_string);
3194 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3195 __ testb(rbx, Immediate(kStringEncodingMask));
3196 __ j(zero, &two_byte_sequential);
3198 // Allocate the result.
3199 __ AllocateOneByteString(rax, rcx, r11, r14, r15, &runtime);
3201 // rax: result string
3202 // rcx: result string length
3203 { // Locate character of sub string start.
3204 SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
3205 __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
3206 SeqOneByteString::kHeaderSize - kHeapObjectTag));
3208 // Locate first character of result.
3209 __ leap(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
3211 // rax: result string
3212 // rcx: result length
3213 // r14: first character of result
3214 // rsi: character of sub string start
3215 StringHelper::GenerateCopyCharacters(
3216 masm, rdi, r14, rcx, String::ONE_BYTE_ENCODING);
3217 __ IncrementCounter(counters->sub_string_native(), 1);
3218 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
3220 __ bind(&two_byte_sequential);
3221 // Allocate the result.
3222 __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
3224 // rax: result string
3225 // rcx: result string length
3226 { // Locate character of sub string start.
3227 SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
3228 __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
3229 SeqOneByteString::kHeaderSize - kHeapObjectTag));
3231 // Locate first character of result.
3232 __ leap(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
3234 // rax: result string
3235 // rcx: result length
3236 // rdi: first character of result
3237 // r14: character of sub string start
3238 StringHelper::GenerateCopyCharacters(
3239 masm, rdi, r14, rcx, String::TWO_BYTE_ENCODING);
3240 __ IncrementCounter(counters->sub_string_native(), 1);
3241 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
3243 // Just jump to runtime to create the sub string.
3245 __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
3247 __ bind(&single_char);
3249 // rbx: instance type
3250 // rcx: sub string length (smi)
3251 // rdx: from index (smi)
3252 StringCharAtGenerator generator(rax, rdx, rcx, rax, &runtime, &runtime,
3253 &runtime, STRING_INDEX_IS_NUMBER,
3254 RECEIVER_IS_STRING);
3255 generator.GenerateFast(masm);
3256 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
3257 generator.SkipSlow(masm, &runtime);
3261 void ToNumberStub::Generate(MacroAssembler* masm) {
3262 // The ToNumber stub takes one argument in rax.
3264 __ JumpIfNotSmi(rax, ¬_smi, Label::kNear);
3268 Label not_heap_number;
3269 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
3270 Heap::kHeapNumberMapRootIndex);
3271 __ j(not_equal, ¬_heap_number, Label::kNear);
3273 __ bind(¬_heap_number);
3275 Label not_string, slow_string;
3276 __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdi);
3279 __ j(above_equal, ¬_string, Label::kNear);
3280 // Check if string has a cached array index.
3281 __ testl(FieldOperand(rax, String::kHashFieldOffset),
3282 Immediate(String::kContainsCachedArrayIndexMask));
3283 __ j(not_zero, &slow_string, Label::kNear);
3284 __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
3285 __ IndexFromHash(rax, rax);
3287 __ bind(&slow_string);
3288 __ PopReturnAddressTo(rcx); // Pop return address.
3289 __ Push(rax); // Push argument.
3290 __ PushReturnAddressFrom(rcx); // Push return address.
3291 __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
3292 __ bind(¬_string);
3295 __ CmpInstanceType(rdi, ODDBALL_TYPE);
3296 __ j(not_equal, ¬_oddball, Label::kNear);
3297 __ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
3299 __ bind(¬_oddball);
3301 __ PopReturnAddressTo(rcx); // Pop return address.
3302 __ Push(rax); // Push argument.
3303 __ PushReturnAddressFrom(rcx); // Push return address.
3304 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
3308 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
3312 Register scratch2) {
3313 Register length = scratch1;
3316 Label check_zero_length;
3317 __ movp(length, FieldOperand(left, String::kLengthOffset));
3318 __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
3319 __ j(equal, &check_zero_length, Label::kNear);
3320 __ Move(rax, Smi::FromInt(NOT_EQUAL));
3323 // Check if the length is zero.
3324 Label compare_chars;
3325 __ bind(&check_zero_length);
3326 STATIC_ASSERT(kSmiTag == 0);
3328 __ j(not_zero, &compare_chars, Label::kNear);
3329 __ Move(rax, Smi::FromInt(EQUAL));
3332 // Compare characters.
3333 __ bind(&compare_chars);
3334 Label strings_not_equal;
3335 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
3336 &strings_not_equal, Label::kNear);
3338 // Characters are equal.
3339 __ Move(rax, Smi::FromInt(EQUAL));
3342 // Characters are not equal.
3343 __ bind(&strings_not_equal);
3344 __ Move(rax, Smi::FromInt(NOT_EQUAL));
3349 void StringHelper::GenerateCompareFlatOneByteStrings(
3350 MacroAssembler* masm, Register left, Register right, Register scratch1,
3351 Register scratch2, Register scratch3, Register scratch4) {
3352 // Ensure that you can always subtract a string length from a non-negative
3353 // number (e.g. another length).
3354 STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
3356 // Find minimum length and length difference.
3357 __ movp(scratch1, FieldOperand(left, String::kLengthOffset));
3358 __ movp(scratch4, scratch1);
3361 FieldOperand(right, String::kLengthOffset));
3362 // Register scratch4 now holds left.length - right.length.
3363 const Register length_difference = scratch4;
3365 __ j(less, &left_shorter, Label::kNear);
3366 // The right string isn't longer that the left one.
3367 // Get the right string's length by subtracting the (non-negative) difference
3368 // from the left string's length.
3369 __ SmiSub(scratch1, scratch1, length_difference);
3370 __ bind(&left_shorter);
3371 // Register scratch1 now holds Min(left.length, right.length).
3372 const Register min_length = scratch1;
3374 Label compare_lengths;
3375 // If min-length is zero, go directly to comparing lengths.
3376 __ SmiTest(min_length);
3377 __ j(zero, &compare_lengths, Label::kNear);
3380 Label result_not_equal;
3381 GenerateOneByteCharsCompareLoop(
3382 masm, left, right, min_length, scratch2, &result_not_equal,
3383 // In debug-code mode, SmiTest below might push
3384 // the target label outside the near range.
3387 // Completed loop without finding different characters.
3388 // Compare lengths (precomputed).
3389 __ bind(&compare_lengths);
3390 __ SmiTest(length_difference);
3391 Label length_not_equal;
3392 __ j(not_zero, &length_not_equal, Label::kNear);
3395 __ Move(rax, Smi::FromInt(EQUAL));
3398 Label result_greater;
3400 __ bind(&length_not_equal);
3401 __ j(greater, &result_greater, Label::kNear);
3402 __ jmp(&result_less, Label::kNear);
3403 __ bind(&result_not_equal);
3404 // Unequal comparison of left to right, either character or length.
3405 __ j(above, &result_greater, Label::kNear);
3406 __ bind(&result_less);
3409 __ Move(rax, Smi::FromInt(LESS));
3412 // Result is GREATER.
3413 __ bind(&result_greater);
3414 __ Move(rax, Smi::FromInt(GREATER));
3419 void StringHelper::GenerateOneByteCharsCompareLoop(
3420 MacroAssembler* masm, Register left, Register right, Register length,
3421 Register scratch, Label* chars_not_equal, Label::Distance near_jump) {
3422 // Change index to run from -length to -1 by adding length to string
3423 // start. This means that loop ends when index reaches zero, which
3424 // doesn't need an additional compare.
3425 __ SmiToInteger32(length, length);
3427 FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
3429 FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
3431 Register index = length; // index = -length;
3436 __ movb(scratch, Operand(left, index, times_1, 0));
3437 __ cmpb(scratch, Operand(right, index, times_1, 0));
3438 __ j(not_equal, chars_not_equal, near_jump);
3440 __ j(not_zero, &loop);
3444 void StringCompareStub::Generate(MacroAssembler* masm) {
3447 // Stack frame on entry.
3448 // rsp[0] : return address
3449 // rsp[8] : right string
3450 // rsp[16] : left string
3452 StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
3453 __ movp(rdx, args.GetArgumentOperand(0)); // left
3454 __ movp(rax, args.GetArgumentOperand(1)); // right
3456 // Check for identity.
3459 __ j(not_equal, ¬_same, Label::kNear);
3460 __ Move(rax, Smi::FromInt(EQUAL));
3461 Counters* counters = isolate()->counters();
3462 __ IncrementCounter(counters->string_compare_native(), 1);
3463 __ ret(2 * kPointerSize);
3467 // Check that both are sequential one-byte strings.
3468 __ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx, &runtime);
3470 // Inline comparison of one-byte strings.
3471 __ IncrementCounter(counters->string_compare_native(), 1);
3472 // Drop arguments from the stack
3473 __ PopReturnAddressTo(rcx);
3474 __ addp(rsp, Immediate(2 * kPointerSize));
3475 __ PushReturnAddressFrom(rcx);
3476 StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx, rdi,
3479 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
3480 // tagged as a small integer.
3482 __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
3486 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3487 // ----------- S t a t e -------------
3490 // -- rsp[0] : return address
3491 // -----------------------------------
3493 // Load rcx with the allocation site. We stick an undefined dummy value here
3494 // and replace it with the real allocation site later when we instantiate this
3495 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3496 __ Move(rcx, handle(isolate()->heap()->undefined_value()));
3498 // Make sure that we actually patched the allocation site.
3499 if (FLAG_debug_code) {
3500 __ testb(rcx, Immediate(kSmiTagMask));
3501 __ Assert(not_equal, kExpectedAllocationSite);
3502 __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
3503 isolate()->factory()->allocation_site_map());
3504 __ Assert(equal, kExpectedAllocationSite);
3507 // Tail call into the stub that handles binary operations with allocation
3509 BinaryOpWithAllocationSiteStub stub(isolate(), state());
3510 __ TailCallStub(&stub);
3514 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3515 DCHECK(state() == CompareICState::SMI);
3517 __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
3519 if (GetCondition() == equal) {
3520 // For equality we do not care about the sign of the result.
3525 __ j(no_overflow, &done, Label::kNear);
3526 // Correct sign of result in case of overflow.
3538 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3539 DCHECK(state() == CompareICState::NUMBER);
3542 Label unordered, maybe_undefined1, maybe_undefined2;
3545 if (left() == CompareICState::SMI) {
3546 __ JumpIfNotSmi(rdx, &miss);
3548 if (right() == CompareICState::SMI) {
3549 __ JumpIfNotSmi(rax, &miss);
3552 // Load left and right operand.
3553 Label done, left, left_smi, right_smi;
3554 __ JumpIfSmi(rax, &right_smi, Label::kNear);
3555 __ CompareMap(rax, isolate()->factory()->heap_number_map());
3556 __ j(not_equal, &maybe_undefined1, Label::kNear);
3557 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
3558 __ jmp(&left, Label::kNear);
3559 __ bind(&right_smi);
3560 __ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
3561 __ Cvtlsi2sd(xmm1, rcx);
3564 __ JumpIfSmi(rdx, &left_smi, Label::kNear);
3565 __ CompareMap(rdx, isolate()->factory()->heap_number_map());
3566 __ j(not_equal, &maybe_undefined2, Label::kNear);
3567 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
3570 __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
3571 __ Cvtlsi2sd(xmm0, rcx);
3575 __ ucomisd(xmm0, xmm1);
3577 // Don't base result on EFLAGS when a NaN is involved.
3578 __ j(parity_even, &unordered, Label::kNear);
3580 // Return a result of -1, 0, or 1, based on EFLAGS.
3581 // Performing mov, because xor would destroy the flag register.
3582 __ movl(rax, Immediate(0));
3583 __ movl(rcx, Immediate(0));
3584 __ setcc(above, rax); // Add one to zero if carry clear and not equal.
3585 __ sbbp(rax, rcx); // Subtract one if below (aka. carry set).
3588 __ bind(&unordered);
3589 __ bind(&generic_stub);
3590 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3591 CompareICState::GENERIC, CompareICState::GENERIC);
3592 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
3594 __ bind(&maybe_undefined1);
3595 if (Token::IsOrderedRelationalCompareOp(op())) {
3596 __ Cmp(rax, isolate()->factory()->undefined_value());
3597 __ j(not_equal, &miss);
3598 __ JumpIfSmi(rdx, &unordered);
3599 __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
3600 __ j(not_equal, &maybe_undefined2, Label::kNear);
3604 __ bind(&maybe_undefined2);
3605 if (Token::IsOrderedRelationalCompareOp(op())) {
3606 __ Cmp(rdx, isolate()->factory()->undefined_value());
3607 __ j(equal, &unordered);
3615 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3616 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3617 DCHECK(GetCondition() == equal);
3619 // Registers containing left and right operands respectively.
3620 Register left = rdx;
3621 Register right = rax;
3622 Register tmp1 = rcx;
3623 Register tmp2 = rbx;
3625 // Check that both operands are heap objects.
3627 Condition cond = masm->CheckEitherSmi(left, right, tmp1);
3628 __ j(cond, &miss, Label::kNear);
3630 // Check that both operands are internalized strings.
3631 __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
3632 __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
3633 __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
3634 __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
3635 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3637 __ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
3638 __ j(not_zero, &miss, Label::kNear);
3640 // Internalized strings are compared by identity.
3642 __ cmpp(left, right);
3643 // Make sure rax is non-zero. At this point input operands are
3644 // guaranteed to be non-zero.
3645 DCHECK(right.is(rax));
3646 __ j(not_equal, &done, Label::kNear);
3647 STATIC_ASSERT(EQUAL == 0);
3648 STATIC_ASSERT(kSmiTag == 0);
3649 __ Move(rax, Smi::FromInt(EQUAL));
3658 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3659 DCHECK(state() == CompareICState::UNIQUE_NAME);
3660 DCHECK(GetCondition() == equal);
3662 // Registers containing left and right operands respectively.
3663 Register left = rdx;
3664 Register right = rax;
3665 Register tmp1 = rcx;
3666 Register tmp2 = rbx;
3668 // Check that both operands are heap objects.
3670 Condition cond = masm->CheckEitherSmi(left, right, tmp1);
3671 __ j(cond, &miss, Label::kNear);
3673 // Check that both operands are unique names. This leaves the instance
3674 // types loaded in tmp1 and tmp2.
3675 __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
3676 __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
3677 __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
3678 __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
3680 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
3681 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);
3683 // Unique names are compared by identity.
3685 __ cmpp(left, right);
3686 // Make sure rax is non-zero. At this point input operands are
3687 // guaranteed to be non-zero.
3688 DCHECK(right.is(rax));
3689 __ j(not_equal, &done, Label::kNear);
3690 STATIC_ASSERT(EQUAL == 0);
3691 STATIC_ASSERT(kSmiTag == 0);
3692 __ Move(rax, Smi::FromInt(EQUAL));
3701 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3702 DCHECK(state() == CompareICState::STRING);
3705 bool equality = Token::IsEqualityOp(op());
3707 // Registers containing left and right operands respectively.
3708 Register left = rdx;
3709 Register right = rax;
3710 Register tmp1 = rcx;
3711 Register tmp2 = rbx;
3712 Register tmp3 = rdi;
3714 // Check that both operands are heap objects.
3715 Condition cond = masm->CheckEitherSmi(left, right, tmp1);
3718 // Check that both operands are strings. This leaves the instance
3719 // types loaded in tmp1 and tmp2.
3720 __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
3721 __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
3722 __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
3723 __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
3724 __ movp(tmp3, tmp1);
3725 STATIC_ASSERT(kNotStringTag != 0);
3727 __ testb(tmp3, Immediate(kIsNotStringMask));
3728 __ j(not_zero, &miss);
3730 // Fast check for identical strings.
3732 __ cmpp(left, right);
3733 __ j(not_equal, ¬_same, Label::kNear);
3734 STATIC_ASSERT(EQUAL == 0);
3735 STATIC_ASSERT(kSmiTag == 0);
3736 __ Move(rax, Smi::FromInt(EQUAL));
3739 // Handle not identical strings.
3742 // Check that both strings are internalized strings. If they are, we're done
3743 // because we already know they are not identical. We also know they are both
3747 STATIC_ASSERT(kInternalizedTag == 0);
3749 __ testb(tmp1, Immediate(kIsNotInternalizedMask));
3750 __ j(not_zero, &do_compare, Label::kNear);
3751 // Make sure rax is non-zero. At this point input operands are
3752 // guaranteed to be non-zero.
3753 DCHECK(right.is(rax));
3755 __ bind(&do_compare);
3758 // Check that both strings are sequential one-byte.
3760 __ JumpIfNotBothSequentialOneByteStrings(left, right, tmp1, tmp2, &runtime);
3762 // Compare flat one-byte strings. Returns when done.
3764 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
3767 StringHelper::GenerateCompareFlatOneByteStrings(
3768 masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
3771 // Handle more complex cases in runtime.
3773 __ PopReturnAddressTo(tmp1);
3776 __ PushReturnAddressFrom(tmp1);
3778 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3780 __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
3788 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3789 DCHECK(state() == CompareICState::OBJECT);
3791 Condition either_smi = masm->CheckEitherSmi(rdx, rax);
3792 __ j(either_smi, &miss, Label::kNear);
3794 __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
3795 __ j(not_equal, &miss, Label::kNear);
3796 __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
3797 __ j(not_equal, &miss, Label::kNear);
3799 DCHECK(GetCondition() == equal);
3808 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3810 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
3811 Condition either_smi = masm->CheckEitherSmi(rdx, rax);
3812 __ j(either_smi, &miss, Label::kNear);
3814 __ GetWeakValue(rdi, cell);
3815 __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
3816 __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
3818 __ j(not_equal, &miss, Label::kNear);
3820 __ j(not_equal, &miss, Label::kNear);
3830 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3832 // Call the runtime system in a fresh internal frame.
3833 ExternalReference miss =
3834 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3836 FrameScope scope(masm, StackFrame::INTERNAL);
3841 __ Push(Smi::FromInt(op()));
3842 __ CallExternalReference(miss, 3);
3844 // Compute the entry point of the rewritten stub.
3845 __ leap(rdi, FieldOperand(rax, Code::kHeaderSize));
3850 // Do a tail call to the rewritten stub.
3855 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3858 Register properties,
3861 DCHECK(name->IsUniqueName());
3862 // If names of slots in range from 1 to kProbes - 1 for the hash value are
3863 // not equal to the name and kProbes-th slot is not used (its name is the
3864 // undefined value), it guarantees the hash table doesn't contain the
3865 // property. It's true even if some slots represent deleted properties
3866 // (their names are the hole value).
3867 for (int i = 0; i < kInlinedProbes; i++) {
3868 // r0 points to properties hash.
3869 // Compute the masked index: (hash + i + i * i) & mask.
3870 Register index = r0;
3871 // Capacity is smi 2^n.
3872 __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
3875 Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
3877 // Scale the index by multiplying by the entry size.
3878 DCHECK(NameDictionary::kEntrySize == 3);
3879 __ leap(index, Operand(index, index, times_2, 0)); // index *= 3.
3881 Register entity_name = r0;
3882 // Having undefined at this place means the name is not contained.
3883 DCHECK_EQ(kSmiTagSize, 1);
3884 __ movp(entity_name, Operand(properties,
3887 kElementsStartOffset - kHeapObjectTag));
3888 __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
3891 // Stop if found the property.
3892 __ Cmp(entity_name, Handle<Name>(name));
3896 // Check for the hole and skip.
3897 __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
3898 __ j(equal, &good, Label::kNear);
3900 // Check if the entry name is not a unique name.
3901 __ movp(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
3902 __ JumpIfNotUniqueNameInstanceType(
3903 FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
3907 NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
3909 __ Push(Handle<Object>(name));
3910 __ Push(Immediate(name->Hash()));
3913 __ j(not_zero, miss);
3918 // Probe the name dictionary in the |elements| register. Jump to the
3919 // |done| label if a property with the given name is found leaving the
3920 // index into the dictionary in |r1|. Jump to the |miss| label
3922 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
3929 DCHECK(!elements.is(r0));
3930 DCHECK(!elements.is(r1));
3931 DCHECK(!name.is(r0));
3932 DCHECK(!name.is(r1));
3934 __ AssertName(name);
3936 __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
3939 for (int i = 0; i < kInlinedProbes; i++) {
3940 // Compute the masked index: (hash + i + i * i) & mask.
3941 __ movl(r1, FieldOperand(name, Name::kHashFieldOffset));
3942 __ shrl(r1, Immediate(Name::kHashShift));
3944 __ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
3948 // Scale the index by multiplying by the entry size.
3949 DCHECK(NameDictionary::kEntrySize == 3);
3950 __ leap(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
3952 // Check if the key is identical to the name.
3953 __ cmpp(name, Operand(elements, r1, times_pointer_size,
3954 kElementsStartOffset - kHeapObjectTag));
3958 NameDictionaryLookupStub stub(masm->isolate(), elements, r0, r1,
3961 __ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
3962 __ shrl(r0, Immediate(Name::kHashShift));
3972 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
3973 // This stub overrides SometimesSetsUpAFrame() to return false. That means
3974 // we cannot call anything that could cause a GC from this stub.
3975 // Stack frame on entry:
3976 // rsp[0 * kPointerSize] : return address.
3977 // rsp[1 * kPointerSize] : key's hash.
3978 // rsp[2 * kPointerSize] : key.
3980 // dictionary_: NameDictionary to probe.
3981 // result_: used as scratch.
3982 // index_: will hold an index of entry if lookup is successful.
3983 // might alias with result_.
3985 // result_ is zero if lookup failed, non zero otherwise.
3987 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
3989 Register scratch = result();
3991 __ SmiToInteger32(scratch, FieldOperand(dictionary(), kCapacityOffset));
3995 // If names of slots in range from 1 to kProbes - 1 for the hash value are
3996 // not equal to the name and kProbes-th slot is not used (its name is the
3997 // undefined value), it guarantees the hash table doesn't contain the
3998 // property. It's true even if some slots represent deleted properties
3999 // (their names are the null value).
4000 StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER,
4002 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4003 // Compute the masked index: (hash + i + i * i) & mask.
4004 __ movp(scratch, args.GetArgumentOperand(1));
4006 __ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
4008 __ andp(scratch, Operand(rsp, 0));
4010 // Scale the index by multiplying by the entry size.
4011 DCHECK(NameDictionary::kEntrySize == 3);
4012 __ leap(index(), Operand(scratch, scratch, times_2, 0)); // index *= 3.
4014 // Having undefined at this place means the name is not contained.
4015 __ movp(scratch, Operand(dictionary(), index(), times_pointer_size,
4016 kElementsStartOffset - kHeapObjectTag));
4018 __ Cmp(scratch, isolate()->factory()->undefined_value());
4019 __ j(equal, ¬_in_dictionary);
4021 // Stop if found the property.
4022 __ cmpp(scratch, args.GetArgumentOperand(0));
4023 __ j(equal, &in_dictionary);
4025 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
4026 // If we hit a key that is not a unique name during negative
4027 // lookup we have to bailout as this key might be equal to the
4028 // key we are looking for.
4030 // Check if the entry name is not a unique name.
4031 __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
4032 __ JumpIfNotUniqueNameInstanceType(
4033 FieldOperand(scratch, Map::kInstanceTypeOffset),
4034 &maybe_in_dictionary);
4038 __ bind(&maybe_in_dictionary);
4039 // If we are doing negative lookup then probing failure should be
4040 // treated as a lookup success. For positive lookup probing failure
4041 // should be treated as lookup failure.
4042 if (mode() == POSITIVE_LOOKUP) {
4043 __ movp(scratch, Immediate(0));
4045 __ ret(2 * kPointerSize);
4048 __ bind(&in_dictionary);
4049 __ movp(scratch, Immediate(1));
4051 __ ret(2 * kPointerSize);
4053 __ bind(¬_in_dictionary);
4054 __ movp(scratch, Immediate(0));
4056 __ ret(2 * kPointerSize);
4060 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4062 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4064 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4069 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
4070 // the value has just been written into the object, now this stub makes sure
4071 // we keep the GC informed. The word in the object where the value has been
4072 // written is in the address register.
4073 void RecordWriteStub::Generate(MacroAssembler* masm) {
4074 Label skip_to_incremental_noncompacting;
4075 Label skip_to_incremental_compacting;
4077 // The first two instructions are generated with labels so as to get the
4078 // offset fixed up correctly by the bind(Label*) call. We patch it back and
4079 // forth between a compare instructions (a nop in this position) and the
4080 // real branch when we start and stop incremental heap marking.
4081 // See RecordWriteStub::Patch for details.
4082 __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
4083 __ jmp(&skip_to_incremental_compacting, Label::kFar);
4085 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4086 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4087 MacroAssembler::kReturnAtEnd);
4092 __ bind(&skip_to_incremental_noncompacting);
4093 GenerateIncremental(masm, INCREMENTAL);
4095 __ bind(&skip_to_incremental_compacting);
4096 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4098 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4099 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4100 masm->set_byte_at(0, kTwoByteNopInstruction);
4101 masm->set_byte_at(2, kFiveByteNopInstruction);
4105 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4108 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4109 Label dont_need_remembered_set;
4111 __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
4112 __ JumpIfNotInNewSpace(regs_.scratch0(),
4114 &dont_need_remembered_set);
4116 __ CheckPageFlag(regs_.object(),
4118 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4120 &dont_need_remembered_set);
4122 // First notify the incremental marker if necessary, then update the
4124 CheckNeedsToInformIncrementalMarker(
4125 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4126 InformIncrementalMarker(masm);
4127 regs_.Restore(masm);
4128 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4129 MacroAssembler::kReturnAtEnd);
4131 __ bind(&dont_need_remembered_set);
4134 CheckNeedsToInformIncrementalMarker(
4135 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4136 InformIncrementalMarker(masm);
4137 regs_.Restore(masm);
4142 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4143 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
4145 arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
4146 DCHECK(!address.is(regs_.object()));
4147 DCHECK(!address.is(arg_reg_1));
4148 __ Move(address, regs_.address());
4149 __ Move(arg_reg_1, regs_.object());
4150 // TODO(gc) Can we just set address arg2 in the beginning?
4151 __ Move(arg_reg_2, address);
4152 __ LoadAddress(arg_reg_3,
4153 ExternalReference::isolate_address(isolate()));
4154 int argument_count = 3;
4156 AllowExternalCallThatCantCauseGC scope(masm);
4157 __ PrepareCallCFunction(argument_count);
4159 ExternalReference::incremental_marking_record_write_function(isolate()),
4161 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
4165 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4166 MacroAssembler* masm,
4167 OnNoNeedToInformIncrementalMarker on_no_need,
4170 Label need_incremental;
4171 Label need_incremental_pop_object;
4173 __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
4174 __ andp(regs_.scratch0(), regs_.object());
4175 __ movp(regs_.scratch1(),
4176 Operand(regs_.scratch0(),
4177 MemoryChunk::kWriteBarrierCounterOffset));
4178 __ subp(regs_.scratch1(), Immediate(1));
4179 __ movp(Operand(regs_.scratch0(),
4180 MemoryChunk::kWriteBarrierCounterOffset),
4182 __ j(negative, &need_incremental);
4184 // Let's look at the color of the object: If it is not black we don't have
4185 // to inform the incremental marker.
4186 __ JumpIfBlack(regs_.object(),
4192 regs_.Restore(masm);
4193 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4194 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4195 MacroAssembler::kReturnAtEnd);
4202 // Get the value from the slot.
4203 __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
4205 if (mode == INCREMENTAL_COMPACTION) {
4206 Label ensure_not_white;
4208 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4209 regs_.scratch1(), // Scratch.
4210 MemoryChunk::kEvacuationCandidateMask,
4215 __ CheckPageFlag(regs_.object(),
4216 regs_.scratch1(), // Scratch.
4217 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4221 __ bind(&ensure_not_white);
4224 // We need an extra register for this, so we push the object register
4226 __ Push(regs_.object());
4227 __ EnsureNotWhite(regs_.scratch0(), // The value.
4228 regs_.scratch1(), // Scratch.
4229 regs_.object(), // Scratch.
4230 &need_incremental_pop_object,
4232 __ Pop(regs_.object());
4234 regs_.Restore(masm);
4235 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4236 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4237 MacroAssembler::kReturnAtEnd);
4242 __ bind(&need_incremental_pop_object);
4243 __ Pop(regs_.object());
4245 __ bind(&need_incremental);
4247 // Fall through when we need to inform the incremental marker.
4251 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4252 // ----------- S t a t e -------------
4253 // -- rax : element value to store
4254 // -- rcx : element index as smi
4255 // -- rsp[0] : return address
4256 // -- rsp[8] : array literal index in function
4257 // -- rsp[16] : array literal
4258 // clobbers rbx, rdx, rdi
4259 // -----------------------------------
4262 Label double_elements;
4264 Label slow_elements;
4265 Label fast_elements;
4267 // Get array literal index, array literal and its map.
4268 StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
4269 __ movp(rdx, args.GetArgumentOperand(1));
4270 __ movp(rbx, args.GetArgumentOperand(0));
4271 __ movp(rdi, FieldOperand(rbx, JSObject::kMapOffset));
4273 __ CheckFastElements(rdi, &double_elements);
4275 // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
4276 __ JumpIfSmi(rax, &smi_element);
4277 __ CheckFastSmiElements(rdi, &fast_elements);
4279 // Store into the array literal requires a elements transition. Call into
4282 __ bind(&slow_elements);
4283 __ PopReturnAddressTo(rdi);
4287 __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
4288 __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
4290 __ PushReturnAddressFrom(rdi);
4291 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4293 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4294 __ bind(&fast_elements);
4295 __ SmiToInteger32(kScratchRegister, rcx);
4296 __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
4297 __ leap(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
4298 FixedArrayBase::kHeaderSize));
4299 __ movp(Operand(rcx, 0), rax);
4300 // Update the write barrier for the array store.
4301 __ RecordWrite(rbx, rcx, rax,
4303 EMIT_REMEMBERED_SET,
4307 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
4308 // FAST_*_ELEMENTS, and value is Smi.
4309 __ bind(&smi_element);
4310 __ SmiToInteger32(kScratchRegister, rcx);
4311 __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
4312 __ movp(FieldOperand(rbx, kScratchRegister, times_pointer_size,
4313 FixedArrayBase::kHeaderSize), rax);
4316 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
4317 __ bind(&double_elements);
4319 __ movp(r9, FieldOperand(rbx, JSObject::kElementsOffset));
4320 __ SmiToInteger32(r11, rcx);
4321 __ StoreNumberToDoubleElements(rax,
4330 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4331 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4332 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4333 int parameter_count_offset =
4334 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4335 __ movp(rbx, MemOperand(rbp, parameter_count_offset));
4336 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4337 __ PopReturnAddressTo(rcx);
4338 int additional_offset =
4339 function_mode() == JS_FUNCTION_STUB_MODE ? kPointerSize : 0;
4340 __ leap(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
4341 __ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
4345 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4346 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4347 VectorRawLoadStub stub(isolate(), state());
4348 stub.GenerateForTrampoline(masm);
4352 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4353 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4354 VectorRawKeyedLoadStub stub(isolate());
4355 stub.GenerateForTrampoline(masm);
4359 static void HandleArrayCases(MacroAssembler* masm, Register receiver,
4360 Register key, Register vector, Register slot,
4361 Register feedback, Register scratch1,
4362 Register scratch2, Register scratch3,
4363 Register scratch4, bool is_polymorphic,
4365 // feedback initially contains the feedback array
4366 Label next_loop, prepare_next;
4367 Label load_smi_map, compare_map;
4368 Label start_polymorphic;
4370 Register receiver_map = scratch1;
4371 Register counter = scratch2;
4372 Register length = scratch3;
4373 Register cached_map = scratch4;
4375 // Receiver might not be a heap object.
4376 __ JumpIfSmi(receiver, &load_smi_map);
4377 __ movp(receiver_map, FieldOperand(receiver, 0));
4378 __ bind(&compare_map);
4379 __ movp(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
4380 __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
4381 __ j(not_equal, &start_polymorphic);
4383 // found, now call handler.
4384 Register handler = feedback;
4385 __ movp(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
4386 __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
4389 // Polymorphic, we have to loop from 2 to N
4390 __ bind(&start_polymorphic);
4391 __ SmiToInteger32(length, FieldOperand(feedback, FixedArray::kLengthOffset));
4392 if (!is_polymorphic) {
4393 // If the IC could be monomorphic we have to make sure we don't go past the
4394 // end of the feedback array.
4395 __ cmpl(length, Immediate(2));
4398 __ movl(counter, Immediate(2));
4400 __ bind(&next_loop);
4401 __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
4402 FixedArray::kHeaderSize));
4403 __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
4404 __ j(not_equal, &prepare_next);
4405 __ movp(handler, FieldOperand(feedback, counter, times_pointer_size,
4406 FixedArray::kHeaderSize + kPointerSize));
4407 __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
4410 __ bind(&prepare_next);
4411 __ addl(counter, Immediate(2));
4412 __ cmpl(counter, length);
4413 __ j(less, &next_loop);
4415 // We exhausted our array of map handler pairs.
4418 __ bind(&load_smi_map);
4419 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
4420 __ jmp(&compare_map);
4424 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
4425 Register key, Register vector, Register slot,
4426 Register weak_cell, Register integer_slot,
4428 // feedback initially contains the feedback array
4429 Label compare_smi_map;
4431 // Move the weak map into the weak_cell register.
4432 Register ic_map = weak_cell;
4433 __ movp(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
4435 // Receiver might not be a heap object.
4436 __ JumpIfSmi(receiver, &compare_smi_map);
4437 __ cmpp(ic_map, FieldOperand(receiver, 0));
4438 __ j(not_equal, miss);
4439 Register handler = weak_cell;
4440 __ movp(handler, FieldOperand(vector, integer_slot, times_pointer_size,
4441 FixedArray::kHeaderSize + kPointerSize));
4442 __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
4445 // In microbenchmarks, it made sense to unroll this code so that the call to
4446 // the handler is duplicated for a HeapObject receiver and a Smi receiver.
4447 __ bind(&compare_smi_map);
4448 __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
4449 __ j(not_equal, miss);
4450 __ movp(handler, FieldOperand(vector, integer_slot, times_pointer_size,
4451 FixedArray::kHeaderSize + kPointerSize));
4452 __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
4457 void VectorRawLoadStub::Generate(MacroAssembler* masm) {
4458 GenerateImpl(masm, false);
4462 void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
4463 GenerateImpl(masm, true);
4467 void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4468 Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // rdx
4469 Register name = VectorLoadICDescriptor::NameRegister(); // rcx
4470 Register vector = VectorLoadICDescriptor::VectorRegister(); // rbx
4471 Register slot = VectorLoadICDescriptor::SlotRegister(); // rax
4472 Register feedback = rdi;
4473 Register integer_slot = r8;
4475 __ SmiToInteger32(integer_slot, slot);
4476 __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
4477 FixedArray::kHeaderSize));
4479 // Is it a weak cell?
4481 Label not_array, smi_key, key_okay, miss;
4482 __ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
4483 __ j(not_equal, &try_array);
4484 HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback,
4485 integer_slot, &miss);
4487 // Is it a fixed array?
4488 __ bind(&try_array);
4489 __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
4490 __ j(not_equal, ¬_array);
4491 HandleArrayCases(masm, receiver, name, vector, slot, feedback, integer_slot,
4492 r9, r11, r15, true, &miss);
4494 __ bind(¬_array);
4495 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
4496 __ j(not_equal, &miss);
4497 Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
4498 Code::ComputeHandlerFlags(Code::LOAD_IC));
4499 masm->isolate()->stub_cache()->GenerateProbe(
4500 masm, Code::LOAD_IC, code_flags, false, receiver, name, feedback, no_reg);
4503 LoadIC::GenerateMiss(masm);
4507 void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
4508 GenerateImpl(masm, false);
4512 void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
4513 GenerateImpl(masm, true);
4517 void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4518 Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // rdx
4519 Register key = VectorLoadICDescriptor::NameRegister(); // rcx
4520 Register vector = VectorLoadICDescriptor::VectorRegister(); // rbx
4521 Register slot = VectorLoadICDescriptor::SlotRegister(); // rax
4522 Register feedback = rdi;
4523 Register integer_slot = r8;
4525 __ SmiToInteger32(integer_slot, slot);
4526 __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
4527 FixedArray::kHeaderSize));
4529 // Is it a weak cell?
4531 Label not_array, smi_key, key_okay, miss;
4532 __ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
4533 __ j(not_equal, &try_array);
4534 HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback,
4535 integer_slot, &miss);
4537 __ bind(&try_array);
4538 // Is it a fixed array?
4539 __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
4540 __ j(not_equal, ¬_array);
4542 // We have a polymorphic element handler.
4543 Label polymorphic, try_poly_name;
4544 __ bind(&polymorphic);
4545 HandleArrayCases(masm, receiver, key, vector, slot, feedback, integer_slot,
4546 r9, r11, r15, true, &miss);
4548 __ bind(¬_array);
4550 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
4551 __ j(not_equal, &try_poly_name);
4552 Handle<Code> megamorphic_stub =
4553 KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
4554 __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
4556 __ bind(&try_poly_name);
4557 // We might have a name in feedback, and a fixed array in the next slot.
4558 __ cmpp(key, feedback);
4559 __ j(not_equal, &miss);
4560 // If the name comparison succeeded, we know we have a fixed array with
4561 // at least one map/handler pair.
4562 __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
4563 FixedArray::kHeaderSize + kPointerSize));
4564 HandleArrayCases(masm, receiver, key, vector, slot, feedback, integer_slot,
4565 r9, r11, r15, false, &miss);
4568 KeyedLoadIC::GenerateMiss(masm);
4572 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
4573 EmitLoadTypeFeedbackVector(masm, rbx);
4574 CallICStub stub(isolate(), state());
4575 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
4579 void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
4580 EmitLoadTypeFeedbackVector(masm, rbx);
4581 CallIC_ArrayStub stub(isolate(), state());
4582 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
4586 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4587 if (masm->isolate()->function_entry_hook() != NULL) {
4588 ProfileEntryHookStub stub(masm->isolate());
4589 masm->CallStub(&stub);
4594 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4595 // This stub can be called from essentially anywhere, so it needs to save
4596 // all volatile and callee-save registers.
4597 const size_t kNumSavedRegisters = 2;
4598 __ pushq(arg_reg_1);
4599 __ pushq(arg_reg_2);
4601 // Calculate the original stack pointer and store it in the second arg.
4603 Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize));
4605 // Calculate the function address to the first arg.
4606 __ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
4607 __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
4609 // Save the remainder of the volatile registers.
4610 masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
4612 // Call the entry hook function.
4613 __ Move(rax, FUNCTION_ADDR(isolate()->function_entry_hook()),
4614 Assembler::RelocInfoNone());
4616 AllowExternalCallThatCantCauseGC scope(masm);
4618 const int kArgumentCount = 2;
4619 __ PrepareCallCFunction(kArgumentCount);
4620 __ CallCFunction(rax, kArgumentCount);
4622 // Restore volatile regs.
4623 masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
4632 static void CreateArrayDispatch(MacroAssembler* masm,
4633 AllocationSiteOverrideMode mode) {
4634 if (mode == DISABLE_ALLOCATION_SITES) {
4635 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4636 __ TailCallStub(&stub);
4637 } else if (mode == DONT_OVERRIDE) {
4638 int last_index = GetSequenceIndexFromFastElementsKind(
4639 TERMINAL_FAST_ELEMENTS_KIND);
4640 for (int i = 0; i <= last_index; ++i) {
4642 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4643 __ cmpl(rdx, Immediate(kind));
4644 __ j(not_equal, &next);
4645 T stub(masm->isolate(), kind);
4646 __ TailCallStub(&stub);
4650 // If we reached this point there is a problem.
4651 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4658 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4659 AllocationSiteOverrideMode mode) {
4660 // rbx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4661 // rdx - kind (if mode != DISABLE_ALLOCATION_SITES)
4662 // rax - number of arguments
4663 // rdi - constructor?
4664 // rsp[0] - return address
4665 // rsp[8] - last argument
4666 Handle<Object> undefined_sentinel(
4667 masm->isolate()->heap()->undefined_value(),
4670 Label normal_sequence;
4671 if (mode == DONT_OVERRIDE) {
4672 DCHECK(FAST_SMI_ELEMENTS == 0);
4673 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
4674 DCHECK(FAST_ELEMENTS == 2);
4675 DCHECK(FAST_HOLEY_ELEMENTS == 3);
4676 DCHECK(FAST_DOUBLE_ELEMENTS == 4);
4677 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4679 // is the low bit set? If so, we are holey and that is good.
4680 __ testb(rdx, Immediate(1));
4681 __ j(not_zero, &normal_sequence);
4684 // look at the first argument
4685 StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
4686 __ movp(rcx, args.GetArgumentOperand(0));
4688 __ j(zero, &normal_sequence);
4690 if (mode == DISABLE_ALLOCATION_SITES) {
4691 ElementsKind initial = GetInitialFastElementsKind();
4692 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4694 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4696 DISABLE_ALLOCATION_SITES);
4697 __ TailCallStub(&stub_holey);
4699 __ bind(&normal_sequence);
4700 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4702 DISABLE_ALLOCATION_SITES);
4703 __ TailCallStub(&stub);
4704 } else if (mode == DONT_OVERRIDE) {
4705 // We are going to create a holey array, but our kind is non-holey.
4706 // Fix kind and retry (only if we have an allocation site in the slot).
4709 if (FLAG_debug_code) {
4710 Handle<Map> allocation_site_map =
4711 masm->isolate()->factory()->allocation_site_map();
4712 __ Cmp(FieldOperand(rbx, 0), allocation_site_map);
4713 __ Assert(equal, kExpectedAllocationSite);
4716 // Save the resulting elements kind in type info. We can't just store r3
4717 // in the AllocationSite::transition_info field because elements kind is
4718 // restricted to a portion of the field...upper bits need to be left alone.
4719 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4720 __ SmiAddConstant(FieldOperand(rbx, AllocationSite::kTransitionInfoOffset),
4721 Smi::FromInt(kFastElementsKindPackedToHoley));
4723 __ bind(&normal_sequence);
4724 int last_index = GetSequenceIndexFromFastElementsKind(
4725 TERMINAL_FAST_ELEMENTS_KIND);
4726 for (int i = 0; i <= last_index; ++i) {
4728 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4729 __ cmpl(rdx, Immediate(kind));
4730 __ j(not_equal, &next);
4731 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4732 __ TailCallStub(&stub);
4736 // If we reached this point there is a problem.
4737 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4745 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4746 int to_index = GetSequenceIndexFromFastElementsKind(
4747 TERMINAL_FAST_ELEMENTS_KIND);
4748 for (int i = 0; i <= to_index; ++i) {
4749 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4750 T stub(isolate, kind);
4752 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4753 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4760 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4761 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4763 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4765 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4770 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4772 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4773 for (int i = 0; i < 2; i++) {
4774 // For internal arrays we only need a few things
4775 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4777 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4779 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4785 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4786 MacroAssembler* masm,
4787 AllocationSiteOverrideMode mode) {
4788 if (argument_count() == ANY) {
4789 Label not_zero_case, not_one_case;
4791 __ j(not_zero, ¬_zero_case);
4792 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4794 __ bind(¬_zero_case);
4795 __ cmpl(rax, Immediate(1));
4796 __ j(greater, ¬_one_case);
4797 CreateArrayDispatchOneArgument(masm, mode);
4799 __ bind(¬_one_case);
4800 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4801 } else if (argument_count() == NONE) {
4802 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4803 } else if (argument_count() == ONE) {
4804 CreateArrayDispatchOneArgument(masm, mode);
4805 } else if (argument_count() == MORE_THAN_ONE) {
4806 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4813 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4814 // ----------- S t a t e -------------
4816 // -- rbx : AllocationSite or undefined
4817 // -- rdi : constructor
4818 // -- rdx : original constructor
4819 // -- rsp[0] : return address
4820 // -- rsp[8] : last argument
4821 // -----------------------------------
4822 if (FLAG_debug_code) {
4823 // The array construct code is only set for the global and natives
4824 // builtin Array functions which always have maps.
4826 // Initial map for the builtin Array function should be a map.
4827 __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
4828 // Will both indicate a NULL and a Smi.
4829 STATIC_ASSERT(kSmiTag == 0);
4830 Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
4831 __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
4832 __ CmpObjectType(rcx, MAP_TYPE, rcx);
4833 __ Check(equal, kUnexpectedInitialMapForArrayFunction);
4835 // We should either have undefined in rbx or a valid AllocationSite
4836 __ AssertUndefinedOrAllocationSite(rbx);
4841 __ j(not_equal, &subclassing);
4844 // If the feedback vector is the undefined value call an array constructor
4845 // that doesn't use AllocationSites.
4846 __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
4847 __ j(equal, &no_info);
4849 // Only look at the lower 16 bits of the transition info.
4850 __ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset));
4851 __ SmiToInteger32(rdx, rdx);
4852 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4853 __ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
4854 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4857 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4860 __ bind(&subclassing);
4861 __ Pop(rcx); // return address.
4866 switch (argument_count()) {
4869 __ addp(rax, Immediate(2));
4872 __ movp(rax, Immediate(2));
4875 __ movp(rax, Immediate(3));
4880 __ JumpToExternalReference(
4881 ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()),
4886 void InternalArrayConstructorStub::GenerateCase(
4887 MacroAssembler* masm, ElementsKind kind) {
4888 Label not_zero_case, not_one_case;
4889 Label normal_sequence;
4892 __ j(not_zero, ¬_zero_case);
4893 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4894 __ TailCallStub(&stub0);
4896 __ bind(¬_zero_case);
4897 __ cmpl(rax, Immediate(1));
4898 __ j(greater, ¬_one_case);
4900 if (IsFastPackedElementsKind(kind)) {
4901 // We might need to create a holey array
4902 // look at the first argument
4903 StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
4904 __ movp(rcx, args.GetArgumentOperand(0));
4906 __ j(zero, &normal_sequence);
4908 InternalArraySingleArgumentConstructorStub
4909 stub1_holey(isolate(), GetHoleyElementsKind(kind));
4910 __ TailCallStub(&stub1_holey);
4913 __ bind(&normal_sequence);
4914 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4915 __ TailCallStub(&stub1);
4917 __ bind(¬_one_case);
4918 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4919 __ TailCallStub(&stubN);
4923 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4924 // ----------- S t a t e -------------
4926 // -- rdi : constructor
4927 // -- rsp[0] : return address
4928 // -- rsp[8] : last argument
4929 // -----------------------------------
4931 if (FLAG_debug_code) {
4932 // The array construct code is only set for the global and natives
4933 // builtin Array functions which always have maps.
4935 // Initial map for the builtin Array function should be a map.
4936 __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
4937 // Will both indicate a NULL and a Smi.
4938 STATIC_ASSERT(kSmiTag == 0);
4939 Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
4940 __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
4941 __ CmpObjectType(rcx, MAP_TYPE, rcx);
4942 __ Check(equal, kUnexpectedInitialMapForArrayFunction);
4945 // Figure out the right elements kind
4946 __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
4948 // Load the map's "bit field 2" into |result|. We only need the first byte,
4949 // but the following masking takes care of that anyway.
4950 __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
4951 // Retrieve elements_kind from bit field 2.
4952 __ DecodeField<Map::ElementsKindBits>(rcx);
4954 if (FLAG_debug_code) {
4956 __ cmpl(rcx, Immediate(FAST_ELEMENTS));
4958 __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
4960 kInvalidElementsKindForInternalArrayOrInternalPackedArray);
4964 Label fast_elements_case;
4965 __ cmpl(rcx, Immediate(FAST_ELEMENTS));
4966 __ j(equal, &fast_elements_case);
4967 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4969 __ bind(&fast_elements_case);
4970 GenerateCase(masm, FAST_ELEMENTS);
4974 static int Offset(ExternalReference ref0, ExternalReference ref1) {
4975 int64_t offset = (ref0.address() - ref1.address());
4976 // Check that fits into int.
4977 DCHECK(static_cast<int>(offset) == offset);
4978 return static_cast<int>(offset);
4982 // Prepares stack to put arguments (aligns and so on). WIN64 calling
4983 // convention requires to put the pointer to the return value slot into
4984 // rcx (rcx must be preserverd until CallApiFunctionAndReturn). Saves
4985 // context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
4986 // inside the exit frame (not GCed) accessible via StackSpaceOperand.
4987 static void PrepareCallApiFunction(MacroAssembler* masm, int arg_stack_space) {
4988 __ EnterApiExitFrame(arg_stack_space);
4992 // Calls an API function. Allocates HandleScope, extracts returned value
4993 // from handle and propagates exceptions. Clobbers r14, r15, rbx and
4994 // caller-save registers. Restores context. On return removes
4995 // stack_space * kPointerSize (GCed).
4996 static void CallApiFunctionAndReturn(MacroAssembler* masm,
4997 Register function_address,
4998 ExternalReference thunk_ref,
4999 Register thunk_last_arg, int stack_space,
5000 Operand* stack_space_operand,
5001 Operand return_value_operand,
5002 Operand* context_restore_operand) {
5004 Label promote_scheduled_exception;
5005 Label delete_allocated_handles;
5006 Label leave_exit_frame;
5009 Isolate* isolate = masm->isolate();
5010 Factory* factory = isolate->factory();
5011 ExternalReference next_address =
5012 ExternalReference::handle_scope_next_address(isolate);
5013 const int kNextOffset = 0;
5014 const int kLimitOffset = Offset(
5015 ExternalReference::handle_scope_limit_address(isolate), next_address);
5016 const int kLevelOffset = Offset(
5017 ExternalReference::handle_scope_level_address(isolate), next_address);
5018 ExternalReference scheduled_exception_address =
5019 ExternalReference::scheduled_exception_address(isolate);
5021 DCHECK(rdx.is(function_address) || r8.is(function_address));
5022 // Allocate HandleScope in callee-save registers.
5023 Register prev_next_address_reg = r14;
5024 Register prev_limit_reg = rbx;
5025 Register base_reg = r15;
5026 __ Move(base_reg, next_address);
5027 __ movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
5028 __ movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
5029 __ addl(Operand(base_reg, kLevelOffset), Immediate(1));
5031 if (FLAG_log_timer_events) {
5032 FrameScope frame(masm, StackFrame::MANUAL);
5033 __ PushSafepointRegisters();
5034 __ PrepareCallCFunction(1);
5035 __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
5036 __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
5038 __ PopSafepointRegisters();
5041 Label profiler_disabled;
5042 Label end_profiler_check;
5043 __ Move(rax, ExternalReference::is_profiling_address(isolate));
5044 __ cmpb(Operand(rax, 0), Immediate(0));
5045 __ j(zero, &profiler_disabled);
5047 // Third parameter is the address of the actual getter function.
5048 __ Move(thunk_last_arg, function_address);
5049 __ Move(rax, thunk_ref);
5050 __ jmp(&end_profiler_check);
5052 __ bind(&profiler_disabled);
5053 // Call the api function!
5054 __ Move(rax, function_address);
5056 __ bind(&end_profiler_check);
5058 // Call the api function!
5061 if (FLAG_log_timer_events) {
5062 FrameScope frame(masm, StackFrame::MANUAL);
5063 __ PushSafepointRegisters();
5064 __ PrepareCallCFunction(1);
5065 __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
5066 __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
5068 __ PopSafepointRegisters();
5071 // Load the value from ReturnValue
5072 __ movp(rax, return_value_operand);
5075 // No more valid handles (the result handle was the last one). Restore
5076 // previous handle scope.
5077 __ subl(Operand(base_reg, kLevelOffset), Immediate(1));
5078 __ movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
5079 __ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
5080 __ j(not_equal, &delete_allocated_handles);
5082 // Leave the API exit frame.
5083 __ bind(&leave_exit_frame);
5084 bool restore_context = context_restore_operand != NULL;
5085 if (restore_context) {
5086 __ movp(rsi, *context_restore_operand);
5088 if (stack_space_operand != nullptr) {
5089 __ movp(rbx, *stack_space_operand);
5091 __ LeaveApiExitFrame(!restore_context);
5093 // Check if the function scheduled an exception.
5094 __ Move(rdi, scheduled_exception_address);
5095 __ Cmp(Operand(rdi, 0), factory->the_hole_value());
5096 __ j(not_equal, &promote_scheduled_exception);
5099 // Check if the function returned a valid JavaScript value.
5101 Register return_value = rax;
5104 __ JumpIfSmi(return_value, &ok, Label::kNear);
5105 __ movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
5107 __ CmpInstanceType(map, LAST_NAME_TYPE);
5108 __ j(below_equal, &ok, Label::kNear);
5110 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
5111 __ j(above_equal, &ok, Label::kNear);
5113 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
5114 __ j(equal, &ok, Label::kNear);
5116 __ CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
5117 __ j(equal, &ok, Label::kNear);
5119 __ CompareRoot(return_value, Heap::kTrueValueRootIndex);
5120 __ j(equal, &ok, Label::kNear);
5122 __ CompareRoot(return_value, Heap::kFalseValueRootIndex);
5123 __ j(equal, &ok, Label::kNear);
5125 __ CompareRoot(return_value, Heap::kNullValueRootIndex);
5126 __ j(equal, &ok, Label::kNear);
5128 __ Abort(kAPICallReturnedInvalidObject);
5133 if (stack_space_operand != nullptr) {
5134 DCHECK_EQ(stack_space, 0);
5135 __ PopReturnAddressTo(rcx);
5139 __ ret(stack_space * kPointerSize);
5142 // Re-throw by promoting a scheduled exception.
5143 __ bind(&promote_scheduled_exception);
5144 __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
5146 // HandleScope limit has changed. Delete allocated extensions.
5147 __ bind(&delete_allocated_handles);
5148 __ movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
5149 __ movp(prev_limit_reg, rax);
5150 __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
5152 ExternalReference::delete_handle_scope_extensions(isolate));
5154 __ movp(rax, prev_limit_reg);
5155 __ jmp(&leave_exit_frame);
5159 static void CallApiFunctionStubHelper(MacroAssembler* masm,
5160 const ParameterCount& argc,
5161 bool return_first_arg,
5162 bool call_data_undefined) {
5163 // ----------- S t a t e -------------
5165 // -- rbx : call_data
5167 // -- rdx : api_function_address
5169 // -- rax : number of arguments if argc is a register
5170 // -- rsp[0] : return address
5171 // -- rsp[8] : last argument
5173 // -- rsp[argc * 8] : first argument
5174 // -- rsp[(argc + 1) * 8] : receiver
5175 // -----------------------------------
5177 Register callee = rdi;
5178 Register call_data = rbx;
5179 Register holder = rcx;
5180 Register api_function_address = rdx;
5181 Register context = rsi;
5182 Register return_address = r8;
5184 typedef FunctionCallbackArguments FCA;
5186 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5187 STATIC_ASSERT(FCA::kCalleeIndex == 5);
5188 STATIC_ASSERT(FCA::kDataIndex == 4);
5189 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5190 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5191 STATIC_ASSERT(FCA::kIsolateIndex == 1);
5192 STATIC_ASSERT(FCA::kHolderIndex == 0);
5193 STATIC_ASSERT(FCA::kArgsLength == 7);
5195 DCHECK(argc.is_immediate() || rax.is(argc.reg()));
5197 __ PopReturnAddressTo(return_address);
5207 Register scratch = call_data;
5208 if (!call_data_undefined) {
5209 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5213 // return value default
5216 __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
5221 __ movp(scratch, rsp);
5222 // Push return address back on stack.
5223 __ PushReturnAddressFrom(return_address);
5225 // load context from callee
5226 __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
5228 // Allocate the v8::Arguments structure in the arguments' space since
5229 // it's not controlled by GC.
5230 const int kApiStackSpace = 4;
5232 PrepareCallApiFunction(masm, kApiStackSpace);
5234 // FunctionCallbackInfo::implicit_args_.
5235 __ movp(StackSpaceOperand(0), scratch);
5236 if (argc.is_immediate()) {
5237 __ addp(scratch, Immediate((argc.immediate() + FCA::kArgsLength - 1) *
5239 // FunctionCallbackInfo::values_.
5240 __ movp(StackSpaceOperand(1), scratch);
5241 // FunctionCallbackInfo::length_.
5242 __ Set(StackSpaceOperand(2), argc.immediate());
5243 // FunctionCallbackInfo::is_construct_call_.
5244 __ Set(StackSpaceOperand(3), 0);
5246 __ leap(scratch, Operand(scratch, argc.reg(), times_pointer_size,
5247 (FCA::kArgsLength - 1) * kPointerSize));
5248 // FunctionCallbackInfo::values_.
5249 __ movp(StackSpaceOperand(1), scratch);
5250 // FunctionCallbackInfo::length_.
5251 __ movp(StackSpaceOperand(2), argc.reg());
5252 // FunctionCallbackInfo::is_construct_call_.
5253 __ leap(argc.reg(), Operand(argc.reg(), times_pointer_size,
5254 (FCA::kArgsLength + 1) * kPointerSize));
5255 __ movp(StackSpaceOperand(3), argc.reg());
5258 #if defined(__MINGW64__) || defined(_WIN64)
5259 Register arguments_arg = rcx;
5260 Register callback_arg = rdx;
5262 Register arguments_arg = rdi;
5263 Register callback_arg = rsi;
5266 // It's okay if api_function_address == callback_arg
5267 // but not arguments_arg
5268 DCHECK(!api_function_address.is(arguments_arg));
5270 // v8::InvocationCallback's argument.
5271 __ leap(arguments_arg, StackSpaceOperand(0));
5273 ExternalReference thunk_ref =
5274 ExternalReference::invoke_function_callback(masm->isolate());
5276 // Accessor for FunctionCallbackInfo and first js arg.
5277 StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
5278 ARGUMENTS_DONT_CONTAIN_RECEIVER);
5279 Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
5280 FCA::kArgsLength - FCA::kContextSaveIndex);
5281 Operand is_construct_call_operand = StackSpaceOperand(3);
5282 Operand return_value_operand = args_from_rbp.GetArgumentOperand(
5283 return_first_arg ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
5284 int stack_space = 0;
5285 Operand* stack_space_operand = &is_construct_call_operand;
5286 if (argc.is_immediate()) {
5287 stack_space = argc.immediate() + FCA::kArgsLength + 1;
5288 stack_space_operand = nullptr;
5290 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
5291 stack_space, stack_space_operand,
5292 return_value_operand, &context_restore_operand);
5296 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5297 bool call_data_undefined = this->call_data_undefined();
5298 CallApiFunctionStubHelper(masm, ParameterCount(rax), false,
5299 call_data_undefined);
5303 void CallApiAccessorStub::Generate(MacroAssembler* masm) {
5304 bool is_store = this->is_store();
5305 int argc = this->argc();
5306 bool call_data_undefined = this->call_data_undefined();
5307 CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
5308 call_data_undefined);
5312 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5313 // ----------- S t a t e -------------
5314 // -- rsp[0] : return address
5316 // -- rsp[16 - kArgsLength*8] : PropertyCallbackArguments object
5318 // -- r8 : api_function_address
5319 // -----------------------------------
5321 #if defined(__MINGW64__) || defined(_WIN64)
5322 Register getter_arg = r8;
5323 Register accessor_info_arg = rdx;
5324 Register name_arg = rcx;
5326 Register getter_arg = rdx;
5327 Register accessor_info_arg = rsi;
5328 Register name_arg = rdi;
5330 Register api_function_address = ApiGetterDescriptor::function_address();
5331 DCHECK(api_function_address.is(r8));
5332 Register scratch = rax;
5334 // v8::Arguments::values_ and handler for name.
5335 const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
5337 // Allocate v8::AccessorInfo in non-GCed stack space.
5338 const int kArgStackSpace = 1;
5340 __ leap(name_arg, Operand(rsp, kPCOnStackSize));
5342 PrepareCallApiFunction(masm, kArgStackSpace);
5343 __ leap(scratch, Operand(name_arg, 1 * kPointerSize));
5345 // v8::PropertyAccessorInfo::args_.
5346 __ movp(StackSpaceOperand(0), scratch);
5348 // The context register (rsi) has been saved in PrepareCallApiFunction and
5349 // could be used to pass arguments.
5350 __ leap(accessor_info_arg, StackSpaceOperand(0));
5352 ExternalReference thunk_ref =
5353 ExternalReference::invoke_accessor_getter_callback(isolate());
5355 // It's okay if api_function_address == getter_arg
5356 // but not accessor_info_arg or name_arg
5357 DCHECK(!api_function_address.is(accessor_info_arg) &&
5358 !api_function_address.is(name_arg));
5360 // The name handler is counted as an argument.
5361 StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
5362 Operand return_value_operand = args.GetArgumentOperand(
5363 PropertyCallbackArguments::kArgsLength - 1 -
5364 PropertyCallbackArguments::kReturnValueOffset);
5365 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, getter_arg,
5366 kStackSpace, nullptr, return_value_operand, NULL);
5372 } } // namespace v8::internal
5374 #endif // V8_TARGET_ARCH_X64