// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X64

#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}
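
// Note: the has_frame flag is debug bookkeeping; the MacroAssembler uses it
// to assert that stub and runtime calls that need a frame are only emitted
// while one is active.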

#define __ masm.


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  // Allocate buffer in executable space.
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  XMMRegister input = xmm0;
  XMMRegister result = xmm1;
  // Save rax and rbx; EmitMathExp uses them as integer temporaries.
  __ pushq(rax);
  __ pushq(rbx);

  MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);

  __ popq(rbx);
  __ popq(rax);
  __ movsd(xmm0, result);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
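// The result behaves like a plain C function; a caller might do, e.g.:
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // approximately e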


UnaryMathFunction CreateSqrtFunction() {
  // Allocate buffer in executable space.
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  // Compute the square root in place.
  __ sqrtsd(xmm0, xmm0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}


#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
// Define custom fmod implementation.
ModuloFunction CreateModuloFunction() {
  size_t actual_size;
  byte* buffer = static_cast<byte*>(
      base::OS::Allocate(Assembler::kMinimalBufferSize, &actual_size, true));
  CHECK(buffer);
  Assembler masm(NULL, buffer, static_cast<int>(actual_size));
  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript NaN object).

  // Windows 64 ABI passes double arguments in xmm0, xmm1 and
  // returns the result in xmm0.
  // Argument backing space is allocated on the stack above
  // the return address.
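  // (This is the Win64 "shadow space": the caller always reserves stack
  // slots for the four register parameters, so the callee may freely use
  // them as scratch storage, as the stores below do.)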

  // Compute x mod y.
  // Load y and x (use argument backing store as temporary storage).
  __ movsd(Operand(rsp, kRegisterSize * 2), xmm1);
  __ movsd(Operand(rsp, kRegisterSize), xmm0);
  __ fld_d(Operand(rsp, kRegisterSize * 2));
  __ fld_d(Operand(rsp, kRegisterSize));
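  // y was loaded first, so after both loads x is in st(0) and y in st(1),
  // matching fprem's st(0) = st(0) mod st(1) convention.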

  // Clear exception flags before operation.
  {
    Label no_exceptions;
    __ fwait();
    __ fnstsw_ax();
    // Clear if Invalid Operand or Zero Division exceptions are set.
    __ testb(rax, Immediate(5));
    __ j(zero, &no_exceptions);
    __ fnclex();
    __ bind(&no_exceptions);
  }

  // Compute st(0) % st(1).
  {
    Label partial_remainder_loop;
    __ bind(&partial_remainder_loop);
    __ fprem();
    __ fwait();
    __ fnstsw_ax();
    __ testl(rax, Immediate(0x400 /* C2 */));
    // If C2 is set, computation only has partial result. Loop to
    // continue computation.
    __ j(not_zero, &partial_remainder_loop);
  }
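  // fprem reduces the exponent difference by at most 63 bits per
  // iteration; C2 in the status word signals an incomplete (partial)
  // remainder, so the loop repeats until the reduction is exact.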

  Label valid_result;
  Label return_result;
  // If Invalid Operand or Zero Division exceptions are set,
  // return NaN.
  __ testb(rax, Immediate(5));
  __ j(zero, &valid_result);
  __ fstp(0);  // Drop result in st(0).
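  // 0x7ff8000000000000 is the canonical quiet-NaN bit pattern for an
  // IEEE 754 double.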
  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
  __ movq(rcx, kNaNValue);
  __ movq(Operand(rsp, kRegisterSize), rcx);
  __ movsd(xmm0, Operand(rsp, kRegisterSize));
  __ jmp(&return_result);

  // If the result is valid, return it.
  __ bind(&valid_result);
  __ fstp_d(Operand(rsp, kRegisterSize));
  __ movsd(xmm0, Operand(rsp, kRegisterSize));

  // Clean up the FPU stack and exceptions and return xmm0.
  __ bind(&return_result);
  __ fstp(0);  // Unload y.

  Label clear_exceptions;
  __ testb(rax, Immediate(0x3f /* Any exception */));
  __ j(not_zero, &clear_exceptions);
  __ ret(0);
  __ bind(&clear_exceptions);
  __ fnclex();
  __ ret(0);

  CodeDesc desc;
  masm.GetCode(&desc);
  base::OS::ProtectCode(buffer, actual_size);
  // Call the function from C++ through this pointer.
  return FUNCTION_CAST<ModuloFunction>(buffer);
}
#endif  // _WIN64

#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // Return address is on the stack.
  Register scratch = rdi;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch, allocation_memento_found);
  }

  // Set transitioned map.
  __ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));
  DCHECK(value.is(rax));
  DCHECK(target_map.is(rbx));

  // Labels for the various exits; fail is taken when an allocation memento
  // is found or when allocating a new backing store fails.
  Label allocated, new_backing_store, only_change_map, done;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  if (kPointerSize == kDoubleSize) {
    // Check the backing store for COW-ness. For COW arrays we have to
    // allocate a new backing store.
    __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
                   Heap::kFixedCOWArrayMapRootIndex);
    __ j(equal, &new_backing_store);
  } else {
    // On the x32 port we always have to allocate a new backing store, as
    // the smi size is not equal to the double size.
    DCHECK(kDoubleSize == 2 * kPointerSize);
    __ jmp(&new_backing_store);
  }

  // Check whether the backing store is in new space. If not, we need to
  // allocate a new one, since the old one is in old pointer space.
  // If it is in new space, we can reuse the old backing store because it is
  // guaranteed to be rewritable.
  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);

  __ movp(r14, r8);  // Destination array equals source array.

  // r8 : source FixedArray
  // r9 : elements array length
  // r14: destination FixedDoubleArray
  // Set the backing store's map.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);

  __ bind(&allocated);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Convert smis to doubles and holes to hole NaNs. The array's length
  // remains unchanged.
  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);

  Label loop, entry, convert_hole;
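  // kHoleNanInt64 is the bit pattern of the NaN that FixedDoubleArray uses
  // to represent "the hole"; keeping it in r15 lets the loop store holes
  // without reloading the constant.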
  __ movq(r15, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
  __ jmp(&entry);

  // Allocate new backing store.
  __ bind(&new_backing_store);
  __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
  __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
  // Set the backing store's map.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
  // Set receiver's backing store.
  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
  __ movp(r11, r14);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Set backing store's length.
  __ Integer32ToSmi(r11, r9);
  __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
  __ jmp(&allocated);

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&done);

  // Conversion loop.
  __ bind(&loop);
  __ movq(rbx,
          FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
  // r9 : current element's index
  // rbx: current element (smi-tagged)
  __ JumpIfNotSmi(rbx, &convert_hole);
  __ SmiToInteger32(rbx, rbx);
  __ Cvtlsi2sd(xmm0, rbx);
  __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
           xmm0);
  __ jmp(&entry);

  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);

  __ bind(&entry);
  __ decp(r9);
  __ j(not_sign, &loop);

  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));
  DCHECK(value.is(rax));
  DCHECK(target_map.is(rbx));

  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  // Save the value register; it is clobbered when allocating heap numbers.
  __ Push(rax);

  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  // r8 : source FixedDoubleArray
  // r9 : number of elements
  __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
  __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
  // r11: destination FixedArray
  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
  __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
  __ Integer32ToSmi(r14, r9);
  __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);

  // Prepare for conversion loop.
  __ movq(rsi, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
  // rsi: the-hole NaN
  // rdi: pointer to the-hole
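  // Note that rsi, normally the context register, is repurposed here to
  // hold the hole-NaN pattern; the context is restored from the frame
  // (StandardFrameConstants::kContextOffset) before any exit below.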

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ jmp(&initialization_loop_entry, Label::kNear);
  __ bind(&initialization_loop);
  __ movp(FieldOperand(r11, r9, times_pointer_size, FixedArray::kHeaderSize),
          rdi);
  __ bind(&initialization_loop_entry);
  __ decp(r9);
  __ j(not_sign, &initialization_loop);

  // Reload the length; the initialization loop decremented r9 below zero.
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(rax);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ jmp(fail);

  // Box doubles into heap numbers.
  __ bind(&loop);
  __ movq(r14, FieldOperand(r8,
                            r9,
                            times_8,
                            FixedDoubleArray::kHeaderSize));
  // r9 : current element's index
  // r14: current element
  __ cmpq(r14, rsi);
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(rax, r15, &gc_required);
  // rax: new heap number
  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
  __ movp(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rax);
  __ movp(r15, r9);
  __ RecordWriteArray(r11,
                      rax,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ movp(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rdi);

  __ bind(&entry);
  __ decp(r9);
  __ j(not_sign, &loop);

  // Restore the saved value register.
  __ Pop(rax);

  // Replace receiver's backing store with newly created and filled FixedArray.
  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Restore the context register.
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into the result register.
  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ testb(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ testb(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ addp(index, result);
  __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);
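  // (A sliced string is just an (offset, parent) view, so the character
  // lookup continues in the parent string at index + offset.)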

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                 Heap::kempty_stringRootIndex);
  __ j(not_equal, call_runtime);
  __ movp(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ testb(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label one_byte_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ testb(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ testb(result, Immediate(kShortExternalStringTag));
  __ j(not_zero, call_runtime);
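  // Short external strings do not cache the resource data pointer in the
  // object, so the fast path below cannot read their characters directly.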
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  // (movp does not affect the flags set by testb above.)
  __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &one_byte_external, Label::kNear);
  // Two-byte string.
  __ movzxwl(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&one_byte_external);
  // One-byte string.
  __ movzxbl(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: one-byte or two-byte.
  Label one_byte;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &one_byte, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ movzxwl(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&one_byte);
  // One-byte string.
  // Load the byte into the result register.
  __ movzxbl(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch));
  DCHECK(!result.is(double_scratch));
  DCHECK(!temp1.is(temp2));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label done;
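
  // A sketch of the computation below, inferred from the table accesses
  // (the exact constants live in the math_exp_constants table): the input
  // is first clamped against the table's min/max bounds (yielding 0.0 or
  // the stored max result), then scaled and split into an integer exponent
  // part (temp1, shifted into the IEEE exponent field) and a fractional
  // part looked up in the 2048-entry math_exp_log_table (indexed by
  // temp2), with a small polynomial correction in double_scratch/result.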

  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
  __ xorpd(result, result);
  __ ucomisd(double_scratch, input);
  __ j(above_equal, &done);
  __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
  __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
  __ j(above_equal, &done);
  __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
  __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
  __ mulsd(double_scratch, input);
  __ addsd(double_scratch, result);
  __ movq(temp2, double_scratch);
  __ subsd(double_scratch, result);
  __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
  __ leaq(temp1, Operand(temp2, 0x1ff800));
  __ andq(temp2, Immediate(0x7ff));
  __ shrq(temp1, Immediate(11));
  __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
  __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
  __ shlq(temp1, Immediate(52));
  __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
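  // temp1 now holds the bits of a power of two in the IEEE exponent field
  // (shlq by 52) OR'ed with a mantissa from the log table; it is
  // materialized as a double via the movq into input below.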
  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ subsd(double_scratch, input);
  __ movsd(input, double_scratch);
  __ subsd(result, double_scratch);
  __ mulsd(input, double_scratch);
  __ mulsd(result, input);
  __ movq(input, temp1);
  __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
  __ subsd(result, double_scratch);
  __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
  __ mulsd(result, input);

  __ bind(&done);
}

#undef __


CodeAgingHelper::CodeAgingHelper() {
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found both in
  // FUNCTION and OPTIMIZED_FUNCTION code:
  CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
  patcher.masm()->pushq(rbp);
  patcher.masm()->movp(rbp, rsp);
  patcher.masm()->Push(rsi);
  patcher.masm()->Push(rdi);
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
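  // Aged code starts with a call (kCallOpcode, 0xE8) to an age stub,
  // whereas young code starts with the pushq rbp of the prologue above.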
  return *candidate == kCallOpcode;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte.
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
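    // The 32-bit displacement stored after the call opcode is relative to
    // the end of the call instruction, hence the extra
    // kCallTargetAddressOffset term when recovering the absolute target.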
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    CpuFeatures::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length);
    patcher.masm()->call(stub->instruction_start());
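    // The short call overwrites only the first bytes of the young
    // prologue; pad the remainder of the sequence with nops.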
    patcher.masm()->Nop(
        kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
  }
}


Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
  DCHECK_GE(index, 0);
  int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
  int displacement_to_last_argument = base_reg_.is(rsp) ?
      kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
  displacement_to_last_argument += extra_displacement_to_last_argument_;
  if (argument_count_reg_.is(no_reg)) {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // (argument_count_immediate_ + receiver - 1) * kPointerSize.
    DCHECK(argument_count_immediate_ + receiver > 0);
    return Operand(base_reg_, displacement_to_last_argument +
        (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
  } else {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
    return Operand(base_reg_, argument_count_reg_, times_pointer_size,
        displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
  }
}
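
// Worked example for the immediate case: with base_reg_ == rsp (so the
// displacement is just kPCOnStackSize == 8, assuming
// extra_displacement_to_last_argument_ == 0), ARGUMENTS_CONTAIN_RECEIVER
// and argument_count_immediate_ == 2, index 0 resolves to Operand(rsp, 24)
// (the receiver, the deepest slot) and index 2 to Operand(rsp, 8), the
// last argument just above the return address.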


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64