// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_X64

#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.

UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  XMMRegister input = xmm0;
  XMMRegister result = xmm1;
  __ pushq(rax);
  __ pushq(rbx);

  MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);

  __ popq(rbx);
  __ popq(rax);
  __ movsd(xmm0, result);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
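
// Usage sketch (illustrative; "fast_exp" is a hypothetical local): the
// returned pointer is an ordinary double(double) function, so callers can
// invoke it directly:
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // ~2.718281828, or std::exp's result when
//                              // the fallback path was taken.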

UnaryMathFunction CreateSqrtFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  // Move double input into registers.
  __ sqrtsd(xmm0, xmm0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}

#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
// Define custom fmod implementation.
ModuloFunction CreateModuloFunction() {
  size_t actual_size;
  byte* buffer = static_cast<byte*>(
      base::OS::Allocate(Assembler::kMinimalBufferSize, &actual_size, true));
  CHECK(buffer);
  Assembler masm(NULL, buffer, static_cast<int>(actual_size));
  // Generated code is put into a fixed, unmovable, buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // Windows 64 ABI passes double arguments in xmm0, xmm1 and
  // returns result in xmm0.
  // Argument backing space is allocated on the stack above
  // the return address.

  // Compute x mod y.
  // Load y and x (use argument backing store as temporary storage).
  __ movsd(Operand(rsp, kRegisterSize * 2), xmm1);
  __ movsd(Operand(rsp, kRegisterSize), xmm0);
  __ fld_d(Operand(rsp, kRegisterSize * 2));
  __ fld_d(Operand(rsp, kRegisterSize));

  // Clear exception flags before operation.
  {
    Label no_exceptions;
    __ fwait();
    __ fnstsw_ax();
    // Clear if Invalid Operand or Zero Division exceptions are set.
    __ testb(rax, Immediate(5));
    __ j(zero, &no_exceptions);
    __ fnclex();
    __ bind(&no_exceptions);
  }

  // Compute st(0) % st(1)
  {
    Label partial_remainder_loop;
    __ bind(&partial_remainder_loop);
    __ fprem();
    __ fwait();
    __ fnstsw_ax();
    __ testl(rax, Immediate(0x400 /* C2 */));
    // If C2 is set, computation only has partial result. Loop to
    // continue computation.
    __ j(not_zero, &partial_remainder_loop);
  }
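
  // The loop above is needed because fprem computes only a partial remainder
  // when the operand exponents differ by more than 63, so inputs of very
  // different magnitude may take several iterations before the C2 flag
  // clears.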

  Label valid_result;
  Label return_result;
  // If Invalid Operand or Zero Division exceptions are set,
  // return NaN.
  __ testb(rax, Immediate(5));
  __ j(zero, &valid_result);
  __ fstp(0);  // Drop result in st(0).
  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
  __ movq(rcx, kNaNValue);
  __ movq(Operand(rsp, kRegisterSize), rcx);
  __ movsd(xmm0, Operand(rsp, kRegisterSize));
  __ jmp(&return_result);

  // If result is valid, return that.
  __ bind(&valid_result);
  __ fstp_d(Operand(rsp, kRegisterSize));
  __ movsd(xmm0, Operand(rsp, kRegisterSize));

  // Clean up FPU stack and exceptions and return xmm0
  __ bind(&return_result);
  __ fstp(0);  // Unload y.

  Label clear_exceptions;
  __ testb(rax, Immediate(0x3f /* Any Exception*/));
  __ j(not_zero, &clear_exceptions);
  __ ret(0);
  __ bind(&clear_exceptions);
  __ fnclex();
  __ ret(0);

  CodeDesc desc;
  masm.GetCode(&desc);
  base::OS::ProtectCode(buffer, actual_size);
  // Call the function from C++ through this pointer.
  return FUNCTION_CAST<ModuloFunction>(buffer);
}

#endif

#undef __
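
// Usage sketch (illustrative; "do_mod" is a hypothetical local): under the
// Win64 convention the generated stub is called like fmod:
//
//   ModuloFunction do_mod = CreateModuloFunction();
//   double r = do_mod(5.5, 2.0);  // 1.5, matching fmod(5.5, 2.0).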

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // Return address is on the stack.
  Register scratch = rdi;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch, allocation_memento_found);
  }

  // Set transitioned map.
  __ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}

void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));
  DCHECK(value.is(rax));
  DCHECK(target_map.is(rbx));

  // The fail label is not actually used since we do not allocate.
  Label allocated, new_backing_store, only_change_map, done;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  if (kPointerSize == kDoubleSize) {
    // Check backing store for COW-ness. For COW arrays we have to
    // allocate a new backing store.
    __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
                   Heap::kFixedCOWArrayMapRootIndex);
    __ j(equal, &new_backing_store);
  } else {
    // For the x32 port we have to allocate a new backing store, as SMI size
    // is not equal to double size.
    DCHECK(kDoubleSize == 2 * kPointerSize);
    __ jmp(&new_backing_store);
  }

  // Check if the backing store is in new-space. If not, we need to allocate
  // a new one since the old one is in pointer-space.
  // If in new space, we can reuse the old backing store because it is
  // the same size.
  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);

  __ movp(r14, r8);  // Destination array equals source array.

  // r8 : source FixedArray
  // r9 : elements array length
  // r14: destination FixedDoubleArray
  // Set backing store's map
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);

  __ bind(&allocated);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Convert smis to doubles and holes to hole NaNs. The Array's length
  // remains unchanged.
  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
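
  // Background note: the hole is stored in a FixedDoubleArray as a NaN with
  // the dedicated kHoleNanInt64 bit pattern. Arithmetic never produces that
  // exact pattern (computed NaNs are canonicalized), so holes stay
  // distinguishable from real NaN results.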

  Label loop, entry, convert_hole;
  __ movq(r15, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
  // r15: the-hole NaN
  __ jmp(&entry);

  // Allocate new backing store.
  __ bind(&new_backing_store);
  __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
  __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
  // Set backing store's map
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
  // Set receiver's backing store.
  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
  __ movp(r11, r14);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Set backing store's length.
  __ Integer32ToSmi(r11, r9);
  __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
  __ jmp(&allocated);

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&done);

  // Conversion loop.
  __ bind(&loop);
  __ movp(rbx,
          FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
  // r9 : current element's index
  // rbx: current element (smi-tagged)
  __ JumpIfNotSmi(rbx, &convert_hole);
  __ SmiToInteger32(rbx, rbx);
  __ Cvtlsi2sd(xmm0, rbx);
  __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
           xmm0);
  __ jmp(&entry);
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
  __ bind(&entry);
  __ decp(r9);
  __ j(not_sign, &loop);

  __ bind(&done);
}

void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));
  DCHECK(value.is(rax));
  DCHECK(target_map.is(rbx));

  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ Push(rax);

  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  // r8 : source FixedDoubleArray
  // r9 : number of elements
  __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
  __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
  // r11: destination FixedArray
  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
  __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
  __ Integer32ToSmi(r14, r9);
  __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);

  // Prepare for conversion loop.
  __ movq(rsi, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
  // rsi: the-hole NaN
  // rdi: pointer to the-hole
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(rax);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ jmp(fail);

  // Box doubles into heap numbers.
  __ bind(&loop);
  __ movq(r14, FieldOperand(r8,
                            r9,
                            times_8,
                            FixedDoubleArray::kHeaderSize));
  // r9 : current element's index
  // r14: current element
  __ cmpq(r14, rsi);
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(rax, r15, &gc_required);
  // rax: new heap number
  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
  __ movp(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rax);
  __ movp(r15, r9);
  __ RecordWriteArray(r11,
                      rax,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ movp(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rdi);

  __ bind(&entry);
  __ decp(r9);
  __ j(not_sign, &loop);

  // Replace receiver's backing store with newly created and filled FixedArray.
  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Pop(rax);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}

void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ testb(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ testb(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ addp(index, result);
  __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                 Heap::kempty_stringRootIndex);
  __ j(not_equal, call_runtime);
  __ movp(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ testb(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label one_byte_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ testb(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ testb(result, Immediate(kShortExternalStringTag));
  __ j(not_zero, call_runtime);

  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &one_byte_external, Label::kNear);
  // Two-byte string.
  __ movzxwl(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&one_byte_external);
  // One-byte string.
  __ movzxbl(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: one-byte or two-byte.
  Label one_byte;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &one_byte, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ movzxwl(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // One-byte string.
  // Load the byte into the result register.
  __ bind(&one_byte);
  __ movzxbl(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}

void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch));
  DCHECK(!result.is(double_scratch));
  DCHECK(!temp1.is(temp2));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label done;
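
  // Informal sketch of the scheme below (constants are prepared by
  // ExternalReference::InitializeMathExpData, loosely following the
  // table-driven "expd" method): inputs outside the representable range are
  // clamped to 0.0 or +Infinity via constants 0-2; otherwise x is scaled by
  // 2048/ln(2) and split into an integer part and an 11-bit table index,
  // the power of two is assembled directly in the exponent bits (the
  // shlq(52)/orq pair) together with a 2^(i/2048) entry from
  // math_exp_log_table, and a short polynomial correction restores full
  // double precision.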

  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
  __ xorpd(result, result);
  __ ucomisd(double_scratch, input);
  __ j(above_equal, &done);
  __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
  __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
  __ j(above_equal, &done);
  __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
  __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
  __ mulsd(double_scratch, input);
  __ addsd(double_scratch, result);
  __ movq(temp2, double_scratch);
  __ subsd(double_scratch, result);
  __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
  __ leaq(temp1, Operand(temp2, 0x1ff800));
  __ andq(temp2, Immediate(0x7ff));
  __ shrq(temp1, Immediate(11));
  __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
  __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
  __ shlq(temp1, Immediate(52));
  __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ subsd(double_scratch, input);
  __ movsd(input, double_scratch);
  __ subsd(result, double_scratch);
  __ mulsd(input, double_scratch);
  __ mulsd(result, input);
  __ movq(input, temp1);
  __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
  __ subsd(result, double_scratch);
  __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
  __ mulsd(result, input);

  __ bind(&done);
}

#undef __

CodeAgingHelper::CodeAgingHelper() {
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found both in
  // FUNCTION and OPTIMIZED_FUNCTION code:
  CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
  patcher.masm()->pushq(rbp);
  patcher.masm()->movp(rbp, rsp);
  patcher.masm()->Push(rsi);
  patcher.masm()->Push(rdi);
}
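
// For reference, the x64 encoding of the young sequence above is assumed to
// total six bytes: push rbp (1) + movq rbp, rsp (3) + push rsi (1) +
// push rdi (1); kNoCodeAgeSequenceLength must account for this total.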

#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
#endif

bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}

void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}

void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    CpuFeatures::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length);
    patcher.masm()->call(stub->instruction_start());
    patcher.masm()->Nop(
        kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
  }
}
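
// An aged sequence therefore starts with the kCallOpcode byte of the short
// call into the age stub (Assembler::kShortCallInstructionLength bytes),
// followed by Nop() padding up to kNoCodeAgeSequenceLength; that first byte
// is exactly what IsOld() keys on.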

Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
  DCHECK_GE(index, 0);
  int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
  int displacement_to_last_argument = base_reg_.is(rsp) ?
      kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
  displacement_to_last_argument += extra_displacement_to_last_argument_;
  if (argument_count_reg_.is(no_reg)) {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // (argument_count_immediate_ + receiver - 1) * kPointerSize.
    DCHECK(argument_count_immediate_ + receiver > 0);
    return Operand(base_reg_, displacement_to_last_argument +
        (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
  } else {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
    return Operand(base_reg_, argument_count_reg_, times_pointer_size,
        displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
  }
}
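
// Worked example (illustrative): with base_reg_ == rsp, no extra
// displacement, ARGUMENTS_CONTAIN_RECEIVER, and argument_count_immediate_
// set to 2, GetArgumentOperand(0) resolves to
// Operand(rsp, kPCOnStackSize + 2 * kPointerSize), the deepest of the three
// slots above the return address; each higher index selects the slot one
// kPointerSize closer to rsp.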

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64