1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "src/base/bits.h"
10 #include "src/base/division-by-constant.h"
11 #include "src/bootstrapper.h"
12 #include "src/codegen.h"
13 #include "src/cpu-profiler.h"
14 #include "src/debug.h"
15 #include "src/heap/heap.h"
16 #include "src/isolate-inl.h"
17 #include "src/serialize.h"
18 #include "src/x64/assembler-x64.h"
19 #include "src/x64/macro-assembler-x64.h"
// Constructor: binds this MacroAssembler to the isolate's code buffer and
// enables root-array-relative addressing by default.
// NOTE(review): this dump is missing lines (the initializer list skips from
// 26 to 28 and the body is cut mid-statement at line 30) — consult the full
// file before editing.
24 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
25 : Assembler(arg_isolate, buffer, size),
26 generating_stub_(false),
28 root_array_available_(true) {
29 if (isolate() != NULL) {
30 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
// Sentinel returned when an external reference cannot be addressed relative
// to the root register (kRootRegister).
36 static const int64_t kInvalidRootRegisterDelta = -1;
// Computes the displacement of |other| from the root-register value so the
// reference can be reached with a single base+disp operand. Returns
// kInvalidRootRegisterDelta when predictable code size is required and the
// address lies outside the isolate (disp would not be reproducible).
// NOTE(review): the x32 branch and final return are truncated in this dump.
39 int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
40 if (predictable_code_size() &&
41 (other.address() < reinterpret_cast<Address>(isolate()) ||
42 other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
43 return kInvalidRootRegisterDelta;
// kRootRegister points kRootRegisterBias bytes past the roots array start.
45 Address roots_register_value = kRootRegisterBias +
46 reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
48 int64_t delta = kInvalidRootRegisterDelta;  // Bogus initialization.
49 if (kPointerSize == kInt64Size) {
50 delta = other.address() - roots_register_value;
52 // For x32, zero extend the address to 64-bit and calculate the delta.
53 uint64_t o = static_cast<uint32_t>(
54 reinterpret_cast<intptr_t>(other.address()));
55 uint64_t r = static_cast<uint32_t>(
56 reinterpret_cast<intptr_t>(roots_register_value));
// Builds an Operand addressing |target|: root-register-relative when the
// delta fits in an int32 (and the serializer is off), otherwise materializes
// the address into |scratch| (parameter on a truncated line in this dump).
63 Operand MacroAssembler::ExternalOperand(ExternalReference target,
65 if (root_array_available_ && !serializer_enabled()) {
66 int64_t delta = RootRegisterDelta(target);
67 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
68 return Operand(kRootRegister, static_cast<int32_t>(delta));
71 Move(scratch, target);
72 return Operand(scratch, 0);
// Loads the value stored at external reference |source| into |destination|.
// Fast path: root-register-relative movp. rax gets a dedicated encoding
// (presumably load_rax — the branch body is truncated in this dump);
// otherwise go through kScratchRegister.
76 void MacroAssembler::Load(Register destination, ExternalReference source) {
77 if (root_array_available_ && !serializer_enabled()) {
78 int64_t delta = RootRegisterDelta(source);
79 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
80 movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
85 if (destination.is(rax)) {
88 Move(kScratchRegister, source);
89 movp(destination, Operand(kScratchRegister, 0));
// Stores |source| to the memory at external reference |destination|.
// Mirrors Load(): root-relative fast path, store_rax shortcut for rax,
// kScratchRegister fallback.
94 void MacroAssembler::Store(ExternalReference destination, Register source) {
95 if (root_array_available_ && !serializer_enabled()) {
96 int64_t delta = RootRegisterDelta(destination);
97 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
98 movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
103 if (source.is(rax)) {
104 store_rax(destination);
106 Move(kScratchRegister, destination);
107 movp(Operand(kScratchRegister, 0), source);
// Loads the address (not the value) of |source| into |destination|,
// using leap off kRootRegister when the delta fits in 32 bits.
112 void MacroAssembler::LoadAddress(Register destination,
113 ExternalReference source) {
114 if (root_array_available_ && !serializer_enabled()) {
115 int64_t delta = RootRegisterDelta(source);
116 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
117 leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
122 Move(destination, source);
// Returns the byte length of the code LoadAddress() would emit for |source|.
// Must stay in lock-step with LoadAddress(): leap is 4 bytes with a disp8,
// 7 with a disp32; the fallback is the fixed movp-immediate length.
126 int MacroAssembler::LoadAddressSize(ExternalReference source) {
127 if (root_array_available_ && !serializer_enabled()) {
128 // This calculation depends on the internals of LoadAddress.
129 // It's correctness is ensured by the asserts in the Call
130 // instruction below.
131 int64_t delta = RootRegisterDelta(source);
132 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
133 // Operand is leap(scratch, Operand(kRootRegister, delta));
134 // Opcodes : REX.W 8D ModRM Disp8/Disp32  - 4 or 7.
136 if (!is_int8(static_cast<int32_t>(delta))) {
137 size += 3;  // Need full four-byte displacement in lea.
142 // Size of movp(destination, src);
143 return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
// Pushes the address of |source| on the stack. If it fits in 32 bits (and
// no serializer), push an immediate; in debug mode first zap
// kScratchRegister to catch code relying on it. Otherwise load then push.
147 void MacroAssembler::PushAddress(ExternalReference source) {
148 int64_t address = reinterpret_cast<int64_t>(source.address());
149 if (is_int32(address) && !serializer_enabled()) {
150 if (emit_debug_code()) {
151 Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
153 Push(Immediate(static_cast<int32_t>(address)));
156 LoadAddress(kScratchRegister, source);
157 Push(kScratchRegister);
// Loads root-list entry |index| into |destination| via kRootRegister.
// The bias is subtracted because kRootRegister points past the array start.
161 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
162 DCHECK(root_array_available_);
163 movp(destination, Operand(kRootRegister,
164 (index << kPointerSizeLog2) - kRootRegisterBias));
// Loads a root entry at a run-time offset: fixed_offset plus
// variable_offset scaled by the pointer size.
168 void MacroAssembler::LoadRootIndexed(Register destination,
169 Register variable_offset,
171 DCHECK(root_array_available_);
173 Operand(kRootRegister,
174 variable_offset, times_pointer_size,
175 (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
// Stores |source| into root-list slot |index|.
179 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
180 DCHECK(root_array_available_);
181 movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
// Pushes root-list entry |index| directly from memory.
186 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
187 DCHECK(root_array_available_);
188 Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
// Compares |with| against root-list entry |index| (sets flags only).
192 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
193 DCHECK(root_array_available_);
194 cmpp(with, Operand(kRootRegister,
195 (index << kPointerSizeLog2) - kRootRegisterBias));
// Memory-operand variant: loads the root into kScratchRegister first, so
// |with| must not itself address through kScratchRegister.
199 void MacroAssembler::CompareRoot(const Operand& with,
200 Heap::RootListIndex index) {
201 DCHECK(root_array_available_);
202 DCHECK(!with.AddressUsesRegister(kScratchRegister));
203 LoadRoot(kScratchRegister, index);
204 cmpp(with, kScratchRegister);
// Records |addr| in the store buffer and, if the buffer overflowed, calls
// the StoreBufferOverflowStub. |and_then| selects whether the fast path
// returns to the caller (kReturnAtEnd) or falls through (kFallThroughAtEnd).
// In debug mode first asserts that |object| is not in new space (a
// new-space object would not need a remembered-set entry).
// NOTE(review): several lines (ok label, ret, done label) are truncated in
// this dump.
208 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
211 SaveFPRegsMode save_fp,
212 RememberedSetFinalAction and_then) {
213 if (emit_debug_code()) {
215 JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
219 // Load store buffer top.
220 LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
221 // Store pointer to buffer.
222 movp(Operand(scratch, 0), addr);
223 // Increment buffer top.
224 addp(scratch, Immediate(kPointerSize));
225 // Write back new top of buffer.
226 StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
227 // Call stub on end of buffer.
229 // Check for end of buffer.
230 testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
231 if (and_then == kReturnAtEnd) {
232 Label buffer_overflowed;
233 j(not_equal, &buffer_overflowed, Label::kNear);
235 bind(&buffer_overflowed);
237 DCHECK(and_then == kFallThroughAtEnd);
238 j(equal, &done, Label::kNear);
240 StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
241 CallStub(&store_buffer_overflow);
242 if (and_then == kReturnAtEnd) {
245 DCHECK(and_then == kFallThroughAtEnd);
// Jumps to |branch| when (object & new_space_mask) compares |cc| against
// new_space_start. When the serializer is on, the mask/start must be loaded
// as external references so the snapshot stays relocatable; otherwise they
// are baked in as immediates (masking done via a negated-start leap/addp
// trick so only one scratch register is needed).
251 void MacroAssembler::InNewSpace(Register object,
255 Label::Distance distance) {
256 if (serializer_enabled()) {
257 // Can't do arithmetic on external references if it might get serialized.
258 // The mask isn't really an address.  We load it as an external reference in
259 // case the size of the new space is different between the snapshot maker
260 // and the running system.
261 if (scratch.is(object)) {
262 Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
263 andp(scratch, kScratchRegister);
265 Move(scratch, ExternalReference::new_space_mask(isolate()));
266 andp(scratch, object);
268 Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
269 cmpp(scratch, kScratchRegister);
270 j(cc, branch, distance);
// Non-serializer path: the mask must fit an int32 immediate on 64-bit.
272 DCHECK(kPointerSize == kInt64Size
273 ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
274 : kPointerSize == kInt32Size);
275 intptr_t new_space_start =
276 reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
277 Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
278 Assembler::RelocInfoNone());
279 if (scratch.is(object)) {
280 addp(scratch, kScratchRegister);
282 leap(scratch, Operand(object, kScratchRegister, times_1, 0));
285 Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
286 j(cc, branch, distance);
// Write barrier for a store into a field at |offset| inside |object|.
// Skips the barrier for smi values (when INLINE_SMI_CHECK), computes the
// slot address into |dst|, then delegates to RecordWrite. In debug builds
// verifies the slot address is pointer-aligned and zaps clobbered registers.
291 void MacroAssembler::RecordWriteField(
296 SaveFPRegsMode save_fp,
297 RememberedSetAction remembered_set_action,
299 PointersToHereCheck pointers_to_here_check_for_value) {
300 // First, check if a write barrier is even needed. The tests below
301 // catch stores of Smis.
304 // Skip barrier if writing a smi.
305 if (smi_check == INLINE_SMI_CHECK) {
306 JumpIfSmi(value, &done);
309 // Although the object register is tagged, the offset is relative to the start
310 // of the object, so so offset must be a multiple of kPointerSize.
311 DCHECK(IsAligned(offset, kPointerSize));
313 leap(dst, FieldOperand(object, offset));
314 if (emit_debug_code()) {
316 testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
317 j(zero, &ok, Label::kNear);
// Smi check already done above, so it can be omitted downstream.
322 RecordWrite(object, dst, value, save_fp, remembered_set_action,
323 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
327 // Clobber clobbered input registers when running with the debug-code flag
328 // turned on to provoke errors.
329 if (emit_debug_code()) {
330 Move(value, kZapValue, Assembler::RelocInfoNone());
331 Move(dst, kZapValue, Assembler::RelocInfoNone());
// Write barrier for a store into FixedArray element |index| of |object|.
// |index| is an untagged integer and is clobbered to hold the slot address.
336 void MacroAssembler::RecordWriteArray(
340 SaveFPRegsMode save_fp,
341 RememberedSetAction remembered_set_action,
343 PointersToHereCheck pointers_to_here_check_for_value) {
344 // First, check if a write barrier is even needed. The tests below
345 // catch stores of Smis.
348 // Skip barrier if writing a smi.
349 if (smi_check == INLINE_SMI_CHECK) {
350 JumpIfSmi(value, &done);
353 // Array access: calculate the destination address. Index is not a smi.
354 Register dst = index;
355 leap(dst, Operand(object, index, times_pointer_size,
356 FixedArray::kHeaderSize - kHeapObjectTag));
358 RecordWrite(object, dst, value, save_fp, remembered_set_action,
359 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
363 // Clobber clobbered input registers when running with the debug-code flag
364 // turned on to provoke errors.
365 if (emit_debug_code()) {
366 Move(value, kZapValue, Assembler::RelocInfoNone());
367 Move(index, kZapValue, Assembler::RelocInfoNone());
// Write barrier specialized for storing |map| into |object|'s map slot.
// Maps never live in new space, so a single kPointersToHereAreInteresting
// page-flag check on the map's page suffices; the remembered set is omitted.
// Debug builds verify |map| really is a map and was actually stored.
372 void MacroAssembler::RecordWriteForMap(Register object,
375 SaveFPRegsMode fp_mode) {
376 DCHECK(!object.is(kScratchRegister));
377 DCHECK(!object.is(map));
378 DCHECK(!object.is(dst));
379 DCHECK(!map.is(dst));
380 AssertNotSmi(object);
382 if (emit_debug_code()) {
// CompareMap may clobber kScratchRegister, so preserve |map| around it.
384 if (map.is(kScratchRegister)) pushq(map);
385 CompareMap(map, isolate()->factory()->meta_map());
386 if (map.is(kScratchRegister)) popq(map);
387 j(equal, &ok, Label::kNear);
392 if (!FLAG_incremental_marking) {
396 if (emit_debug_code()) {
398 if (map.is(kScratchRegister)) pushq(map);
399 cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
400 if (map.is(kScratchRegister)) popq(map);
401 j(equal, &ok, Label::kNear);
406 // Compute the address.
407 leap(dst, FieldOperand(object, HeapObject::kMapOffset));
409 // First, check if a write barrier is even needed. The tests below
410 // catch stores of smis and stores into the young generation.
413 // A single check of the map's pages interesting flag suffices, since it is
414 // only set during incremental collection, and then it's also guaranteed that
415 // the from object's page's interesting flag is also set.  This optimization
416 // relies on the fact that maps can never be in new space.
418 map,  // Used as scratch.
419 MemoryChunk::kPointersToHereAreInterestingMask,
424 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
430 // Count number of write barriers in generated code.
431 isolate()->counters()->write_barriers_static()->Increment();
432 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
434 // Clobber clobbered registers when running with the debug-code flag
435 // turned on to provoke errors.
436 if (emit_debug_code()) {
437 Move(dst, kZapValue, Assembler::RelocInfoNone());
438 Move(map, kZapValue, Assembler::RelocInfoNone());
// General write barrier: |address| points at the slot inside |object| that
// was just assigned |value|. Bails out early for smis, for values on pages
// whose pointers-to-here flag is clear, and for objects on pages whose
// pointers-from-here flag is clear; otherwise calls the RecordWriteStub.
// |value| and |address| are clobbered (zapped in debug builds).
443 void MacroAssembler::RecordWrite(
447 SaveFPRegsMode fp_mode,
448 RememberedSetAction remembered_set_action,
450 PointersToHereCheck pointers_to_here_check_for_value) {
451 DCHECK(!object.is(value));
452 DCHECK(!object.is(address));
453 DCHECK(!value.is(address));
454 AssertNotSmi(object);
456 if (remembered_set_action == OMIT_REMEMBERED_SET &&
457 !FLAG_incremental_marking) {
// Debug check that *address really holds |value|.
461 if (emit_debug_code()) {
463 cmpp(value, Operand(address, 0));
464 j(equal, &ok, Label::kNear);
469 // First, check if a write barrier is even needed. The tests below
470 // catch stores of smis and stores into the young generation.
473 if (smi_check == INLINE_SMI_CHECK) {
474 // Skip barrier if writing a smi.
475 JumpIfSmi(value, &done);
478 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
480 value,  // Used as scratch.
481 MemoryChunk::kPointersToHereAreInterestingMask,
487 CheckPageFlag(object,
488 value,  // Used as scratch.
489 MemoryChunk::kPointersFromHereAreInterestingMask,
494 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
500 // Count number of write barriers in generated code.
501 isolate()->counters()->write_barriers_static()->Increment();
502 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
504 // Clobber clobbered registers when running with the debug-code flag
505 // turned on to provoke errors.
506 if (emit_debug_code()) {
507 Move(address, kZapValue, Assembler::RelocInfoNone());
508 Move(value, kZapValue, Assembler::RelocInfoNone());
// Debug-only assertion: emits a Check only when --debug-code is on.
513 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
514 if (emit_debug_code()) Check(cc, reason);
// Debug check that |elements| has one of the three fast-elements maps
// (FixedArray, FixedDoubleArray, FixedCOWArray); aborts otherwise.
518 void MacroAssembler::AssertFastElements(Register elements) {
519 if (emit_debug_code()) {
521 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
522 Heap::kFixedArrayMapRootIndex);
523 j(equal, &ok, Label::kNear);
524 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
525 Heap::kFixedDoubleArrayMapRootIndex);
526 j(equal, &ok, Label::kNear);
527 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
528 Heap::kFixedCOWArrayMapRootIndex);
529 j(equal, &ok, Label::kNear);
530 Abort(kJSObjectWithFastElementsMapHasSlowElements);
// Unconditional check: jumps over an Abort when |cc| holds.
536 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
538 j(cc, &L, Label::kNear);
540 // Control will not return here.
// Emits a runtime check that rsp honors the platform's activation frame
// alignment (only when that alignment exceeds one pointer); aborts on
// misalignment (the abort emission line is truncated in this dump).
545 void MacroAssembler::CheckStackAlignment() {
546 int frame_alignment = base::OS::ActivationFrameAlignment();
547 int frame_alignment_mask = frame_alignment - 1;
548 if (frame_alignment > kPointerSize) {
549 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
550 Label alignment_as_expected;
551 testp(rsp, Immediate(frame_alignment_mask));
552 j(zero, &alignment_as_expected, Label::kNear);
553 // Abort if stack is not aligned.
555 bind(&alignment_as_expected);
// Used after a multiplication-like operation: if |result| is zero, further
// (truncated) code distinguishes a true zero from a negative zero.
560 void MacroAssembler::NegativeZeroTest(Register result,
564 testl(result, result);
565 j(not_zero, &ok, Label::kNear);
// Aborts execution: records the bailout message, optionally traps
// (--trap-on-abort), pushes the reason as a smi and calls Runtime::kAbort.
// When no frame exists yet, fakes a StackFrame::NONE scope so the runtime
// call is legal without emitting frame setup code.
572 void MacroAssembler::Abort(BailoutReason reason) {
574 const char* msg = GetBailoutReason(reason);
576 RecordComment("Abort message: ");
580 if (FLAG_trap_on_abort) {
586 Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
587 Assembler::RelocInfoNone());
588 Push(kScratchRegister);
591 // We don't actually want to generate a pile of code for this, so just
592 // claim there is a stack frame, without generating one.
593 FrameScope scope(this, StackFrame::NONE);
594 CallRuntime(Runtime::kAbort, 1);
596 CallRuntime(Runtime::kAbort, 1);
598 // Control will not return here.
// Calls a code stub; only legal where AllowThisStubCall() holds.
603 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
604 DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
605 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
// Tail-calls a code stub (jump, no return address pushed).
609 void MacroAssembler::TailCallStub(CodeStub* stub) {
610 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
// Returns from a stub, popping argc-1 extra pointer slots (the return
// address itself accounts for the remaining one).
614 void MacroAssembler::StubReturn(int argc) {
615 DCHECK(argc >= 1 && generating_stub());
616 ret((argc - 1) * kPointerSize);
// Stub calls need a frame unless the stub never sets one up itself.
620 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
621 return has_frame_ || !stub->SometimesSetsUpAFrame();
// Extracts the cached array index from a string hash field in |hash| and
// leaves it in |index| as a smi (via DecodeFieldToSmi).
625 void MacroAssembler::IndexFromHash(Register hash, Register index) {
626 // The assert checks that the constants for the maximum number of digits
627 // for an array index cached in the hash field and the number of bits
628 // reserved for it does not conflict.
629 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
630 (1 << String::kArrayIndexValueBits));
631 if (!hash.is(index)) {
634 DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
// Calls runtime function |f| through the CEntryStub: argument count in rax,
// entry address in rbx (per the C entry calling convention).
638 void MacroAssembler::CallRuntime(const Runtime::Function* f,
640 SaveFPRegsMode save_doubles) {
641 // If the expected number of arguments of the runtime function is
642 // constant, we check that the actual number of arguments match the
643 // constant, we check that the actual number of arguments match the
644 CHECK(f->nargs < 0 || f->nargs == num_arguments);
646 // TODO(1236192): Most runtime routines don't need the number of
647 // arguments passed in because it is constant. At some point we
648 // should remove this need and make the runtime routine entry code
650 Set(rax, num_arguments);
651 LoadAddress(rbx, ExternalReference(f, isolate()));
652 CEntryStub ces(isolate(), f->result_size, save_doubles);
// Calls an external reference directly via a CEntryStub; same register
// protocol as CallRuntime (rax = argc, rbx = entry).
657 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
659 Set(rax, num_arguments);
660 LoadAddress(rbx, ext);
662 CEntryStub stub(isolate(), 1);
// Tail-calls an external reference, leaving the caller's arguments on the
// stack as documented in the state comment below.
667 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
670 // ----------- S t a t e -------------
671 //  -- rsp[0]                 : return address
672 //  -- rsp[8]                 : argument num_arguments - 1
674 //  -- rsp[8 * num_arguments] : argument 0 (receiver)
675 // -----------------------------------
677 // TODO(1236192): Most runtime routines don't need the number of
678 // arguments passed in because it is constant. At some point we
679 // should remove this need and make the runtime routine entry code
681 Set(rax, num_arguments);
682 JumpToExternalReference(ext, result_size);
// Convenience wrapper: resolves |fid| to its external reference first.
686 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
689 TailCallExternalReference(ExternalReference(fid, isolate()),
// Byte distance between two external references; asserted to fit in int so
// it can be used as an operand displacement.
695 static int Offset(ExternalReference ref0, ExternalReference ref1) {
696 int64_t offset = (ref0.address() - ref1.address());
697 // Check that fits into int.
698 DCHECK(static_cast<int>(offset) == offset);
699 return static_cast<int>(offset);
// Sets up an API exit frame with room for |arg_stack_space| arguments.
703 void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
704 EnterApiExitFrame(arg_stack_space);
// Calls an API function and returns to JS. Responsibilities visible here:
//  - allocate a HandleScope in callee-saved regs (r14 = prev next,
//    rbx = prev limit, r15 = scope base) and bump the level;
//  - optionally log timer events around the call;
//  - if profiling is enabled, route the call through |thunk_ref| with the
//    real |function_address| as the thunk's last argument;
//  - restore the handle scope, deleting extensions if the limit changed;
//  - propagate any scheduled exception via Runtime::kPromoteScheduledException;
//  - under ENABLE_EXTRA_CHECKS, verify the return value is a valid JS value;
//  - optionally restore the context and return, popping |stack_space| slots.
// NOTE(review): multiple lines (the actual call instruction, label binds)
// are truncated in this dump.
708 void MacroAssembler::CallApiFunctionAndReturn(
709 Register function_address,
710 ExternalReference thunk_ref,
711 Register thunk_last_arg,
713 Operand return_value_operand,
714 Operand* context_restore_operand) {
716 Label promote_scheduled_exception;
717 Label exception_handled;
718 Label delete_allocated_handles;
719 Label leave_exit_frame;
722 Factory* factory = isolate()->factory();
723 ExternalReference next_address =
724 ExternalReference::handle_scope_next_address(isolate());
725 const int kNextOffset = 0;
726 const int kLimitOffset = Offset(
727 ExternalReference::handle_scope_limit_address(isolate()),
729 const int kLevelOffset = Offset(
730 ExternalReference::handle_scope_level_address(isolate()),
732 ExternalReference scheduled_exception_address =
733 ExternalReference::scheduled_exception_address(isolate());
735 DCHECK(rdx.is(function_address) || r8.is(function_address));
736 // Allocate HandleScope in callee-save registers.
737 Register prev_next_address_reg = r14;
738 Register prev_limit_reg = rbx;
739 Register base_reg = r15;
740 Move(base_reg, next_address);
741 movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
742 movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
743 addl(Operand(base_reg, kLevelOffset), Immediate(1));
745 if (FLAG_log_timer_events) {
746 FrameScope frame(this, StackFrame::MANUAL);
747 PushSafepointRegisters();
748 PrepareCallCFunction(1);
749 LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
750 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
751 PopSafepointRegisters();
// Decide between calling through the profiler thunk or directly.
755 Label profiler_disabled;
756 Label end_profiler_check;
757 Move(rax, ExternalReference::is_profiling_address(isolate()));
758 cmpb(Operand(rax, 0), Immediate(0));
759 j(zero, &profiler_disabled);
761 // Third parameter is the address of the actual getter function.
762 Move(thunk_last_arg, function_address);
763 Move(rax, thunk_ref);
764 jmp(&end_profiler_check);
766 bind(&profiler_disabled);
767 // Call the api function!
768 Move(rax, function_address);
770 bind(&end_profiler_check);
772 // Call the api function!
775 if (FLAG_log_timer_events) {
776 FrameScope frame(this, StackFrame::MANUAL);
777 PushSafepointRegisters();
778 PrepareCallCFunction(1);
779 LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
780 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
781 PopSafepointRegisters();
784 // Load the value from ReturnValue
785 movp(rax, return_value_operand);
788 // No more valid handles (the result handle was the last one). Restore
789 // previous handle scope.
790 subl(Operand(base_reg, kLevelOffset), Immediate(1));
791 movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
792 cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
793 j(not_equal, &delete_allocated_handles);
794 bind(&leave_exit_frame);
796 // Check if the function scheduled an exception.
797 Move(rsi, scheduled_exception_address);
798 Cmp(Operand(rsi, 0), factory->the_hole_value());
799 j(not_equal, &promote_scheduled_exception);
800 bind(&exception_handled);
802 #if ENABLE_EXTRA_CHECKS
803 // Check if the function returned a valid JavaScript value.
805 Register return_value = rax;
808 JumpIfSmi(return_value, &ok, Label::kNear);
809 movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
811 CmpInstanceType(map, FIRST_NONSTRING_TYPE);
812 j(below, &ok, Label::kNear);
814 CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
815 j(above_equal, &ok, Label::kNear);
817 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
818 j(equal, &ok, Label::kNear);
820 CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
821 j(equal, &ok, Label::kNear);
823 CompareRoot(return_value, Heap::kTrueValueRootIndex);
824 j(equal, &ok, Label::kNear);
826 CompareRoot(return_value, Heap::kFalseValueRootIndex);
827 j(equal, &ok, Label::kNear);
829 CompareRoot(return_value, Heap::kNullValueRootIndex);
830 j(equal, &ok, Label::kNear);
832 Abort(kAPICallReturnedInvalidObject);
837 bool restore_context = context_restore_operand != NULL;
838 if (restore_context) {
839 movp(rsi, *context_restore_operand);
841 LeaveApiExitFrame(!restore_context);
842 ret(stack_space * kPointerSize);
844 bind(&promote_scheduled_exception);
846 FrameScope frame(this, StackFrame::INTERNAL);
847 CallRuntime(Runtime::kPromoteScheduledException, 0);
849 jmp(&exception_handled);
851 // HandleScope limit has changed. Delete allocated extensions.
852 bind(&delete_allocated_handles);
853 movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
854 movp(prev_limit_reg, rax);
855 LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
857 ExternalReference::delete_handle_scope_extensions(isolate()));
859 movp(rax, prev_limit_reg);
860 jmp(&leave_exit_frame);
// Tail-calls into the C entry runtime stub with the entry point in rbx.
864 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
866 // Set the entry point and jump to the C entry runtime stub.
867 LoadAddress(rbx, ext);
868 CEntryStub ces(isolate(), result_size);
869 jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
// Invokes JS builtin |id| through its function object in rdx, faking a
// zero parameter count so no argument-count check code is emitted.
873 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
875 const CallWrapper& call_wrapper) {
876 // You can't call a builtin without a valid frame.
877 DCHECK(flag == JUMP_FUNCTION || has_frame());
879 // Rely on the assertion to check that the number of provided
880 // arguments match the expected number of arguments. Fake a
881 // parameter count to avoid emitting code to do the check.
882 ParameterCount expected(0);
883 GetBuiltinEntry(rdx, id);
884 InvokeCode(rdx, expected, expected, flag, call_wrapper);
// Loads the JSFunction for builtin |id|: context -> global object ->
// builtins object -> function slot.
888 void MacroAssembler::GetBuiltinFunction(Register target,
889 Builtins::JavaScript id) {
890 // Load the builtins object into target register.
891 movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
892 movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
893 movp(target, FieldOperand(target,
894 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
// Loads the builtin's code entry; clobbers rdi with the function object,
// hence |target| must not be rdi.
898 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
899 DCHECK(!target.is(rdi));
900 // Load the JavaScript builtin function from the builtins object.
901 GetBuiltinFunction(rdi, id);
902 movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
// Table of caller-saved general registers preserved by Push/PopCallerSaved.
// r12-r15 and rsp are deliberately absent (callee-saved / stack pointer).
906 #define REG(Name) { kRegister_ ## Name ## _Code }
908 static const Register saved_regs[] = {
909 REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
910 REG(r9), REG(r10), REG(r11)
915 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
// Pushes all caller-saved registers except up to three exclusions, then
// (when fp_mode == kSaveFPRegs) spills every XMM register below rsp.
918 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
921 Register exclusion3) {
922 // We don't allow a GC during a store buffer overflow so there is no need to
923 // store the registers in any particular way, but we do have to store and
925 for (int i = 0; i < kNumberOfSavedRegs; i++) {
926 Register reg = saved_regs[i];
927 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
931 // R12 to r15 are callee save on all platforms.
932 if (fp_mode == kSaveFPRegs) {
933 subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
934 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
935 XMMRegister reg = XMMRegister::from_code(i);
936 movsd(Operand(rsp, i * kDoubleSize), reg);
// Exact inverse of PushCallerSaved: XMM registers first, then the general
// registers in reverse table order, honoring the same exclusions.
942 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
945 Register exclusion3) {
946 if (fp_mode == kSaveFPRegs) {
947 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
948 XMMRegister reg = XMMRegister::from_code(i);
949 movsd(reg, Operand(rsp, i * kDoubleSize));
951 addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
953 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
954 Register reg = saved_regs[i];
955 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
// int32 -> double conversions (bodies truncated in this dump).
962 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
968 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
// Representation-aware load: picks the correctly-sized/sign-extended mov
// for the given integer representation (per-branch mov lines truncated).
974 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
975 DCHECK(!r.IsDouble());
976 if (r.IsInteger8()) {
978 } else if (r.IsUInteger8()) {
980 } else if (r.IsInteger16()) {
982 } else if (r.IsUInteger16()) {
984 } else if (r.IsInteger32()) {
// Representation-aware store; also asserts heap-object/smi-ness of |src|
// for tagged representations (assertion lines truncated in this dump).
992 void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
993 DCHECK(!r.IsDouble());
994 if (r.IsInteger8() || r.IsUInteger8()) {
996 } else if (r.IsInteger16() || r.IsUInteger16()) {
998 } else if (r.IsInteger32()) {
1001 if (r.IsHeapObject()) {
1003 } else if (r.IsSmi()) {
// Loads immediate |x| into |dst| with the shortest encoding: movl for
// values with a zero upper half (implicitly zero-extended), sign-extended
// movq for other int32s, full movq otherwise (truncated).
1011 void MacroAssembler::Set(Register dst, int64_t x) {
1014 } else if (is_uint32(x)) {
1015 movl(dst, Immediate(static_cast<uint32_t>(x)));
1016 } else if (is_int32(x)) {
1017 movq(dst, Immediate(static_cast<int32_t>(x)));
// Memory-destination variant; values that don't fit a 32-bit immediate go
// through kScratchRegister.
1024 void MacroAssembler::Set(const Operand& dst, intptr_t x) {
1025 if (kPointerSize == kInt64Size) {
1027 movp(dst, Immediate(static_cast<int32_t>(x)));
1029 Set(kScratchRegister, x);
1030 movp(dst, kScratchRegister);
1033 movp(dst, Immediate(static_cast<int32_t>(x)));
1038 // ----------------------------------------------------------------------------
1039 // Smi tagging, untagging and tag detection.
// An int is "unsafe" to embed as an immediate when it exceeds 17 bits —
// such values are xor-scrambled with the JIT cookie to hinder JIT spraying.
1041 bool MacroAssembler::IsUnsafeInt(const int32_t x) {
1042 static const int kMaxBits = 17;
1043 return !is_intn(x, kMaxBits);
// Moves smi |src| into |dst|, scrambling unsafe values with the jit cookie
// and unscrambling with a follow-up xor so no raw immediate is emitted.
1047 void MacroAssembler::SafeMove(Register dst, Smi* src) {
1048 DCHECK(!dst.is(kScratchRegister));
1049 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1050 if (SmiValuesAre32Bits()) {
1051 // JIT cookie can be converted to Smi.
1052 Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
1053 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1054 xorp(dst, kScratchRegister);
1056 DCHECK(SmiValuesAre31Bits());
1057 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
1058 movp(dst, Immediate(value ^ jit_cookie()));
1059 xorp(dst, Immediate(jit_cookie()));
// Push variant of SafeMove: scrambled value is pushed, then unscrambled
// in place on the stack with an xor against rsp[0].
1067 void MacroAssembler::SafePush(Smi* src) {
1068 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1069 if (SmiValuesAre32Bits()) {
1070 // JIT cookie can be converted to Smi.
1071 Push(Smi::FromInt(src->value() ^ jit_cookie()));
1072 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1073 xorp(Operand(rsp, 0), kScratchRegister);
1075 DCHECK(SmiValuesAre31Bits());
1076 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
1077 Push(Immediate(value ^ jit_cookie()));
1078 xorp(Operand(rsp, 0), Immediate(jit_cookie()));
// Returns a register holding smi |source|: kScratchRegister zeroed for 0,
// kSmiConstantRegister for the reserved constant value, otherwise loads
// into kScratchRegister via LoadSmiConstant.
1086 Register MacroAssembler::GetSmiConstant(Smi* source) {
1087 int value = source->value();
1089 xorl(kScratchRegister, kScratchRegister);
1090 return kScratchRegister;
1093 return kSmiConstantRegister;
1095 LoadSmiConstant(kScratchRegister, source);
1096 return kScratchRegister;
// Materializes smi |source| into |dst|, preferring short lea forms built
// from multiples of kSmiConstantRegister (x2/x4/x8 scale combinations)
// over a full 64-bit immediate move. Debug builds first assert
// kSmiConstantRegister still holds its expected value.
// NOTE(review): the switch/case framing around the leap variants is
// truncated in this dump.
1100 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
1101 if (emit_debug_code()) {
1102 Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
1103 Assembler::RelocInfoNone());
1104 cmpp(dst, kSmiConstantRegister);
1105 Assert(equal, kUninitializedKSmiConstantRegister);
1107 int value = source->value();
1112 bool negative = value < 0;
1113 unsigned int uvalue = negative ? -value : value;
1118 Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
1122 leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
1126 leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
1130 Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
1134 Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
1138 Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
1141 movp(dst, kSmiConstantRegister);
1147 Move(dst, source, Assembler::RelocInfoNone());
// Tags a 32-bit integer as a smi by shifting left kSmiShift.
1156 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
1157 STATIC_ASSERT(kSmiTag == 0);
1161 shlp(dst, Immediate(kSmiShift));
// Writes |src| as a smi directly into memory |dst|. With 32-bit smi values
// the payload can be stored as a plain movl into the upper half of the
// field; otherwise tag in kScratchRegister first. Debug builds assert the
// destination currently holds a smi.
1165 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
1166 if (emit_debug_code()) {
1167 testb(dst, Immediate(0x01));
1169 j(zero, &ok, Label::kNear);
1170 Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
1174 if (SmiValuesAre32Bits()) {
1175 DCHECK(kSmiShift % kBitsPerByte == 0);
1176 movl(Operand(dst, kSmiShift / kBitsPerByte), src);
1178 DCHECK(SmiValuesAre31Bits());
1179 Integer32ToSmi(kScratchRegister, src);
1180 movp(dst, kScratchRegister);
// Computes Smi(src + constant): addl when dst aliases src, leal otherwise,
// then tags with the smi shift.
1185 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
1189 addl(dst, Immediate(constant));
1191 leal(dst, Operand(src, constant));
1193 shlp(dst, Immediate(kSmiShift));
// Untags a smi to a 32-bit integer: logical shift for 32-bit smi values,
// arithmetic 32-bit shift for 31-bit smi values.
1197 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
1198 STATIC_ASSERT(kSmiTag == 0);
1203 if (SmiValuesAre32Bits()) {
1204 shrp(dst, Immediate(kSmiShift));
1206 DCHECK(SmiValuesAre31Bits());
1207 sarl(dst, Immediate(kSmiShift));
// Memory-source variant: with 32-bit smi payloads, just load the high
// half of the field directly.
1212 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
1213 if (SmiValuesAre32Bits()) {
1214 movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1216 DCHECK(SmiValuesAre31Bits());
1218 sarl(dst, Immediate(kSmiShift));
// Untags to a sign-extended 64-bit integer.
1223 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
1224 STATIC_ASSERT(kSmiTag == 0);
1228 sarp(dst, Immediate(kSmiShift));
1229 if (kPointerSize == kInt32Size) {
1230 // Sign extend to 64-bit.
// Memory-source variant: movsxlq of the payload half when smis are 32-bit.
1236 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
1237 if (SmiValuesAre32Bits()) {
1238 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
1240 DCHECK(SmiValuesAre31Bits());
1242 SmiToInteger64(dst, dst);
1247 void MacroAssembler::SmiTest(Register src) {
1253 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
// Compares a smi register against a smi constant (body elided in this
// excerpt).
1260 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
// Compares register |dst| against smi constant |src|. Non-zero constants are
// materialized into a register first (cmpp needs a register operand here).
// NOTE(review): the zero-constant fast path's body is elided in this excerpt.
1266 void MacroAssembler::Cmp(Register dst, Smi* src) {
1267 DCHECK(!dst.is(kScratchRegister));
1268 if (src->value() == 0) {
1271 Register constant_reg = GetSmiConstant(src);
1272 cmpp(dst, constant_reg);
// Compares a smi register against a smi in memory (body elided in this
// excerpt).
1277 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
// Compares a smi in memory against a smi register (body elided in this
// excerpt).
1284 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
// Compares a smi field in memory against a smi constant. With 32-bit smis
// the raw payload half of the field is compared against the untagged value.
1291 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
1293 if (SmiValuesAre32Bits()) {
1294 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1296 DCHECK(SmiValuesAre31Bits());
// 31-bit smis fit in 32 bits, so the tagged value is compared directly.
1297 cmpl(dst, Immediate(src));
// Compares a memory operand against a smi constant materialized in a
// register; the operand's address must not use that scratch register.
1302 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
1303 // The Operand cannot use the smi register.
1304 Register smi_reg = GetSmiConstant(src);
1305 DCHECK(!dst.AddressUsesRegister(smi_reg));
// Compares the untagged payload of smi field |dst| with 32-bit integer |src|.
1310 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
1311 if (SmiValuesAre32Bits()) {
// Read the payload half of the field directly — no untagging needed.
1312 cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
1314 DCHECK(SmiValuesAre31Bits());
1315 SmiToInteger32(kScratchRegister, dst);
1316 cmpl(kScratchRegister, src);
// Computes (smi value) * 2^power as an untagged 64-bit integer, combining the
// untag shift with the multiply shift into a single net shift.
// NOTE(review): parameter lines and branch structure are elided here.
1321 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
1327 SmiToInteger64(dst, src);
1333 if (power < kSmiShift) {
1334 sarp(dst, Immediate(kSmiShift - power));
1335 } else if (power > kSmiShift) {
1336 shlp(dst, Immediate(power - kSmiShift));
// Computes (smi value) / 2^power as an untagged 32-bit integer: a single
// logical right shift by (power + kSmiShift) both untags and divides.
1341 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
1344 DCHECK((0 <= power) && (power < 32));
1346 shrp(dst, Immediate(power + kSmiShift));
// The other smi representation is not supported by this helper.
1348 UNIMPLEMENTED(); // Not used.
// Sets dst = src1 | src2 if both are smis; otherwise jumps to |on_not_smis|.
// When |dst| aliases an input, the OR is computed in kScratchRegister first so
// the inputs survive a bailout.
1353 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
1355 Label::Distance near_jump) {
1356 if (dst.is(src1) || dst.is(src2)) {
1357 DCHECK(!src1.is(kScratchRegister));
1358 DCHECK(!src2.is(kScratchRegister));
1359 movp(kScratchRegister, src1);
1360 orp(kScratchRegister, src2);
// Smi-tag bits of an OR are clear only if both operands' tag bits are clear.
1361 JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
1362 movp(dst, kScratchRegister);
1366 JumpIfNotSmi(dst, on_not_smis, near_jump);
// Tests the smi tag bit of |src|; the returned condition (elided here) holds
// when the value is a smi.
1371 Condition MacroAssembler::CheckSmi(Register src) {
1372 STATIC_ASSERT(kSmiTag == 0);
1373 testb(src, Immediate(kSmiTagMask));
// Memory-operand variant of CheckSmi: tests the tag bit of the low byte.
1378 Condition MacroAssembler::CheckSmi(const Operand& src) {
1379 STATIC_ASSERT(kSmiTag == 0);
1380 testb(src, Immediate(kSmiTagMask));
// Checks that |src| is a smi AND non-negative. Rotating left by one moves the
// sign bit next to the tag bit so both can be tested with a single testb.
1385 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
1386 STATIC_ASSERT(kSmiTag == 0);
1387 // Test that both bits of the mask 0x8000000000000001 are zero.
1388 movp(kScratchRegister, src);
1389 rolp(kScratchRegister, Immediate(1));
1390 testb(kScratchRegister, Immediate(3));
// Checks whether both registers are smis with a single flag-setting test.
1395 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
1396 if (first.is(second)) {
1397 return CheckSmi(first);
1399 STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
1400 if (SmiValuesAre32Bits()) {
// Adding the tags of two smis keeps the low two bits zero; leal computes the
// sum without disturbing the inputs.
1401 leal(kScratchRegister, Operand(first, second, times_1, 0));
1402 testb(kScratchRegister, Immediate(0x03));
1404 DCHECK(SmiValuesAre31Bits());
// OR-ing the values leaves the tag bit set iff either input had it set.
1405 movl(kScratchRegister, first);
1406 orl(kScratchRegister, second);
1407 testb(kScratchRegister, Immediate(kSmiTagMask));
// Checks that both registers are non-negative smis: OR them, then use the
// rotate-by-one trick so the sign bit and tag bit are tested together.
1413 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
1415 if (first.is(second)) {
1416 return CheckNonNegativeSmi(first);
1418 movp(kScratchRegister, first);
1419 orp(kScratchRegister, second);
1420 rolp(kScratchRegister, Immediate(1));
1421 testl(kScratchRegister, Immediate(3));
// Checks whether at least one of |first|/|second| is a smi: AND-ing the
// values leaves the tag bit clear iff either input's tag bit is clear.
// |scratch| is clobbered; the copy order avoids overwriting a live input.
1426 Condition MacroAssembler::CheckEitherSmi(Register first,
1429 if (first.is(second)) {
1430 return CheckSmi(first);
1432 if (scratch.is(second)) {
1433 andl(scratch, first);
1435 if (!scratch.is(first)) {
1436 movl(scratch, first);
1438 andl(scratch, second);
1440 testb(scratch, Immediate(kSmiTagMask));
// Checks whether |src| holds the minimal smi value by comparing against
// kSmiConstantRegister (see the overflow comment below).
1445 Condition MacroAssembler::CheckIsMinSmi(Register src) {
1446 DCHECK(!src.is(kScratchRegister));
1447 // If we overflow by subtracting one, it's the minimal smi value.
1448 cmpp(src, kSmiConstantRegister);
// Checks whether a 32-bit integer fits in a smi. With 31-bit smis, values in
// [-2^30, 2^30) are valid; comparing against 0xc0000000 detects the range.
1453 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1454 if (SmiValuesAre32Bits()) {
1455 // A 32-bit integer value can always be converted to a smi.
1458 DCHECK(SmiValuesAre31Bits());
1459 cmpl(src, Immediate(0xc0000000));
// Checks whether an unsigned 32-bit integer fits in a smi. With 31-bit smis
// the top two bits must be clear (value < 2^30), hence the 0xc0000000 mask.
1465 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1466 if (SmiValuesAre32Bits()) {
1467 // An unsigned 32-bit integer value is valid as long as the high bit
1472 DCHECK(SmiValuesAre31Bits());
1473 testl(src, Immediate(0xc0000000));
// Converts a smi check into a 0/kSmiTagMask indicator value in |dst|.
// NOTE(review): the branch structure around these lines is elided.
1479 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1481 andl(dst, Immediate(kSmiTagMask));
1483 movl(dst, Immediate(kSmiTagMask));
// Memory-operand variant of CheckSmiToIndicator; guards against |dst| being
// part of |src|'s address before clobbering it.
// NOTE(review): interior lines are elided in this excerpt.
1489 void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1490 if (!(src.AddressUsesRegister(dst))) {
1491 movl(dst, Immediate(kSmiTagMask));
1495 andl(dst, Immediate(kSmiTagMask));
// Jumps to |on_valid| if the 32-bit integer in |src| fits in a smi.
1500 void MacroAssembler::JumpIfValidSmiValue(Register src,
1502 Label::Distance near_jump) {
1503 Condition is_valid = CheckInteger32ValidSmiValue(src);
1504 j(is_valid, on_valid, near_jump);
// Jumps to |on_invalid| if the 32-bit integer in |src| does NOT fit in a smi.
1508 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1510 Label::Distance near_jump) {
1511 Condition is_valid = CheckInteger32ValidSmiValue(src);
1512 j(NegateCondition(is_valid), on_invalid, near_jump);
// Jumps to |on_valid| if the unsigned 32-bit integer in |src| fits in a smi.
1516 void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
1518 Label::Distance near_jump) {
1519 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1520 j(is_valid, on_valid, near_jump);
// Jumps to |on_invalid| if the unsigned 32-bit value does NOT fit in a smi.
1524 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1526 Label::Distance near_jump) {
1527 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1528 j(NegateCondition(is_valid), on_invalid, near_jump);
// Jumps to |on_smi| if |src| holds a smi.
1532 void MacroAssembler::JumpIfSmi(Register src,
1534 Label::Distance near_jump) {
1535 Condition smi = CheckSmi(src);
1536 j(smi, on_smi, near_jump);
// Jumps to |on_not_smi| if |src| does not hold a smi.
1540 void MacroAssembler::JumpIfNotSmi(Register src,
1542 Label::Distance near_jump) {
1543 Condition smi = CheckSmi(src);
1544 j(NegateCondition(smi), on_not_smi, near_jump);
// Jumps to |on_not_smi_or_negative| unless |src| is a non-negative smi.
1548 void MacroAssembler::JumpUnlessNonNegativeSmi(
1549 Register src, Label* on_not_smi_or_negative,
1550 Label::Distance near_jump) {
1551 Condition non_negative_smi = CheckNonNegativeSmi(src);
1552 j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
// Jumps to |on_equals| if the smi in |src| equals the given smi constant.
1556 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1559 Label::Distance near_jump) {
1560 SmiCompare(src, constant);
1561 j(equal, on_equals, near_jump);
// Jumps to |on_not_both_smi| unless both registers hold smis.
1565 void MacroAssembler::JumpIfNotBothSmi(Register src1,
1567 Label* on_not_both_smi,
1568 Label::Distance near_jump) {
1569 Condition both_smi = CheckBothSmi(src1, src2);
1570 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
// Jumps to |on_not_both_smi| unless both registers are non-negative smis.
1574 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1576 Label* on_not_both_smi,
1577 Label::Distance near_jump) {
1578 Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1579 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
// Sets dst = src + constant with no overflow check. Small constants (which
// appear to be 1, 2, 4, 8 given the scale factors) are added via
// kSmiConstantRegister and leap with the matching scale; other constants go
// through GetSmiConstant/LoadSmiConstant.
// NOTE(review): the switch case labels are elided in this excerpt — the
// constant-to-scale mapping above is inferred, confirm against full source.
1583 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1584 if (constant->value() == 0) {
1589 } else if (dst.is(src)) {
1590 DCHECK(!dst.is(kScratchRegister));
1591 switch (constant->value()) {
1593 addp(dst, kSmiConstantRegister);
1596 leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1599 leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1602 leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1605 Register constant_reg = GetSmiConstant(constant);
1606 addp(dst, constant_reg);
1610 switch (constant->value()) {
1612 leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
1615 leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1618 leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1621 leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1624 LoadSmiConstant(dst, constant);
// Adds a smi constant to a smi field in memory (no overflow check). With
// 32-bit smis the untagged value is added directly to the payload half.
1632 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1633 if (constant->value() != 0) {
1634 if (SmiValuesAre32Bits()) {
1635 addl(Operand(dst, kSmiShift / kBitsPerByte),
1636 Immediate(constant->value()));
1638 DCHECK(SmiValuesAre31Bits());
// 31-bit smis: add the tagged constant to the whole field.
1639 addp(dst, Immediate(constant));
// Sets dst = src + constant with overflow handling controlled by |mode|:
// BAILOUT_ON_NO_OVERFLOW / BAILOUT_ON_OVERFLOW select which outcome jumps to
// |bailout_label|; PRESERVE_SOURCE_REGISTER restores src (== dst here) by
// subtracting the constant back before bailing out.
// NOTE(review): several interior lines are elided in this excerpt.
1645 void MacroAssembler::SmiAddConstant(Register dst,
1648 SmiOperationExecutionMode mode,
1649 Label* bailout_label,
1650 Label::Distance near_jump) {
1651 if (constant->value() == 0) {
1655 } else if (dst.is(src)) {
1656 DCHECK(!dst.is(kScratchRegister));
1657 LoadSmiConstant(kScratchRegister, constant);
1658 addp(dst, kScratchRegister);
1659 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
1660 j(no_overflow, bailout_label, near_jump);
1661 DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
// Undo the add so the caller sees the original source value.
1662 subp(dst, kScratchRegister);
1663 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
1664 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
1666 j(no_overflow, &done, Label::kNear);
1667 subp(dst, kScratchRegister);
1668 jmp(bailout_label, near_jump);
1671 // Bailout if overflow without reserving src.
1672 j(overflow, bailout_label, near_jump);
1675 CHECK(mode.IsEmpty());
// dst != src: source is preserved by construction, so only overflow matters.
1678 DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
1679 DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
1680 LoadSmiConstant(dst, constant);
1682 j(overflow, bailout_label, near_jump);
// Sets dst = src - constant with no overflow check. When dst != src, the
// subtraction is done by loading the negated constant and adding — except for
// Smi::kMinValue, whose negation is not representable.
1687 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1688 if (constant->value() == 0) {
1692 } else if (dst.is(src)) {
1693 DCHECK(!dst.is(kScratchRegister));
1694 Register constant_reg = GetSmiConstant(constant);
1695 subp(dst, constant_reg);
1697 if (constant->value() == Smi::kMinValue) {
1698 LoadSmiConstant(dst, constant);
1699 // Adding and subtracting the min-value gives the same result, it only
1700 // differs on the overflow bit, which we don't check here.
1703 // Subtract by adding the negation.
1704 LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
// Sets dst = src - constant with overflow handling controlled by |mode|
// (mirror image of the overflow-checked SmiAddConstant: restore is done by
// adding the constant back). The Smi::kMinValue case must subtract via a
// register because the negated constant is not representable.
// NOTE(review): several interior lines are elided in this excerpt.
1711 void MacroAssembler::SmiSubConstant(Register dst,
1714 SmiOperationExecutionMode mode,
1715 Label* bailout_label,
1716 Label::Distance near_jump) {
1717 if (constant->value() == 0) {
1721 } else if (dst.is(src)) {
1722 DCHECK(!dst.is(kScratchRegister));
1723 LoadSmiConstant(kScratchRegister, constant);
1724 subp(dst, kScratchRegister);
1725 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
1726 j(no_overflow, bailout_label, near_jump);
1727 DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
// Undo the subtraction so the caller sees the original source value.
1728 addp(dst, kScratchRegister);
1729 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
1730 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
1732 j(no_overflow, &done, Label::kNear);
1733 addp(dst, kScratchRegister);
1734 jmp(bailout_label, near_jump);
1737 // Bailout if overflow without reserving src.
1738 j(overflow, bailout_label, near_jump);
1741 CHECK(mode.IsEmpty());
1744 DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
1745 DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
1746 if (constant->value() == Smi::kMinValue) {
1747 DCHECK(!dst.is(kScratchRegister));
1749 LoadSmiConstant(kScratchRegister, constant);
1750 subp(dst, kScratchRegister);
1751 j(overflow, bailout_label, near_jump);
1753 // Subtract by adding the negation.
1754 LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1756 j(overflow, bailout_label, near_jump);
// Negates the smi in |src| into |dst|, jumping to |on_smi_result| only when
// the negation yields a valid smi. Negating 0 or Smi::kMinValue fails (result
// equals the input), in which case the source is restored and control falls
// through.
// NOTE(review): parameter lines and the branch structure between the two
// paths are elided in this excerpt.
1762 void MacroAssembler::SmiNeg(Register dst,
1764 Label* on_smi_result,
1765 Label::Distance near_jump) {
1767 DCHECK(!dst.is(kScratchRegister));
// Back up the source so it can be restored on failure.
1768 movp(kScratchRegister, src);
1769 negp(dst); // Low 32 bits are retained as zero by negation.
1770 // Test if result is zero or Smi::kMinValue.
1771 cmpp(dst, kScratchRegister);
1772 j(not_equal, on_smi_result, near_jump);
1773 movp(src, kScratchRegister);
1778 // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1779 j(not_equal, on_smi_result, near_jump);
// Template helper for overflow-checked smi addition (T is Register or
// Operand). When dst aliases src1, the add is undone before bailing out so
// the inputs are preserved; otherwise a plain add + overflow jump suffices.
// NOTE(review): template header and branch lines are elided in this excerpt.
1785 static void SmiAddHelper(MacroAssembler* masm,
1789 Label* on_not_smi_result,
1790 Label::Distance near_jump) {
1793 masm->addp(dst, src2);
1794 masm->j(no_overflow, &done, Label::kNear);
// Restore the original value of dst (== src1) before bailing out.
1796 masm->subp(dst, src2);
1797 masm->jmp(on_not_smi_result, near_jump);
1800 masm->movp(dst, src1);
1801 masm->addp(dst, src2);
1802 masm->j(overflow, on_not_smi_result, near_jump);
// Overflow-checked smi addition of two registers; jumps to
// |on_not_smi_result| if the sum overflows.
1807 void MacroAssembler::SmiAdd(Register dst,
1810 Label* on_not_smi_result,
1811 Label::Distance near_jump) {
1812 DCHECK_NOT_NULL(on_not_smi_result);
1813 DCHECK(!dst.is(src2));
1814 SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
// Overflow-checked smi addition with a memory second operand.
1818 void MacroAssembler::SmiAdd(Register dst,
1820 const Operand& src2,
1821 Label* on_not_smi_result,
1822 Label::Distance near_jump) {
1823 DCHECK_NOT_NULL(on_not_smi_result);
1824 DCHECK(!src2.AddressUsesRegister(dst));
1825 SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
// Unchecked smi addition: caller guarantees no overflow. In debug builds the
// sum is verified in kScratchRegister first; leap computes it non-destructively
// when dst differs from src1.
// NOTE(review): parameter lines and the dst==src1 path are elided here.
1829 void MacroAssembler::SmiAdd(Register dst,
1832 // No overflow checking. Use only when it's known that
1833 // overflowing is impossible.
1834 if (!dst.is(src1)) {
1835 if (emit_debug_code()) {
1836 movp(kScratchRegister, src1);
1837 addp(kScratchRegister, src2);
1838 Check(no_overflow, kSmiAdditionOverflow);
1840 leap(dst, Operand(src1, src2, times_1, 0));
1843 Assert(no_overflow, kSmiAdditionOverflow);
// Template helper for overflow-checked smi subtraction (T is Register or
// Operand), mirroring SmiAddHelper: the aliasing path undoes the subtraction
// before bailing out.
// NOTE(review): template header and branch lines are elided in this excerpt.
1849 static void SmiSubHelper(MacroAssembler* masm,
1853 Label* on_not_smi_result,
1854 Label::Distance near_jump) {
1857 masm->subp(dst, src2);
1858 masm->j(no_overflow, &done, Label::kNear);
// Restore the original value of dst (== src1) before bailing out.
1860 masm->addp(dst, src2);
1861 masm->jmp(on_not_smi_result, near_jump);
1864 masm->movp(dst, src1);
1865 masm->subp(dst, src2);
1866 masm->j(overflow, on_not_smi_result, near_jump);
// Overflow-checked smi subtraction of two registers.
1871 void MacroAssembler::SmiSub(Register dst,
1874 Label* on_not_smi_result,
1875 Label::Distance near_jump) {
1876 DCHECK_NOT_NULL(on_not_smi_result);
1877 DCHECK(!dst.is(src2));
1878 SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
// Overflow-checked smi subtraction with a memory second operand.
1882 void MacroAssembler::SmiSub(Register dst,
1884 const Operand& src2,
1885 Label* on_not_smi_result,
1886 Label::Distance near_jump) {
1887 DCHECK_NOT_NULL(on_not_smi_result);
1888 DCHECK(!src2.AddressUsesRegister(dst));
1889 SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
// Template helper for unchecked smi subtraction; debug builds assert that no
// overflow actually occurred.
// NOTE(review): the template header line is elided in this excerpt.
1894 static void SmiSubNoOverflowHelper(MacroAssembler* masm,
1898 // No overflow checking. Use only when it's known that
1899 // overflowing is impossible (e.g., subtracting two positive smis).
1900 if (!dst.is(src1)) {
1901 masm->movp(dst, src1);
1903 masm->subp(dst, src2);
1904 masm->Assert(no_overflow, kSmiSubtractionOverflow);
// Unchecked smi subtraction of two registers.
1908 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1909 DCHECK(!dst.is(src2));
1910 SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
// Unchecked smi subtraction with a memory second operand.
1914 void MacroAssembler::SmiSub(Register dst,
1916 const Operand& src2) {
1917 SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
// Multiplies two smis, jumping to |on_not_smi_result| on overflow or on a
// negative-zero result (zero product where either operand is negative, which
// must produce a heap number, not smi 0). When dst aliases src1, src1 is
// backed up in kScratchRegister and restored on the failure path.
// NOTE(review): several interior lines (the imul itself, branch structure
// between the aliasing and non-aliasing paths) are elided in this excerpt.
1921 void MacroAssembler::SmiMul(Register dst,
1924 Label* on_not_smi_result,
1925 Label::Distance near_jump) {
1926 DCHECK(!dst.is(src2));
1927 DCHECK(!dst.is(kScratchRegister));
1928 DCHECK(!src1.is(kScratchRegister));
1929 DCHECK(!src2.is(kScratchRegister));
1932 Label failure, zero_correct_result;
1933 movp(kScratchRegister, src1); // Create backup for later testing.
1934 SmiToInteger64(dst, src1);
1936 j(overflow, &failure, Label::kNear);
1938 // Check for negative zero result. If product is zero, and one
1939 // argument is negative, go to slow case.
1940 Label correct_result;
1942 j(not_zero, &correct_result, Label::kNear);
1944 movp(dst, kScratchRegister);
1946 // Result was positive zero.
1947 j(positive, &zero_correct_result, Label::kNear);
1949 bind(&failure); // Reused failure exit, restores src1.
1950 movp(src1, kScratchRegister);
1951 jmp(on_not_smi_result, near_jump);
1953 bind(&zero_correct_result);
1956 bind(&correct_result);
// Non-aliasing path: overflow bails out directly, no restore needed.
1958 SmiToInteger64(dst, src1);
1960 j(overflow, on_not_smi_result, near_jump);
1961 // Check for negative zero result. If product is zero, and one
1962 // argument is negative, go to slow case.
1963 Label correct_result;
1965 j(not_zero, &correct_result, Label::kNear);
1966 // One of src1 and src2 is zero, the check whether the other is
1968 movp(kScratchRegister, src1);
// XOR exposes the combined sign: negative iff exactly one operand is negative.
1969 xorp(kScratchRegister, src2);
1970 j(negative, on_not_smi_result, near_jump);
1971 bind(&correct_result);
// Divides smi src1 by smi src2, bailing out to |on_not_smi_result| for:
// zero divisor, Smi::kMinValue dividend with a negative divisor (idiv would
// fault on kMinValue / -1; the check deliberately overshoots to any negative
// divisor), negative-zero results, and non-zero remainders. Uses rax/rdx for
// idiv; src1 is backed up in kScratchRegister and restored on bailout.
// NOTE(review): several interior lines (the idiv/cdq instructions, label
// binds, remainder test) are elided in this excerpt.
1976 void MacroAssembler::SmiDiv(Register dst,
1979 Label* on_not_smi_result,
1980 Label::Distance near_jump) {
1981 DCHECK(!src1.is(kScratchRegister));
1982 DCHECK(!src2.is(kScratchRegister));
1983 DCHECK(!dst.is(kScratchRegister));
1984 DCHECK(!src2.is(rax));
1985 DCHECK(!src2.is(rdx));
1986 DCHECK(!src1.is(rdx));
1988 // Check for 0 divisor (result is +/-Infinity).
1990 j(zero, on_not_smi_result, near_jump);
1993 movp(kScratchRegister, src1);
1995 SmiToInteger32(rax, src1);
1996 // We need to rule out dividing Smi::kMinValue by -1, since that would
1997 // overflow in idiv and raise an exception.
1998 // We combine this with negative zero test (negative zero only happens
1999 // when dividing zero by a negative number).
2001 // We overshoot a little and go to slow case if we divide min-value
2002 // by any negative value, not just -1.
2004 testl(rax, Immediate(~Smi::kMinValue));
2005 j(not_zero, &safe_div, Label::kNear);
2008 j(positive, &safe_div, Label::kNear);
// Restore src1 before bailing out on the unsafe-dividend path.
2009 movp(src1, kScratchRegister);
2010 jmp(on_not_smi_result, near_jump);
2012 j(negative, on_not_smi_result, near_jump);
2016 SmiToInteger32(src2, src2);
2017 // Sign extend src1 into edx:eax.
// Re-tag the divisor after the division.
2020 Integer32ToSmi(src2, src2);
2021 // Check that the remainder is zero.
2025 j(zero, &smi_result, Label::kNear);
2026 movp(src1, kScratchRegister);
2027 jmp(on_not_smi_result, near_jump);
2030 j(not_zero, on_not_smi_result, near_jump);
2032 if (!dst.is(src1) && src1.is(rax)) {
// src1 lived in rax and was clobbered by the division; restore it.
2033 movp(src1, kScratchRegister);
2035 Integer32ToSmi(dst, rax);
// Computes src1 % src2 as a smi, bailing out to |on_not_smi_result| for:
// zero divisor, the Smi::kMinValue / -1 edge case (idiv overflow), and
// negative-zero results (zero remainder with negative dividend). Uses rax/rdx
// for idiv; inputs are re-tagged and src1 restored before any bailout.
// NOTE(review): several interior lines (the idiv/cdq instructions, label
// binds, dividend untag of rax restore path) are elided in this excerpt.
2039 void MacroAssembler::SmiMod(Register dst,
2042 Label* on_not_smi_result,
2043 Label::Distance near_jump) {
2044 DCHECK(!dst.is(kScratchRegister));
2045 DCHECK(!src1.is(kScratchRegister));
2046 DCHECK(!src2.is(kScratchRegister));
2047 DCHECK(!src2.is(rax));
2048 DCHECK(!src2.is(rdx));
2049 DCHECK(!src1.is(rdx));
2050 DCHECK(!src1.is(src2));
2053 j(zero, on_not_smi_result, near_jump);
2056 movp(kScratchRegister, src1);
2058 SmiToInteger32(rax, src1);
2059 SmiToInteger32(src2, src2);
2061 // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
2063 cmpl(rax, Immediate(Smi::kMinValue));
2064 j(not_equal, &safe_div, Label::kNear);
2065 cmpl(src2, Immediate(-1));
2066 j(not_equal, &safe_div, Label::kNear);
2067 // Retag inputs and go slow case.
2068 Integer32ToSmi(src2, src2);
2070 movp(src1, kScratchRegister);
2072 jmp(on_not_smi_result, near_jump);
2075 // Sign extend eax into edx:eax.
2078 // Restore smi tags on inputs.
2079 Integer32ToSmi(src2, src2);
2081 movp(src1, kScratchRegister);
2083 // Check for a negative zero result. If the result is zero, and the
2084 // dividend is negative, go slow to return a floating point negative zero.
2087 j(not_zero, &smi_result, Label::kNear);
2089 j(negative, on_not_smi_result, near_jump);
// idiv leaves the remainder in rdx; tag it as the result.
2091 Integer32ToSmi(dst, rdx);
// Bitwise NOT of a smi payload while keeping a valid smi tag: XOR with a
// mask whose tag/padding bits are chosen so they end up zero in the result.
// NOTE(review): the shift that positions the mask and the branch structure
// between the two smi layouts are elided in this excerpt.
2095 void MacroAssembler::SmiNot(Register dst, Register src) {
2096 DCHECK(!dst.is(kScratchRegister));
2097 DCHECK(!src.is(kScratchRegister));
2098 if (SmiValuesAre32Bits()) {
2099 // Set tag and padding bits before negating, so that they are zero
2101 movl(kScratchRegister, Immediate(~0));
2103 DCHECK(SmiValuesAre31Bits());
2104 movl(kScratchRegister, Immediate(1));
2107 xorp(dst, kScratchRegister);
2109 leap(dst, Operand(src, kScratchRegister, times_1, 0));
// Bitwise AND of two smis (tag stays valid; AND of zero tags is zero).
// NOTE(review): the body after the copy-into-dst prologue is elided.
2115 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
2116 DCHECK(!dst.is(src2));
2117 if (!dst.is(src1)) {
// Bitwise AND of a smi with a smi constant.
// NOTE(review): the zero-constant path body and the final AND of the
// non-aliasing path are elided in this excerpt.
2124 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
2125 if (constant->value() == 0) {
2127 } else if (dst.is(src)) {
2128 DCHECK(!dst.is(kScratchRegister));
2129 Register constant_reg = GetSmiConstant(constant);
2130 andp(dst, constant_reg);
2132 LoadSmiConstant(dst, constant);
// Bitwise OR of two smis.
// NOTE(review): the body after the copy-into-dst prologue is elided.
2138 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
2139 if (!dst.is(src1)) {
2140 DCHECK(!src1.is(src2));
// Bitwise OR of a smi with a smi constant.
// NOTE(review): the branch structure and the final OR of the non-aliasing
// path are elided in this excerpt.
2147 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
2149 DCHECK(!dst.is(kScratchRegister));
2150 Register constant_reg = GetSmiConstant(constant);
2151 orp(dst, constant_reg);
2153 LoadSmiConstant(dst, constant);
// Bitwise XOR of two smis.
// NOTE(review): the body after the copy-into-dst prologue is elided.
2159 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
2160 if (!dst.is(src1)) {
2161 DCHECK(!src1.is(src2));
// Bitwise XOR of a smi with a smi constant.
// NOTE(review): the branch structure and the final XOR of the non-aliasing
// path are elided in this excerpt.
2168 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
2170 DCHECK(!dst.is(kScratchRegister));
2171 Register constant_reg = GetSmiConstant(constant);
2172 xorp(dst, constant_reg);
2174 LoadSmiConstant(dst, constant);
// Arithmetic right shift of a smi by a constant: shift past the tag
// (shift_value + kSmiShift) then re-tag by shifting left kSmiShift.
2180 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
2183 DCHECK(is_uint5(shift_value));
2184 if (shift_value > 0) {
2186 sarp(dst, Immediate(shift_value + kSmiShift));
2187 shlp(dst, Immediate(kSmiShift));
// The other smi representation is not supported by this path.
2189 UNIMPLEMENTED(); // Not used.
// Left-shifts a smi by a constant. 32-bit smis cannot overflow into the tag
// (a plain shlq suffices); 31-bit smis untag, shift as a 32-bit integer,
// validate the result fits a smi, and re-tag, bailing out otherwise.
// NOTE(review): several interior lines are elided in this excerpt.
2195 void MacroAssembler::SmiShiftLeftConstant(Register dst,
2198 Label* on_not_smi_result,
2199 Label::Distance near_jump) {
2200 if (SmiValuesAre32Bits()) {
2204 if (shift_value > 0) {
2205 // Shift amount specified by lower 5 bits, not six as the shl opcode.
2206 shlq(dst, Immediate(shift_value & 0x1f));
2209 DCHECK(SmiValuesAre31Bits());
2211 UNIMPLEMENTED(); // Not used.
2213 SmiToInteger32(dst, src);
2214 shll(dst, Immediate(shift_value));
2215 JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
2216 Integer32ToSmi(dst, dst);
// Logical right shift of a smi by a constant. A zero shift can expose a
// negative value that an unsigned interpretation cannot represent as a smi,
// hence the negative-bailout; otherwise shift past the tag and re-tag
// (32-bit smis) or untag/shift/validate/re-tag (31-bit smis).
// NOTE(review): several interior lines are elided in this excerpt.
2222 void MacroAssembler::SmiShiftLogicalRightConstant(
2223 Register dst, Register src, int shift_value,
2224 Label* on_not_smi_result, Label::Distance near_jump) {
2225 // Logic right shift interprets its result as an *unsigned* number.
2227 UNIMPLEMENTED(); // Not used.
2229 if (shift_value == 0) {
2231 j(negative, on_not_smi_result, near_jump);
2233 if (SmiValuesAre32Bits()) {
2235 shrp(dst, Immediate(shift_value + kSmiShift));
2236 shlp(dst, Immediate(kSmiShift));
2238 DCHECK(SmiValuesAre31Bits());
2239 SmiToInteger32(dst, src);
2240 shrp(dst, Immediate(shift_value));
2241 JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
2242 Integer32ToSmi(dst, dst);
// Left-shifts smi src1 by the smi amount in src2 (shift count masked to 5
// bits, as for x86 shl). 32-bit smis shift the tagged value directly; 31-bit
// smis untag, shift, validate and re-tag, bailing out (with rcx restored from
// kScratchRegister if an input lived there) when the result is not a smi.
// NOTE(review): several interior lines (the shl itself, label binds, branch
// structure) are elided in this excerpt.
2248 void MacroAssembler::SmiShiftLeft(Register dst,
2251 Label* on_not_smi_result,
2252 Label::Distance near_jump) {
2253 if (SmiValuesAre32Bits()) {
2254 DCHECK(!dst.is(rcx));
2255 if (!dst.is(src1)) {
2258 // Untag shift amount.
2259 SmiToInteger32(rcx, src2);
2260 // Shift amount specified by lower 5 bits, not six as the shl opcode.
2261 andp(rcx, Immediate(0x1f));
2264 DCHECK(SmiValuesAre31Bits());
2265 DCHECK(!dst.is(kScratchRegister));
2266 DCHECK(!src1.is(kScratchRegister));
2267 DCHECK(!src2.is(kScratchRegister));
2268 DCHECK(!dst.is(src2));
2269 DCHECK(!dst.is(rcx));
2271 if (src1.is(rcx) || src2.is(rcx)) {
// rcx is needed for the variable shift count; preserve the input it held.
2272 movq(kScratchRegister, rcx);
2275 UNIMPLEMENTED(); // Not used.
2278 SmiToInteger32(dst, src1);
2279 SmiToInteger32(rcx, src2);
2281 JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
2282 // As src1 or src2 could not be dst, we do not need to restore them for
2284 if (src1.is(rcx) || src2.is(rcx)) {
2286 movq(src1, kScratchRegister);
2288 movq(src2, kScratchRegister);
2291 jmp(on_not_smi_result, near_jump);
2292 bind(&valid_result);
2293 Integer32ToSmi(dst, dst);
// Logical right shift of smi src1 by the smi amount in src2. Untags both,
// shifts, validates the unsigned result fits a smi and re-tags; on bailout,
// restores rcx-held inputs from kScratchRegister.
// NOTE(review): several interior lines (the shr itself, label binds) are
// elided in this excerpt.
2299 void MacroAssembler::SmiShiftLogicalRight(Register dst,
2302 Label* on_not_smi_result,
2303 Label::Distance near_jump) {
2304 DCHECK(!dst.is(kScratchRegister));
2305 DCHECK(!src1.is(kScratchRegister));
2306 DCHECK(!src2.is(kScratchRegister));
2307 DCHECK(!dst.is(src2));
2308 DCHECK(!dst.is(rcx));
2309 if (src1.is(rcx) || src2.is(rcx)) {
// rcx is needed for the variable shift count; preserve the input it held.
2310 movq(kScratchRegister, rcx);
2313 UNIMPLEMENTED(); // Not used.
2316 SmiToInteger32(dst, src1);
2317 SmiToInteger32(rcx, src2);
2319 JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
2320 // As src1 or src2 could not be dst, we do not need to restore them for
2322 if (src1.is(rcx) || src2.is(rcx)) {
2324 movq(src1, kScratchRegister);
2326 movq(src2, kScratchRegister);
2329 jmp(on_not_smi_result, near_jump);
2330 bind(&valid_result);
2331 Integer32ToSmi(dst, dst);
// Arithmetic right shift of smi src1 by the smi amount in src2. The result
// of a sar is always a valid smi value, so no bailout label is needed.
// NOTE(review): the sar instruction and copy-into-dst lines are elided.
2336 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2339 DCHECK(!dst.is(kScratchRegister));
2340 DCHECK(!src1.is(kScratchRegister));
2341 DCHECK(!src2.is(kScratchRegister));
2342 DCHECK(!dst.is(rcx));
2344 SmiToInteger32(rcx, src2);
2345 if (!dst.is(src1)) {
2348 SmiToInteger32(dst, dst);
2350 Integer32ToSmi(dst, dst);
// Given that exactly one of src1/src2 is a smi, selects the NON-smi one into
// |dst| branch-free: (src1 & kSmiTag) - 1 yields an all-ones mask iff src1 is
// the smi, which is used to conditionally pick src2 via XOR/AND/XOR. Jumps to
// |on_not_smis| if both turn out to be smis.
// NOTE(review): a few interior lines (the initial XOR into dst and the final
// XOR) are elided in this excerpt.
2354 void MacroAssembler::SelectNonSmi(Register dst,
2358 Label::Distance near_jump) {
2359 DCHECK(!dst.is(kScratchRegister));
2360 DCHECK(!src1.is(kScratchRegister));
2361 DCHECK(!src2.is(kScratchRegister));
2362 DCHECK(!dst.is(src1));
2363 DCHECK(!dst.is(src2));
2364 // Both operands must not be smis.
2366 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2367 Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
2369 STATIC_ASSERT(kSmiTag == 0);
2370 DCHECK_EQ(0, Smi::FromInt(0));
2371 movl(kScratchRegister, Immediate(kSmiTagMask));
2372 andp(kScratchRegister, src1);
2373 testl(kScratchRegister, src2);
2374 // If non-zero then both are smis.
2375 j(not_zero, on_not_smis, near_jump);
2377 // Exactly one operand is a smi.
2378 DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
2379 // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
2380 subp(kScratchRegister, Immediate(1));
2381 // If src1 is a smi, then scratch register all 1s, else it is all 0s.
2384 andp(dst, kScratchRegister);
2385 // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2387 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
// Converts a smi into an index usable in a memory operand: returns a register
// plus scale factor such that register*scale equals (smi value) << shift.
// 32-bit smis fold the untag into a net shift; 31-bit smis untag with sarq
// (times_1) or fold one shift step into the returned scale factor.
// NOTE(review): parameter lines and the copy-into-dst prologues are elided.
2391 SmiIndex MacroAssembler::SmiToIndex(Register dst,
2394 if (SmiValuesAre32Bits()) {
2395 DCHECK(is_uint6(shift));
2396 // There is a possible optimization if shift is in the range 60-63, but that
2397 // will (and must) never happen.
2401 if (shift < kSmiShift) {
2402 sarp(dst, Immediate(kSmiShift - shift));
2404 shlp(dst, Immediate(shift - kSmiShift));
2406 return SmiIndex(dst, times_1);
2408 DCHECK(SmiValuesAre31Bits());
2409 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2413 // We have to sign extend the index register to 64-bit as the SMI might
2416 if (shift == times_1) {
2417 sarq(dst, Immediate(kSmiShift));
2418 return SmiIndex(dst, times_1);
2420 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
// Like SmiToIndex but produces the NEGATED index for a positive smi (the
// negation instruction itself is elided in this excerpt).
2425 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2428 if (SmiValuesAre32Bits()) {
2429 // Register src holds a positive smi.
2430 DCHECK(is_uint6(shift));
2435 if (shift < kSmiShift) {
2436 sarp(dst, Immediate(kSmiShift - shift));
2438 shlp(dst, Immediate(shift - kSmiShift));
2440 return SmiIndex(dst, times_1);
2442 DCHECK(SmiValuesAre31Bits());
2443 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2448 if (shift == times_1) {
2449 sarq(dst, Immediate(kSmiShift));
2450 return SmiIndex(dst, times_1);
2452 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
// Adds the untagged value of smi field |src| to 32-bit register |dst|.
2457 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2458 if (SmiValuesAre32Bits()) {
2459 DCHECK_EQ(0, kSmiShift % kBitsPerByte);
// Read the payload half of the field directly — no untagging needed.
2460 addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2462 DCHECK(SmiValuesAre31Bits());
2463 SmiToInteger32(kScratchRegister, src);
2464 addl(dst, kScratchRegister);
// Pushes a smi constant: as an immediate if its tagged bits fit in 32 bits,
// otherwise via a register holding the constant (push elided in excerpt).
2469 void MacroAssembler::Push(Smi* source) {
2470 intptr_t smi = reinterpret_cast<intptr_t>(source);
2471 if (is_int32(smi)) {
2472 Push(Immediate(static_cast<int32_t>(smi)));
2474 Register constant = GetSmiConstant(source);
// Splits a full-width register into two smis (high and low halves, each
// tagged) and pushes them, so the raw bits survive a GC-visible stack slot.
// NOTE(review): the copy and push instructions are elided in this excerpt.
2480 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
2481 DCHECK(!src.is(scratch));
// Isolate the high bits of src, then tag them as a smi.
2484 shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2485 shlp(src, Immediate(kSmiShift));
2488 shlp(scratch, Immediate(kSmiShift));
// Reverses PushRegisterAsTwoSmis: pops the two smis, untags them, and
// recombines high and low halves into |dst|.
// NOTE(review): the pop and combine instructions are elided in this excerpt.
2493 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
2494 DCHECK(!dst.is(scratch));
2497 shrp(scratch, Immediate(kSmiShift));
2499 shrp(dst, Immediate(kSmiShift));
// Move the untagged high bits back to the top of the register.
2501 shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
// Tests a smi field in memory against a smi constant. With 32-bit smis the
// untagged value is tested against the payload half of the field.
2506 void MacroAssembler::Test(const Operand& src, Smi* source) {
2507 if (SmiValuesAre32Bits()) {
2508 testl(Operand(src, kIntSize), Immediate(source->value()));
2510 DCHECK(SmiValuesAre31Bits());
// 31-bit smis fit in 32 bits, so the tagged value is tested directly.
2511 testl(src, Immediate(source));
2516 // ----------------------------------------------------------------------------
// Looks up |object| (smi or heap number) in the isolate's number-string
// cache. On a hit, loads the cached string into |result| and falls through;
// on a miss, jumps to |not_found|. |result|, |scratch1| and |scratch2| are
// clobbered. The hash is the smi value itself for smis, and the XOR of the
// double's upper and lower words for heap numbers (see Heap comment below).
// NOTE(review): several interior lines (map check branch, some movp loads,
// label binds, counter line placement) are elided in this excerpt.
2519 void MacroAssembler::LookupNumberStringCache(Register object,
2524 // Use of registers. Register result is used as a temporary.
2525 Register number_string_cache = result;
2526 Register mask = scratch1;
2527 Register scratch = scratch2;
2529 // Load the number string cache.
2530 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2532 // Make the hash mask from the length of the number string cache. It
2533 // contains two elements (number and string) for each cache entry.
2535 mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2536 shrl(mask, Immediate(1));
2537 subp(mask, Immediate(1)); // Make mask.
2539 // Calculate the entry in the number string cache. The hash value in the
2540 // number string cache for smis is just the smi value, and the hash for
2541 // doubles is the xor of the upper and lower words. See
2542 // Heap::GetNumberStringCache.
2544 Label load_result_from_cache;
2545 JumpIfSmi(object, &is_smi);
2547 isolate()->factory()->heap_number_map(),
2551 STATIC_ASSERT(8 == kDoubleSize);
2552 movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2553 xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2554 andp(scratch, mask);
2555 // Each entry in string cache consists of two pointer sized fields,
2556 // but times_twice_pointer_size (multiplication by 16) scale factor
2557 // is not supported by addrmode on x64 platform.
2558 // So we have to premultiply entry index before lookup.
2559 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2561 Register index = scratch;
2562 Register probe = mask;
2564 FieldOperand(number_string_cache,
2567 FixedArray::kHeaderSize));
2568 JumpIfSmi(probe, not_found);
// Compare the doubles bit-exactly via SSE; NaN never matches the cache.
2569 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2570 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
2571 j(parity_even, not_found); // Bail out if NaN is involved.
2572 j(not_equal, not_found); // The cache did not contain this value.
2573 jmp(&load_result_from_cache);
// Smi path: the hash is the untagged smi value itself.
2576 SmiToInteger32(scratch, object);
2577 andp(scratch, mask);
2578 // Each entry in string cache consists of two pointer sized fields,
2579 // but times_twice_pointer_size (multiplication by 16) scale factor
2580 // is not supported by addrmode on x64 platform.
2581 // So we have to premultiply entry index before lookup.
2582 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2584 // Check if the entry is the smi we are looking for.
2586 FieldOperand(number_string_cache,
2589 FixedArray::kHeaderSize));
2590 j(not_equal, not_found);
2592 // Get the result from the cache.
2593 bind(&load_result_from_cache);
2595 FieldOperand(number_string_cache,
2598 FixedArray::kHeaderSize + kPointerSize));
2599 IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
// Jumps to |not_string| if |object| is a smi or its instance type is at or
// above FIRST_NONSTRING_TYPE. |object_map| receives the object's map.
2603 void MacroAssembler::JumpIfNotString(Register object,
2604 Register object_map,
2606 Label::Distance near_jump) {
2607 Condition is_smi = CheckSmi(object);
2608 j(is_smi, not_string, near_jump);
2609 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2610 j(above_equal, not_string, near_jump);
// Jumps to |on_fail| unless both objects are sequential one-byte strings.
// The two masked instance types are interleaved (scratch1 + scratch2*8) so a
// single compare checks both at once. Both scratch registers are clobbered.
// NOTE(review): the final cmpl line pairing with the Immediate on 2640 is
// elided in this excerpt.
2614 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
2615 Register first_object, Register second_object, Register scratch1,
2616 Register scratch2, Label* on_fail, Label::Distance near_jump) {
2617 // Check that both objects are not smis.
2618 Condition either_smi = CheckEitherSmi(first_object, second_object);
2619 j(either_smi, on_fail, near_jump);
2621 // Load instance type for both strings.
2622 movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2623 movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2624 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2625 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2627 // Check that both are flat one-byte strings.
2628 DCHECK(kNotStringTag != 0);
2629 const int kFlatOneByteStringMask =
2630 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2631 const int kFlatOneByteStringTag =
2632 kStringTag | kOneByteStringTag | kSeqStringTag;
2634 andl(scratch1, Immediate(kFlatOneByteStringMask));
2635 andl(scratch2, Immediate(kFlatOneByteStringMask));
2636 // Interleave the bits to check both scratch1 and scratch2 in one test.
2637 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2638 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2640 Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2641 j(not_equal, on_fail, near_jump);
2645 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
2646 Register instance_type, Register scratch, Label* failure,
2647 Label::Distance near_jump) {
2648 if (!scratch.is(instance_type)) {
2649 movl(scratch, instance_type);
2652 const int kFlatOneByteStringMask =
2653 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2655 andl(scratch, Immediate(kFlatOneByteStringMask));
2656 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2657 j(not_equal, failure, near_jump);
2661 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2662 Register first_object_instance_type, Register second_object_instance_type,
2663 Register scratch1, Register scratch2, Label* on_fail,
2664 Label::Distance near_jump) {
2665 // Load instance type for both strings.
2666 movp(scratch1, first_object_instance_type);
2667 movp(scratch2, second_object_instance_type);
2669 // Check that both are flat one-byte strings.
2670 DCHECK(kNotStringTag != 0);
2671 const int kFlatOneByteStringMask =
2672 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2673 const int kFlatOneByteStringTag =
2674 kStringTag | kOneByteStringTag | kSeqStringTag;
2676 andl(scratch1, Immediate(kFlatOneByteStringMask));
2677 andl(scratch2, Immediate(kFlatOneByteStringMask));
2678 // Interleave the bits to check both scratch1 and scratch2 in one test.
2679 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2680 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2682 Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2683 j(not_equal, on_fail, near_jump);
2688 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2689 T operand_or_register,
2690 Label* not_unique_name,
2691 Label::Distance distance) {
2692 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2694 masm->testb(operand_or_register,
2695 Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2696 masm->j(zero, &succeed, Label::kNear);
2697 masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2698 masm->j(not_equal, not_unique_name, distance);
2700 masm->bind(&succeed);
2704 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
2705 Label* not_unique_name,
2706 Label::Distance distance) {
2707 JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2711 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2712 Label* not_unique_name,
2713 Label::Distance distance) {
2714 JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
// NOTE(review): interior lines (bodies, else-branches, closing braces) are
// missing from this extract; comments describe only the visible code.

// Register-to-register move; body not visible in this extract.
2718 void MacroAssembler::Move(Register dst, Register src) {
// Loads |source| into |dst|: inline Smi immediate for Smis, otherwise a
// heap-object move (with new-space indirection, see MoveHeapObject).
2725 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2726 AllowDeferredHandleDereference smi_check;
2727 if (source->IsSmi()) {
2728 Move(dst, Smi::cast(*source));
2730 MoveHeapObject(dst, source);
// Same as above, but stores to a memory operand; heap objects go through
// kScratchRegister since there is no imm64-to-memory move.
2735 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2736 AllowDeferredHandleDereference smi_check;
2737 if (source->IsSmi()) {
2738 Move(dst, Smi::cast(*source));
2740 MoveHeapObject(kScratchRegister, source);
2741 movp(dst, kScratchRegister);
// Materializes a 32-bit constant in an XMM register. When the set bits form
// one contiguous run, builds the value with pcmpeqd (all-ones, not visible
// here) plus shifts instead of a load -- shorter encoding, no memory access.
2746 void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
2750 unsigned cnt = base::bits::CountPopulation32(src);
2751 unsigned nlz = base::bits::CountLeadingZeros32(src);
2752 unsigned ntz = base::bits::CountTrailingZeros32(src);
// nlz + cnt + ntz == 32 iff the ones form a single contiguous block.
2753 if (nlz + cnt + ntz == 32) {
2756 psrld(dst, 32 - cnt);
2758 pslld(dst, 32 - cnt);
2759 if (nlz != 0) psrld(dst, nlz);
// General case: go through a GPR immediate.
2762 movl(kScratchRegister, Immediate(src));
2763 movq(dst, kScratchRegister);
// 64-bit variant of the XMM constant materialization above, with the same
// contiguous-ones shift trick; the lower==0 case is handled separately
// (branch body not visible in this extract).
2769 void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
2770 uint32_t lower = static_cast<uint32_t>(src);
2771 uint32_t upper = static_cast<uint32_t>(src >> 32);
2775 unsigned cnt = base::bits::CountPopulation64(src);
2776 unsigned nlz = base::bits::CountLeadingZeros64(src);
2777 unsigned ntz = base::bits::CountTrailingZeros64(src);
2778 if (nlz + cnt + ntz == 64) {
2781 psrlq(dst, 64 - cnt);
2783 psllq(dst, 64 - cnt);
2784 if (nlz != 0) psrlq(dst, nlz);
2786 } else if (lower == 0) {
2790 movq(kScratchRegister, src);
2791 movq(dst, kScratchRegister);
// Compares |dst| against |source|: Smi compare inline, heap objects via
// kScratchRegister.
2797 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2798 AllowDeferredHandleDereference smi_check;
2799 if (source->IsSmi()) {
2800 Cmp(dst, Smi::cast(*source));
2802 MoveHeapObject(kScratchRegister, source);
2803 cmpp(dst, kScratchRegister);
// Memory-operand variant of the compare above.
2808 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2809 AllowDeferredHandleDereference smi_check;
2810 if (source->IsSmi()) {
2811 Cmp(dst, Smi::cast(*source));
2813 MoveHeapObject(kScratchRegister, source);
2814 cmpp(dst, kScratchRegister);
// Pushes |source|: Smis directly, heap objects via kScratchRegister.
2819 void MacroAssembler::Push(Handle<Object> source) {
2820 AllowDeferredHandleDereference smi_check;
2821 if (source->IsSmi()) {
2822 Push(Smi::cast(*source));
2824 MoveHeapObject(kScratchRegister, source);
2825 Push(kScratchRegister);
// Loads a heap object into |result|. New-space objects can move during GC,
// so they are embedded indirectly through a Cell (the cell address is the
// reloc-recorded constant and the object is loaded from it); old-space
// objects are embedded directly with EMBEDDED_OBJECT relocation.
2830 void MacroAssembler::MoveHeapObject(Register result,
2831 Handle<Object> object) {
2832 AllowDeferredHandleDereference using_raw_address;
2833 DCHECK(object->IsHeapObject());
2834 if (isolate()->heap()->InNewSpace(*object)) {
2835 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2836 Move(result, cell, RelocInfo::CELL);
// Dereference the cell to get the actual object.
2837 movp(result, Operand(result, 0));
2839 Move(result, object, RelocInfo::EMBEDDED_OBJECT);
// Loads the value held in |cell| into |dst|. The rax path (load_rax) uses
// the shorter moffs encoding; the guarding condition for that branch is not
// visible in this extract.
2844 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
2846 AllowDeferredHandleDereference embedding_raw_address;
2847 load_rax(cell.location(), RelocInfo::CELL);
2849 Move(dst, cell, RelocInfo::CELL);
2850 movp(dst, Operand(dst, 0));
// NOTE(review): else-branches and closing braces of several functions in
// this stack-operation family are missing from this extract. On x64 proper
// kPointerSize == kInt64Size, so the "else" paths below are the x32 ABI
// (32-bit pointers) versions, which emulate 32-bit push/pop with lea+mov.

// Removes |stack_elements| pointer-sized slots from the stack (no-op for 0).
2855 void MacroAssembler::Drop(int stack_elements) {
2856 if (stack_elements > 0) {
2857 addp(rsp, Immediate(stack_elements * kPointerSize));
// Drops |stack_elements| slots located below the return address, keeping the
// return address on top. The single-slot x64 case uses one popq into the
// return-address slot; otherwise the return address is spilled to |scratch|.
2862 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
2864 DCHECK(stack_elements > 0);
2865 if (kPointerSize == kInt64Size && stack_elements == 1) {
// Overwrite the slot under the return address with the return address
// itself and let the pop adjust rsp.
2866 popq(MemOperand(rsp, 0));
2870 PopReturnAddressTo(scratch);
2871 Drop(stack_elements);
2872 PushReturnAddressFrom(scratch);
// Pointer-sized push of a register (x32 path emulates a 4-byte push).
2876 void MacroAssembler::Push(Register src) {
2877 if (kPointerSize == kInt64Size) {
2880 // x32 uses 64-bit push for rbp in the prologue.
2881 DCHECK(src.code() != rbp.code());
2882 leal(rsp, Operand(rsp, -4));
2883 movp(Operand(rsp, 0), src);
// Pointer-sized push of a memory operand (via kScratchRegister on x32).
2888 void MacroAssembler::Push(const Operand& src) {
2889 if (kPointerSize == kInt64Size) {
2892 movp(kScratchRegister, src);
2893 leal(rsp, Operand(rsp, -4));
2894 movp(Operand(rsp, 0), kScratchRegister);
// Always pushes a full 8-byte quadword, regardless of kPointerSize.
2899 void MacroAssembler::PushQuad(const Operand& src) {
2900 if (kPointerSize == kInt64Size) {
2903 movp(kScratchRegister, src);
2904 pushq(kScratchRegister);
// Pointer-sized push of an immediate.
2909 void MacroAssembler::Push(Immediate value) {
2910 if (kPointerSize == kInt64Size) {
2913 leal(rsp, Operand(rsp, -4));
2914 movp(Operand(rsp, 0), value);
// Pushes a raw 32-bit immediate (sign-extended on the x64 path, which is
// not visible here).
2919 void MacroAssembler::PushImm32(int32_t imm32) {
2920 if (kPointerSize == kInt64Size) {
2923 leal(rsp, Operand(rsp, -4));
2924 movp(Operand(rsp, 0), Immediate(imm32));
// Pointer-sized pop into a register (x32 path emulates a 4-byte pop).
2929 void MacroAssembler::Pop(Register dst) {
2930 if (kPointerSize == kInt64Size) {
2933 // x32 uses 64-bit pop for rbp in the epilogue.
2934 DCHECK(dst.code() != rbp.code());
2935 movp(dst, Operand(rsp, 0));
2936 leal(rsp, Operand(rsp, 4));
// Pointer-sized pop into a memory operand. If |dst| itself addresses via
// kScratchRegister, kSmiConstantRegister is borrowed and restored after.
2941 void MacroAssembler::Pop(const Operand& dst) {
2942 if (kPointerSize == kInt64Size) {
2945 Register scratch = dst.AddressUsesRegister(kScratchRegister)
2946 ? kSmiConstantRegister : kScratchRegister;
2947 movp(scratch, Operand(rsp, 0));
2949 leal(rsp, Operand(rsp, 4));
2950 if (scratch.is(kSmiConstantRegister)) {
2951 // Restore kSmiConstantRegister.
2952 movp(kSmiConstantRegister,
2953 reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
2954 Assembler::RelocInfoNone());
// Always pops a full 8-byte quadword, regardless of kPointerSize.
2960 void MacroAssembler::PopQuad(const Operand& dst) {
2961 if (kPointerSize == kInt64Size) {
2964 popq(kScratchRegister);
2965 movp(dst, kScratchRegister);
// Loads one of SharedFunctionInfo's pseudo-Smi "special" int fields as a
// plain 32-bit integer. On x64 the field occupies the upper half of a
// smi-tagged 8-byte slot (hence movsxlq); on x32 it is a real Smi and must
// be untagged. The DCHECK pins the field to the special-field region and to
// odd int-slot alignment within it.
2970 void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
2973 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
2974 offset <= SharedFunctionInfo::kSize &&
2975 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
2976 if (kPointerSize == kInt64Size) {
2977 movsxlq(dst, FieldOperand(base, offset));
2979 movp(dst, FieldOperand(base, offset));
2980 SmiToInteger32(dst, dst);
// Tests a single bit of such a special field without loading the whole
// value: computes the byte containing the bit and testb's it directly.
// The x32 Smi-tag bit adjustment (inside the kInt32Size branch) is not
// fully visible in this extract.
2985 void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
2988 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
2989 offset <= SharedFunctionInfo::kSize &&
2990 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
2991 if (kPointerSize == kInt32Size) {
2992 // On x32, this field is represented by SMI.
2995 int byte_offset = bits / kBitsPerByte;
2996 int bit_in_byte = bits & (kBitsPerByte - 1);
2997 testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
// NOTE(review): the x64 fast paths of Jump(Operand)/Call(Operand) (direct
// jmp/call through the operand) are missing from this extract; the visible
// bodies are the x32 fallbacks through kScratchRegister.

// Indirect jump to an external (C++) address.
3001 void MacroAssembler::Jump(ExternalReference ext) {
3002 LoadAddress(kScratchRegister, ext);
3003 jmp(kScratchRegister);
// Jump through a memory operand.
3007 void MacroAssembler::Jump(const Operand& op) {
3008 if (kPointerSize == kInt64Size) {
3011 movp(kScratchRegister, op);
3012 jmp(kScratchRegister);
// Jump to an absolute address with relocation info.
3017 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
3018 Move(kScratchRegister, destination, rmode);
3019 jmp(kScratchRegister);
// Jump to a code object (relative rel32 jump recorded with rmode).
3023 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
3024 // TODO(X64): Inline this
3025 jmp(code_object, rmode);
// Byte size of the code emitted by Call(ExternalReference), used by the
// patcher and verified by the CHECK_EQ in Call below.
3029 int MacroAssembler::CallSize(ExternalReference ext) {
3030 // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
3031 return LoadAddressSize(ext) +
3032 Assembler::kCallScratchRegisterInstructionLength;
// Indirect call to an external address; asserts the emitted size matches
// CallSize (the debug-only guard around the assert is not visible here).
3036 void MacroAssembler::Call(ExternalReference ext) {
3038 int end_position = pc_offset() + CallSize(ext);
3040 LoadAddress(kScratchRegister, ext);
3041 call(kScratchRegister);
3043 CHECK_EQ(end_position, pc_offset());
// Call through a memory operand.
3048 void MacroAssembler::Call(const Operand& op) {
3049 if (kPointerSize == kInt64Size) {
3052 movp(kScratchRegister, op);
3053 call(kScratchRegister);
// Call an absolute address with relocation info; size-checked like above.
3058 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
3060 int end_position = pc_offset() + CallSize(destination);
3062 Move(kScratchRegister, destination, rmode);
3063 call(kScratchRegister);
3065 CHECK_EQ(pc_offset(), end_position);
// Call a code object (rel32 call), optionally tagged with an AST id for
// type-feedback; only code-target or code-age reloc modes are legal.
3070 void MacroAssembler::Call(Handle<Code> code_object,
3071 RelocInfo::Mode rmode,
3072 TypeFeedbackId ast_id) {
3074 int end_position = pc_offset() + CallSize(code_object);
3076 DCHECK(RelocInfo::IsCodeTarget(rmode) ||
3077 rmode == RelocInfo::CODE_AGE_SEQUENCE);
3078 call(code_object, rmode, ast_id);
3080 CHECK_EQ(end_position, pc_offset());
// Pushes all general registers that must be saved at a safepoint (the
// individual pushq instructions are missing from this extract). rsp/rbp and
// the three reserved registers (scratch, smi-constant, root) are excluded,
// leaving kNumSafepointSavedRegisters = 11; the remaining safepoint slots
// are reserved by adjusting rsp with lea so flags are untouched.
3085 void MacroAssembler::Pushad() {
3090 // Not pushing rsp or rbp.
3095 // r10 is kScratchRegister.
3097 // r12 is kSmiConstantRegister.
3098 // r13 is kRootRegister.
3101 STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
3102 // Use lea for symmetry with Popad.
3104 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3105 leap(rsp, Operand(rsp, -sp_delta));
// Inverse of Pushad (pop instructions not visible in this extract).
3109 void MacroAssembler::Popad() {
3110 // Popad must not change the flags, so use lea instead of addq.
3112 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3113 leap(rsp, Operand(rsp, sp_delta));
// Discards the whole Pushad frame without restoring any registers.
3128 void MacroAssembler::Dropad() {
3129 addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
3133 // Order general registers are pushed by Pushad:
3134 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
// Maps register code -> index of its slot in the Pushad frame (initializer
// values not visible in this extract).
3136 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
// Stores an immediate into a register's safepoint stack slot.
3156 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
3157 const Immediate& imm) {
3158 movp(SafepointRegisterSlot(dst), imm);
// Stores a register value into another register's safepoint stack slot.
3162 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
3163 movp(SafepointRegisterSlot(dst), src);
// Loads a value back out of a register's safepoint stack slot.
3167 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
3168 movp(dst, SafepointRegisterSlot(src));
// Computes the rsp-relative operand for |reg|'s slot in the Pushad frame.
3172 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
3173 return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
// NOTE(review): several lines of this handler machinery (the JS_ENTRY else
// branch pushing rbp/rsi, the code-object push, some binds) are missing
// from this extract; comments cover only the visible code.

// Builds a stack handler frame (FP, context, state, code, next) on the
// stack and links it as the isolate's current handler. The STATIC_ASSERTs
// pin the layout this code writes by hand.
3177 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3178 int handler_index) {
3179 // Adjust this code if not the case.
3180 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3182 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3183 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3184 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3185 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3186 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3188 // We will build up the handler from the bottom by pushing on the stack.
3189 // First push the frame pointer and context.
3190 if (kind == StackHandler::JS_ENTRY) {
3191 // The frame pointer does not point to a JS frame so we save NULL for
3192 // rbp. We expect the code throwing an exception to check rbp before
3193 // dereferencing it to restore the context.
3194 pushq(Immediate(0)); // NULL frame pointer.
3195 Push(Smi::FromInt(0)); // No context.
3201 // Push the state and the code object.
3203 StackHandler::IndexField::encode(handler_index) |
3204 StackHandler::KindField::encode(kind);
3205 Push(Immediate(state));
3208 // Link the current handler as the next handler.
3209 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3210 Push(ExternalOperand(handler_address));
3211 // Set this new handler as the current one.
3212 movp(ExternalOperand(handler_address), rsp);
// Unlinks the current handler: pops the "next" pointer back into the
// isolate's handler slot and drops the rest of the handler frame.
3216 void MacroAssembler::PopTryHandler() {
3217 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3218 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3219 Pop(ExternalOperand(handler_address));
3220 addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
// Computes the handler entry from the handler table of the code object in
// rdi (indexed by the IndexField of the state in rdx) and jumps to it.
3224 void MacroAssembler::JumpToHandlerEntry() {
3225 // Compute the handler entry address and jump to it. The handler table is
3226 // a fixed array of (smi-tagged) code offsets.
3227 // rax = exception, rdi = code object, rdx = state.
3228 movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
// Shift away the kind bits, leaving the handler-table index.
3229 shrp(rdx, Immediate(StackHandler::kKindWidth));
3231 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
3232 SmiToInteger64(rdx, rdx);
3233 leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
// Throws |value| via the topmost stack handler: unlinks it, restores
// context/frame pointer from the handler frame, and jumps to the handler
// entry.
3238 void MacroAssembler::Throw(Register value) {
3239 // Adjust this code if not the case.
3240 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3242 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3243 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3244 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3245 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3246 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3248 // The exception is expected in rax.
3249 if (!value.is(rax)) {
3252 // Drop the stack pointer to the top of the top handler.
3253 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3254 movp(rsp, ExternalOperand(handler_address));
3255 // Restore the next handler.
3256 Pop(ExternalOperand(handler_address));
3258 // Remove the code object and state, compute the handler address in rdi.
3259 Pop(rdi); // Code object.
3260 Pop(rdx); // Offset and state.
3262 // Restore the context and frame pointer.
3263 Pop(rsi); // Context.
3264 popq(rbp); // Frame pointer.
3266 // If the handler is a JS frame, restore the context to the frame.
3267 // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
3271 j(zero, &skip, Label::kNear);
3272 movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
3275 JumpToHandlerEntry();
// Throws |value| past all JS handlers: unwinds the handler chain until the
// topmost JS_ENTRY handler and dispatches there, clearing rbp/rsi.
3279 void MacroAssembler::ThrowUncatchable(Register value) {
3280 // Adjust this code if not the case.
3281 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3283 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3284 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3285 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3286 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3287 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3289 // The exception is expected in rax.
3290 if (!value.is(rax)) {
3293 // Drop the stack pointer to the top of the top stack handler.
3294 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3295 Load(rsp, handler_address);
3297 // Unwind the handlers until the top ENTRY handler is found.
3298 Label fetch_next, check_kind;
3299 jmp(&check_kind, Label::kNear);
3301 movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
3304 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3305 testl(Operand(rsp, StackHandlerConstants::kStateOffset),
3306 Immediate(StackHandler::KindField::kMask));
3307 j(not_zero, &fetch_next);
3309 // Set the top handler address to next handler past the top ENTRY handler.
3310 Pop(ExternalOperand(handler_address));
3312 // Remove the code object and state, compute the handler address in rdi.
3313 Pop(rdi); // Code object.
3314 Pop(rdx); // Offset and state.
3316 // Clear the context pointer and frame pointer (0 was saved in the handler).
3320 JumpToHandlerEntry();
// Plain return (ret instruction not visible in this extract).
3324 void MacroAssembler::Ret() {
// Returns and removes |bytes_dropped| argument bytes. The short-immediate
// `ret n` fast path (inside the is_uint16 branch) is not visible here; the
// fallback rewrites the stack manually via |scratch|.
3329 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
3330 if (is_uint16(bytes_dropped)) {
3333 PopReturnAddressTo(scratch);
3334 addp(rsp, Immediate(bytes_dropped));
3335 PushReturnAddressFrom(scratch);
// x87 compare helper; body not visible in this extract.
3341 void MacroAssembler::FCmp() {
// Loads |heap_object|'s map into |map| and compares its instance type
// against |type| (flags set for the caller's conditional jump).
3347 void MacroAssembler::CmpObjectType(Register heap_object,
3350 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3351 CmpInstanceType(map, type);
// Compares the instance-type byte of |map| against |type|.
3355 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3356 cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
3357 Immediate(static_cast<int8_t>(type)));
// These three checks rely on the fast ElementsKind values being the low,
// contiguous range 0..3 (asserted below) so a single unsigned byte compare
// of Map::kBitField2Offset classifies the kind.

// Jumps to |fail| unless the map has any fast (smi or object, holey or
// packed) elements kind.
3361 void MacroAssembler::CheckFastElements(Register map,
3363 Label::Distance distance) {
3364 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3365 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3366 STATIC_ASSERT(FAST_ELEMENTS == 2);
3367 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3368 cmpb(FieldOperand(map, Map::kBitField2Offset),
3369 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3370 j(above, fail, distance);
// Jumps to |fail| unless the map has fast *object* elements: the kind must
// be above the smi-elements range but within the fast range (two compares
// bracket the window).
3374 void MacroAssembler::CheckFastObjectElements(Register map,
3376 Label::Distance distance) {
3377 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3378 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3379 STATIC_ASSERT(FAST_ELEMENTS == 2);
3380 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3381 cmpb(FieldOperand(map, Map::kBitField2Offset),
3382 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3383 j(below_equal, fail, distance);
3384 cmpb(FieldOperand(map, Map::kBitField2Offset),
3385 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3386 j(above, fail, distance);
// Jumps to |fail| unless the map has fast *smi* elements (kind 0 or 1).
3390 void MacroAssembler::CheckFastSmiElements(Register map,
3392 Label::Distance distance) {
3393 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3394 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3395 cmpb(FieldOperand(map, Map::kBitField2Offset),
3396 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3397 j(above, fail, distance);
// NOTE(review): several lines (parameter declarations, some binds/jumps)
// are missing from this extract; comments describe only the visible code.

// Stores a Smi or HeapNumber into a FixedDoubleArray slot as a raw double.
// Non-numbers go to |maybe_lost_precision|-style fail label (via CheckMap);
// NaNs are canonicalized so the array never holds signaling/various NaN
// bit patterns that could collide with the hole representation.
3401 void MacroAssembler::StoreNumberToDoubleElements(
3402 Register maybe_number,
3405 XMMRegister xmm_scratch,
3407 int elements_offset) {
3408 Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
3410 JumpIfSmi(maybe_number, &smi_value, Label::kNear);
3412 CheckMap(maybe_number,
3413 isolate()->factory()->heap_number_map(),
3417 // Double value, canonicalize NaN.
// Compare the upper 32 bits of the double against the NaN/Infinity
// boundary to detect values needing canonicalization.
3418 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
3419 cmpl(FieldOperand(maybe_number, offset),
3420 Immediate(kNaNOrInfinityLowerBoundUpper32));
3421 j(greater_equal, &maybe_nan, Label::kNear);
3424 movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
3425 bind(&have_double_value);
3426 movsd(FieldOperand(elements, index, times_8,
3427 FixedDoubleArray::kHeaderSize - elements_offset),
3432 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3433 // it's an Infinity, and the non-NaN code path applies.
3434 j(greater, &is_nan, Label::kNear);
3435 cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
3438 // Convert all NaNs to the same canonical NaN value when they are stored in
3439 // the double array.
3440 Set(kScratchRegister,
3442 FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
3443 movq(xmm_scratch, kScratchRegister);
3444 jmp(&have_double_value, Label::kNear);
3447 // Value is a smi. convert to a double and store.
3448 // Preserve original value.
3449 SmiToInteger32(kScratchRegister, maybe_number);
3450 Cvtlsi2sd(xmm_scratch, kScratchRegister);
3451 movsd(FieldOperand(elements, index, times_8,
3452 FixedDoubleArray::kHeaderSize - elements_offset),
// Sets flags by comparing |obj|'s map word against |map|.
3458 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3459 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
// Jumps to |fail| unless |obj|'s map equals |map|; the optional Smi check
// guards the map load (the final j(not_equal, fail) is not visible here).
3463 void MacroAssembler::CheckMap(Register obj,
3466 SmiCheckType smi_check_type) {
3467 if (smi_check_type == DO_SMI_CHECK) {
3468 JumpIfSmi(obj, fail);
3471 CompareMap(obj, map);
// Clamps a 32-bit integer in |reg| to [0, 255] in place, branch-free on the
// out-of-range path: setcc/decb turns "negative" into 0x00 and "too large
// positive" into 0xFF.
3476 void MacroAssembler::ClampUint8(Register reg) {
3478 testl(reg, Immediate(0xFFFFFF00));
// Already in [0, 255]: nothing to do.
3479 j(zero, &done, Label::kNear);
3480 setcc(negative, reg); // 1 if negative, 0 if positive.
3481 decb(reg); // 0 if negative, 255 if positive.
// Clamps the double in |input_reg| to an integer in [0, 255] in
// |result_reg|. cvtsd2si overflow (result == INT_MIN, detected via the
// cmpl/overflow trick) routes to |conv_failure|, where NaN maps to 0 and
// +overflow maps to 255.
3486 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3487 XMMRegister temp_xmm_reg,
3488 Register result_reg) {
3491 xorps(temp_xmm_reg, temp_xmm_reg);
3492 cvtsd2si(result_reg, input_reg);
3493 testl(result_reg, Immediate(0xFFFFFF00));
3494 j(zero, &done, Label::kNear);
// cmpl against 1 overflows only for INT_MIN, the cvtsd2si error value.
3495 cmpl(result_reg, Immediate(1));
3496 j(overflow, &conv_failure, Label::kNear);
// In-range conversion failed the 0..255 test: clamp by sign.
3497 movl(result_reg, Immediate(0));
3498 setcc(sign, result_reg);
3499 subl(result_reg, Immediate(1));
3500 andl(result_reg, Immediate(255));
3501 jmp(&done, Label::kNear);
3502 bind(&conv_failure);
// Compare input against zero: NaN/below-zero stay 0, otherwise clamp high.
3504 ucomisd(input_reg, temp_xmm_reg);
3505 j(below, &done, Label::kNear);
3506 Set(result_reg, 255);
// Converts an unsigned 32-bit value (upper half of |src| must already be
// zero, asserted in debug code) to a double via the 64-bit signed convert.
3511 void MacroAssembler::LoadUint32(XMMRegister dst,
3513 if (FLAG_debug_code) {
3514 cmpq(src, Immediate(0xffffffff));
3515 Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3517 cvtqsi2sd(dst, src);
// Slow-path ECMA-262 ToInt32 truncation: calls the DoubleToIStub on a
// double stored at [input_reg + offset].
3521 void MacroAssembler::SlowTruncateToI(Register result_reg,
3524 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
3525 call(stub.GetCode(), RelocInfo::CODE_TARGET);
// Truncates the HeapNumber in |input_reg| to an int32 in |result_reg|.
// Fast path: cvttsd2siq; the cmpq/no_overflow test detects the 0x80000000..
// sentinel that cvttsd2siq produces on failure and falls back to the stub
// (spilling the double to the stack when input and result registers alias).
3529 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3530 Register input_reg) {
3532 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3533 cvttsd2siq(result_reg, xmm0);
3534 cmpq(result_reg, Immediate(1));
3535 j(no_overflow, &done, Label::kNear);
3538 if (input_reg.is(result_reg)) {
3539 subp(rsp, Immediate(kDoubleSize));
3540 movsd(MemOperand(rsp, 0), xmm0);
3541 SlowTruncateToI(result_reg, rsp, 0);
3542 addp(rsp, Immediate(kDoubleSize));
3544 SlowTruncateToI(result_reg, input_reg);
3548 // Keep our invariant that the upper 32 bits are zero.
3549 movl(result_reg, result_reg);
// Same truncation for a double already in an XMM register; the slow path
// always goes through a stack slot.
3553 void MacroAssembler::TruncateDoubleToI(Register result_reg,
3554 XMMRegister input_reg) {
3556 cvttsd2siq(result_reg, input_reg);
3557 cmpq(result_reg, Immediate(1));
3558 j(no_overflow, &done, Label::kNear);
3560 subp(rsp, Immediate(kDoubleSize));
3561 movsd(MemOperand(rsp, 0), input_reg);
3562 SlowTruncateToI(result_reg, rsp, 0);
3563 addp(rsp, Immediate(kDoubleSize));
3566 // Keep our invariant that the upper 32 bits are zero.
3567 movl(result_reg, result_reg);
// Exact double -> int32 conversion. Converts, converts back, and compares:
// inequality means precision loss, parity means NaN. With
// FAIL_ON_MINUS_ZERO, a zero result additionally checks the sign bit of
// the original double via movmskpd and jumps to |minus_zero| for -0.0.
3571 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
3572 XMMRegister scratch,
3573 MinusZeroMode minus_zero_mode,
3574 Label* lost_precision, Label* is_nan,
3575 Label* minus_zero, Label::Distance dst) {
3576 cvttsd2si(result_reg, input_reg);
3577 Cvtlsi2sd(xmm0, result_reg);
3578 ucomisd(xmm0, input_reg);
3579 j(not_equal, lost_precision, dst);
3580 j(parity_even, is_nan, dst); // NaN.
3581 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3583 // The integer converted back is equal to the original. We
3584 // only have to test if we got -0 as an input.
3585 testl(result_reg, result_reg);
3586 j(not_zero, &done, Label::kNear);
3587 movmskpd(result_reg, input_reg);
3588 // Bit 0 contains the sign of the double in input_reg.
3589 // If input was positive, we are ok and return 0, otherwise
3590 // jump to minus_zero.
3591 andl(result_reg, Immediate(1));
3592 j(not_zero, minus_zero, dst);
// Loads the map's descriptor array into |descriptors|.
3598 void MacroAssembler::LoadInstanceDescriptors(Register map,
3599 Register descriptors) {
3600 movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
// Extracts NumberOfOwnDescriptors from the map's bit-field-3 word.
3604 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3605 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3606 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
// Extracts EnumLength from bit-field-3 as a Smi. The mask alone suffices
// because the field starts at bit 0 (asserted).
3610 void MacroAssembler::EnumLength(Register dst, Register map) {
3611 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3612 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3613 andl(dst, Immediate(Map::EnumLengthBits::kMask));
3614 Integer32ToSmi(dst, dst);
// Tail-jumps to |success| when |obj|'s map equals |map|; otherwise falls
// through (the fail-label bind is not visible in this extract).
3618 void MacroAssembler::DispatchMap(Register obj,
3621 Handle<Code> success,
3622 SmiCheckType smi_check_type) {
3624 if (smi_check_type == DO_SMI_CHECK) {
3625 JumpIfSmi(obj, &fail);
3627 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3628 j(equal, success, RelocInfo::CODE_TARGET);
// Debug-only assertions: each emits checking code only when
// emit_debug_code() is true and aborts with the given BailoutReason on
// failure. Some label binds/closing braces are missing from this extract.

// Aborts unless |object| is a Smi or a HeapNumber.
3634 void MacroAssembler::AssertNumber(Register object) {
3635 if (emit_debug_code()) {
3637 Condition is_smi = CheckSmi(object);
3638 j(is_smi, &ok, Label::kNear);
3639 Cmp(FieldOperand(object, HeapObject::kMapOffset),
3640 isolate()->factory()->heap_number_map());
3641 Check(equal, kOperandIsNotANumber);
// Aborts if |object| is a Smi.
3647 void MacroAssembler::AssertNotSmi(Register object) {
3648 if (emit_debug_code()) {
3649 Condition is_smi = CheckSmi(object);
3650 Check(NegateCondition(is_smi), kOperandIsASmi);
// Aborts unless |object| is a Smi.
3655 void MacroAssembler::AssertSmi(Register object) {
3656 if (emit_debug_code()) {
3657 Condition is_smi = CheckSmi(object);
3658 Check(is_smi, kOperandIsNotASmi);
// Memory-operand variant of AssertSmi.
3663 void MacroAssembler::AssertSmi(const Operand& object) {
3664 if (emit_debug_code()) {
3665 Condition is_smi = CheckSmi(object);
3666 Check(is_smi, kOperandIsNotASmi);
// Aborts unless the upper 32 bits of |int32_register| are zero (compares
// against 2^32 held in kScratchRegister).
3671 void MacroAssembler::AssertZeroExtended(Register int32_register) {
3672 if (emit_debug_code()) {
3673 DCHECK(!int32_register.is(kScratchRegister));
3674 movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3675 cmpq(kScratchRegister, int32_register);
3676 Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
// Aborts unless |object| is a non-Smi string. Clobbers |object| with its
// map (the push/pop preserving it is not visible in this extract --
// verify against the full file).
3681 void MacroAssembler::AssertString(Register object) {
3682 if (emit_debug_code()) {
3683 testb(object, Immediate(kSmiTagMask));
3684 Check(not_equal, kOperandIsASmiAndNotAString);
3686 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3687 CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3689 Check(below, kOperandIsNotAString);
// Aborts unless |object| is a non-Smi Name (instance type <= LAST_NAME_TYPE).
3694 void MacroAssembler::AssertName(Register object) {
3695 if (emit_debug_code()) {
3696 testb(object, Immediate(kSmiTagMask));
3697 Check(not_equal, kOperandIsASmiAndNotAName);
3699 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3700 CmpInstanceType(object, LAST_NAME_TYPE);
3702 Check(below_equal, kOperandIsNotAName);
// Aborts unless |object| is undefined or an AllocationSite.
3707 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3708 if (emit_debug_code()) {
3709 Label done_checking;
3710 AssertNotSmi(object);
3711 Cmp(object, isolate()->factory()->undefined_value());
3712 j(equal, &done_checking);
3713 Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3714 Assert(equal, kExpectedUndefinedOrCell);
3715 bind(&done_checking);
// Aborts with |reason| unless |src| equals the given root-list value.
3720 void MacroAssembler::AssertRootValue(Register src,
3721 Heap::RootListIndex root_value_index,
3722 BailoutReason reason) {
3723 if (emit_debug_code()) {
3724 DCHECK(!src.is(kScratchRegister));
3725 LoadRoot(kScratchRegister, root_value_index);
3726 cmpp(src, kScratchRegister);
3727 Check(equal, reason);
// Loads map and instance type of |heap_object|, tests the string bit, and
// returns the condition on which the object IS a string (the return
// statement is not visible in this extract).
3733 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3735 Register instance_type) {
3736 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3737 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3738 STATIC_ASSERT(kNotStringTag != 0);
3739 testb(instance_type, Immediate(kIsNotStringMask));
// Same shape for Name: compares the instance type against LAST_NAME_TYPE
// (return statement not visible in this extract).
3744 Condition MacroAssembler::IsObjectNameType(Register heap_object,
3746 Register instance_type) {
3747 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3748 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3749 cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
// Loads |function|'s prototype (or initial map's prototype) into |result|.
// With |miss_on_bound_function|, bails out (miss label not visible here)
// for smis, non-functions and bound functions. Objects whose map has the
// kHasNonInstancePrototype bit fetch the prototype from the constructor
// field instead. Several jump/bind lines are missing from this extract.
3754 void MacroAssembler::TryGetFunctionPrototype(Register function,
3757 bool miss_on_bound_function) {
3759 if (miss_on_bound_function) {
3760 // Check that the receiver isn't a smi.
3761 testl(function, Immediate(kSmiTagMask));
3764 // Check that the function really is a function.
3765 CmpObjectType(function, JS_FUNCTION_TYPE, result);
3768 movp(kScratchRegister,
3769 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3770 // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
3772 TestBitSharedFunctionInfoSpecialField(kScratchRegister,
3773 SharedFunctionInfo::kCompilerHintsOffset,
3774 SharedFunctionInfo::kBoundFunction);
3777 // Make sure that the function has an instance prototype.
3778 testb(FieldOperand(result, Map::kBitFieldOffset),
3779 Immediate(1 << Map::kHasNonInstancePrototype));
3780 j(not_zero, &non_instance, Label::kNear);
3783 // Get the prototype or initial map from the function.
3785 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3787 // If the prototype or initial map is the hole, don't return it and
3788 // simply miss the cache instead. This will allow us to allocate a
3789 // prototype object on-demand in the runtime system.
3790 CompareRoot(result, Heap::kTheHoleValueRootIndex);
3793 // If the function does not have an initial map, we're done.
3795 CmpObjectType(result, MAP_TYPE, kScratchRegister);
3796 j(not_equal, &done, Label::kNear);
3798 // Get the prototype from the initial map.
3799 movp(result, FieldOperand(result, Map::kPrototypeOffset));
3801 if (miss_on_bound_function) {
3802 jmp(&done, Label::kNear);
3804 // Non-instance prototype: Fetch prototype from constructor field
3806 bind(&non_instance);
3807 movp(result, FieldOperand(result, Map::kConstructorOffset));
// Stats-counter helpers: all are no-ops unless native code counters are
// enabled at runtime. The value-specific branch headers (value == 1) are
// not visible in this extract.

// Sets a stats counter to an absolute value.
3815 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3816 if (FLAG_native_code_counters && counter->Enabled()) {
3817 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3818 movl(counter_operand, Immediate(value));
// Increments a stats counter; uses incl for the common value == 1 case.
3823 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3825 if (FLAG_native_code_counters && counter->Enabled()) {
3826 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3828 incl(counter_operand);
3830 addl(counter_operand, Immediate(value));
// Decrements a stats counter; uses decl for the common value == 1 case.
3836 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3838 if (FLAG_native_code_counters && counter->Enabled()) {
3839 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3841 decl(counter_operand);
3843 subl(counter_operand, Immediate(value));
// Calls the debug-break runtime entry through the CEntry stub with zero
// arguments (rax = argc, rbx = runtime entry).
3849 void MacroAssembler::DebugBreak() {
3850 Set(rax, 0); // No arguments.
3851 LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3852 CEntryStub ces(isolate(), 1);
3853 DCHECK(AllowThisStubCall(&ces));
3854 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
// Invokes |code| (a register holding a code entry address) after running the
// argument-count adaptation prologue. When the prologue proves a definite
// mismatch it has already tail-called the adaptor, so the call/jump here is
// skipped. |flag| selects call vs. tail-jump; |call_wrapper| brackets the
// call for the profiler.
3858 void MacroAssembler::InvokeCode(Register code,
3859 const ParameterCount& expected,
3860 const ParameterCount& actual,
3862 const CallWrapper& call_wrapper) {
3863 // You can't call a function without a valid frame.
3864 DCHECK(flag == JUMP_FUNCTION || has_frame());
3867 bool definitely_mismatches = false;
3868 InvokePrologue(expected,
3870 Handle<Code>::null(),
3873 &definitely_mismatches,
3877 if (!definitely_mismatches) {
3878 if (flag == CALL_FUNCTION) {
3879 call_wrapper.BeforeCall(CallSize(code));
3881 call_wrapper.AfterCall();
3883 DCHECK(flag == JUMP_FUNCTION);
// Invokes the JSFunction in |function| (must be rdi, per calling convention).
// Loads the expected argument count from the SharedFunctionInfo into rbx,
// the context into rsi, and the code entry into rdx, then delegates to
// InvokeCode for adaptation and the actual call/jump.
3891 void MacroAssembler::InvokeFunction(Register function,
3892 const ParameterCount& actual,
3894 const CallWrapper& call_wrapper) {
3895 // You can't call a function without a valid frame.
3896 DCHECK(flag == JUMP_FUNCTION || has_frame());
3898 DCHECK(function.is(rdi));
3899 movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3900 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3901 LoadSharedFunctionInfoSpecialField(rbx, rdx,
3902 SharedFunctionInfo::kFormalParameterCountOffset);
3903 // Advances rdx to the end of the Code object header, to the start of
3904 // the executable code.
3905 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3907 ParameterCount expected(rbx);
3908 InvokeCode(rdx, expected, actual, flag, call_wrapper);
// Variant of InvokeFunction where the caller already knows the expected
// argument count, so the SharedFunctionInfo lookup is skipped. |function|
// must be rdi; loads context into rsi and code entry into rdx.
3912 void MacroAssembler::InvokeFunction(Register function,
3913 const ParameterCount& expected,
3914 const ParameterCount& actual,
3916 const CallWrapper& call_wrapper) {
3917 // You can't call a function without a valid frame.
3918 DCHECK(flag == JUMP_FUNCTION || has_frame());
3920 DCHECK(function.is(rdi));
3921 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3922 // Advances rdx to the end of the Code object header, to the start of
3923 // the executable code.
3924 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3926 InvokeCode(rdx, expected, actual, flag, call_wrapper);
// Handle-based convenience overload: materializes the function into rdi and
// forwards to the register-based InvokeFunction above.
3930 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3931 const ParameterCount& expected,
3932 const ParameterCount& actual,
3934 const CallWrapper& call_wrapper) {
3935 Move(rdi, function);
3936 InvokeFunction(rdi, expected, actual, flag, call_wrapper);
// Shared argument-count-adaptation prologue for all invoke paths. Compares
// expected vs. actual argument counts (immediate/register combinations) and,
// on a possible mismatch, routes the call through the ArgumentsAdaptor
// trampoline. On exit:
//  - rax holds the actual count, rbx the expected count (adaptor protocol);
//  - *definitely_mismatches is true when the mismatch is statically known,
//    in which case control never falls through (the adaptor is tail-jumped
//    or the caller's code after the adaptor call is skipped via |done|).
// |code_constant|/|code_register| supply the callee code object expected in
// rdx by the adaptor.
3940 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3941 const ParameterCount& actual,
3942 Handle<Code> code_constant,
3943 Register code_register,
3945 bool* definitely_mismatches,
3947 Label::Distance near_jump,
3948 const CallWrapper& call_wrapper) {
3949 bool definitely_matches = false;
3950 *definitely_mismatches = false;
3952 if (expected.is_immediate()) {
3953 DCHECK(actual.is_immediate());
3954 if (expected.immediate() == actual.immediate()) {
3955 definitely_matches = true;
3957 Set(rax, actual.immediate());
3958 if (expected.immediate() ==
3959 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3960 // Don't worry about adapting arguments for built-ins that
3961 // don't want that done. Skip adaptation code by making it look
3962 // like we have a match between expected and actual number of
3964 definitely_matches = true;
3966 *definitely_mismatches = true;
3967 Set(rbx, expected.immediate());
3971 if (actual.is_immediate()) {
3972 // Expected is in register, actual is immediate. This is the
3973 // case when we invoke function values without going through the
3975 cmpp(expected.reg(), Immediate(actual.immediate()));
3976 j(equal, &invoke, Label::kNear);
3977 DCHECK(expected.reg().is(rbx));
3978 Set(rax, actual.immediate());
3979 } else if (!expected.reg().is(actual.reg())) {
3980 // Both expected and actual are in (different) registers. This
3981 // is the case when we invoke functions using call and apply.
3982 cmpp(expected.reg(), actual.reg());
3983 j(equal, &invoke, Label::kNear);
3984 DCHECK(actual.reg().is(rax));
3985 DCHECK(expected.reg().is(rbx));
3989 if (!definitely_matches) {
3990 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
3991 if (!code_constant.is_null()) {
3992 Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
3993 addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3994 } else if (!code_register.is(rdx)) {
3995 movp(rdx, code_register);
3998 if (flag == CALL_FUNCTION) {
3999 call_wrapper.BeforeCall(CallSize(adaptor));
4000 Call(adaptor, RelocInfo::CODE_TARGET);
4001 call_wrapper.AfterCall();
4002 if (!*definitely_mismatches) {
4003 jmp(done, near_jump);
4006 Jump(adaptor, RelocInfo::CODE_TARGET);
// Builds a stub frame: saved rbp, context, and a STUB marker smi instead of
// a function slot.
4013 void MacroAssembler::StubPrologue() {
4014 pushq(rbp); // Caller's frame pointer.
4016 Push(rsi); // Callee's context.
4017 Push(Smi::FromInt(StackFrame::STUB));
// Emits the JS function prologue inside a fixed-size code-age sequence.
// When pre-aging, the sequence is replaced by a call to the
// MarkCodeAsExecutedOnce builtin (padded with nops to the same length);
// otherwise the standard frame setup (rbp, context, function) is emitted.
4021 void MacroAssembler::Prologue(bool code_pre_aging) {
4022 PredictableCodeSizeScope predictible_code_size_scope(this,
4023 kNoCodeAgeSequenceLength);
4024 if (code_pre_aging) {
4025 // Pre-age the code.
4026 Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
4027 RelocInfo::CODE_AGE_SEQUENCE);
4028 Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
4030 pushq(rbp); // Caller's frame pointer.
4032 Push(rsi); // Callee's context.
4033 Push(rdi); // Callee's JS function.
// Two-argument overload kept for cross-platform API symmetry; x64 has no
// out-of-line constant pool, so the flag is ignored.
4038 void MacroAssembler::EnterFrame(StackFrame::Type type,
4039 bool load_constant_pool_pointer_reg) {
4040 // Out-of-line constant pool not implemented on x64.
// Builds a typed internal frame: context, type marker smi, and this code
// object. In debug code, verifies the pushed code-object slot is not the
// undefined placeholder (i.e. it was properly patched).
4045 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4048 Push(rsi); // Context.
4049 Push(Smi::FromInt(type));
4050 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4051 Push(kScratchRegister);
4052 if (emit_debug_code()) {
4053 Move(kScratchRegister,
4054 isolate()->factory()->undefined_value(),
4055 RelocInfo::EMBEDDED_OBJECT);
4056 cmpp(Operand(rsp, 0), kScratchRegister);
4057 Check(not_equal, kCodeObjectNotProperlyPatched);
// Tears down a frame built by EnterFrame. In debug code, first verifies the
// frame's type-marker slot matches |type|.
4062 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4063 if (emit_debug_code()) {
4064 Move(kScratchRegister, Smi::FromInt(type));
4065 cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
4066 Check(equal, kStackFrameTypesMustMatch);
// Lays out the fixed part of an exit frame (frame used when calling out to
// C): reserved entry-sp slot, code object, and records rbp/context/target in
// the isolate's top-of-stack external references. When |save_rax| is set,
// rax (the argument count) is preserved in callee-save r14 across the setup.
4073 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
4074 // Set up the frame structure on the stack.
4075 // All constants are relative to the frame pointer of the exit frame.
4076 DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
4077 kFPOnStackSize + kPCOnStackSize);
4078 DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
4079 DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
4083 // Reserve room for entry stack pointer and push the code object.
4084 DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
4085 Push(Immediate(0)); // Saved entry sp, patched before call.
4086 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4087 Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
4089 // Save the frame pointer and the context in top.
4091 movp(r14, rax); // Backup rax in callee-save register.
4094 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
4095 Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
4096 Store(ExternalReference(Isolate::kCFunctionAddress, isolate()), rbx);
// Finishes exit-frame setup: reserves C argument stack space (plus Win64
// shadow space), optionally spills all allocatable XMM registers into the
// frame, aligns rsp to the OS frame alignment, and patches the saved
// entry-sp slot reserved by EnterExitFramePrologue.
4100 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
4101 bool save_doubles) {
4103 const int kShadowSpace = 4;
4104 arg_stack_space += kShadowSpace;
4106 // Optionally save all XMM registers.
4108 int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
4109 arg_stack_space * kRegisterSize;
4110 subp(rsp, Immediate(space));
4111 int offset = -2 * kPointerSize;
4112 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4113 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4114 movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
4116 } else if (arg_stack_space > 0) {
4117 subp(rsp, Immediate(arg_stack_space * kRegisterSize));
4120 // Get the required frame alignment for the OS.
4121 const int kFrameAlignment = base::OS::ActivationFrameAlignment();
4122 if (kFrameAlignment > 0) {
4123 DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
4124 DCHECK(is_int8(kFrameAlignment));
4125 andp(rsp, Immediate(-kFrameAlignment));
4128 // Patch the saved entry sp.
4129 movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
// Enters a full exit frame for a runtime/C call: prologue (preserving rax,
// the argument count, in r14), computes argv into callee-save r15 from the
// caller's stack, then the epilogue.
4133 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
4134 EnterExitFramePrologue(true);
4136 // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
4137 // so it must be retained across the C-call.
4138 int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
4139 leap(r15, Operand(rbp, r14, times_pointer_size, offset));
4141 EnterExitFrameEpilogue(arg_stack_space, save_doubles);
// Enters an exit frame for an API callback: no rax preservation, no XMM
// register saving.
4145 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
4146 EnterExitFramePrologue(false);
4147 EnterExitFrameEpilogue(arg_stack_space, false);
// Leaves a full exit frame: restores spilled XMM registers (when
// save_doubles was used on entry), recovers the return address and caller
// rbp, drops the arguments plus receiver using the argv pointer kept in r15,
// re-pushes the return address, then clears the isolate's top-frame state
// via LeaveExitFrameEpilogue.
4151 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
4155 int offset = -2 * kPointerSize;
4156 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4157 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4158 movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
4161 // Get the return address from the stack and restore the frame pointer.
4162 movp(rcx, Operand(rbp, kFPOnStackSize));
4163 movp(rbp, Operand(rbp, 0 * kPointerSize));
4165 // Drop everything up to and including the arguments and the receiver
4166 // from the caller stack.
4167 leap(rsp, Operand(r15, 1 * kPointerSize));
4169 PushReturnAddressFrom(rcx);
4171 LeaveExitFrameEpilogue(true);
// Leaves an API exit frame; context restoration is optional because some
// API callbacks must not clobber the current context.
4175 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
4179 LeaveExitFrameEpilogue(restore_context);
// Restores (optionally) rsi from the isolate's saved context slot, zeroes
// that slot in debug builds, and clears the saved C-entry frame pointer so
// the stack walker sees no active exit frame.
4183 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
4184 // Restore current context from top and clear it in debug mode.
4185 ExternalReference context_address(Isolate::kContextAddress, isolate());
4186 Operand context_operand = ExternalOperand(context_address);
4187 if (restore_context) {
4188 movp(rsi, context_operand);
4191 movp(context_operand, Immediate(0));
4194 // Clear the top frame.
4195 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
4197 Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
4198 movp(c_entry_fp_operand, Immediate(0));
// Security check for cross-context access through a JSGlobalProxy: compares
// the current native context against the proxy's native context, and when
// they differ, compares their security tokens. On mismatch, control reaches
// the (elided) miss path; on match it falls through via |same_contexts|.
// Clobbers |scratch| and kScratchRegister.
4202 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
4205 Label same_contexts;
4207 DCHECK(!holder_reg.is(scratch));
4208 DCHECK(!scratch.is(kScratchRegister));
4209 // Load current lexical context from the stack frame.
4210 movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
4212 // When generating debug code, make sure the lexical context is set.
4213 if (emit_debug_code()) {
4214 cmpp(scratch, Immediate(0));
4215 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
4217 // Load the native context of the current context.
4219 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
4220 movp(scratch, FieldOperand(scratch, offset));
4221 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4223 // Check the context is a native context.
4224 if (emit_debug_code()) {
4225 Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
4226 isolate()->factory()->native_context_map());
4227 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4230 // Check if both contexts are the same.
4231 cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4232 j(equal, &same_contexts);
4234 // Compare security tokens.
4235 // Check that the security token in the calling global object is
4236 // compatible with the security token in the receiving global
4239 // Check the context is a native context.
4240 if (emit_debug_code()) {
4241 // Preserve original value of holder_reg.
4244 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4245 CompareRoot(holder_reg, Heap::kNullValueRootIndex);
4246 Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
4248 // Read the first word and compare to native_context_map(),
4249 movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
4250 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
4251 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4255 movp(kScratchRegister,
4256 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4258 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
4259 movp(scratch, FieldOperand(scratch, token_offset));
4260 cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
4263 bind(&same_contexts);
4267 // Compute the hash code from the untagged key. This must be kept in sync with
4268 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
4269 // code-stub-hydrogen.cc
// Input: r0 holds the untagged integer key; output: r0 holds the hash.
// Clobbers |scratch|. The sequence mirrors the C++ integer hash
// (seed xor, then the shift/add/xor mixing steps listed inline below).
4270 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
4271 // First of all we assign the hash seed to scratch.
4272 LoadRoot(scratch, Heap::kHashSeedRootIndex);
4273 SmiToInteger32(scratch, scratch);
4275 // Xor original key with a seed.
4278 // Compute the hash code from the untagged key. This must be kept in sync
4279 // with ComputeIntegerHash in utils.h.
4281 // hash = ~hash + (hash << 15);
4284 shll(scratch, Immediate(15));
4286 // hash = hash ^ (hash >> 12);
4288 shrl(scratch, Immediate(12));
4290 // hash = hash + (hash << 2);
4291 leal(r0, Operand(r0, r0, times_4, 0));
4292 // hash = hash ^ (hash >> 4);
4294 shrl(scratch, Immediate(4));
4296 // hash = hash * 2057;
4297 imull(r0, r0, Immediate(2057));
4298 // hash = hash ^ (hash >> 16);
4300 shrl(scratch, Immediate(16));
// Looks up an integer key in a SeededNumberDictionary using an unrolled
// probe sequence (quadratic probing, kNumberDictionaryProbes attempts).
// Jumps to |miss| if the key is absent or the entry is not a NORMAL
// property; otherwise loads the value into |result|.
4306 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
4315 // elements - holds the slow-case elements of the receiver on entry.
4316 // Unchanged unless 'result' is the same register.
4318 // key - holds the smi key on entry.
4319 // Unchanged unless 'result' is the same register.
4321 // Scratch registers:
4323 // r0 - holds the untagged key on entry and holds the hash once computed.
4325 // r1 - used to hold the capacity mask of the dictionary
4327 // r2 - used for the index into the dictionary.
4329 // result - holds the result on exit if the load succeeded.
4330 // Allowed to be the same as 'key' or 'result'.
4331 // Unchanged on bailout so 'key' or 'result' can be used
4332 // in further computation.
4336 GetNumberHash(r0, r1);
4338 // Compute capacity mask.
4339 SmiToInteger32(r1, FieldOperand(elements,
4340 SeededNumberDictionary::kCapacityOffset));
4343 // Generate an unrolled loop that performs a few probes before giving up.
4344 for (int i = 0; i < kNumberDictionaryProbes; i++) {
4345 // Use r2 for index calculations and keep the hash intact in r0.
4347 // Compute the masked index: (hash + i + i * i) & mask.
4349 addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4353 // Scale the index by multiplying by the entry size.
4354 DCHECK(SeededNumberDictionary::kEntrySize == 3);
4355 leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
4357 // Check if the key matches.
4358 cmpp(key, FieldOperand(elements,
4361 SeededNumberDictionary::kElementsStartOffset));
4362 if (i != (kNumberDictionaryProbes - 1)) {
4370 // Check that the value is a normal property.
4371 const int kDetailsOffset =
4372 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4373 DCHECK_EQ(NORMAL, 0);
4374 Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4375 Smi::FromInt(PropertyDetails::TypeField::kMask));
4378 // Get the value at the masked, scaled index.
4379 const int kValueOffset =
4380 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4381 movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
// Loads the current allocation top into |result|. With RESULT_CONTAINS_TOP
// the caller already has top in |result| (debug builds verify this). When a
// valid |scratch| is given, it is left holding the top address so
// UpdateAllocationTopHelper can store through it without reloading.
4385 void MacroAssembler::LoadAllocationTopHelper(Register result,
4387 AllocationFlags flags) {
4388 ExternalReference allocation_top =
4389 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4391 // Just return if allocation top is already known.
4392 if ((flags & RESULT_CONTAINS_TOP) != 0) {
4393 // No use of scratch if allocation top is provided.
4394 DCHECK(!scratch.is_valid());
4396 // Assert that result actually contains top on entry.
4397 Operand top_operand = ExternalOperand(allocation_top);
4398 cmpp(result, top_operand);
4399 Check(equal, kUnexpectedAllocationTop);
4404 // Move address of new object to result. Use scratch register if available,
4405 // and keep address in scratch until call to UpdateAllocationTopHelper.
4406 if (scratch.is_valid()) {
4407 LoadAddress(scratch, allocation_top);
4408 movp(result, Operand(scratch, 0));
4410 Load(result, allocation_top);
// Ensures the allocation pointer in |result| is double-aligned. On x64
// proper (pointer size == double size) alignment always holds and this only
// asserts in debug code. On x32 it may emit a one-pointer filler object to
// bump |result| to the next double boundary, checking the allocation limit
// first for old-data-space allocations.
4415 void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
4418 AllocationFlags flags) {
4419 if (kPointerSize == kDoubleSize) {
4420 if (FLAG_debug_code) {
4421 testl(result, Immediate(kDoubleAlignmentMask));
4422 Check(zero, kAllocationIsNotDoubleAligned);
4425 // Align the next allocation. Storing the filler map without checking top
4426 // is safe in new-space because the limit of the heap is aligned there.
4427 DCHECK(kPointerSize * 2 == kDoubleSize);
4428 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
4429 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4430 // Make sure scratch is not clobbered by this function as it might be
4431 // used in UpdateAllocationTopHelper later.
4432 DCHECK(!scratch.is(kScratchRegister));
4434 testl(result, Immediate(kDoubleAlignmentMask));
4435 j(zero, &aligned, Label::kNear);
4436 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
4437 ExternalReference allocation_limit =
4438 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4439 cmpp(result, ExternalOperand(allocation_limit));
4440 j(above_equal, gc_required);
4442 LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
4443 movp(Operand(result, 0), kScratchRegister);
4444 addp(result, Immediate(kDoubleSize / 2));
// Writes the new allocation top from |result_end| back to the heap's top
// pointer. Uses |scratch| (which LoadAllocationTopHelper left holding the
// top address) when valid; otherwise stores through the external reference.
// Debug builds verify the new top is object-aligned.
4450 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
4452 AllocationFlags flags) {
4453 if (emit_debug_code()) {
4454 testp(result_end, Immediate(kObjectAlignmentMask));
4455 Check(zero, kUnalignedAllocationInNewSpace);
4458 ExternalReference allocation_top =
4459 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4462 if (scratch.is_valid()) {
4463 // Scratch already contains address of allocation top.
4464 movp(Operand(scratch, 0), result_end);
4466 Store(allocation_top, result_end);
// Bump-pointer allocates |object_size| bytes (a compile-time constant) in
// the space selected by |flags|. On success |result| holds the new object
// (tagged when TAG_OBJECT is set); on exhaustion jumps to |gc_required|.
// With inline allocation disabled, debug builds poison the registers and
// control reaches the (elided) gc_required jump.
4471 void MacroAssembler::Allocate(int object_size,
4473 Register result_end,
4476 AllocationFlags flags) {
4477 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
4478 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4479 if (!FLAG_inline_new) {
4480 if (emit_debug_code()) {
4481 // Trash the registers to simulate an allocation failure.
4482 movl(result, Immediate(0x7091));
4483 if (result_end.is_valid()) {
4484 movl(result_end, Immediate(0x7191));
4486 if (scratch.is_valid()) {
4487 movl(scratch, Immediate(0x7291));
4493 DCHECK(!result.is(result_end));
4495 // Load address of new object into result.
4496 LoadAllocationTopHelper(result, scratch, flags);
4498 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4499 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4502 // Calculate new top and bail out if new space is exhausted.
4503 ExternalReference allocation_limit =
4504 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4506 Register top_reg = result_end.is_valid() ? result_end : result;
4508 if (!top_reg.is(result)) {
4509 movp(top_reg, result);
4511 addp(top_reg, Immediate(object_size));
4512 j(carry, gc_required);
4513 Operand limit_operand = ExternalOperand(allocation_limit);
4514 cmpp(top_reg, limit_operand);
4515 j(above, gc_required);
4517 // Update allocation top.
4518 UpdateAllocationTopHelper(top_reg, scratch, flags);
4520 bool tag_result = (flags & TAG_OBJECT) != 0;
4521 if (top_reg.is(result)) {
4523 subp(result, Immediate(object_size - kHeapObjectTag));
4525 subp(result, Immediate(object_size));
4527 } else if (tag_result) {
4528 // Tag the result if requested.
4529 DCHECK(kHeapObjectTag == 1);
// Allocates header_size + element_count << element_size bytes by computing
// the total into |result_end| and delegating to the register-sized
// Allocate overload.
4535 void MacroAssembler::Allocate(int header_size,
4536 ScaleFactor element_size,
4537 Register element_count,
4539 Register result_end,
4542 AllocationFlags flags) {
4543 DCHECK((flags & SIZE_IN_WORDS) == 0);
4544 leap(result_end, Operand(element_count, element_size, header_size));
4545 Allocate(result_end, result, result_end, scratch, gc_required, flags);
// Bump-pointer allocates a runtime-computed number of bytes (|object_size|
// register). Same contract as the constant-size Allocate: jumps to
// |gc_required| on overflow or limit excess, leaves the (optionally tagged)
// object in |result|, and leaves |object_size| unchanged.
4549 void MacroAssembler::Allocate(Register object_size,
4551 Register result_end,
4554 AllocationFlags flags) {
4555 DCHECK((flags & SIZE_IN_WORDS) == 0);
4556 if (!FLAG_inline_new) {
4557 if (emit_debug_code()) {
4558 // Trash the registers to simulate an allocation failure.
4559 movl(result, Immediate(0x7091));
4560 movl(result_end, Immediate(0x7191));
4561 if (scratch.is_valid()) {
4562 movl(scratch, Immediate(0x7291));
4564 // object_size is left unchanged by this function.
4569 DCHECK(!result.is(result_end));
4571 // Load address of new object into result.
4572 LoadAllocationTopHelper(result, scratch, flags);
4574 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4575 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4578 // Calculate new top and bail out if new space is exhausted.
4579 ExternalReference allocation_limit =
4580 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4581 if (!object_size.is(result_end)) {
4582 movp(result_end, object_size);
4584 addp(result_end, result);
4585 j(carry, gc_required);
4586 Operand limit_operand = ExternalOperand(allocation_limit);
4587 cmpp(result_end, limit_operand);
4588 j(above, gc_required);
4590 // Update allocation top.
4591 UpdateAllocationTopHelper(result_end, scratch, flags);
4593 // Tag the result if requested.
4594 if ((flags & TAG_OBJECT) != 0) {
4595 addp(result, Immediate(kHeapObjectTag));
// Rolls new-space allocation top back to |object|, undoing the most recent
// allocation. Debug builds verify |object| is below the current top (i.e.
// it really was just allocated). The object's tag is stripped in place.
4600 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
4601 ExternalReference new_space_allocation_top =
4602 ExternalReference::new_space_allocation_top_address(isolate());
4604 // Make sure the object has no tag before resetting top.
4605 andp(object, Immediate(~kHeapObjectTagMask));
4606 Operand top_operand = ExternalOperand(new_space_allocation_top);
4608 cmpp(object, top_operand);
4609 Check(below, kUndoAllocationOfNonAllocatedMemory);
4611 movp(top_operand, object);
// Allocates a (mutable or immutable, per |mode|) HeapNumber in new space
// and installs its map; the value field is left uninitialized. Jumps to
// |gc_required| when allocation fails.
4615 void MacroAssembler::AllocateHeapNumber(Register result,
4619 // Allocate heap number in new space.
4620 Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4622 Heap::RootListIndex map_index = mode == MUTABLE
4623 ? Heap::kMutableHeapNumberMapRootIndex
4624 : Heap::kHeapNumberMapRootIndex;
4627 LoadRoot(kScratchRegister, map_index);
4628 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
// Allocates a sequential two-byte string of |length| characters: computes
// the object-aligned byte size (2 bytes per char plus header), allocates,
// then installs the map, smi length, and empty hash field. The character
// payload is left uninitialized.
4632 void MacroAssembler::AllocateTwoByteString(Register result,
4637 Label* gc_required) {
4638 // Calculate the number of bytes needed for the characters in the string while
4639 // observing object alignment.
4640 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
4641 kObjectAlignmentMask;
4642 DCHECK(kShortSize == 2);
4643 // scratch1 = length * 2 + kObjectAlignmentMask.
4644 leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
4646 andp(scratch1, Immediate(~kObjectAlignmentMask));
4647 if (kHeaderAlignment > 0) {
4648 subp(scratch1, Immediate(kHeaderAlignment));
4651 // Allocate two byte string in new space.
4652 Allocate(SeqTwoByteString::kHeaderSize,
4661 // Set the map, length and hash field.
4662 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
4663 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4664 Integer32ToSmi(scratch1, length);
4665 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4666 movp(FieldOperand(result, String::kHashFieldOffset),
4667 Immediate(String::kEmptyHashField));
// One-byte counterpart of AllocateTwoByteString: 1 byte per character,
// size rounded up to object alignment, then map / smi length / empty hash
// field are written. The character payload is left uninitialized.
4671 void MacroAssembler::AllocateOneByteString(Register result, Register length,
4672 Register scratch1, Register scratch2,
4674 Label* gc_required) {
4675 // Calculate the number of bytes needed for the characters in the string while
4676 // observing object alignment.
4677 const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
4678 kObjectAlignmentMask;
4679 movl(scratch1, length);
4680 DCHECK(kCharSize == 1);
4681 addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
4682 andp(scratch1, Immediate(~kObjectAlignmentMask));
4683 if (kHeaderAlignment > 0) {
4684 subp(scratch1, Immediate(kHeaderAlignment));
4687 // Allocate one-byte string in new space.
4688 Allocate(SeqOneByteString::kHeaderSize,
4697 // Set the map, length and hash field.
4698 LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
4699 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4700 Integer32ToSmi(scratch1, length);
4701 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4702 movp(FieldOperand(result, String::kHashFieldOffset),
4703 Immediate(String::kEmptyHashField));
// Allocates a two-byte ConsString shell and installs its map; first/second
// string fields are left for the caller to fill in.
4707 void MacroAssembler::AllocateTwoByteConsString(Register result,
4710 Label* gc_required) {
4711 // Allocate heap number in new space.
4712 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4715 // Set the map. The other fields are left uninitialized.
4716 LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
4717 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
// One-byte counterpart of AllocateTwoByteConsString: allocates the shell
// and installs the one-byte cons-string map only.
4721 void MacroAssembler::AllocateOneByteConsString(Register result,
4724 Label* gc_required) {
4725 Allocate(ConsString::kSize,
4732 // Set the map. The other fields are left uninitialized.
4733 LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
4734 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
// Allocates a two-byte SlicedString shell and installs its map; parent and
// offset fields are left for the caller to fill in.
4738 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4741 Label* gc_required) {
4742 // Allocate heap number in new space.
4743 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4746 // Set the map. The other fields are left uninitialized.
4747 LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
4748 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
// One-byte counterpart of AllocateTwoByteSlicedString.
4752 void MacroAssembler::AllocateOneByteSlicedString(Register result,
4755 Label* gc_required) {
4756 // Allocate heap number in new space.
4757 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4760 // Set the map. The other fields are left uninitialized.
4761 LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
4762 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4766 // Copy memory, byte-by-byte, from source to destination. Not optimized for
4767 // long or aligned copies. The contents of scratch and length are destroyed.
4768 // Destination is incremented by length, source, length and scratch are
4770 // A simpler loop is faster on small copies, but slower on large ones.
4771 // The cld() instruction must have been emitted, to set the direction flag(),
4772 // before calling this function.
// Strategy: lengths up to 4*kPointerSize are handled by the unrolled
// len8/len16/len24 word copies or the byte-wise short_loop; longer copies
// use rep movs on whole words (source kept word-aligned by copying the odd
// trailing bytes separately). rep movs requires rsi/rdi/rcx, hence the
// register DCHECKs below.
4773 void MacroAssembler::CopyBytes(Register destination,
4778 DCHECK(min_length >= 0);
4779 if (emit_debug_code()) {
4780 cmpl(length, Immediate(min_length));
4781 Assert(greater_equal, kInvalidMinLength);
4783 Label short_loop, len8, len16, len24, done, short_string;
4785 const int kLongStringLimit = 4 * kPointerSize;
4786 if (min_length <= kLongStringLimit) {
4787 cmpl(length, Immediate(kPointerSize));
4788 j(below, &short_string, Label::kNear);
4791 DCHECK(source.is(rsi));
4792 DCHECK(destination.is(rdi));
4793 DCHECK(length.is(rcx));
4795 if (min_length <= kLongStringLimit) {
4796 cmpl(length, Immediate(2 * kPointerSize));
4797 j(below_equal, &len8, Label::kNear);
4798 cmpl(length, Immediate(3 * kPointerSize));
4799 j(below_equal, &len16, Label::kNear);
4800 cmpl(length, Immediate(4 * kPointerSize));
4801 j(below_equal, &len24, Label::kNear);
4804 // Because source is 8-byte aligned in our uses of this function,
4805 // we keep source aligned for the rep movs operation by copying the odd bytes
4806 // at the end of the ranges.
4807 movp(scratch, length);
4808 shrl(length, Immediate(kPointerSizeLog2));
4810 // Move remaining bytes of length.
4811 andl(scratch, Immediate(kPointerSize - 1));
4812 movp(length, Operand(source, scratch, times_1, -kPointerSize));
4813 movp(Operand(destination, scratch, times_1, -kPointerSize), length);
4814 addp(destination, scratch);
4816 if (min_length <= kLongStringLimit) {
4817 jmp(&done, Label::kNear);
4819 movp(scratch, Operand(source, 2 * kPointerSize));
4820 movp(Operand(destination, 2 * kPointerSize), scratch);
4822 movp(scratch, Operand(source, kPointerSize));
4823 movp(Operand(destination, kPointerSize), scratch);
4825 movp(scratch, Operand(source, 0));
4826 movp(Operand(destination, 0), scratch);
4827 // Move remaining bytes of length.
4828 movp(scratch, Operand(source, length, times_1, -kPointerSize));
4829 movp(Operand(destination, length, times_1, -kPointerSize), scratch);
4830 addp(destination, length);
4831 jmp(&done, Label::kNear);
4833 bind(&short_string);
4834 if (min_length == 0) {
4835 testl(length, length);
4836 j(zero, &done, Label::kNear);
4840 movb(scratch, Operand(source, 0));
4841 movb(Operand(destination, 0), scratch);
4845 j(not_zero, &short_loop);
// Fills memory from |start_offset| (inclusive) to |end_offset| (exclusive)
// with |filler|, one pointer-sized word at a time, advancing |start_offset|.
4852 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
4853 Register end_offset,
4858 movp(Operand(start_offset, 0), filler);
4859 addp(start_offset, Immediate(kPointerSize));
4861 cmpp(start_offset, end_offset);
// Walks |context_chain_length| PREVIOUS links up the context chain from rsi
// into |dst| (copying rsi when the length is zero, so writes through |dst|
// can't clobber the live context register). Debug builds verify the result
// is not a with-context.
4866 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4867 if (context_chain_length > 0) {
4868 // Move up the chain of contexts to the context containing the slot.
4869 movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4870 for (int i = 1; i < context_chain_length; i++) {
4871 movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4874 // Slot is in the current function context. Move it into the
4875 // destination register in case we store into it (the write barrier
4876 // cannot be allowed to destroy the context in rsi).
4880 // We should not have found a with context by walking the context
4881 // chain (i.e., the static scope chain and runtime context chain do
4882 // not agree). A variable occurring in such a scope should have
4883 // slot type LOOKUP and not CONTEXT.
4884 if (emit_debug_code()) {
4885 CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
4886 Heap::kWithContextMapRootIndex);
4887 Check(not_equal, kVariableResolvedToWithContext);
// If |map_in_out| equals the native context's cached JSArray map for
// |expected_kind|, replaces it with the cached map for |transitioned_kind|;
// otherwise jumps to |no_map_match|. Clobbers |scratch|.
4892 void MacroAssembler::LoadTransitionedArrayMapConditional(
4893 ElementsKind expected_kind,
4894 ElementsKind transitioned_kind,
4895 Register map_in_out,
4897 Label* no_map_match) {
4898 // Load the global or builtins object from the current context.
4900 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4901 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4903 // Check that the function's map is the same as the expected cached map.
4904 movp(scratch, Operand(scratch,
4905 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4907 int offset = expected_kind * kPointerSize +
4908 FixedArrayBase::kHeaderSize;
4909 cmpp(map_in_out, FieldOperand(scratch, offset));
4910 j(not_equal, no_map_match);
4912 // Use the transitioned cached map.
4913 offset = transitioned_kind * kPointerSize +
4914 FixedArrayBase::kHeaderSize;
4915 movp(map_in_out, FieldOperand(scratch, offset));
// Number of C-call arguments passed in registers.
// NOTE(review): the two definitions are presumably guarded by a Win64 vs.
// System V AMD64 preprocessor conditional (4 register args on Windows,
// 6 elsewhere) — the #if/#else lines are not visible in this excerpt;
// confirm against the full source.
4920 static const int kRegisterPassedArguments = 4;
4922 static const int kRegisterPassedArguments = 6;
// Loads native-context slot |index| (a builtin/global function) into
// |function|, going current context -> global object -> native context.
4925 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4926 // Load the global or builtins object from the current context.
4928 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4929 // Load the native context from the global or builtins object.
4930 movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
4931 // Load the function from the native context.
4932 movp(function, Operand(function, Context::SlotOffset(index)));
4936 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4938 // Load the initial map. The global functions all have initial maps.
4939 movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4940 if (emit_debug_code()) {
4942 CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
4945 Abort(kGlobalFunctionsMustHaveInitialMap);
4951 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
4952 // On Windows 64 stack slots are reserved by the caller for all arguments
4953 // including the ones passed in registers, and space is always allocated for
4954 // the four register arguments even if the function takes fewer than four
4956 // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
4957 // and the caller does not reserve stack slots for them.
4958 DCHECK(num_arguments >= 0);
4960 const int kMinimumStackSlots = kRegisterPassedArguments;
4961 if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
4962 return num_arguments;
4964 if (num_arguments < kRegisterPassedArguments) return 0;
4965 return num_arguments - kRegisterPassedArguments;
4970 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
4973 uint32_t encoding_mask) {
4975 JumpIfNotSmi(string, &is_object);
4980 movp(value, FieldOperand(string, HeapObject::kMapOffset));
4981 movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
4983 andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
4984 cmpp(value, Immediate(encoding_mask));
4986 Check(equal, kUnexpectedStringType);
4988 // The index is assumed to be untagged coming in, tag it to compare with the
4989 // string length without using a temp register, it is restored at the end of
4991 Integer32ToSmi(index, index);
4992 SmiCompare(index, FieldOperand(string, String::kLengthOffset));
4993 Check(less, kIndexIsTooLarge);
4995 SmiCompare(index, Smi::FromInt(0));
4996 Check(greater_equal, kIndexIsNegative);
4998 // Restore the index
4999 SmiToInteger32(index, index);
5003 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
5004 int frame_alignment = base::OS::ActivationFrameAlignment();
5005 DCHECK(frame_alignment != 0);
5006 DCHECK(num_arguments >= 0);
5008 // Make stack end at alignment and allocate space for arguments and old rsp.
5009 movp(kScratchRegister, rsp);
5010 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5011 int argument_slots_on_stack =
5012 ArgumentStackSlotsForCFunctionCall(num_arguments);
5013 subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
5014 andp(rsp, Immediate(-frame_alignment));
5015 movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
5019 void MacroAssembler::CallCFunction(ExternalReference function,
5020 int num_arguments) {
5021 LoadAddress(rax, function);
5022 CallCFunction(rax, num_arguments);
5026 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
5027 DCHECK(has_frame());
5028 // Check stack alignment.
5029 if (emit_debug_code()) {
5030 CheckStackAlignment();
5034 DCHECK(base::OS::ActivationFrameAlignment() != 0);
5035 DCHECK(num_arguments >= 0);
5036 int argument_slots_on_stack =
5037 ArgumentStackSlotsForCFunctionCall(num_arguments);
5038 movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
5043 bool AreAliased(Register reg1,
5051 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
5052 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
5053 reg7.is_valid() + reg8.is_valid();
5056 if (reg1.is_valid()) regs |= reg1.bit();
5057 if (reg2.is_valid()) regs |= reg2.bit();
5058 if (reg3.is_valid()) regs |= reg3.bit();
5059 if (reg4.is_valid()) regs |= reg4.bit();
5060 if (reg5.is_valid()) regs |= reg5.bit();
5061 if (reg6.is_valid()) regs |= reg6.bit();
5062 if (reg7.is_valid()) regs |= reg7.bit();
5063 if (reg8.is_valid()) regs |= reg8.bit();
5064 int n_of_non_aliasing_regs = NumRegs(regs);
5066 return n_of_valid_regs != n_of_non_aliasing_regs;
5071 CodePatcher::CodePatcher(byte* address, int size)
5072 : address_(address),
5074 masm_(NULL, address, size + Assembler::kGap) {
5075 // Create a new macro assembler pointing to the address of the code to patch.
5076 // The size is adjusted with kGap on order for the assembler to generate size
5077 // bytes of instructions without failing with buffer size constraints.
5078 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5082 CodePatcher::~CodePatcher() {
5083 // Indicate that code has changed.
5084 CpuFeatures::FlushICache(address_, size_);
5086 // Check that the code was patched as expected.
5087 DCHECK(masm_.pc_ == address_ + size_);
5088 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5092 void MacroAssembler::CheckPageFlag(
5097 Label* condition_met,
5098 Label::Distance condition_met_distance) {
5099 DCHECK(cc == zero || cc == not_zero);
5100 if (scratch.is(object)) {
5101 andp(scratch, Immediate(~Page::kPageAlignmentMask));
5103 movp(scratch, Immediate(~Page::kPageAlignmentMask));
5104 andp(scratch, object);
5106 if (mask < (1 << kBitsPerByte)) {
5107 testb(Operand(scratch, MemoryChunk::kFlagsOffset),
5108 Immediate(static_cast<uint8_t>(mask)));
5110 testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
5112 j(cc, condition_met, condition_met_distance);
5116 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5118 Label* if_deprecated) {
5119 if (map->CanBeDeprecated()) {
5121 movl(scratch, FieldOperand(scratch, Map::kBitField3Offset));
5122 andl(scratch, Immediate(Map::Deprecated::kMask));
5123 j(not_zero, if_deprecated);
5128 void MacroAssembler::JumpIfBlack(Register object,
5129 Register bitmap_scratch,
5130 Register mask_scratch,
5132 Label::Distance on_black_distance) {
5133 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
5134 GetMarkBits(object, bitmap_scratch, mask_scratch);
5136 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5137 // The mask_scratch register contains a 1 at the position of the first bit
5138 // and a 0 at all other positions, including the position of the second bit.
5139 movp(rcx, mask_scratch);
5140 // Make rcx into a mask that covers both marking bits using the operation
5141 // rcx = mask | (mask << 1).
5142 leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
5143 // Note that we are using a 4-byte aligned 8-byte load.
5144 andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
5145 cmpp(mask_scratch, rcx);
5146 j(equal, on_black, on_black_distance);
5150 // Detect some, but not all, common pointer-free objects. This is used by the
5151 // incremental write barrier which doesn't care about oddballs (they are always
5152 // marked black immediately so this code is not hit).
5153 void MacroAssembler::JumpIfDataObject(
5156 Label* not_data_object,
5157 Label::Distance not_data_object_distance) {
5158 Label is_data_object;
5159 movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
5160 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5161 j(equal, &is_data_object, Label::kNear);
5162 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5163 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5164 // If it's a string and it's not a cons string then it's an object containing
5166 testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
5167 Immediate(kIsIndirectStringMask | kIsNotStringMask));
5168 j(not_zero, not_data_object, not_data_object_distance);
5169 bind(&is_data_object);
5173 void MacroAssembler::GetMarkBits(Register addr_reg,
5174 Register bitmap_reg,
5175 Register mask_reg) {
5176 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
5177 movp(bitmap_reg, addr_reg);
5178 // Sign extended 32 bit immediate.
5179 andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
5180 movp(rcx, addr_reg);
5182 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
5183 shrl(rcx, Immediate(shift));
5185 Immediate((Page::kPageAlignmentMask >> shift) &
5186 ~(Bitmap::kBytesPerCell - 1)));
5188 addp(bitmap_reg, rcx);
5189 movp(rcx, addr_reg);
5190 shrl(rcx, Immediate(kPointerSizeLog2));
5191 andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
5192 movl(mask_reg, Immediate(1));
5197 void MacroAssembler::EnsureNotWhite(
5199 Register bitmap_scratch,
5200 Register mask_scratch,
5201 Label* value_is_white_and_not_data,
5202 Label::Distance distance) {
5203 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
5204 GetMarkBits(value, bitmap_scratch, mask_scratch);
5206 // If the value is black or grey we don't need to do anything.
5207 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5208 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5209 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5210 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5214 // Since both black and grey have a 1 in the first position and white does
5215 // not have a 1 there we only need to check one bit.
5216 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5217 j(not_zero, &done, Label::kNear);
5219 if (emit_debug_code()) {
5220 // Check for impossible bit pattern.
5223 // shl. May overflow making the check conservative.
5224 addp(mask_scratch, mask_scratch);
5225 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5226 j(zero, &ok, Label::kNear);
5232 // Value is white. We check whether it is data that doesn't need scanning.
5233 // Currently only checks for HeapNumber and non-cons strings.
5234 Register map = rcx; // Holds map while checking type.
5235 Register length = rcx; // Holds length of object after checking type.
5236 Label not_heap_number;
5237 Label is_data_object;
5239 // Check for heap-number
5240 movp(map, FieldOperand(value, HeapObject::kMapOffset));
5241 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
5242 j(not_equal, ¬_heap_number, Label::kNear);
5243 movp(length, Immediate(HeapNumber::kSize));
5244 jmp(&is_data_object, Label::kNear);
5246 bind(¬_heap_number);
5247 // Check for strings.
5248 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5249 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5250 // If it's a string and it's not a cons string then it's an object containing
5252 Register instance_type = rcx;
5253 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
5254 testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
5255 j(not_zero, value_is_white_and_not_data);
5256 // It's a non-indirect (non-cons and non-slice) string.
5257 // If it's external, the length is just ExternalString::kSize.
5258 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5260 // External strings are the only ones with the kExternalStringTag bit
5262 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
5263 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
5264 testb(instance_type, Immediate(kExternalStringTag));
5265 j(zero, ¬_external, Label::kNear);
5266 movp(length, Immediate(ExternalString::kSize));
5267 jmp(&is_data_object, Label::kNear);
5269 bind(¬_external);
5270 // Sequential string, either Latin1 or UC16.
5271 DCHECK(kOneByteStringTag == 0x04);
5272 andp(length, Immediate(kStringEncodingMask));
5273 xorp(length, Immediate(kStringEncodingMask));
5274 addp(length, Immediate(0x04));
5275 // Value now either 4 (if Latin1) or 8 (if UC16), i.e. char-size shifted by 2.
5276 imulp(length, FieldOperand(value, String::kLengthOffset));
5277 shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
5278 addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
5279 andp(length, Immediate(~kObjectAlignmentMask));
5281 bind(&is_data_object);
5282 // Value is a data object, and it is white. Mark it black. Since we know
5283 // that the object is white we can make it black by flipping one bit.
5284 orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5286 andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
5287 addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
5293 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5295 Register empty_fixed_array_value = r8;
5296 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5299 // Check if the enum length field is properly initialized, indicating that
5300 // there is an enum cache.
5301 movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
5303 EnumLength(rdx, rbx);
5304 Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
5305 j(equal, call_runtime);
5311 movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
5313 // For all objects but the receiver, check that the cache is empty.
5314 EnumLength(rdx, rbx);
5315 Cmp(rdx, Smi::FromInt(0));
5316 j(not_equal, call_runtime);
5320 // Check that there are no elements. Register rcx contains the current JS
5321 // object we've reached through the prototype chain.
5323 cmpp(empty_fixed_array_value,
5324 FieldOperand(rcx, JSObject::kElementsOffset));
5325 j(equal, &no_elements);
5327 // Second chance, the object may be using the empty slow element dictionary.
5328 LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
5329 cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
5330 j(not_equal, call_runtime);
5333 movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
5334 cmpp(rcx, null_value);
5335 j(not_equal, &next);
5338 void MacroAssembler::TestJSArrayForAllocationMemento(
5339 Register receiver_reg,
5340 Register scratch_reg,
5341 Label* no_memento_found) {
5342 ExternalReference new_space_start =
5343 ExternalReference::new_space_start(isolate());
5344 ExternalReference new_space_allocation_top =
5345 ExternalReference::new_space_allocation_top_address(isolate());
5347 leap(scratch_reg, Operand(receiver_reg,
5348 JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5349 Move(kScratchRegister, new_space_start);
5350 cmpp(scratch_reg, kScratchRegister);
5351 j(less, no_memento_found);
5352 cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
5353 j(greater, no_memento_found);
5354 CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
5355 Heap::kAllocationMementoMapRootIndex);
5359 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5364 DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
5365 DCHECK(!scratch1.is(scratch0));
5366 Register current = scratch0;
5369 movp(current, object);
5371 // Loop based on the map going up the prototype chain.
5373 movp(current, FieldOperand(current, HeapObject::kMapOffset));
5374 movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
5375 DecodeField<Map::ElementsKindBits>(scratch1);
5376 cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
5378 movp(current, FieldOperand(current, Map::kPrototypeOffset));
5379 CompareRoot(current, Heap::kNullValueRootIndex);
5380 j(not_equal, &loop_again);
5384 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
5385 DCHECK(!dividend.is(rax));
5386 DCHECK(!dividend.is(rdx));
5387 base::MagicNumbersForDivision<uint32_t> mag =
5388 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
5389 movl(rax, Immediate(mag.multiplier));
5391 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
5392 if (divisor > 0 && neg) addl(rdx, dividend);
5393 if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
5394 if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
5395 movl(rax, dividend);
5396 shrl(rax, Immediate(31));
5401 } } // namespace v8::internal
5403 #endif // V8_TARGET_ARCH_X64