// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
#include "src/serialize.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      root_array_available_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),

static const int64_t kInvalidRootRegisterDelta = -1;
int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
  if (predictable_code_size() &&
      (other.address() < reinterpret_cast<Address>(isolate()) ||
       other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
    return kInvalidRootRegisterDelta;
  Address roots_register_value = kRootRegisterBias +
      reinterpret_cast<Address>(isolate()->heap()->roots_array_start());

  int64_t delta = kInvalidRootRegisterDelta;  // Bogus initialization.
  if (kPointerSize == kInt64Size) {
    delta = other.address() - roots_register_value;
    // For x32, zero extend the address to 64-bit and calculate the delta.
    uint64_t o = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(other.address()));
    uint64_t r = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(roots_register_value));

Operand MacroAssembler::ExternalOperand(ExternalReference target,
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(target);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      return Operand(kRootRegister, static_cast<int32_t>(delta));
  Move(scratch, target);
  return Operand(scratch, 0);
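
// Usage sketch (illustrative, not from the original source): when the target
// lies within a 32-bit displacement of the biased root-array pointer,
// ExternalOperand folds the access into a single memory operand, e.g.
//
//   masm->movp(rax, masm->ExternalOperand(ref));  // [r13 + delta], one insn
//
// Otherwise the full 64-bit address is first materialized in the scratch
// register and the access goes through [scratch + 0].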
void MacroAssembler::Load(Register destination, ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
  if (destination.is(rax)) {
  Move(kScratchRegister, source);
  movp(destination, Operand(kScratchRegister, 0));

void MacroAssembler::Store(ExternalReference destination, Register source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(destination);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
  if (source.is(rax)) {
    store_rax(destination);
  Move(kScratchRegister, destination);
  movp(Operand(kScratchRegister, 0), source);
void MacroAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
  Move(destination, source);

int MacroAssembler::LoadAddressSize(ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    // This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
    // instruction below.
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      // Operand is leap(scratch, Operand(kRootRegister, delta));
      // Opcodes: REX.W 8D ModRM Disp8/Disp32 - 4 or 7 bytes.
      if (!is_int8(static_cast<int32_t>(delta))) {
        size += 3;  // Need the full four-byte displacement in lea.
  // Size of movp(destination, src);
  return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
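
// Worked size example (illustrative): the leap above encodes as REX.W 8D /r,
// i.e. 3 bytes of prefix/opcode/ModRM plus a one-byte displacement (4 total)
// when the delta fits in int8, or a four-byte displacement (7 total)
// otherwise. The fallback is the 10-byte movq(kScratchRegister, imm64) that
// kMoveAddressIntoScratchRegisterInstructionLength accounts for; callers that
// later patch these sites rely on LoadAddressSize reporting the exact layout.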
void MacroAssembler::PushAddress(ExternalReference source) {
  int64_t address = reinterpret_cast<int64_t>(source.address());
  if (is_int32(address) && !serializer_enabled()) {
    if (emit_debug_code()) {
      Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
    Push(Immediate(static_cast<int32_t>(address)));
  LoadAddress(kScratchRegister, source);
  Push(kScratchRegister);

void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  movp(destination, Operand(kRootRegister,
                            (index << kPointerSizeLog2) - kRootRegisterBias));
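
// Addressing example (illustrative, assuming the usual x64 bias of 128):
// kRootRegister (r13) holds roots_array_start + kRootRegisterBias, so for,
// say, index 2 with 8-byte pointers the displacement is 2 * 8 - 128 = -112.
// Biasing keeps the low root-list entries within the signed one-byte
// displacement range, giving LoadRoot a shorter encoding than an unbiased
// base would.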
void MacroAssembler::LoadRootIndexed(Register destination,
                                     Register variable_offset,
  DCHECK(root_array_available_);
       Operand(kRootRegister,
               variable_offset, times_pointer_size,
               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));

void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),

void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));

void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  cmpp(with, Operand(kRootRegister,
                     (index << kPointerSizeLog2) - kRootRegisterBias));

void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  DCHECK(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  cmpp(with, kScratchRegister);
void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         SaveFPRegsMode save_fp,
                                         RememberedSetFinalAction and_then) {
  if (emit_debug_code()) {
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
  // Load store buffer top.
  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Store pointer to buffer.
  movp(Operand(scratch, 0), addr);
  // Increment buffer top.
  addp(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Call stub on end of buffer.
  // Check for end of buffer.
  testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    bind(&buffer_overflowed);
    DCHECK(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    DCHECK(and_then == kFallThroughAtEnd);
void MacroAssembler::InNewSpace(Register object,
                                Label::Distance distance) {
  if (serializer_enabled()) {
    // Can't do arithmetic on external references if they might get serialized.
    // The mask isn't really an address. We load it as an external reference in
    // case the size of the new space is different between the snapshot maker
    // and the running system.
    if (scratch.is(object)) {
      Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
      andp(scratch, kScratchRegister);
      Move(scratch, ExternalReference::new_space_mask(isolate()));
      andp(scratch, object);
    Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
    cmpp(scratch, kScratchRegister);
    j(cc, branch, distance);
    DCHECK(kPointerSize == kInt64Size
        ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
        : kPointerSize == kInt32Size);
    intptr_t new_space_start =
        reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
    Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
         Assembler::RelocInfoNone());
    if (scratch.is(object)) {
      addp(scratch, kScratchRegister);
      leap(scratch, Operand(object, kScratchRegister, times_1, 0));
         Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
    j(cc, branch, distance);
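
// Sketch of the inlined check above (assuming the new-space reservation is
// aligned to its power-of-two size): computing scratch = object +
// (-NewSpaceStart) and then masking with NewSpaceMask leaves zero exactly for
// addresses inside the reservation, so passing cc == zero branches when the
// object is in new space and cc == not_zero when it is not.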
void MacroAssembler::RecordWriteField(
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  leap(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
    j(zero, &ok, Label::kNear);

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, Assembler::RelocInfoNone());
    Move(dst, kZapValue, Assembler::RelocInfoNone());
void MacroAssembler::RecordWriteArray(
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);

  // Array access: calculate the destination address. Index is not a smi.
  Register dst = index;
  leap(dst, Operand(object, index, times_pointer_size,
                    FixedArray::kHeaderSize - kHeapObjectTag));

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, Assembler::RelocInfoNone());
    Move(index, kZapValue, Assembler::RelocInfoNone());
void MacroAssembler::RecordWriteForMap(Register object,
                                       SaveFPRegsMode fp_mode) {
  DCHECK(!object.is(kScratchRegister));
  DCHECK(!object.is(map));
  DCHECK(!object.is(dst));
  DCHECK(!map.is(dst));
  AssertNotSmi(object);

  if (emit_debug_code()) {
    if (map.is(kScratchRegister)) pushq(map);
    CompareMap(map, isolate()->factory()->meta_map());
    if (map.is(kScratchRegister)) popq(map);
    j(equal, &ok, Label::kNear);

  if (!FLAG_incremental_marking) {

  if (emit_debug_code()) {
    if (map.is(kScratchRegister)) pushq(map);
    cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
    if (map.is(kScratchRegister)) popq(map);
    j(equal, &ok, Label::kNear);

  // Compute the address.
  leap(dst, FieldOperand(object, HeapObject::kMapOffset));

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.

  // A single check of the map's page's interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set. This optimization
  // relies on the fact that maps can never be in new space.
      map,  // Used as scratch.
      MemoryChunk::kPointersToHereAreInterestingMask,

  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,

  // Count the number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(dst, kZapValue, Assembler::RelocInfoNone());
    Move(map, kZapValue, Assembler::RelocInfoNone());
void MacroAssembler::RecordWrite(
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  DCHECK(!object.is(address));
  DCHECK(!value.is(address));
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {

  if (emit_debug_code()) {
    cmpp(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done);

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
      value,  // Used as scratch.
      MemoryChunk::kPointersToHereAreInterestingMask,

  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,

  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(address, kZapValue, Assembler::RelocInfoNone());
    Move(value, kZapValue, Assembler::RelocInfoNone());
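
// Typical call pattern (illustrative, simplified from real callers): after
// storing a pointer into a field, compute the slot address and run the
// barrier, e.g.
//
//   masm->movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rax);
//   masm->RecordWriteField(rbx, JSObject::kPropertiesOffset, rax, rcx,
//                          kDontSaveFPRegs);
//
// Here rcx is clobbered as the slot-address scratch, and under --debug-code
// both rax and rcx are zapped afterwards, so neither may stay live.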
void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
  if (emit_debug_code()) Check(cc, reason);

void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedDoubleArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedCOWArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);

void MacroAssembler::Check(Condition cc, BailoutReason reason) {
  j(cc, &L, Label::kNear);
  // Control will not return here.

void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    Label alignment_as_expected;
    testp(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if the stack is not aligned.
    bind(&alignment_as_expected);
void MacroAssembler::NegativeZeroTest(Register result,
  testl(result, result);
  j(not_zero, &ok, Label::kNear);

void MacroAssembler::Abort(BailoutReason reason) {
  const char* msg = GetBailoutReason(reason);
  RecordComment("Abort message: ");
  if (FLAG_trap_on_abort) {
  Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
       Assembler::RelocInfoNone());
  Push(kScratchRegister);
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 1);
    CallRuntime(Runtime::kAbort, 1);
  // Control will not return here.
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);

void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);

void MacroAssembler::StubReturn(int argc) {
  DCHECK(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);

bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();

void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  if (!hash.is(index)) {
  DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  Set(rax, num_arguments);
  LoadAddress(rbx, ExternalReference(f, isolate()));
  CEntryStub ces(isolate(), f->result_size, save_doubles);

void MacroAssembler::CallExternalReference(const ExternalReference& ext,
  Set(rax, num_arguments);
  LoadAddress(rbx, ext);
  CEntryStub stub(isolate(), 1);

void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
  // ----------- S t a t e -------------
  //  -- rsp[0]                 : return address
  //  -- rsp[8]                 : argument num_arguments - 1
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  // -----------------------------------

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  Set(rax, num_arguments);
  JumpToExternalReference(ext, result_size);

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
  TailCallExternalReference(ExternalReference(fid, isolate()),

static int Offset(ExternalReference ref0, ExternalReference ref1) {
  int64_t offset = (ref0.address() - ref1.address());
  // Check that the offset fits into an int.
  DCHECK(static_cast<int>(offset) == offset);
  return static_cast<int>(offset);
void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
  EnterApiExitFrame(arg_stack_space);

void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
    Register thunk_last_arg,
    Operand return_value_operand,
    Operand* context_restore_operand) {
  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;

  Factory* factory = isolate()->factory();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = Offset(
      ExternalReference::handle_scope_limit_address(isolate()),
  const int kLevelOffset = Offset(
      ExternalReference::handle_scope_level_address(isolate()),
  ExternalReference scheduled_exception_address =
      ExternalReference::scheduled_exception_address(isolate());

  DCHECK(rdx.is(function_address) || r8.is(function_address));
  // Allocate HandleScope in callee-save registers.
  Register prev_next_address_reg = r14;
  Register prev_limit_reg = rbx;
  Register base_reg = r15;
  Move(base_reg, next_address);
  movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
  movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
  addl(Operand(base_reg, kLevelOffset), Immediate(1));

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1);
    LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();

  Label profiler_disabled;
  Label end_profiler_check;
  Move(rax, ExternalReference::is_profiling_address(isolate()));
  cmpb(Operand(rax, 0), Immediate(0));
  j(zero, &profiler_disabled);

  // The third parameter is the address of the actual getter function.
  Move(thunk_last_arg, function_address);
  Move(rax, thunk_ref);
  jmp(&end_profiler_check);

  bind(&profiler_disabled);
  // The profiler is disabled, so call the api function directly.
  Move(rax, function_address);

  bind(&end_profiler_check);

  // Call the api function!

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1);
    LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();
  // Load the value from ReturnValue.
  movp(rax, return_value_operand);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  subl(Operand(base_reg, kLevelOffset), Immediate(1));
  movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
  cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
  j(not_equal, &delete_allocated_handles);
  bind(&leave_exit_frame);

  // Check if the function scheduled an exception.
  Move(rsi, scheduled_exception_address);
  Cmp(Operand(rsi, 0), factory->the_hole_value());
  j(not_equal, &promote_scheduled_exception);
  bind(&exception_handled);

#if ENABLE_EXTRA_CHECKS
  // Check if the function returned a valid JavaScript value.
  Register return_value = rax;
  JumpIfSmi(return_value, &ok, Label::kNear);
  movp(map, FieldOperand(return_value, HeapObject::kMapOffset));

  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
  j(below, &ok, Label::kNear);

  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
  j(above_equal, &ok, Label::kNear);

  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kTrueValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kFalseValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kNullValueRootIndex);
  j(equal, &ok, Label::kNear);

  Abort(kAPICallReturnedInvalidObject);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    movp(rsi, *context_restore_operand);
  LeaveApiExitFrame(!restore_context);
  ret(stack_space * kPointerSize);

  bind(&promote_scheduled_exception);
  FrameScope frame(this, StackFrame::INTERNAL);
  CallRuntime(Runtime::kPromoteScheduledException, 0);
  jmp(&exception_handled);

  // HandleScope limit has changed. Delete allocated extensions.
  bind(&delete_allocated_handles);
  movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
  movp(prev_limit_reg, rax);
  LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
      ExternalReference::delete_handle_scope_extensions(isolate()));
  movp(rax, prev_limit_reg);
  jmp(&leave_exit_frame);
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  CEntryStub ces(isolate(), result_size);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);

void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinEntry(rdx, id);
  InvokeCode(rdx, expected, expected, flag, call_wrapper);

void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into the target register.
  movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  movp(target, FieldOperand(target,
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));

void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  DCHECK(!target.is(rdi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(rdi, id);
  movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
#define REG(Name) { kRegister_ ## Name ## _Code }

static const Register saved_regs[] = {
  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
  REG(r9), REG(r10), REG(r11)

static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);

void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                     Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
  // r12 to r15 are callee-save on all platforms.
  if (fp_mode == kSaveFPRegs) {
    subp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movups(Operand(rsp, i * kSIMD128Size), reg);

void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
                                    Register exclusion3) {
  if (fp_mode == kSaveFPRegs) {
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movups(reg, Operand(rsp, i * kSIMD128Size));
    addp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {

void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {

void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
  } else if (r.IsUInteger8()) {
  } else if (r.IsInteger16()) {
  } else if (r.IsUInteger16()) {
  } else if (r.IsInteger32()) {

void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
  } else if (r.IsInteger16() || r.IsUInteger16()) {
  } else if (r.IsInteger32()) {
    if (r.IsHeapObject()) {
    } else if (r.IsSmi()) {

void MacroAssembler::Set(Register dst, int64_t x) {
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));

void MacroAssembler::Set(const Operand& dst, intptr_t x) {
  if (kPointerSize == kInt64Size) {
      movp(dst, Immediate(static_cast<int32_t>(x)));
      Set(kScratchRegister, x);
      movp(dst, kScratchRegister);
    movp(dst, Immediate(static_cast<int32_t>(x)));
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.

bool MacroAssembler::IsUnsafeInt(const int32_t x) {
  static const int kMaxBits = 17;
  return !is_intn(x, kMaxBits);

void MacroAssembler::SafeMove(Register dst, Smi* src) {
  DCHECK(!dst.is(kScratchRegister));
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(dst, kScratchRegister);
      DCHECK(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      movp(dst, Immediate(value ^ jit_cookie()));
      xorp(dst, Immediate(jit_cookie()));

void MacroAssembler::SafePush(Smi* src) {
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Push(Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(Operand(rsp, 0), kScratchRegister);
      DCHECK(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      Push(Immediate(value ^ jit_cookie()));
      xorp(Operand(rsp, 0), Immediate(jit_cookie()));
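
// Cookie example (illustrative): with jit_cookie() == c and an "unsafe" smi
// payload v (more than 17 significant bits), the 31-bit path above emits
//
//   Push(Immediate(v ^ c));                // raw constant never in the code
//   xorp(Operand(rsp, 0), Immediate(c));   // (v ^ c) ^ c == v on the stack
//
// so untrusted values cannot choose the immediate bytes embedded in the
// instruction stream (a JIT-spraying defense); safe values are pushed
// directly.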
Register MacroAssembler::GetSmiConstant(Smi* source) {
  int value = source->value();
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
    return kSmiConstantRegister;
  LoadSmiConstant(kScratchRegister, source);
  return kScratchRegister;

void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
  if (emit_debug_code()) {
    Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
         Assembler::RelocInfoNone());
    cmpp(dst, kSmiConstantRegister);
    Assert(equal, kUninitializedKSmiConstantRegister);
  int value = source->value();
  bool negative = value < 0;
  unsigned int uvalue = negative ? -value : value;
        Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
      leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
      leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
        Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
        Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
        Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
      movp(dst, kSmiConstantRegister);
  Move(dst, source, Assembler::RelocInfoNone());

void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  shlp(dst, Immediate(kSmiShift));
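
// Tagging arithmetic, for example (illustrative): with 32-bit smi values
// (kSmiShift == 32) the integer 5 becomes 0x0000000500000000, while with
// 31-bit smi values (kSmiShift == 1) it becomes 0xa (binary 1010). Either
// way the tag bit (kSmiTag == 0) ends up clear, which is exactly what the
// testb in CheckSmi inspects.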
void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    testb(dst, Immediate(0x01));
    j(zero, &ok, Label::kNear);
    Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
  if (SmiValuesAre32Bits()) {
    DCHECK(kSmiShift % kBitsPerByte == 0);
    movl(Operand(dst, kSmiShift / kBitsPerByte), src);
    DCHECK(SmiValuesAre31Bits());
    Integer32ToSmi(kScratchRegister, src);
    movp(dst, kScratchRegister);

void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
    addl(dst, Immediate(constant));
    leal(dst, Operand(src, constant));
  shlp(dst, Immediate(kSmiShift));

void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (SmiValuesAre32Bits()) {
    shrp(dst, Immediate(kSmiShift));
    DCHECK(SmiValuesAre31Bits());
    sarl(dst, Immediate(kSmiShift));

void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movl(dst, Operand(src, kSmiShift / kBitsPerByte));
    DCHECK(SmiValuesAre31Bits());
    sarl(dst, Immediate(kSmiShift));

void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  sarp(dst, Immediate(kSmiShift));
  if (kPointerSize == kInt32Size) {
    // Sign extend to 64-bit.

void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
    DCHECK(SmiValuesAre31Bits());
    SmiToInteger64(dst, dst);
void MacroAssembler::SmiTest(Register src) {

void MacroAssembler::SmiCompare(Register smi1, Register smi2) {

void MacroAssembler::SmiCompare(Register dst, Smi* src) {

void MacroAssembler::Cmp(Register dst, Smi* src) {
  DCHECK(!dst.is(kScratchRegister));
  if (src->value() == 0) {
    Register constant_reg = GetSmiConstant(src);
    cmpp(dst, constant_reg);

void MacroAssembler::SmiCompare(Register dst, const Operand& src) {

void MacroAssembler::SmiCompare(const Operand& dst, Register src) {

void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
    DCHECK(SmiValuesAre31Bits());
    cmpl(dst, Immediate(src));

void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  DCHECK(!dst.AddressUsesRegister(smi_reg));

void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
    DCHECK(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, dst);
    cmpl(kScratchRegister, src);
void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
    SmiToInteger64(dst, src);
  if (power < kSmiShift) {
    sarp(dst, Immediate(kSmiShift - power));
  } else if (power > kSmiShift) {
    shlp(dst, Immediate(power - kSmiShift));

void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
  DCHECK((0 <= power) && (power < 32));
    shrp(dst, Immediate(power + kSmiShift));
    UNIMPLEMENTED();  // Not used.

void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 Label::Distance near_jump) {
  if (dst.is(src1) || dst.is(src2)) {
    DCHECK(!src1.is(kScratchRegister));
    DCHECK(!src2.is(kScratchRegister));
    movp(kScratchRegister, src1);
    orp(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
    movp(dst, kScratchRegister);
  JumpIfNotSmi(dst, on_not_smis, near_jump);

Condition MacroAssembler::CheckSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));

Condition MacroAssembler::CheckSmi(const Operand& src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));

Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  // Test that both bits of the mask 0x8000000000000001 are zero.
  movp(kScratchRegister, src);
  rolp(kScratchRegister, Immediate(1));
  testb(kScratchRegister, Immediate(3));
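
// Bit trick spelled out (illustrative): the rolp moves the sign bit (bit 63)
// into bit 0 and the tag bit (bit 0) into bit 1, so testing against 3 sets
// the zero flag precisely when both the sign and the tag were clear, i.e.
// when src held a non-negative smi.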
Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
  if (SmiValuesAre32Bits()) {
    leal(kScratchRegister, Operand(first, second, times_1, 0));
    testb(kScratchRegister, Immediate(0x03));
    DCHECK(SmiValuesAre31Bits());
    movl(kScratchRegister, first);
    orl(kScratchRegister, second);
    testb(kScratchRegister, Immediate(kSmiTagMask));

Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
  if (first.is(second)) {
    return CheckNonNegativeSmi(first);
  movp(kScratchRegister, first);
  orp(kScratchRegister, second);
  rolp(kScratchRegister, Immediate(1));
  testl(kScratchRegister, Immediate(3));

Condition MacroAssembler::CheckEitherSmi(Register first,
  if (first.is(second)) {
    return CheckSmi(first);
  if (scratch.is(second)) {
    andl(scratch, first);
    if (!scratch.is(first)) {
      movl(scratch, first);
    andl(scratch, second);
  testb(scratch, Immediate(kSmiTagMask));

Condition MacroAssembler::CheckIsMinSmi(Register src) {
  DCHECK(!src.is(kScratchRegister));
  // If we overflow by subtracting one, it's the minimal smi value.
  cmpp(src, kSmiConstantRegister);

Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // A 32-bit integer value can always be converted to a smi.
    DCHECK(SmiValuesAre31Bits());
    cmpl(src, Immediate(0xc0000000));

Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // An unsigned 32-bit integer value is valid as long as the high bit
    DCHECK(SmiValuesAre31Bits());
    testl(src, Immediate(0xc0000000));

void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
    andl(dst, Immediate(kSmiTagMask));
    movl(dst, Immediate(kSmiTagMask));

void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
  if (!(src.AddressUsesRegister(dst))) {
    movl(dst, Immediate(kSmiTagMask));
  andl(dst, Immediate(kSmiTagMask));
void MacroAssembler::JumpIfValidSmiValue(Register src,
                                         Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(is_valid, on_valid, near_jump);

void MacroAssembler::JumpIfNotValidSmiValue(Register src,
                                            Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);

void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
                                             Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(is_valid, on_valid, near_jump);

void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
                                                Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);

void MacroAssembler::JumpIfSmi(Register src,
                               Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(smi, on_smi, near_jump);

void MacroAssembler::JumpIfNotSmi(Register src,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);

void MacroAssembler::JumpUnlessNonNegativeSmi(
    Register src, Label* on_not_smi_or_negative,
    Label::Distance near_jump) {
  Condition non_negative_smi = CheckNonNegativeSmi(src);
  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);

void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             Label::Distance near_jump) {
  SmiCompare(src, constant);
  j(equal, on_equals, near_jump);

void MacroAssembler::JumpIfNotBothSmi(Register src1,
                                      Label* on_not_both_smi,
                                      Label::Distance near_jump) {
  Condition both_smi = CheckBothSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);

void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
                                                  Label* on_not_both_smi,
                                                  Label::Distance near_jump) {
  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    switch (constant->value()) {
        addp(dst, kSmiConstantRegister);
        leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        Register constant_reg = GetSmiConstant(constant);
        addp(dst, constant_reg);
    switch (constant->value()) {
        leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
        leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        LoadSmiConstant(dst, constant);

void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
  if (constant->value() != 0) {
    if (SmiValuesAre32Bits()) {
      addl(Operand(dst, kSmiShift / kBitsPerByte),
           Immediate(constant->value()));
      DCHECK(SmiValuesAre31Bits());
      addp(dst, Immediate(constant));

void MacroAssembler::SmiAddConstant(Register dst,
                                    SmiOperationExecutionMode mode,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    addp(dst, kScratchRegister);
    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
      j(no_overflow, bailout_label, near_jump);
      DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
      subp(dst, kScratchRegister);
    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
        j(no_overflow, &done, Label::kNear);
        subp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        // Bail out on overflow without preserving src.
        j(overflow, bailout_label, near_jump);
      CHECK(mode.IsEmpty());
    DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
    DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
    LoadSmiConstant(dst, constant);
    j(overflow, bailout_label, near_jump);
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    subp(dst, constant_reg);
    if (constant->value() == Smi::kMinValue) {
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result, it only
      // differs on the overflow bit, which we don't check here.
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));

void MacroAssembler::SmiSubConstant(Register dst,
                                    SmiOperationExecutionMode mode,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    subp(dst, kScratchRegister);
    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
      j(no_overflow, bailout_label, near_jump);
      DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
      addp(dst, kScratchRegister);
    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
        j(no_overflow, &done, Label::kNear);
        addp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        // Bail out on overflow without preserving src.
        j(overflow, bailout_label, near_jump);
      CHECK(mode.IsEmpty());
    DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
    DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
    if (constant->value() == Smi::kMinValue) {
      DCHECK(!dst.is(kScratchRegister));
      LoadSmiConstant(kScratchRegister, constant);
      subp(dst, kScratchRegister);
      j(overflow, bailout_label, near_jump);
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
      j(overflow, bailout_label, near_jump);
void MacroAssembler::SmiNeg(Register dst,
                            Label* on_smi_result,
                            Label::Distance near_jump) {
    DCHECK(!dst.is(kScratchRegister));
    movp(kScratchRegister, src);
    negp(dst);  // Low 32 bits are retained as zero by negation.
    // Test if result is zero or Smi::kMinValue.
    cmpp(dst, kScratchRegister);
    j(not_equal, on_smi_result, near_jump);
    movp(src, kScratchRegister);
    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
    j(not_equal, on_smi_result, near_jump);

static void SmiAddHelper(MacroAssembler* masm,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
    masm->addp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    masm->subp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->movp(dst, src1);
    masm->addp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);

void MacroAssembler::SmiAdd(Register dst,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!dst.is(src2));
  SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);

void MacroAssembler::SmiAdd(Register dst,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!src2.AddressUsesRegister(dst));
  SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);

void MacroAssembler::SmiAdd(Register dst,
  // No overflow checking. Use only when it's known that
  // overflowing is impossible.
  if (!dst.is(src1)) {
    if (emit_debug_code()) {
      movp(kScratchRegister, src1);
      addp(kScratchRegister, src2);
      Check(no_overflow, kSmiAdditionOverflow);
    leap(dst, Operand(src1, src2, times_1, 0));
    Assert(no_overflow, kSmiAdditionOverflow);
static void SmiSubHelper(MacroAssembler* masm,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
    masm->subp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    masm->addp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->movp(dst, src1);
    masm->subp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);

void MacroAssembler::SmiSub(Register dst,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!dst.is(src2));
  SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);

void MacroAssembler::SmiSub(Register dst,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!src2.AddressUsesRegister(dst));
  SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);

static void SmiSubNoOverflowHelper(MacroAssembler* masm,
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  if (!dst.is(src1)) {
    masm->movp(dst, src1);
  masm->subp(dst, src2);
  masm->Assert(no_overflow, kSmiSubtractionOverflow);

void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
  DCHECK(!dst.is(src2));
  SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);

void MacroAssembler::SmiSub(Register dst,
                            const Operand& src2) {
  SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
void MacroAssembler::SmiMul(Register dst,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!dst.is(src2));
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));

    Label failure, zero_correct_result;
    movp(kScratchRegister, src1);  // Create backup for later testing.
    SmiToInteger64(dst, src1);
    j(overflow, &failure, Label::kNear);

    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    j(not_zero, &correct_result, Label::kNear);
    movp(dst, kScratchRegister);
    // Result was positive zero.
    j(positive, &zero_correct_result, Label::kNear);

    bind(&failure);  // Reused failure exit, restores src1.
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);

    bind(&zero_correct_result);
    bind(&correct_result);
    SmiToInteger64(dst, src1);
    j(overflow, on_not_smi_result, near_jump);
    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    j(not_zero, &correct_result, Label::kNear);
    // One of src1 and src2 is zero, so check whether the other is
    movp(kScratchRegister, src1);
    xorp(kScratchRegister, src2);
    j(negative, on_not_smi_result, near_jump);
    bind(&correct_result);
void MacroAssembler::SmiDiv(Register dst,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src2.is(rax));
  DCHECK(!src2.is(rdx));
  DCHECK(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  j(zero, on_not_smi_result, near_jump);

  movp(kScratchRegister, src1);
  SmiToInteger32(rax, src1);
  // We need to rule out dividing Smi::kMinValue by -1, since that would
  // overflow in idiv and raise an exception.
  // We combine this with negative zero test (negative zero only happens
  // when dividing zero by a negative number).
  // We overshoot a little and go to slow case if we divide min-value
  // by any negative value, not just -1.
  testl(rax, Immediate(~Smi::kMinValue));
  j(not_zero, &safe_div, Label::kNear);
  j(positive, &safe_div, Label::kNear);
  movp(src1, kScratchRegister);
  jmp(on_not_smi_result, near_jump);
  j(negative, on_not_smi_result, near_jump);

  SmiToInteger32(src2, src2);
  // Sign extend src1 into edx:eax.
  Integer32ToSmi(src2, src2);
  // Check that the remainder is zero.
  j(zero, &smi_result, Label::kNear);
  movp(src1, kScratchRegister);
  jmp(on_not_smi_result, near_jump);
  j(not_zero, on_not_smi_result, near_jump);
  if (!dst.is(src1) && src1.is(rax)) {
    movp(src1, kScratchRegister);
  Integer32ToSmi(dst, rax);
void MacroAssembler::SmiMod(Register dst,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!src2.is(rax));
  DCHECK(!src2.is(rdx));
  DCHECK(!src1.is(rdx));
  DCHECK(!src1.is(src2));

  j(zero, on_not_smi_result, near_jump);

  movp(kScratchRegister, src1);
  SmiToInteger32(rax, src1);
  SmiToInteger32(src2, src2);

  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
  cmpl(rax, Immediate(Smi::kMinValue));
  j(not_equal, &safe_div, Label::kNear);
  cmpl(src2, Immediate(-1));
  j(not_equal, &safe_div, Label::kNear);
  // Retag the inputs and go to the slow case.
  Integer32ToSmi(src2, src2);
  movp(src1, kScratchRegister);
  jmp(on_not_smi_result, near_jump);

  // Sign extend eax into edx:eax.
  // Restore smi tags on inputs.
  Integer32ToSmi(src2, src2);
  movp(src1, kScratchRegister);
  // Check for a negative zero result. If the result is zero, and the
  // dividend is negative, go slow to return a floating point negative zero.
  j(not_zero, &smi_result, Label::kNear);
  j(negative, on_not_smi_result, near_jump);
  Integer32ToSmi(dst, rdx);
void MacroAssembler::SmiNot(Register dst, Register src) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src.is(kScratchRegister));
  if (SmiValuesAre32Bits()) {
    // Set tag and padding bits before negating, so that they are zero
    movl(kScratchRegister, Immediate(~0));
    DCHECK(SmiValuesAre31Bits());
    movl(kScratchRegister, Immediate(1));
    xorp(dst, kScratchRegister);
    leap(dst, Operand(src, kScratchRegister, times_1, 0));
void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
  DCHECK(!dst.is(src2));
  if (!dst.is(src1)) {

void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    andp(dst, constant_reg);
    LoadSmiConstant(dst, constant);

void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    DCHECK(!src1.is(src2));

void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    orp(dst, constant_reg);
    LoadSmiConstant(dst, constant);

void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    DCHECK(!src1.is(src2));

void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    xorp(dst, constant_reg);
    LoadSmiConstant(dst, constant);
void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
  DCHECK(is_uint5(shift_value));
  if (shift_value > 0) {
      sarp(dst, Immediate(shift_value + kSmiShift));
      shlp(dst, Immediate(kSmiShift));
      UNIMPLEMENTED();  // Not used.

void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  if (SmiValuesAre32Bits()) {
    if (shift_value > 0) {
      // The shift amount is specified by the lower 5 bits, not six as for the
      // shl opcode.
      shlq(dst, Immediate(shift_value & 0x1f));
    DCHECK(SmiValuesAre31Bits());
      UNIMPLEMENTED();  // Not used.
      SmiToInteger32(dst, src);
      shll(dst, Immediate(shift_value));
      JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
      Integer32ToSmi(dst, dst);
void MacroAssembler::SmiShiftLogicalRightConstant(
    Register dst, Register src, int shift_value,
    Label* on_not_smi_result, Label::Distance near_jump) {
  // A logical right shift interprets its result as an *unsigned* number.
    UNIMPLEMENTED();  // Not used.
    if (shift_value == 0) {
      j(negative, on_not_smi_result, near_jump);
    if (SmiValuesAre32Bits()) {
      shrp(dst, Immediate(shift_value + kSmiShift));
      shlp(dst, Immediate(kSmiShift));
      DCHECK(SmiValuesAre31Bits());
      SmiToInteger32(dst, src);
      shrp(dst, Immediate(shift_value));
      JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
      Integer32ToSmi(dst, dst);
void MacroAssembler::SmiShiftLeft(Register dst,
                                  Label* on_not_smi_result,
                                  Label::Distance near_jump) {
  if (SmiValuesAre32Bits()) {
    DCHECK(!dst.is(rcx));
    if (!dst.is(src1)) {
    // Untag the shift amount.
    SmiToInteger32(rcx, src2);
    // The shift amount is specified by the lower 5 bits, not six as for the
    // shl opcode.
    andp(rcx, Immediate(0x1f));
    DCHECK(SmiValuesAre31Bits());
    DCHECK(!dst.is(kScratchRegister));
    DCHECK(!src1.is(kScratchRegister));
    DCHECK(!src2.is(kScratchRegister));
    DCHECK(!dst.is(src2));
    DCHECK(!dst.is(rcx));

    if (src1.is(rcx) || src2.is(rcx)) {
      movq(kScratchRegister, rcx);
      UNIMPLEMENTED();  // Not used.
      SmiToInteger32(dst, src1);
      SmiToInteger32(rcx, src2);
      JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
      // As neither src1 nor src2 can be dst, we do not need to restore them for
      if (src1.is(rcx) || src2.is(rcx)) {
        movq(src1, kScratchRegister);
        movq(src2, kScratchRegister);
      jmp(on_not_smi_result, near_jump);
      bind(&valid_result);
      Integer32ToSmi(dst, dst);
void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(src2));
  DCHECK(!dst.is(rcx));
  if (src1.is(rcx) || src2.is(rcx)) {
    movq(kScratchRegister, rcx);
    UNIMPLEMENTED();  // Not used.
    SmiToInteger32(dst, src1);
    SmiToInteger32(rcx, src2);
    JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
    // As neither src1 nor src2 can be dst, we do not need to restore them for
    if (src1.is(rcx) || src2.is(rcx)) {
      movq(src1, kScratchRegister);
      movq(src2, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
    bind(&valid_result);
    Integer32ToSmi(dst, dst);
void MacroAssembler::SmiShiftArithmeticRight(Register dst,
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(rcx));
  SmiToInteger32(rcx, src2);
  if (!dst.is(src1)) {
  SmiToInteger32(dst, dst);
  Integer32ToSmi(dst, dst);
2354 void MacroAssembler::SelectNonSmi(Register dst,
2358 Label::Distance near_jump) {
2359 DCHECK(!dst.is(kScratchRegister));
2360 DCHECK(!src1.is(kScratchRegister));
2361 DCHECK(!src2.is(kScratchRegister));
2362 DCHECK(!dst.is(src1));
2363 DCHECK(!dst.is(src2));
2364 // Both operands must not be smis.
2366 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2367 Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
2369 STATIC_ASSERT(kSmiTag == 0);
2370 DCHECK_EQ(0, Smi::FromInt(0));
2371 movl(kScratchRegister, Immediate(kSmiTagMask));
2372 andp(kScratchRegister, src1);
2373 testl(kScratchRegister, src2);
2374 // If non-zero, then both operands are heap objects (neither is a smi).
2375 j(not_zero, on_not_smis, near_jump);
2377 // Exactly one operand is a smi.
2378 DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
2379 // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
2380 subp(kScratchRegister, Immediate(1));
2381 // If src1 is a smi, the scratch register is now all 1s; otherwise all 0s.
2382 movp(dst, src2);
2383 xorp(dst, src1);
2384 andp(dst, kScratchRegister);
2385 // If src1 is a smi, dst holds src1 ^ src2; otherwise it is zero.
2386 xorp(dst, src1);
2387 // If src1 is a smi, dst is now src2; otherwise it is src1, i.e., the non-smi.
2388 }
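// Worked example of the branch-free select above (kSmiTagMask == 1):
//   src1 smi (tag 0), src2 heap object (tag 1):
//     scratch = (src1 & 1) - 1 = ~0  ->  dst = ((src2 ^ src1) & ~0) ^ src1 = src2
//   src1 heap object (tag 1), src2 smi (tag 0):
//     scratch = (src1 & 1) - 1 = 0   ->  dst = ((src2 ^ src1) & 0) ^ src1 = src1
// Either way dst ends up holding the operand that is not a smi.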
2391 SmiIndex MacroAssembler::SmiToIndex(Register dst,
2392 Register src,
2393 int shift) {
2394 if (SmiValuesAre32Bits()) {
2395 DCHECK(is_uint6(shift));
2396 // There is a possible optimization if shift is in the range 60-63, but that
2397 // will (and must) never happen.
2398 if (!dst.is(src)) {
2399 movp(dst, src);
2400 }
2401 if (shift < kSmiShift) {
2402 sarp(dst, Immediate(kSmiShift - shift));
2403 } else {
2404 shlp(dst, Immediate(shift - kSmiShift));
2405 }
2406 return SmiIndex(dst, times_1);
2407 } else {
2408 DCHECK(SmiValuesAre31Bits());
2409 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2410 if (!dst.is(src)) {
2411 movp(dst, src);
2412 }
2413 // We have to sign-extend the index register to 64 bits, as the smi
2414 // might be negative.
2415 movsxlq(dst, dst);
2416 if (shift == times_1) {
2417 sarq(dst, Immediate(kSmiShift));
2418 return SmiIndex(dst, times_1);
2420 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2421 }
2422 }
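// Index computation in a nutshell: for 32-bit smis the payload lives in the
// upper 32 bits of the register (kSmiShift == 32), so "value << shift" is
// obtained with a single arithmetic shift by (kSmiShift - shift). For 31-bit
// smis (kSmiShift == 1) a times_1 scale untags with one sarq, while larger
// scales reuse the tag bit by returning the next-smaller ScaleFactor,
// because a tagged smi already equals value * 2.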
2425 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2426 Register src,
2427 int shift) {
2428 if (SmiValuesAre32Bits()) {
2429 // Register src holds a positive smi.
2430 DCHECK(is_uint6(shift));
2431 if (!dst.is(src)) {
2432 movp(dst, src);
2433 }
2434 negp(dst);
2435 if (shift < kSmiShift) {
2436 sarp(dst, Immediate(kSmiShift - shift));
2437 } else {
2438 shlp(dst, Immediate(shift - kSmiShift));
2439 }
2440 return SmiIndex(dst, times_1);
2441 } else {
2442 DCHECK(SmiValuesAre31Bits());
2443 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2444 if (!dst.is(src)) {
2445 movp(dst, src);
2446 }
2447 negq(dst);
2448 if (shift == times_1) {
2449 sarq(dst, Immediate(kSmiShift));
2450 return SmiIndex(dst, times_1);
2452 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2453 }
2454 }
2457 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2458 if (SmiValuesAre32Bits()) {
2459 DCHECK_EQ(0, kSmiShift % kBitsPerByte);
2460 addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2461 } else {
2462 DCHECK(SmiValuesAre31Bits());
2463 SmiToInteger32(kScratchRegister, src);
2464 addl(dst, kScratchRegister);
2465 }
2466 }
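// Why the 32-bit-smi path can use addl on the operand directly: the payload
// occupies the upper half of the 64-bit field, so the untagged value can be
// read as a plain int32 at byte offset kSmiShift / kBitsPerByte (= 4) with
// no shifting. With 31-bit smis the value shares the low word with the tag
// bit, so it is untagged into kScratchRegister first.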
2469 void MacroAssembler::Push(Smi* source) {
2470 intptr_t smi = reinterpret_cast<intptr_t>(source);
2471 if (is_int32(smi)) {
2472 Push(Immediate(static_cast<int32_t>(smi)));
2473 } else {
2474 Register constant = GetSmiConstant(source);
2475 Push(constant);
2476 }
2477 }
2480 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
2481 DCHECK(!src.is(scratch));
2482 movp(scratch, src);
2483 // High bits.
2484 shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2485 shlp(src, Immediate(kSmiShift));
2486 Push(src);
2487 // Low bits.
2488 shlp(scratch, Immediate(kSmiShift));
2489 Push(scratch);
2490 }
2493 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
2494 DCHECK(!dst.is(scratch));
2495 Pop(scratch);
2496 // Low bits.
2497 shrp(scratch, Immediate(kSmiShift));
2498 Pop(dst);
2499 shrp(dst, Immediate(kSmiShift));
2500 // High bits.
2501 shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2502 orp(dst, scratch);
2503 }
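// These two helpers round-trip an arbitrary machine word through the stack
// as a pair of smis: the push splits the word into its high kSmiShift bits
// and the remaining low bits, tagging each half as a smi; the pop shifts the
// halves back into place and ors them together. The raw bits travel the
// stack disguised as valid smis, so the GC cannot misinterpret them as
// pointers.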
2506 void MacroAssembler::Test(const Operand& src, Smi* source) {
2507 if (SmiValuesAre32Bits()) {
2508 testl(Operand(src, kIntSize), Immediate(source->value()));
2509 } else {
2510 DCHECK(SmiValuesAre31Bits());
2511 testl(src, Immediate(source));
2512 }
2513 }
2516 // ----------------------------------------------------------------------------
2519 void MacroAssembler::LookupNumberStringCache(Register object,
2520 Register result,
2521 Register scratch1,
2522 Register scratch2,
2523 Label* not_found) {
2524 // Use of registers. Register result is used as a temporary.
2525 Register number_string_cache = result;
2526 Register mask = scratch1;
2527 Register scratch = scratch2;
2529 // Load the number string cache.
2530 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2532 // Make the hash mask from the length of the number string cache. It
2533 // contains two elements (number and string) for each cache entry.
2534 SmiToInteger32(
2535 mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2536 shrl(mask, Immediate(1));
2537 subp(mask, Immediate(1)); // Make mask.
2539 // Calculate the entry in the number string cache. The hash value in the
2540 // number string cache for smis is just the smi value, and the hash for
2541 // doubles is the xor of the upper and lower words. See
2542 // Heap::GetNumberStringCache.
2543 Label is_smi;
2544 Label load_result_from_cache;
2545 JumpIfSmi(object, &is_smi);
2546 CheckMap(object,
2547 isolate()->factory()->heap_number_map(),
2548 not_found,
2549 DONT_DO_SMI_CHECK);
2551 STATIC_ASSERT(8 == kDoubleSize);
2552 movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2553 xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2554 andp(scratch, mask);
2555 // Each entry in the string cache consists of two pointer-sized fields,
2556 // but the times_twice_pointer_size (multiply by 16) scale factor is
2557 // not supported by the addressing mode on the x64 platform.
2558 // So we premultiply the entry index before the lookup.
2559 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2561 Register index = scratch;
2562 Register probe = mask;
2563 movp(probe,
2564 FieldOperand(number_string_cache,
2565 index,
2566 times_1,
2567 FixedArray::kHeaderSize));
2568 JumpIfSmi(probe, not_found);
2569 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2570 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
2571 j(parity_even, not_found); // Bail out if NaN is involved.
2572 j(not_equal, not_found); // The cache did not contain this value.
2573 jmp(&load_result_from_cache);
2575 bind(&is_smi);
2576 SmiToInteger32(scratch, object);
2577 andp(scratch, mask);
2578 // Each entry in the string cache consists of two pointer-sized fields,
2579 // but the times_twice_pointer_size (multiply by 16) scale factor is
2580 // not supported by the addressing mode on the x64 platform.
2581 // So we premultiply the entry index before the lookup.
2582 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2584 // Check if the entry is the smi we are looking for.
2585 cmpp(object,
2586 FieldOperand(number_string_cache,
2587 index,
2588 times_1,
2589 FixedArray::kHeaderSize));
2590 j(not_equal, not_found);
2592 // Get the result from the cache.
2593 bind(&load_result_from_cache);
2594 movp(result,
2595 FieldOperand(number_string_cache,
2596 index,
2597 times_1,
2598 FixedArray::kHeaderSize + kPointerSize));
2599 IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
2600 }
2603 void MacroAssembler::absps(XMMRegister dst) {
2604 static const struct V8_ALIGNED(16) {
2605 uint32_t a;
2606 uint32_t b;
2607 uint32_t c;
2608 uint32_t d;
2609 } float_absolute_constant =
2610 { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
2611 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_absolute_constant));
2612 andps(dst, Operand(kScratchRegister, 0));
2616 void MacroAssembler::abspd(XMMRegister dst) {
2617 static const struct V8_ALIGNED(16) {
2618 uint64_t a;
2619 uint64_t b;
2620 } double_absolute_constant =
2621 { V8_UINT64_C(0x7FFFFFFFFFFFFFFF), V8_UINT64_C(0x7FFFFFFFFFFFFFFF) };
2622 Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_absolute_constant));
2623 andpd(dst, Operand(kScratchRegister, 0));
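// absps/abspd implement fabs by masking off the IEEE-754 sign bit: each lane
// is anded with 0x7FFFFFFF (floats) or 0x7FFFFFFFFFFFFFFF (doubles). For
// example, -1.5f has the bit pattern 0xBFC00000; anding with 0x7FFFFFFF
// yields 0x3FC00000, which is +1.5f. The constants are 16-byte aligned so
// they can be used directly as 128-bit SSE memory operands.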
2627 void MacroAssembler::negateps(XMMRegister dst) {
2628 static const struct V8_ALIGNED(16) {
2629 uint32_t a;
2630 uint32_t b;
2631 uint32_t c;
2632 uint32_t d;
2633 } float_negate_constant =
2634 { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
2635 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_negate_constant));
2636 xorps(dst, Operand(kScratchRegister, 0));
2640 void MacroAssembler::negatepd(XMMRegister dst) {
2641 static const struct V8_ALIGNED(16) {
2642 uint64_t a;
2643 uint64_t b;
2644 } double_negate_constant =
2645 { V8_UINT64_C(0x8000000000000000), V8_UINT64_C(0x8000000000000000) };
2646 Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_negate_constant));
2647 xorpd(dst, Operand(kScratchRegister, 0));
2651 void MacroAssembler::notps(XMMRegister dst) {
2652 static const struct V8_ALIGNED(16) {
2653 uint32_t a;
2654 uint32_t b;
2655 uint32_t c;
2656 uint32_t d;
2657 } float_not_constant =
2658 { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
2659 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_not_constant));
2660 xorps(dst, Operand(kScratchRegister, 0));
2664 void MacroAssembler::pnegd(XMMRegister dst) {
2665 static const struct V8_ALIGNED(16) {
2666 uint32_t a;
2667 uint32_t b;
2668 uint32_t c;
2669 uint32_t d;
2670 } int32_one_constant = { 0x1, 0x1, 0x1, 0x1 };
2671 notps(dst); // First flip every bit: dst = ~dst.
2672 Set(kScratchRegister, reinterpret_cast<intptr_t>(&int32_one_constant));
2673 paddd(dst, Operand(kScratchRegister, 0));
2674 }
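// pnegd negates each 32-bit lane via the two's-complement identity
// -x == ~x + 1: notps flips all bits and paddd adds 1 per lane. E.g. for the
// lane value 5 (0x00000005), ~x is 0xFFFFFFFA and adding 1 gives 0xFFFFFFFB,
// which is -5 as a signed 32-bit integer.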
2678 void MacroAssembler::JumpIfNotString(Register object,
2679 Register object_map,
2680 Label* not_string,
2681 Label::Distance near_jump) {
2682 Condition is_smi = CheckSmi(object);
2683 j(is_smi, not_string, near_jump);
2684 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2685 j(above_equal, not_string, near_jump);
2689 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
2690 Register first_object, Register second_object, Register scratch1,
2691 Register scratch2, Label* on_fail, Label::Distance near_jump) {
2692 // Check that both objects are not smis.
2693 Condition either_smi = CheckEitherSmi(first_object, second_object);
2694 j(either_smi, on_fail, near_jump);
2696 // Load instance type for both strings.
2697 movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2698 movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2699 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2700 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2702 // Check that both are flat one-byte strings.
2703 DCHECK(kNotStringTag != 0);
2704 const int kFlatOneByteStringMask =
2705 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2706 const int kFlatOneByteStringTag =
2707 kStringTag | kOneByteStringTag | kSeqStringTag;
2709 andl(scratch1, Immediate(kFlatOneByteStringMask));
2710 andl(scratch2, Immediate(kFlatOneByteStringMask));
2711 // Interleave the bits to check both scratch1 and scratch2 in one test.
2712 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2713 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2714 cmpl(scratch1,
2715 Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2716 j(not_equal, on_fail, near_jump);
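// How the combined check works: each instance type is first masked down to
// the 3 relevant bits. The leap computes scratch1 + scratch2 * 8, which
// parks the two masked values in disjoint bit ranges (bits 0-2 and 3-5) of
// one register, so a single cmpl against
// kFlatOneByteStringTag + (kFlatOneByteStringTag << 3) tests both strings at
// once. The DCHECK above guarantees the mask leaves bits 3 and up clear, so
// the addition cannot carry between the two fields.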
2720 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
2721 Register instance_type, Register scratch, Label* failure,
2722 Label::Distance near_jump) {
2723 if (!scratch.is(instance_type)) {
2724 movl(scratch, instance_type);
2727 const int kFlatOneByteStringMask =
2728 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2730 andl(scratch, Immediate(kFlatOneByteStringMask));
2731 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2732 j(not_equal, failure, near_jump);
2736 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2737 Register first_object_instance_type, Register second_object_instance_type,
2738 Register scratch1, Register scratch2, Label* on_fail,
2739 Label::Distance near_jump) {
2740 // Load instance type for both strings.
2741 movp(scratch1, first_object_instance_type);
2742 movp(scratch2, second_object_instance_type);
2744 // Check that both are flat one-byte strings.
2745 DCHECK(kNotStringTag != 0);
2746 const int kFlatOneByteStringMask =
2747 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2748 const int kFlatOneByteStringTag =
2749 kStringTag | kOneByteStringTag | kSeqStringTag;
2751 andl(scratch1, Immediate(kFlatOneByteStringMask));
2752 andl(scratch2, Immediate(kFlatOneByteStringMask));
2753 // Interleave the bits to check both scratch1 and scratch2 in one test.
2754 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2755 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2756 cmpl(scratch1,
2757 Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2758 j(not_equal, on_fail, near_jump);
2762 template <typename T>
2763 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2764 T operand_or_register,
2765 Label* not_unique_name,
2766 Label::Distance distance) {
2767 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2768 Label succeed;
2769 masm->testb(operand_or_register,
2770 Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2771 masm->j(zero, &succeed, Label::kNear);
2772 masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2773 masm->j(not_equal, not_unique_name, distance);
2775 masm->bind(&succeed);
2779 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
2780 Label* not_unique_name,
2781 Label::Distance distance) {
2782 JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2786 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2787 Label* not_unique_name,
2788 Label::Distance distance) {
2789 JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2793 void MacroAssembler::Move(Register dst, Register src) {
2794 if (!dst.is(src)) {
2795 movp(dst, src);
2796 }
2797 }
2800 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2801 AllowDeferredHandleDereference smi_check;
2802 if (source->IsSmi()) {
2803 Move(dst, Smi::cast(*source));
2805 MoveHeapObject(dst, source);
2810 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2811 AllowDeferredHandleDereference smi_check;
2812 if (source->IsSmi()) {
2813 Move(dst, Smi::cast(*source));
2815 MoveHeapObject(kScratchRegister, source);
2816 movp(dst, kScratchRegister);
2821 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2822 AllowDeferredHandleDereference smi_check;
2823 if (source->IsSmi()) {
2824 Cmp(dst, Smi::cast(*source));
2826 MoveHeapObject(kScratchRegister, source);
2827 cmpp(dst, kScratchRegister);
2832 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2833 AllowDeferredHandleDereference smi_check;
2834 if (source->IsSmi()) {
2835 Cmp(dst, Smi::cast(*source));
2837 MoveHeapObject(kScratchRegister, source);
2838 cmpp(dst, kScratchRegister);
2843 void MacroAssembler::Push(Handle<Object> source) {
2844 AllowDeferredHandleDereference smi_check;
2845 if (source->IsSmi()) {
2846 Push(Smi::cast(*source));
2848 MoveHeapObject(kScratchRegister, source);
2849 Push(kScratchRegister);
2854 void MacroAssembler::MoveHeapObject(Register result,
2855 Handle<Object> object) {
2856 AllowDeferredHandleDereference using_raw_address;
2857 DCHECK(object->IsHeapObject());
2858 if (isolate()->heap()->InNewSpace(*object)) {
2859 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2860 Move(result, cell, RelocInfo::CELL);
2861 movp(result, Operand(result, 0));
2863 Move(result, object, RelocInfo::EMBEDDED_OBJECT);
2868 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
2869 if (dst.is(rax)) {
2870 AllowDeferredHandleDereference embedding_raw_address;
2871 load_rax(cell.location(), RelocInfo::CELL);
2872 } else {
2873 Move(dst, cell, RelocInfo::CELL);
2874 movp(dst, Operand(dst, 0));
2875 }
2876 }
2879 void MacroAssembler::Drop(int stack_elements) {
2880 if (stack_elements > 0) {
2881 addp(rsp, Immediate(stack_elements * kPointerSize));
2886 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
2888 DCHECK(stack_elements > 0);
2889 if (kPointerSize == kInt64Size && stack_elements == 1) {
2890 popq(MemOperand(rsp, 0));
2891 return;
2892 }
2894 PopReturnAddressTo(scratch);
2895 Drop(stack_elements);
2896 PushReturnAddressFrom(scratch);
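// The single-element fast path above is a neat stack trick: popq with the
// destination MemOperand(rsp, 0) first pops the return address (rsp += 8)
// and then stores it at the *new* top of stack, overwriting the one element
// that sat under it. One instruction thus drops the element while keeping
// the return address in place.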
2900 void MacroAssembler::Push(Register src) {
2901 if (kPointerSize == kInt64Size) {
2902 pushq(src);
2903 } else {
2904 // x32 uses 64-bit push for rbp in the prologue.
2905 DCHECK(src.code() != rbp.code());
2906 leal(rsp, Operand(rsp, -4));
2907 movp(Operand(rsp, 0), src);
2912 void MacroAssembler::Push(const Operand& src) {
2913 if (kPointerSize == kInt64Size) {
2914 pushq(src);
2915 } else {
2916 movp(kScratchRegister, src);
2917 leal(rsp, Operand(rsp, -4));
2918 movp(Operand(rsp, 0), kScratchRegister);
2923 void MacroAssembler::PushQuad(const Operand& src) {
2924 if (kPointerSize == kInt64Size) {
2925 pushq(src);
2926 } else {
2927 movp(kScratchRegister, src);
2928 pushq(kScratchRegister);
2933 void MacroAssembler::Push(Immediate value) {
2934 if (kPointerSize == kInt64Size) {
2935 pushq(value);
2936 } else {
2937 leal(rsp, Operand(rsp, -4));
2938 movp(Operand(rsp, 0), value);
2943 void MacroAssembler::PushImm32(int32_t imm32) {
2944 if (kPointerSize == kInt64Size) {
2945 pushq_imm32(imm32);
2946 } else {
2947 leal(rsp, Operand(rsp, -4));
2948 movp(Operand(rsp, 0), Immediate(imm32));
2953 void MacroAssembler::Pop(Register dst) {
2954 if (kPointerSize == kInt64Size) {
2955 popq(dst);
2956 } else {
2957 // x32 uses 64-bit pop for rbp in the epilogue.
2958 DCHECK(dst.code() != rbp.code());
2959 movp(dst, Operand(rsp, 0));
2960 leal(rsp, Operand(rsp, 4));
2965 void MacroAssembler::Pop(const Operand& dst) {
2966 if (kPointerSize == kInt64Size) {
2967 popq(dst);
2968 } else {
2969 Register scratch = dst.AddressUsesRegister(kScratchRegister)
2970 ? kSmiConstantRegister : kScratchRegister;
2971 movp(scratch, Operand(rsp, 0));
2972 movp(dst, scratch);
2973 leal(rsp, Operand(rsp, 4));
2974 if (scratch.is(kSmiConstantRegister)) {
2975 // Restore kSmiConstantRegister.
2976 movp(kSmiConstantRegister,
2977 reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
2978 Assembler::RelocInfoNone());
2984 void MacroAssembler::PopQuad(const Operand& dst) {
2985 if (kPointerSize == kInt64Size) {
2986 popq(dst);
2987 } else {
2988 popq(kScratchRegister);
2989 movp(dst, kScratchRegister);
2994 void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
2995 Register base,
2996 int offset) {
2997 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
2998 offset <= SharedFunctionInfo::kSize &&
2999 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
3000 if (kPointerSize == kInt64Size) {
3001 movsxlq(dst, FieldOperand(base, offset));
3003 movp(dst, FieldOperand(base, offset));
3004 SmiToInteger32(dst, dst);
3009 void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
3010 int offset,
3011 int bits) {
3012 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
3013 offset <= SharedFunctionInfo::kSize &&
3014 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
3015 if (kPointerSize == kInt32Size) {
3016 // On x32, this field is represented as a smi.
3017 bits += kSmiShift;
3018 }
3019 int byte_offset = bits / kBitsPerByte;
3020 int bit_in_byte = bits & (kBitsPerByte - 1);
3021 testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
3025 void MacroAssembler::Jump(ExternalReference ext) {
3026 LoadAddress(kScratchRegister, ext);
3027 jmp(kScratchRegister);
3031 void MacroAssembler::Jump(const Operand& op) {
3032 if (kPointerSize == kInt64Size) {
3033 jmp(op);
3034 } else {
3035 movp(kScratchRegister, op);
3036 jmp(kScratchRegister);
3041 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
3042 Move(kScratchRegister, destination, rmode);
3043 jmp(kScratchRegister);
3047 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
3048 // TODO(X64): Inline this
3049 jmp(code_object, rmode);
3053 int MacroAssembler::CallSize(ExternalReference ext) {
3054 // Opcode for call kScratchRegister (r10) is: Rex.B FF D2 (three bytes).
3055 return LoadAddressSize(ext) +
3056 Assembler::kCallScratchRegisterInstructionLength;
3060 void MacroAssembler::Call(ExternalReference ext) {
3062 int end_position = pc_offset() + CallSize(ext);
3064 LoadAddress(kScratchRegister, ext);
3065 call(kScratchRegister);
3067 CHECK_EQ(end_position, pc_offset());
3072 void MacroAssembler::Call(const Operand& op) {
3073 if (kPointerSize == kInt64Size) {
3074 call(op);
3075 } else {
3076 movp(kScratchRegister, op);
3077 call(kScratchRegister);
3082 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
3084 int end_position = pc_offset() + CallSize(destination);
3086 Move(kScratchRegister, destination, rmode);
3087 call(kScratchRegister);
3089 CHECK_EQ(pc_offset(), end_position);
3094 void MacroAssembler::Call(Handle<Code> code_object,
3095 RelocInfo::Mode rmode,
3096 TypeFeedbackId ast_id) {
3098 int end_position = pc_offset() + CallSize(code_object);
3100 DCHECK(RelocInfo::IsCodeTarget(rmode) ||
3101 rmode == RelocInfo::CODE_AGE_SEQUENCE);
3102 call(code_object, rmode, ast_id);
3104 CHECK_EQ(end_position, pc_offset());
3109 void MacroAssembler::Pushad() {
3110 Push(rax);
3111 Push(rcx);
3112 Push(rdx);
3113 Push(rbx);
3114 // Not pushing rsp or rbp.
3115 Push(rsi);
3116 Push(rdi);
3117 Push(r8);
3118 Push(r9);
3119 // r10 is kScratchRegister.
3120 Push(r11);
3121 // r12 is kSmiConstantRegister.
3122 // r13 is kRootRegister.
3123 Push(r14);
3124 Push(r15);
3125 STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
3126 // Use lea for symmetry with Popad.
3127 int sp_delta =
3128 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3129 leap(rsp, Operand(rsp, -sp_delta));
3130 }
3133 void MacroAssembler::Popad() {
3134 // Popad must not change the flags, so use lea instead of addq.
3135 int sp_delta =
3136 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3137 leap(rsp, Operand(rsp, sp_delta));
3138 Pop(r15);
3139 Pop(r14);
3140 Pop(r11);
3141 Pop(r9);
3142 Pop(r8);
3143 Pop(rdi);
3144 Pop(rsi);
3145 Pop(rbx);
3146 Pop(rdx);
3147 Pop(rcx);
3148 Pop(rax);
3149 }
3152 void MacroAssembler::Dropad() {
3153 addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
3157 // Order in which general registers are pushed by Pushad:
3158 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
3159 const int
3160 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
3180 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
3181 const Immediate& imm) {
3182 movp(SafepointRegisterSlot(dst), imm);
3186 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
3187 movp(SafepointRegisterSlot(dst), src);
3191 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
3192 movp(dst, SafepointRegisterSlot(src));
3196 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
3197 return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
3201 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3202 int handler_index) {
3203 // Adjust this code if not the case.
3204 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3205 kFPOnStackSize);
3206 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3207 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3208 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3209 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3210 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3212 // We will build up the handler from the bottom by pushing on the stack.
3213 // First push the frame pointer and context.
3214 if (kind == StackHandler::JS_ENTRY) {
3215 // The frame pointer does not point to a JS frame so we save NULL for
3216 // rbp. We expect the code throwing an exception to check rbp before
3217 // dereferencing it to restore the context.
3218 pushq(Immediate(0)); // NULL frame pointer.
3219 Push(Smi::FromInt(0)); // No context.
3220 } else {
3221 pushq(rbp);
3222 Push(rsi);
3223 }
3225 // Push the state and the code object.
3226 unsigned state =
3227 StackHandler::IndexField::encode(handler_index) |
3228 StackHandler::KindField::encode(kind);
3229 Push(Immediate(state));
3230 Push(CodeObject());
3232 // Link the current handler as the next handler.
3233 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3234 Push(ExternalOperand(handler_address));
3235 // Set this new handler as the current one.
3236 movp(ExternalOperand(handler_address), rsp);
3240 void MacroAssembler::PopTryHandler() {
3241 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3242 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3243 Pop(ExternalOperand(handler_address));
3244 addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
3248 void MacroAssembler::JumpToHandlerEntry() {
3249 // Compute the handler entry address and jump to it. The handler table is
3250 // a fixed array of (smi-tagged) code offsets.
3251 // rax = exception, rdi = code object, rdx = state.
3252 movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
3253 shrp(rdx, Immediate(StackHandler::kKindWidth));
3254 movp(rdx,
3255 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
3256 SmiToInteger64(rdx, rdx);
3257 leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
3258 jmp(rdi);
3259 }
3262 void MacroAssembler::Throw(Register value) {
3263 // Adjust this code if not the case.
3264 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3265 kFPOnStackSize);
3266 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3267 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3268 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3269 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3270 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3272 // The exception is expected in rax.
3273 if (!value.is(rax)) {
3274 movp(rax, value);
3275 }
3276 // Drop the stack pointer to the top of the top handler.
3277 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3278 movp(rsp, ExternalOperand(handler_address));
3279 // Restore the next handler.
3280 Pop(ExternalOperand(handler_address));
3282 // Remove the code object and state, compute the handler address in rdi.
3283 Pop(rdi); // Code object.
3284 Pop(rdx); // Offset and state.
3286 // Restore the context and frame pointer.
3287 Pop(rsi); // Context.
3288 popq(rbp); // Frame pointer.
3290 // If the handler is a JS frame, restore the context to the frame.
3291 // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
3292 // rbp or rsi.
3293 Label skip;
3294 testp(rbp, rbp);
3295 j(zero, &skip, Label::kNear);
3296 movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
3297 bind(&skip);
3299 JumpToHandlerEntry();
3303 void MacroAssembler::ThrowUncatchable(Register value) {
3304 // Adjust this code if not the case.
3305 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3306 kFPOnStackSize);
3307 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3308 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3309 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3310 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3311 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3313 // The exception is expected in rax.
3314 if (!value.is(rax)) {
3315 movp(rax, value);
3316 }
3317 // Drop the stack pointer to the top of the top stack handler.
3318 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3319 Load(rsp, handler_address);
3321 // Unwind the handlers until the top ENTRY handler is found.
3322 Label fetch_next, check_kind;
3323 jmp(&check_kind, Label::kNear);
3324 bind(&fetch_next);
3325 movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
3327 bind(&check_kind);
3328 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3329 testl(Operand(rsp, StackHandlerConstants::kStateOffset),
3330 Immediate(StackHandler::KindField::kMask));
3331 j(not_zero, &fetch_next);
3333 // Set the top handler address to next handler past the top ENTRY handler.
3334 Pop(ExternalOperand(handler_address));
3336 // Remove the code object and state, compute the handler address in rdi.
3337 Pop(rdi); // Code object.
3338 Pop(rdx); // Offset and state.
3340 // Clear the context pointer and frame pointer (0 was saved in the handler).
3341 Pop(rsi);
3342 popq(rbp);
3344 JumpToHandlerEntry();
3348 void MacroAssembler::Ret() {
3349 ret(0);
3350 }
3353 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
3354 if (is_uint16(bytes_dropped)) {
3355 ret(bytes_dropped);
3356 } else {
3357 PopReturnAddressTo(scratch);
3358 addp(rsp, Immediate(bytes_dropped));
3359 PushReturnAddressFrom(scratch);
3360 ret(0);
3361 }
3362 }
3365 void MacroAssembler::FCmp() {
3366 fucomip();
3367 fstp(0);
3368 }
3371 void MacroAssembler::CmpObjectType(Register heap_object,
3372 InstanceType type,
3373 Register map) {
3374 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3375 CmpInstanceType(map, type);
3379 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3380 cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
3381 Immediate(static_cast<int8_t>(type)));
3385 void MacroAssembler::CheckFastElements(Register map,
3386 Label* fail,
3387 Label::Distance distance) {
3388 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3389 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3390 STATIC_ASSERT(FAST_ELEMENTS == 2);
3391 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3392 cmpb(FieldOperand(map, Map::kBitField2Offset),
3393 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3394 j(above, fail, distance);
3398 void MacroAssembler::CheckFastObjectElements(Register map,
3399 Label* fail,
3400 Label::Distance distance) {
3401 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3402 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3403 STATIC_ASSERT(FAST_ELEMENTS == 2);
3404 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3405 cmpb(FieldOperand(map, Map::kBitField2Offset),
3406 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3407 j(below_equal, fail, distance);
3408 cmpb(FieldOperand(map, Map::kBitField2Offset),
3409 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3410 j(above, fail, distance);
3414 void MacroAssembler::CheckFastSmiElements(Register map,
3415 Label* fail,
3416 Label::Distance distance) {
3417 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3418 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3419 cmpb(FieldOperand(map, Map::kBitField2Offset),
3420 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3421 j(above, fail, distance);
3425 void MacroAssembler::StoreNumberToDoubleElements(
3426 Register maybe_number,
3427 Register elements,
3428 Register index,
3429 XMMRegister xmm_scratch,
3430 Label* fail,
3431 int elements_offset) {
3432 Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
3434 JumpIfSmi(maybe_number, &smi_value, Label::kNear);
3436 CheckMap(maybe_number,
3437 isolate()->factory()->heap_number_map(),
3438 fail,
3439 DONT_DO_SMI_CHECK);
3441 // Double value, canonicalize NaN.
3442 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
3443 cmpl(FieldOperand(maybe_number, offset),
3444 Immediate(kNaNOrInfinityLowerBoundUpper32));
3445 j(greater_equal, &maybe_nan, Label::kNear);
3447 bind(&not_nan);
3448 movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
3449 bind(&have_double_value);
3450 movsd(FieldOperand(elements, index, times_8,
3451 FixedDoubleArray::kHeaderSize - elements_offset),
3452 xmm_scratch);
3453 jmp(&done);
3455 bind(&maybe_nan);
3456 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3457 // it's an Infinity, and the non-NaN code path applies.
3458 j(greater, &is_nan, Label::kNear);
3459 cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
3460 j(zero, &not_nan);
3461 bind(&is_nan);
3462 // Convert all NaNs to the same canonical NaN value when they are stored in
3463 // the double array.
3464 Set(kScratchRegister,
3465 bit_cast<int64_t>(
3466 FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
3467 movq(xmm_scratch, kScratchRegister);
3468 jmp(&have_double_value, Label::kNear);
3470 bind(&smi_value);
3471 // Value is a smi. Convert it to a double and store.
3472 // Preserve the original value.
3473 SmiToInteger32(kScratchRegister, maybe_number);
3474 Cvtlsi2sd(xmm_scratch, kScratchRegister);
3475 movsd(FieldOperand(elements, index, times_8,
3476 FixedDoubleArray::kHeaderSize - elements_offset),
3477 xmm_scratch);
3478 bind(&done);
3479 }
3482 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3483 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3487 void MacroAssembler::CheckMap(Register obj,
3488 Handle<Map> map,
3489 Label* fail,
3490 SmiCheckType smi_check_type) {
3491 if (smi_check_type == DO_SMI_CHECK) {
3492 JumpIfSmi(obj, fail);
3493 }
3495 CompareMap(obj, map);
3496 j(not_equal, fail);
3497 }
3500 void MacroAssembler::ClampUint8(Register reg) {
3501 Label done;
3502 testl(reg, Immediate(0xFFFFFF00));
3503 j(zero, &done, Label::kNear);
3504 setcc(negative, reg); // 1 if negative, 0 if positive.
3505 decb(reg); // 0 if negative, 255 if positive.
3506 bind(&done);
3507 }
3510 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3511 XMMRegister temp_xmm_reg,
3512 Register result_reg) {
3513 Label done;
3514 Label conv_failure;
3515 xorps(temp_xmm_reg, temp_xmm_reg);
3516 cvtsd2si(result_reg, input_reg);
3517 testl(result_reg, Immediate(0xFFFFFF00));
3518 j(zero, &done, Label::kNear);
3519 cmpl(result_reg, Immediate(1));
3520 j(overflow, &conv_failure, Label::kNear);
3521 movl(result_reg, Immediate(0));
3522 setcc(sign, result_reg);
3523 subl(result_reg, Immediate(1));
3524 andl(result_reg, Immediate(255));
3525 jmp(&done, Label::kNear);
3526 bind(&conv_failure);
3527 Set(result_reg, 0);
3528 ucomisd(input_reg, temp_xmm_reg);
3529 j(below, &done, Label::kNear);
3530 Set(result_reg, 255);
3531 bind(&done);
3532 }
3535 void MacroAssembler::LoadUint32(XMMRegister dst,
3536 Register src) {
3537 if (FLAG_debug_code) {
3538 cmpq(src, Immediate(0xffffffff));
3539 Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3541 cvtqsi2sd(dst, src);
3545 void MacroAssembler::SlowTruncateToI(Register result_reg,
3546 Register input_reg,
3547 int offset) {
3548 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
3549 call(stub.GetCode(), RelocInfo::CODE_TARGET);
3553 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3554 Register input_reg) {
3555 Label done;
3556 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3557 cvttsd2siq(result_reg, xmm0);
3558 cmpq(result_reg, Immediate(1));
3559 j(no_overflow, &done, Label::kNear);
3562 if (input_reg.is(result_reg)) {
3563 subp(rsp, Immediate(kDoubleSize));
3564 movsd(MemOperand(rsp, 0), xmm0);
3565 SlowTruncateToI(result_reg, rsp, 0);
3566 addp(rsp, Immediate(kDoubleSize));
3567 } else {
3568 SlowTruncateToI(result_reg, input_reg);
3569 }
3571 bind(&done);
3572 // Keep our invariant that the upper 32 bits are zero.
3573 movl(result_reg, result_reg);
3574 }
3577 void MacroAssembler::TruncateDoubleToI(Register result_reg,
3578 XMMRegister input_reg) {
3579 Label done;
3580 cvttsd2siq(result_reg, input_reg);
3581 cmpq(result_reg, Immediate(1));
3582 j(no_overflow, &done, Label::kNear);
3584 subp(rsp, Immediate(kDoubleSize));
3585 movsd(MemOperand(rsp, 0), input_reg);
3586 SlowTruncateToI(result_reg, rsp, 0);
3587 addp(rsp, Immediate(kDoubleSize));
3589 bind(&done);
3590 // Keep our invariant that the upper 32 bits are zero.
3591 movl(result_reg, result_reg);
3592 }
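// Both truncation helpers lean on a cvttsd2siq property: when the double is
// NaN or out of int64 range, the instruction returns the sentinel
// 0x8000000000000000 (INT64_MIN). "cmpq result, 1" computes result - 1,
// which signed-overflows (sets OF) exactly for that sentinel, so
// j(no_overflow) takes the fast path for every genuine conversion and only
// the sentinel falls through to the DoubleToIStub slow path.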
3595 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
3596 XMMRegister scratch,
3597 MinusZeroMode minus_zero_mode,
3598 Label* lost_precision, Label* is_nan,
3599 Label* minus_zero, Label::Distance dst) {
3600 cvttsd2si(result_reg, input_reg);
3601 Cvtlsi2sd(xmm0, result_reg);
3602 ucomisd(xmm0, input_reg);
3603 j(not_equal, lost_precision, dst);
3604 j(parity_even, is_nan, dst); // NaN.
3605 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3606 Label done;
3607 // The integer converted back is equal to the original. We
3608 // only have to test if we got -0 as an input.
3609 testl(result_reg, result_reg);
3610 j(not_zero, &done, Label::kNear);
3611 movmskpd(result_reg, input_reg);
3612 // Bit 0 contains the sign of the double in input_reg.
3613 // If input was positive, we are ok and return 0, otherwise
3614 // jump to minus_zero.
3615 andl(result_reg, Immediate(1));
3616 j(not_zero, minus_zero, dst);
3617 bind(&done);
3618 }
3619 }
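// The minus-zero test works because -0.0 and +0.0 compare equal and both
// convert to the integer 0, so they are indistinguishable after the
// round-trip above. movmskpd copies the sign bit of each double lane into
// the low bits of the destination; for -0.0 the sign bit is set, so
// "andl result, 1" leaves 1 and the minus_zero branch is taken, while +0.0
// leaves 0.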
3622 void MacroAssembler::LoadInstanceDescriptors(Register map,
3623 Register descriptors) {
3624 movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3628 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3629 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3630 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3634 void MacroAssembler::EnumLength(Register dst, Register map) {
3635 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3636 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3637 andl(dst, Immediate(Map::EnumLengthBits::kMask));
3638 Integer32ToSmi(dst, dst);
3642 void MacroAssembler::DispatchMap(Register obj,
3643 Register unused,
3644 Handle<Map> map,
3645 Handle<Code> success,
3646 SmiCheckType smi_check_type) {
3647 Label fail;
3648 if (smi_check_type == DO_SMI_CHECK) {
3649 JumpIfSmi(obj, &fail);
3651 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3652 j(equal, success, RelocInfo::CODE_TARGET);
3653 bind(&fail);
3654 }
3658 void MacroAssembler::AssertNumber(Register object) {
3659 if (emit_debug_code()) {
3660 Label ok;
3661 Condition is_smi = CheckSmi(object);
3662 j(is_smi, &ok, Label::kNear);
3663 Cmp(FieldOperand(object, HeapObject::kMapOffset),
3664 isolate()->factory()->heap_number_map());
3665 Check(equal, kOperandIsNotANumber);
3666 bind(&ok);
3667 }
3668 }
3671 void MacroAssembler::AssertNotSmi(Register object) {
3672 if (emit_debug_code()) {
3673 Condition is_smi = CheckSmi(object);
3674 Check(NegateCondition(is_smi), kOperandIsASmi);
3679 void MacroAssembler::AssertSmi(Register object) {
3680 if (emit_debug_code()) {
3681 Condition is_smi = CheckSmi(object);
3682 Check(is_smi, kOperandIsNotASmi);
3687 void MacroAssembler::AssertSmi(const Operand& object) {
3688 if (emit_debug_code()) {
3689 Condition is_smi = CheckSmi(object);
3690 Check(is_smi, kOperandIsNotASmi);
3695 void MacroAssembler::AssertZeroExtended(Register int32_register) {
3696 if (emit_debug_code()) {
3697 DCHECK(!int32_register.is(kScratchRegister));
3698 movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3699 cmpq(kScratchRegister, int32_register);
3700 Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3705 void MacroAssembler::AssertString(Register object) {
3706 if (emit_debug_code()) {
3707 testb(object, Immediate(kSmiTagMask));
3708 Check(not_equal, kOperandIsASmiAndNotAString);
3709 Push(object);
3710 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3711 CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3712 Pop(object);
3713 Check(below, kOperandIsNotAString);
3718 void MacroAssembler::AssertName(Register object) {
3719 if (emit_debug_code()) {
3720 testb(object, Immediate(kSmiTagMask));
3721 Check(not_equal, kOperandIsASmiAndNotAName);
3722 Push(object);
3723 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3724 CmpInstanceType(object, LAST_NAME_TYPE);
3725 Pop(object);
3726 Check(below_equal, kOperandIsNotAName);
3731 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3732 if (emit_debug_code()) {
3733 Label done_checking;
3734 AssertNotSmi(object);
3735 Cmp(object, isolate()->factory()->undefined_value());
3736 j(equal, &done_checking);
3737 Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3738 Assert(equal, kExpectedUndefinedOrCell);
3739 bind(&done_checking);
3744 void MacroAssembler::AssertRootValue(Register src,
3745 Heap::RootListIndex root_value_index,
3746 BailoutReason reason) {
3747 if (emit_debug_code()) {
3748 DCHECK(!src.is(kScratchRegister));
3749 LoadRoot(kScratchRegister, root_value_index);
3750 cmpp(src, kScratchRegister);
3751 Check(equal, reason);
3757 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3758 Register map,
3759 Register instance_type) {
3760 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3761 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3762 STATIC_ASSERT(kNotStringTag != 0);
3763 testb(instance_type, Immediate(kIsNotStringMask));
3764 return zero;
3765 }
3768 Condition MacroAssembler::IsObjectNameType(Register heap_object,
3769 Register map,
3770 Register instance_type) {
3771 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3772 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3773 cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
3774 return below_equal;
3775 }
3778 void MacroAssembler::TryGetFunctionPrototype(Register function,
3779 Register result,
3780 Label* miss,
3781 bool miss_on_bound_function) {
3782 Label non_instance;
3783 if (miss_on_bound_function) {
3784 // Check that the receiver isn't a smi.
3785 testl(function, Immediate(kSmiTagMask));
3786 j(zero, miss);
3788 // Check that the function really is a function.
3789 CmpObjectType(function, JS_FUNCTION_TYPE, result);
3790 j(not_equal, miss);
3792 movp(kScratchRegister,
3793 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3794 // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
3795 // field).
3796 TestBitSharedFunctionInfoSpecialField(kScratchRegister,
3797 SharedFunctionInfo::kCompilerHintsOffset,
3798 SharedFunctionInfo::kBoundFunction);
3799 j(not_zero, miss);
3800 }
3801 // Make sure that the function has an instance prototype.
3802 testb(FieldOperand(result, Map::kBitFieldOffset),
3803 Immediate(1 << Map::kHasNonInstancePrototype));
3804 j(not_zero, &non_instance, Label::kNear);
3807 // Get the prototype or initial map from the function.
3808 movp(result,
3809 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3811 // If the prototype or initial map is the hole, don't return it and
3812 // simply miss the cache instead. This will allow us to allocate a
3813 // prototype object on-demand in the runtime system.
3814 CompareRoot(result, Heap::kTheHoleValueRootIndex);
3815 j(equal, miss);
3817 // If the function does not have an initial map, we're done.
3818 Label done;
3819 CmpObjectType(result, MAP_TYPE, kScratchRegister);
3820 j(not_equal, &done, Label::kNear);
3822 // Get the prototype from the initial map.
3823 movp(result, FieldOperand(result, Map::kPrototypeOffset));
3825 if (miss_on_bound_function) {
3826 jmp(&done, Label::kNear);
3828 // Non-instance prototype: fetch the prototype from the constructor
3829 // field in the initial map.
3830 bind(&non_instance);
3831 movp(result, FieldOperand(result, Map::kConstructorOffset));
3832 }
3834 // All done.
3835 bind(&done);
3836 }
3839 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3840 if (FLAG_native_code_counters && counter->Enabled()) {
3841 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3842 movl(counter_operand, Immediate(value));
3847 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3849 if (FLAG_native_code_counters && counter->Enabled()) {
3850 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3851 if (value == 1) {
3852 incl(counter_operand);
3853 } else {
3854 addl(counter_operand, Immediate(value));
3855 }
3860 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3862 if (FLAG_native_code_counters && counter->Enabled()) {
3863 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3864 if (value == 1) {
3865 decl(counter_operand);
3866 } else {
3867 subl(counter_operand, Immediate(value));
3868 }
3873 void MacroAssembler::DebugBreak() {
3874 Set(rax, 0); // No arguments.
3875 LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3876 CEntryStub ces(isolate(), 1);
3877 DCHECK(AllowThisStubCall(&ces));
3878 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3882 void MacroAssembler::InvokeCode(Register code,
3883 const ParameterCount& expected,
3884 const ParameterCount& actual,
3885 InvokeFlag flag,
3886 const CallWrapper& call_wrapper) {
3887 // You can't call a function without a valid frame.
3888 DCHECK(flag == JUMP_FUNCTION || has_frame());
3890 Label done;
3891 bool definitely_mismatches = false;
3892 InvokePrologue(expected,
3893 actual,
3894 Handle<Code>::null(),
3895 code,
3896 &done,
3897 &definitely_mismatches,
3898 flag,
3899 Label::kNear,
3900 call_wrapper);
3901 if (!definitely_mismatches) {
3902 if (flag == CALL_FUNCTION) {
3903 call_wrapper.BeforeCall(CallSize(code));
3904 call(code);
3905 call_wrapper.AfterCall();
3906 } else {
3907 DCHECK(flag == JUMP_FUNCTION);
3908 jmp(code);
3909 }
3910 bind(&done);
3911 }
3912 }
3915 void MacroAssembler::InvokeFunction(Register function,
3916 const ParameterCount& actual,
3917 InvokeFlag flag,
3918 const CallWrapper& call_wrapper) {
3919 // You can't call a function without a valid frame.
3920 DCHECK(flag == JUMP_FUNCTION || has_frame());
3922 DCHECK(function.is(rdi));
3923 movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3924 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3925 LoadSharedFunctionInfoSpecialField(rbx, rdx,
3926 SharedFunctionInfo::kFormalParameterCountOffset);
3927 // Advances rdx to the end of the Code object header, to the start of
3928 // the executable code.
3929 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3931 ParameterCount expected(rbx);
3932 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3936 void MacroAssembler::InvokeFunction(Register function,
3937 const ParameterCount& expected,
3938 const ParameterCount& actual,
3939 InvokeFlag flag,
3940 const CallWrapper& call_wrapper) {
3941 // You can't call a function without a valid frame.
3942 DCHECK(flag == JUMP_FUNCTION || has_frame());
3944 DCHECK(function.is(rdi));
3945 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3946 // Advances rdx to the end of the Code object header, to the start of
3947 // the executable code.
3948 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3950 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3954 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3955 const ParameterCount& expected,
3956 const ParameterCount& actual,
3957 InvokeFlag flag,
3958 const CallWrapper& call_wrapper) {
3959 Move(rdi, function);
3960 InvokeFunction(rdi, expected, actual, flag, call_wrapper);
3964 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3965 const ParameterCount& actual,
3966 Handle<Code> code_constant,
3967 Register code_register,
3968 Label* done,
3969 bool* definitely_mismatches,
3970 InvokeFlag flag,
3971 Label::Distance near_jump,
3972 const CallWrapper& call_wrapper) {
3973 bool definitely_matches = false;
3974 *definitely_mismatches = false;
3975 Label invoke;
3976 if (expected.is_immediate()) {
3977 DCHECK(actual.is_immediate());
3978 if (expected.immediate() == actual.immediate()) {
3979 definitely_matches = true;
3980 } else {
3981 Set(rax, actual.immediate());
3982 if (expected.immediate() ==
3983 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3984 // Don't worry about adapting arguments for built-ins that
3985 // don't want that done. Skip adaptation code by making it look
3986 // like we have a match between expected and actual number of
3987 // arguments.
3988 definitely_matches = true;
3989 } else {
3990 *definitely_mismatches = true;
3991 Set(rbx, expected.immediate());
3992 }
3993 }
3994 } else {
3995 if (actual.is_immediate()) {
3996 // Expected is in register, actual is immediate. This is the
3997 // case when we invoke function values without going through the
3998 // IC mechanism.
3999 cmpp(expected.reg(), Immediate(actual.immediate()));
4000 j(equal, &invoke, Label::kNear);
4001 DCHECK(expected.reg().is(rbx));
4002 Set(rax, actual.immediate());
4003 } else if (!expected.reg().is(actual.reg())) {
4004 // Both expected and actual are in (different) registers. This
4005 // is the case when we invoke functions using call and apply.
4006 cmpp(expected.reg(), actual.reg());
4007 j(equal, &invoke, Label::kNear);
4008 DCHECK(actual.reg().is(rax));
4009 DCHECK(expected.reg().is(rbx));
4010 }
4011 }
4013 if (!definitely_matches) {
4014 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
4015 if (!code_constant.is_null()) {
4016 Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
4017 addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
4018 } else if (!code_register.is(rdx)) {
4019 movp(rdx, code_register);
4022 if (flag == CALL_FUNCTION) {
4023 call_wrapper.BeforeCall(CallSize(adaptor));
4024 Call(adaptor, RelocInfo::CODE_TARGET);
4025 call_wrapper.AfterCall();
4026 if (!*definitely_mismatches) {
4027 jmp(done, near_jump);
4028 }
4029 } else {
4030 Jump(adaptor, RelocInfo::CODE_TARGET);
4031 }
4032 bind(&invoke);
4033 }
4034 }
4037 void MacroAssembler::StubPrologue() {
4038 pushq(rbp); // Caller's frame pointer.
4039 movp(rbp, rsp);
4040 Push(rsi); // Callee's context.
4041 Push(Smi::FromInt(StackFrame::STUB));
4045 void MacroAssembler::Prologue(bool code_pre_aging) {
4046 PredictableCodeSizeScope predictable_code_size_scope(this,
4047 kNoCodeAgeSequenceLength);
4048 if (code_pre_aging) {
4049 // Pre-age the code.
4050 Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
4051 RelocInfo::CODE_AGE_SEQUENCE);
4052 Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
4053 } else {
4054 pushq(rbp); // Caller's frame pointer.
4055 movp(rbp, rsp);
4056 Push(rsi); // Callee's context.
4057 Push(rdi); // Callee's JS function.
4062 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4063 pushq(rbp);
4064 movp(rbp, rsp);
4065 Push(rsi); // Context.
4066 Push(Smi::FromInt(type));
4067 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4068 Push(kScratchRegister);
4069 if (emit_debug_code()) {
4070 Move(kScratchRegister,
4071 isolate()->factory()->undefined_value(),
4072 RelocInfo::EMBEDDED_OBJECT);
4073 cmpp(Operand(rsp, 0), kScratchRegister);
4074 Check(not_equal, kCodeObjectNotProperlyPatched);
4079 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4080 if (emit_debug_code()) {
4081 Move(kScratchRegister, Smi::FromInt(type));
4082 cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
4083 Check(equal, kStackFrameTypesMustMatch);
4084 }
4085 movp(rsp, rbp);
4086 popq(rbp);
4087 }
4090 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
4091 // Set up the frame structure on the stack.
4092 // All constants are relative to the frame pointer of the exit frame.
4093 DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
4094 kFPOnStackSize + kPCOnStackSize);
4095 DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
4096 DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
4097 pushq(rbp);
4098 movp(rbp, rsp);
4100 // Reserve room for entry stack pointer and push the code object.
4101 DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
4102 Push(Immediate(0)); // Saved entry sp, patched before call.
4103 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4104 Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
4106 // Save the frame pointer and the context in top.
4107 if (save_rax) {
4108 movp(r14, rax); // Back up rax in a callee-saved register.
4109 }
4111 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
4112 Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
4116 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
4117 bool save_doubles) {
4118 #ifdef _WIN64
4119 const int kShadowSpace = 4;
4120 arg_stack_space += kShadowSpace;
4121 #endif
4122 // Optionally save all XMM registers.
4123 if (save_doubles) {
4124 int space = XMMRegister::kMaxNumAllocatableRegisters * kSIMD128Size +
4125 arg_stack_space * kRegisterSize;
4126 subp(rsp, Immediate(space));
4127 int offset = -2 * kPointerSize;
4128 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4129 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4130 movups(Operand(rbp, offset - ((i + 1) * kSIMD128Size)), reg);
4132 } else if (arg_stack_space > 0) {
4133 subp(rsp, Immediate(arg_stack_space * kRegisterSize));
4136 // Get the required frame alignment for the OS.
4137 const int kFrameAlignment = base::OS::ActivationFrameAlignment();
4138 if (kFrameAlignment > 0) {
4139 DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
4140 DCHECK(is_int8(kFrameAlignment));
4141 andp(rsp, Immediate(-kFrameAlignment));
4144 // Patch the saved entry sp.
4145 movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
4149 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
4150 EnterExitFramePrologue(true);
4152 // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
4153 // so it must be retained across the C-call.
4154 int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
4155 leap(r15, Operand(rbp, r14, times_pointer_size, offset));
4157 EnterExitFrameEpilogue(arg_stack_space, save_doubles);
4161 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
4162 EnterExitFramePrologue(false);
4163 EnterExitFrameEpilogue(arg_stack_space, false);
4167 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
4168 // Registers:
4169 // r15 : argv
4170 if (save_doubles) {
4171 int offset = -2 * kPointerSize;
4172 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4173 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4174 movups(reg, Operand(rbp, offset - ((i + 1) * kSIMD128Size)));
4175 }
4176 }
4177 // Get the return address from the stack and restore the frame pointer.
4178 movp(rcx, Operand(rbp, kFPOnStackSize));
4179 movp(rbp, Operand(rbp, 0 * kPointerSize));
4181 // Drop everything up to and including the arguments and the receiver
4182 // from the caller stack.
4183 leap(rsp, Operand(r15, 1 * kPointerSize));
4185 PushReturnAddressFrom(rcx);
4187 LeaveExitFrameEpilogue(true);
4191 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
4192 movp(rsp, rbp);
4193 popq(rbp);
4195 LeaveExitFrameEpilogue(restore_context);
4196 }
4199 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
4200 // Restore current context from top and clear it in debug mode.
4201 ExternalReference context_address(Isolate::kContextAddress, isolate());
4202 Operand context_operand = ExternalOperand(context_address);
4203 if (restore_context) {
4204 movp(rsi, context_operand);
4205 }
4206 #ifdef DEBUG
4207 movp(context_operand, Immediate(0));
4208 #endif
4210 // Clear the top frame.
4211 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
4213 Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
4214 movp(c_entry_fp_operand, Immediate(0));
4218 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
4219 Register scratch,
4220 Label* miss) {
4221 Label same_contexts;
4223 DCHECK(!holder_reg.is(scratch));
4224 DCHECK(!scratch.is(kScratchRegister));
4225 // Load current lexical context from the stack frame.
4226 movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
4228 // When generating debug code, make sure the lexical context is set.
4229 if (emit_debug_code()) {
4230 cmpp(scratch, Immediate(0));
4231 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
4233 // Load the native context of the current context.
4234 int offset =
4235 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
4236 movp(scratch, FieldOperand(scratch, offset));
4237 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4239 // Check the context is a native context.
4240 if (emit_debug_code()) {
4241 Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
4242 isolate()->factory()->native_context_map());
4243 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4246 // Check if both contexts are the same.
4247 cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4248 j(equal, &same_contexts);
4250 // Compare security tokens.
4251 // Check that the security token in the calling global object is
4252 // compatible with the security token in the receiving global
4253 // object.
4255 // Check the context is a native context.
4256 if (emit_debug_code()) {
4257 // Preserve original value of holder_reg.
4258 Push(holder_reg);
4259 movp(holder_reg,
4260 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4261 CompareRoot(holder_reg, Heap::kNullValueRootIndex);
4262 Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
4264 // Read the first word and compare it to native_context_map().
4265 movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
4266 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
4267 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4268 Pop(holder_reg);
4269 }
4271 movp(kScratchRegister,
4272 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4273 int token_offset =
4274 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
4275 movp(scratch, FieldOperand(scratch, token_offset));
4276 cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
4277 j(not_equal, miss);
4279 bind(&same_contexts);
4280 }
4283 // Compute the hash code from the untagged key. This must be kept in sync with
4284 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
4285 // code-stub-hydrogen.cc
4286 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
4287 // First of all we assign the hash seed to scratch.
4288 LoadRoot(scratch, Heap::kHashSeedRootIndex);
4289 SmiToInteger32(scratch, scratch);
4291 // Xor original key with a seed.
4292 xorl(r0, scratch);
4294 // Compute the hash code from the untagged key. This must be kept in sync
4295 // with ComputeIntegerHash in utils.h.
4297 // hash = ~hash + (hash << 15);
4298 movl(scratch, r0);
4299 notl(r0);
4300 shll(scratch, Immediate(15));
4301 addl(r0, scratch);
4302 // hash = hash ^ (hash >> 12);
4303 movl(scratch, r0);
4304 shrl(scratch, Immediate(12));
4305 xorl(r0, scratch);
4306 // hash = hash + (hash << 2);
4307 leal(r0, Operand(r0, r0, times_4, 0));
4308 // hash = hash ^ (hash >> 4);
4309 movl(scratch, r0);
4310 shrl(scratch, Immediate(4));
4311 xorl(r0, scratch);
4312 // hash = hash * 2057;
4313 imull(r0, r0, Immediate(2057));
4314 // hash = hash ^ (hash >> 16);
4315 movl(scratch, r0);
4316 shrl(scratch, Immediate(16));
4317 xorl(r0, scratch);
4318 }
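// For reference, the assembly above performs the following C++ computation
// (a sketch mirroring the instructions; cf. ComputeIntegerHash in utils.h):
//
//   uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
//     uint32_t hash = key ^ seed;
//     hash = ~hash + (hash << 15);  // the movl/notl/shll/addl sequence
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);    // the leal with times_4
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;           // the imull
//     hash = hash ^ (hash >> 16);
//     return hash;
//   }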
4322 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
4323 Register elements,
4324 Register key,
4325 Register r0,
4326 Register r1,
4327 Register r2,
4328 Register result) {
4331 // elements - holds the slow-case elements of the receiver on entry.
4332 // Unchanged unless 'result' is the same register.
4334 // key - holds the smi key on entry.
4335 // Unchanged unless 'result' is the same register.
4337 // Scratch registers:
4339 // r0 - holds the untagged key on entry and holds the hash once computed.
4341 // r1 - used to hold the capacity mask of the dictionary
4343 // r2 - used for the index into the dictionary.
4345 // result - holds the result on exit if the load succeeded.
4346 // Allowed to be the same as 'key' or 'result'.
4347 // Unchanged on bailout so 'key' or 'result' can be used
4348 // in further computation.
4350 Label done;
4352 GetNumberHash(r0, r1);
4354 // Compute capacity mask.
4355 SmiToInteger32(r1, FieldOperand(elements,
4356 SeededNumberDictionary::kCapacityOffset));
4357 decl(r1);
4359 // Generate an unrolled loop that performs a few probes before giving up.
4360 for (int i = 0; i < kNumberDictionaryProbes; i++) {
4361 // Use r2 for index calculations and keep the hash intact in r0.
4362 movp(r2, r0);
4363 // Compute the masked index: (hash + i + i * i) & mask.
4364 if (i > 0) {
4365 addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4366 }
4367 andp(r2, r1);
4369 // Scale the index by multiplying by the entry size.
4370 DCHECK(SeededNumberDictionary::kEntrySize == 3);
4371 leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
4373 // Check if the key matches.
4374 cmpp(key, FieldOperand(elements,
4375 r2,
4376 times_pointer_size,
4377 SeededNumberDictionary::kElementsStartOffset));
4378 if (i != (kNumberDictionaryProbes - 1)) {
4379 j(equal, &done);
4380 } else {
4381 j(not_equal, miss);
4382 }
4383 }
4385 bind(&done);
4386 // Check that the value is a normal property.
4387 const int kDetailsOffset =
4388 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4389 DCHECK_EQ(NORMAL, 0);
4390 Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4391 Smi::FromInt(PropertyDetails::TypeField::kMask));
4392 j(not_zero, miss);
4394 // Get the value at the masked, scaled index.
4395 const int kValueOffset =
4396 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4397 movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
4398 }
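// Layout note: each dictionary entry is kEntrySize == 3 pointers wide (key,
// value, property details), which is why the probe index is scaled by 3 via
// "leap(r2, Operand(r2, r2, times_2, 0))" (r2 + r2*2), and why the value and
// details live at fixed offsets of 1 and 2 pointers past the entry start.
// The unrolled loop performs quadratic probing: probe i inspects
// (hash + GetProbeOffset(i)) & mask, with the capacity mask keeping every
// probe inside the table.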
4401 void MacroAssembler::LoadAllocationTopHelper(Register result,
4402 Register scratch,
4403 AllocationFlags flags) {
4404 ExternalReference allocation_top =
4405 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4407 // Just return if allocation top is already known.
4408 if ((flags & RESULT_CONTAINS_TOP) != 0) {
4409 // No use of scratch if allocation top is provided.
4410 DCHECK(!scratch.is_valid());
4411 #ifdef DEBUG
4412 // Assert that result actually contains top on entry.
4413 Operand top_operand = ExternalOperand(allocation_top);
4414 cmpp(result, top_operand);
4415 Check(equal, kUnexpectedAllocationTop);
4416 #endif
4417 return;
4418 }
4420 // Move address of new object to result. Use scratch register if available,
4421 // and keep address in scratch until call to UpdateAllocationTopHelper.
4422 if (scratch.is_valid()) {
4423 LoadAddress(scratch, allocation_top);
4424 movp(result, Operand(scratch, 0));
4426 Load(result, allocation_top);
4431 void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
4432 Register scratch,
4433 Label* gc_required,
4434 AllocationFlags flags) {
4435 if (kPointerSize == kDoubleSize) {
4436 if (FLAG_debug_code) {
4437 testl(result, Immediate(kDoubleAlignmentMask));
4438 Check(zero, kAllocationIsNotDoubleAligned);
4439 }
4440 } else {
4441 // Align the next allocation. Storing the filler map without checking top
4442 // is safe in new-space because the limit of the heap is aligned there.
4443 DCHECK(kPointerSize * 2 == kDoubleSize);
4444 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
4445 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4446 // Make sure scratch is not clobbered by this function as it might be
4447 // used in UpdateAllocationTopHelper later.
4448 DCHECK(!scratch.is(kScratchRegister));
4450 testl(result, Immediate(kDoubleAlignmentMask));
4451 j(zero, &aligned, Label::kNear);
4452 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
4453 ExternalReference allocation_limit =
4454 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4455 cmpp(result, ExternalOperand(allocation_limit));
4456 j(above_equal, gc_required);
4458 LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
4459 movp(Operand(result, 0), kScratchRegister);
4460 addp(result, Immediate(kDoubleSize / 2));
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               Register scratch,
                                               AllocationFlags flags) {
  if (emit_debug_code()) {
    testp(result_end, Immediate(kObjectAlignmentMask));
    Check(zero, kUnalignedAllocationInNewSpace);
  }

  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Update new top.
  if (scratch.is_valid()) {
    // Scratch already contains address of allocation top.
    movp(Operand(scratch, 0), result_end);
  } else {
    Store(allocation_top, result_end);
  }
}


void MacroAssembler::Allocate(int object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      if (result_end.is_valid()) {
        movl(result_end, Immediate(0x7191));
      }
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
  }

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  Register top_reg = result_end.is_valid() ? result_end : result;

  if (!top_reg.is(result)) {
    movp(top_reg, result);
  }
  addp(top_reg, Immediate(object_size));
  j(carry, gc_required);
  Operand limit_operand = ExternalOperand(allocation_limit);
  cmpp(top_reg, limit_operand);
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(top_reg, scratch, flags);

  bool tag_result = (flags & TAG_OBJECT) != 0;
  if (top_reg.is(result)) {
    if (tag_result) {
      subp(result, Immediate(object_size - kHeapObjectTag));
    } else {
      subp(result, Immediate(object_size));
    }
  } else if (tag_result) {
    // Tag the result if requested.
    DCHECK(kHeapObjectTag == 1);
    incp(result);
  }
}


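// For reference, a hedged C++ sketch of the bump-pointer fast path emitted
// above (Space and TryAllocateSketch are illustrative names, not V8 types):
//
//   struct Space { char* top; char* limit; };
//   static void* TryAllocateSketch(Space* space, size_t object_size) {
//     char* result = space->top;
//     char* new_top = result + object_size;
//     if (new_top < result || new_top > space->limit) {
//       return NULL;  // overflow or space exhausted: take gc_required
//     }
//     space->top = new_top;  // UpdateAllocationTopHelper
//     return result;         // optionally tagged with kHeapObjectTag
//   }

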
void MacroAssembler::Allocate(int header_size,
                              ScaleFactor element_size,
                              Register element_count,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & SIZE_IN_WORDS) == 0);
  leap(result_end, Operand(element_count, element_size, header_size));
  Allocate(result_end, result, result_end, scratch, gc_required, flags);
}


void MacroAssembler::Allocate(Register object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & SIZE_IN_WORDS) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      movl(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
      // object_size is left unchanged by this function.
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
  }

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  if (!object_size.is(result_end)) {
    movp(result_end, object_size);
  }
  addp(result_end, result);
  j(carry, gc_required);
  Operand limit_operand = ExternalOperand(allocation_limit);
  cmpp(result_end, limit_operand);
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch, flags);

  // Tag the result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    addp(result, Immediate(kHeapObjectTag));
  }
}


void MacroAssembler::UndoAllocationInNewSpace(Register object) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  andp(object, Immediate(~kHeapObjectTagMask));
  Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
  cmpp(object, top_operand);
  Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
  movp(top_operand, object);
}


void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch,
                                        Label* gc_required,
                                        MutableMode mode) {
  // Allocate heap number in new space.
  Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);

  Heap::RootListIndex map_index = mode == MUTABLE
      ? Heap::kMutableHeapNumberMapRootIndex
      : Heap::kHeapNumberMapRootIndex;

  // Set the map.
  LoadRoot(kScratchRegister, map_index);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


#define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
  V(Float32x4, float32x4, FLOAT32x4) \
  V(Float64x2, float64x2, FLOAT64x2) \
  V(Int32x4, int32x4, INT32x4)

#define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(Type, type, TYPE)             \
void MacroAssembler::Allocate##Type(Register result,                      \
                                    Register scratch1,                    \
                                    Register scratch2,                    \
                                    Register scratch3,                    \
                                    Label* gc_required) {                 \
  /* Allocate SIMD128 object. */                                          \
  Allocate(Type::kSize, result, scratch1, no_reg, gc_required, TAG_OBJECT);\
  /* Load the initial map and assign to new allocated object. */          \
  movp(scratch1, Operand(rbp, StandardFrameConstants::kContextOffset));   \
  movp(scratch1,                                                          \
       Operand(scratch1,                                                  \
               Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));       \
  movp(scratch1,                                                          \
       FieldOperand(scratch1, GlobalObject::kNativeContextOffset));       \
  movp(scratch1,                                                          \
       Operand(scratch1,                                                  \
               Context::SlotOffset(Context::TYPE##_FUNCTION_INDEX)));     \
  LoadGlobalFunctionInitialMap(scratch1, scratch1);                       \
  movp(FieldOperand(result, JSObject::kMapOffset),                        \
       scratch1);                                                         \
  /* Initialize the properties and elements. */                           \
  MoveHeapObject(kScratchRegister,                                        \
                 isolate()->factory()->empty_fixed_array());              \
  movp(FieldOperand(result, JSObject::kPropertiesOffset),                 \
       kScratchRegister);                                                 \
  movp(FieldOperand(result, JSObject::kElementsOffset),                   \
       kScratchRegister);                                                 \
  /* Allocate FixedTypedArray object. */                                  \
  Allocate(FixedTypedArrayBase::kDataOffset + k##Type##Size,              \
           scratch1, scratch2, no_reg, gc_required, TAG_OBJECT);          \
  MoveHeapObject(kScratchRegister,                                        \
                 isolate()->factory()->fixed_##type##_array_map());       \
  movp(FieldOperand(scratch1, FixedTypedArrayBase::kMapOffset),           \
       kScratchRegister);                                                 \
  movp(scratch3, Immediate(1));                                           \
  Integer32ToSmi(scratch2, scratch3);                                     \
  movp(FieldOperand(scratch1, FixedTypedArrayBase::kLengthOffset),        \
       scratch2);                                                         \
  /* Assign FixedTypedArray object to SIMD128 object. */                  \
  movp(FieldOperand(result, Type::kValueOffset), scratch1);               \
}

SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)


void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
                               kObjectAlignmentMask;
  DCHECK(kShortSize == 2);
  // scratch1 = length * 2 + kObjectAlignmentMask.
  leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
                                                  kHeaderAlignment));
  andp(scratch1, Immediate(~kObjectAlignmentMask));
  if (kHeaderAlignment > 0) {
    subp(scratch1, Immediate(kHeaderAlignment));
  }

  // Allocate two byte string in new space.
  Allocate(SeqTwoByteString::kHeaderSize,
           times_1,
           scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
  Integer32ToSmi(scratch1, length);
  movp(FieldOperand(result, String::kLengthOffset), scratch1);
  movp(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
}


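// For reference, a hedged sketch of the size computation above: the code
// ultimately reserves the aligned total of header plus 2 * length bytes
// (the helper name is illustrative):
//
//   static int SeqTwoByteStringSizeSketch(int length) {
//     int unaligned = SeqTwoByteString::kHeaderSize + length * 2;
//     return (unaligned + kObjectAlignmentMask) & ~kObjectAlignmentMask;
//   }

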
void MacroAssembler::AllocateOneByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
                               kObjectAlignmentMask;
  movl(scratch1, length);
  DCHECK(kCharSize == 1);
  addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
  andp(scratch1, Immediate(~kObjectAlignmentMask));
  if (kHeaderAlignment > 0) {
    subp(scratch1, Immediate(kHeaderAlignment));
  }

  // Allocate one-byte string in new space.
  Allocate(SeqOneByteString::kHeaderSize,
           times_1,
           scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
  Integer32ToSmi(scratch1, length);
  movp(FieldOperand(result, String::kLengthOffset), scratch1);
  movp(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
}


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate cons string in new space.
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateOneByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  Allocate(ConsString::kSize,
           result,
           scratch1,
           scratch2,
           gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateOneByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Destination is incremented by length; source, length and scratch are
// clobbered.
// A simpler loop is faster on small copies, but slower on large ones.
// The cld() instruction must have been emitted, to set the direction flag,
// before calling this function.
void MacroAssembler::CopyBytes(Register destination,
                               Register source,
                               Register length,
                               int min_length,
                               Register scratch) {
  DCHECK(min_length >= 0);
  if (emit_debug_code()) {
    cmpl(length, Immediate(min_length));
    Assert(greater_equal, kInvalidMinLength);
  }
  Label short_loop, len8, len16, len24, done, short_string;

  const int kLongStringLimit = 4 * kPointerSize;
  if (min_length <= kLongStringLimit) {
    cmpl(length, Immediate(kPointerSize));
    j(below, &short_string, Label::kNear);
  }

  DCHECK(source.is(rsi));
  DCHECK(destination.is(rdi));
  DCHECK(length.is(rcx));

  if (min_length <= kLongStringLimit) {
    cmpl(length, Immediate(2 * kPointerSize));
    j(below_equal, &len8, Label::kNear);
    cmpl(length, Immediate(3 * kPointerSize));
    j(below_equal, &len16, Label::kNear);
    cmpl(length, Immediate(4 * kPointerSize));
    j(below_equal, &len24, Label::kNear);
  }

  // Because source is 8-byte aligned in our uses of this function,
  // we keep source aligned for the rep movs operation by copying the odd bytes
  // at the end of the ranges.
  movp(scratch, length);
  shrl(length, Immediate(kPointerSizeLog2));
  repmovsp();
  // Move remaining bytes of length.
  andl(scratch, Immediate(kPointerSize - 1));
  movp(length, Operand(source, scratch, times_1, -kPointerSize));
  movp(Operand(destination, scratch, times_1, -kPointerSize), length);
  addp(destination, scratch);

  if (min_length <= kLongStringLimit) {
    jmp(&done, Label::kNear);

    bind(&len24);
    movp(scratch, Operand(source, 2 * kPointerSize));
    movp(Operand(destination, 2 * kPointerSize), scratch);

    bind(&len16);
    movp(scratch, Operand(source, kPointerSize));
    movp(Operand(destination, kPointerSize), scratch);

    bind(&len8);
    movp(scratch, Operand(source, 0));
    movp(Operand(destination, 0), scratch);

    // Move remaining bytes of length.
    movp(scratch, Operand(source, length, times_1, -kPointerSize));
    movp(Operand(destination, length, times_1, -kPointerSize), scratch);
    addp(destination, length);
    jmp(&done, Label::kNear);

    bind(&short_string);
    if (min_length == 0) {
      testl(length, length);
      j(zero, &done, Label::kNear);
    }

    bind(&short_loop);
    movb(scratch, Operand(source, 0));
    movb(Operand(destination, 0), scratch);
    incp(source);
    incp(destination);
    decl(length);
    j(not_zero, &short_loop);
  }

  bind(&done);
}


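// For reference, a hedged C++ sketch of the word-copy strategy above for
// lengths of at least one word (the <4-word unrolled paths and the byte loop
// are omitted; CopyBytesSketch is an illustrative name):
//
//   static void CopyBytesSketch(uint8_t* dst, const uint8_t* src, size_t n) {
//     memcpy(dst, src, (n / sizeof(uintptr_t)) * sizeof(uintptr_t));  // rep
//     // Copy the last word so the odd tail bytes are covered; it may
//     // overlap bytes already copied above, which is harmless here.
//     uintptr_t tail;
//     memcpy(&tail, src + n - sizeof(uintptr_t), sizeof(uintptr_t));
//     memcpy(dst + n - sizeof(uintptr_t), &tail, sizeof(uintptr_t));
//   }

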
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  jmp(&entry);
  bind(&loop);
  movp(Operand(start_offset, 0), filler);
  addp(start_offset, Immediate(kPointerSize));
  bind(&entry);
  cmpp(start_offset, end_offset);
  j(less, &loop);
}


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in rsi).
    movp(dst, rsi);
  }

  // We should not have found a with context by walking the context
  // chain (i.e., the static scope chain and runtime context chain do
  // not agree). A variable occurring in such a scope should have
  // slot type LOOKUP and not CONTEXT.
  if (emit_debug_code()) {
    CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
                Heap::kWithContextMapRootIndex);
    Check(not_equal, kVariableResolvedToWithContext);
  }
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  movp(scratch,
       Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  movp(scratch, Operand(scratch,
                        Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));

  int offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  cmpp(map_in_out, FieldOperand(scratch, offset));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  movp(map_in_out, FieldOperand(scratch, offset));
}


#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  movp(function,
       Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  movp(function, Operand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map. The global functions all have initial maps.
  movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}


int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
  // On Windows 64 stack slots are reserved by the caller for all arguments
  // including the ones passed in registers, and space is always allocated for
  // the four register arguments even if the function takes fewer than four
  // arguments.
  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
  // and the caller does not reserve stack slots for them.
  DCHECK(num_arguments >= 0);
#ifdef _WIN64
  const int kMinimumStackSlots = kRegisterPassedArguments;
  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
  return num_arguments;
#else
  if (num_arguments < kRegisterPassedArguments) return 0;
  return num_arguments - kRegisterPassedArguments;
#endif
}


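// For reference, a hedged sketch of the two cases above in one function
// (ArgumentStackSlotsSketch is an illustrative name):
//
//   static int ArgumentStackSlotsSketch(int num_arguments, bool win64) {
//     const int reg_args = win64 ? 4 : 6;
//     if (win64) {
//       // Shadow space for four register args is always reserved.
//       return num_arguments < reg_args ? reg_args : num_arguments;
//     }
//     return num_arguments < reg_args ? 0 : num_arguments - reg_args;
//   }
//
// E.g. 2 arguments -> 4 slots on Windows but 0 on Linux/Mac; 8 arguments ->
// 8 slots on Windows and 2 on Linux/Mac.

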
void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  Label is_object;
  JumpIfNotSmi(string, &is_object);
  Abort(kNonObject);
  bind(&is_object);

  Push(value);
  movp(value, FieldOperand(string, HeapObject::kMapOffset));
  movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));

  andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
  cmpp(value, Immediate(encoding_mask));
  Pop(value);
  Check(equal, kUnexpectedStringType);

  // The index is assumed to be untagged coming in; tag it to compare with the
  // string length without using a temp register. It is restored at the end of
  // this function.
  Integer32ToSmi(index, index);
  SmiCompare(index, FieldOperand(string, String::kLengthOffset));
  Check(less, kIndexIsTooLarge);

  SmiCompare(index, Smi::FromInt(0));
  Check(greater_equal, kIndexIsNegative);

  // Restore the index.
  SmiToInteger32(index, index);
}


void MacroAssembler::PrepareCallCFunction(int num_arguments) {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  DCHECK(frame_alignment != 0);
  DCHECK(num_arguments >= 0);

  // Make stack end at alignment and allocate space for arguments and old rsp.
  movp(kScratchRegister, rsp);
  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
  andp(rsp, Immediate(-frame_alignment));
  movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}


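// For reference, a hedged sketch of the rsp adjustment above: one extra slot
// is reserved so the pre-alignment rsp can be stored above the arguments and
// restored by CallCFunction (AdjustedRspSketch is an illustrative name):
//
//   static uintptr_t AdjustedRspSketch(uintptr_t rsp, int slots,
//                                      uintptr_t alignment) {
//     uintptr_t new_rsp = rsp - (slots + 1) * kRegisterSize;
//     new_rsp &= ~(alignment - 1);  // andp(rsp, Immediate(-alignment))
//     return new_rsp;  // old rsp is stored at new_rsp + slots * kRegisterSize
//   }

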
void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  LoadAddress(rax, function);
  CallCFunction(rax, num_arguments);
}


void MacroAssembler::CallCFunction(Register function, int num_arguments) {
  DCHECK(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  DCHECK(base::OS::ActivationFrameAlignment() != 0);
  DCHECK(num_arguments >= 0);
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}


bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}


CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(NULL, address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CpuFeatures::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  if (scratch.is(object)) {
    andp(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    movp(scratch, Immediate(~Page::kPageAlignmentMask));
    andp(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
          Immediate(static_cast<uint8_t>(mask)));
  } else {
    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}


void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                        Register scratch,
                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    Move(scratch, map);
    movl(scratch, FieldOperand(scratch, Map::kBitField3Offset));
    andl(scratch, Immediate(Map::Deprecated::kMask));
    j(not_zero, if_deprecated);
  }
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register bitmap_scratch,
                                 Register mask_scratch,
                                 Label* on_black,
                                 Label::Distance on_black_distance) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(object, bitmap_scratch, mask_scratch);

  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  // The mask_scratch register contains a 1 at the position of the first bit
  // and a 0 at all other positions, including the position of the second bit.
  movp(rcx, mask_scratch);
  // Make rcx into a mask that covers both marking bits using the operation
  // rcx = mask | (mask << 1).
  leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
  // Note that we are using a 4-byte aligned 8-byte load.
  andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  cmpp(mask_scratch, rcx);
  j(equal, on_black, on_black_distance);
}


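// For reference, a hedged C++ sketch of the black check above: with
// mask = 1 << first_bit, an object is black exactly when its two mark bits
// read "10" (IsBlackSketch is an illustrative name):
//
//   static bool IsBlackSketch(uintptr_t cell, uintptr_t mask) {
//     uintptr_t both = mask | (mask << 1);  // the leap with times_2 above
//     return (cell & both) == mask;  // first bit set, second bit clear
//   }

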
// Detect some, but not all, common pointer-free objects. This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(
    Register value,
    Register scratch,
    Label* not_data_object,
    Label::Distance not_data_object_distance) {
  Label is_data_object;
  movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  j(equal, &is_data_object, Label::kNear);
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
        Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, not_data_object, not_data_object_distance);
  bind(&is_data_object);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
  movp(bitmap_reg, addr_reg);
  // Sign extended 32 bit immediate.
  andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  movp(rcx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shrl(rcx, Immediate(shift));
  andp(rcx,
       Immediate((Page::kPageAlignmentMask >> shift) &
                 ~(Bitmap::kBytesPerCell - 1)));

  addp(bitmap_reg, rcx);
  movp(rcx, addr_reg);
  shrl(rcx, Immediate(kPointerSizeLog2));
  andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
  movl(mask_reg, Immediate(1));
  shlp_cl(mask_reg);
}


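// For reference, a hedged C++ sketch of the address arithmetic above. Note
// that bitmap_reg ends up holding the page address plus the cell's byte
// offset; MemoryChunk::kHeaderSize is applied at the load sites:
//
//   static void GetMarkBitsSketch(uintptr_t addr, uintptr_t* cell_addr,
//                                 uintptr_t* mask) {
//     uintptr_t page = addr & ~Page::kPageAlignmentMask;
//     uintptr_t word_index =
//         (addr & Page::kPageAlignmentMask) >> kPointerSizeLog2;
//     *cell_addr = page +
//         (word_index >> Bitmap::kBitsPerCellLog2) * Bitmap::kBytesPerCell;
//     *mask = static_cast<uintptr_t>(1)
//             << (word_index & ((1 << Bitmap::kBitsPerCellLog2) - 1));
//   }

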
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
  j(not_zero, &done, Label::kNear);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    Push(mask_scratch);
    // shl. May overflow making the check conservative.
    addp(mask_scratch, mask_scratch);
    testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    Pop(mask_scratch);
  }

  // Value is white. We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = rcx;  // Holds map while checking type.
  Register length = rcx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap-number.
  movp(map, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(not_equal, &not_heap_number, Label::kNear);
  movp(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = rcx;
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  testb(instance_type, Immediate(kExternalStringTag));
  j(zero, &not_external, Label::kNear);
  movp(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either Latin1 or UC16.
  DCHECK(kOneByteStringTag == 0x04);
  andp(length, Immediate(kStringEncodingMask));
  xorp(length, Immediate(kStringEncodingMask));
  addp(length, Immediate(0x04));
  // Value now either 4 (if Latin1) or 8 (if UC16), i.e. char-size shifted by 2.
  imulp(length, FieldOperand(value, String::kLengthOffset));
  shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
  addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  andp(length, Immediate(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);

  bind(&done);
}


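// For reference, a hedged sketch of the sequential-string size trick above,
// relying on kOneByteStringTag == 0x04 being the encoding bit
// (SeqStringSizeSketch is an illustrative name; chars is the untagged
// length):
//
//   static size_t SeqStringSizeSketch(uint32_t instance_type, size_t chars) {
//     uint32_t enc = instance_type & kStringEncodingMask;   // 0x04 or 0x00
//     uint32_t char_size_x4 = (enc ^ kStringEncodingMask) + 0x04;  // 4 or 8
//     size_t bytes = (char_size_x4 * chars) >> 2;  // 1 or 2 bytes per char
//     bytes += SeqString::kHeaderSize + kObjectAlignmentMask;
//     return bytes & ~static_cast<size_t>(kObjectAlignmentMask);
//   }

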
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Label next, start;
  Register empty_fixed_array_value = r8;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  movp(rcx, rax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);

  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(0));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register rcx contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  cmpp(empty_fixed_array_value,
       FieldOperand(rcx, JSObject::kElementsOffset));
  j(equal, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
  cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
  j(not_equal, call_runtime);

  bind(&no_elements);
  movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
  cmpp(rcx, null_value);
  j(not_equal, &next);
}


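// For reference, a hedged pseudo-C++ sketch of the prototype walk above (the
// receiver is expected in rax; Object and Map stand in for the heap types):
//
//   for (Object* o = receiver; o != null_value; o = o->map()->prototype()) {
//     Map* map = o->map();
//     if (o == receiver) {
//       if (EnumLength(map) == kInvalidEnumCacheSentinel) goto runtime;
//     } else if (EnumLength(map) != 0) {
//       goto runtime;  // only the receiver may have a non-empty cache
//     }
//     if (o->elements() != empty_fixed_array &&
//         o->elements() != empty_slow_element_dictionary) {
//       goto runtime;  // enumerable elements would invalidate the cache
//     }
//   }

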
void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  leap(scratch_reg, Operand(receiver_reg,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  Move(kScratchRegister, new_space_start);
  cmpp(scratch_reg, kScratchRegister);
  j(less, no_memento_found);
  cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
  j(greater, no_memento_found);
  CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
              Heap::kAllocationMementoMapRootIndex);
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
  DCHECK(!scratch1.is(scratch0));
  Register current = scratch0;
  Label loop_again;

  movp(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  movp(current, FieldOperand(current, HeapObject::kMapOffset));
  movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  movp(current, FieldOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  j(not_equal, &loop_again);
}


void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
  DCHECK(!dividend.is(rax));
  DCHECK(!dividend.is(rdx));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  movl(rax, Immediate(mag.multiplier));
  imull(dividend);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) addl(rdx, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
  if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
  movl(rax, dividend);
  shrl(rax, Immediate(31));
  addl(rdx, rax);
}


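// For reference, a hedged C++ sketch of the magic-number division above
// (the classic multiply-and-shift technique; the quotient lands in rdx).
// TruncatingDivSketch is an illustrative name; multiplier, shift and neg
// come from base::SignedDivisionByConstant:
//
//   static int32_t TruncatingDivSketch(int32_t dividend, int32_t divisor,
//                                      uint32_t multiplier, int shift,
//                                      bool neg) {
//     int64_t product = static_cast<int64_t>(dividend) *
//                       static_cast<int32_t>(multiplier);
//     int32_t high = static_cast<int32_t>(product >> 32);  // imull -> rdx
//     if (divisor > 0 && neg) high += dividend;
//     if (divisor < 0 && !neg && multiplier > 0) high -= dividend;
//     if (shift > 0) high >>= shift;
//     // Add the sign bit of the dividend to round the quotient toward zero.
//     return high + (static_cast<uint32_t>(dividend) >> 31);
//   }

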
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64