1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "src/bootstrapper.h"
10 #include "src/codegen.h"
11 #include "src/cpu-profiler.h"
12 #include "src/debug.h"
13 #include "src/heap/heap.h"
14 #include "src/isolate-inl.h"
15 #include "src/serialize.h"
16 #include "src/x64/assembler-x64.h"
17 #include "src/x64/macro-assembler-x64.h"
22 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
23 : Assembler(arg_isolate, buffer, size),
24 generating_stub_(false),
26 root_array_available_(true) {
27 if (isolate() != NULL) {
28 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
34 static const int64_t kInvalidRootRegisterDelta = -1;
37 int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
38 if (predictable_code_size() &&
39 (other.address() < reinterpret_cast<Address>(isolate()) ||
40 other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
41 return kInvalidRootRegisterDelta;
43 Address roots_register_value = kRootRegisterBias +
44 reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
46 int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization.
47 if (kPointerSize == kInt64Size) {
48 delta = other.address() - roots_register_value;
50 // For x32, zero extend the address to 64-bit and calculate the delta.
51 uint64_t o = static_cast<uint32_t>(
52 reinterpret_cast<intptr_t>(other.address()));
53 uint64_t r = static_cast<uint32_t>(
54 reinterpret_cast<intptr_t>(roots_register_value));
61 Operand MacroAssembler::ExternalOperand(ExternalReference target,
63 if (root_array_available_ && !serializer_enabled()) {
64 int64_t delta = RootRegisterDelta(target);
65 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
66 return Operand(kRootRegister, static_cast<int32_t>(delta));
69 Move(scratch, target);
70 return Operand(scratch, 0);
74 void MacroAssembler::Load(Register destination, ExternalReference source) {
75 if (root_array_available_ && !serializer_enabled()) {
76 int64_t delta = RootRegisterDelta(source);
77 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
78 movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
83 if (destination.is(rax)) {
86 Move(kScratchRegister, source);
87 movp(destination, Operand(kScratchRegister, 0));
92 void MacroAssembler::Store(ExternalReference destination, Register source) {
93 if (root_array_available_ && !serializer_enabled()) {
94 int64_t delta = RootRegisterDelta(destination);
95 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
96 movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
101 if (source.is(rax)) {
102 store_rax(destination);
104 Move(kScratchRegister, destination);
105 movp(Operand(kScratchRegister, 0), source);
110 void MacroAssembler::LoadAddress(Register destination,
111 ExternalReference source) {
112 if (root_array_available_ && !serializer_enabled()) {
113 int64_t delta = RootRegisterDelta(source);
114 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
115 leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
120 Move(destination, source);
124 int MacroAssembler::LoadAddressSize(ExternalReference source) {
125 if (root_array_available_ && !serializer_enabled()) {
126 // This calculation depends on the internals of LoadAddress.
127 // It's correctness is ensured by the asserts in the Call
128 // instruction below.
129 int64_t delta = RootRegisterDelta(source);
130 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
131 // Operand is leap(scratch, Operand(kRootRegister, delta));
132 // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
134 if (!is_int8(static_cast<int32_t>(delta))) {
135 size += 3; // Need full four-byte displacement in lea.
140 // Size of movp(destination, src);
141 return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
145 void MacroAssembler::PushAddress(ExternalReference source) {
146 int64_t address = reinterpret_cast<int64_t>(source.address());
147 if (is_int32(address) && !serializer_enabled()) {
148 if (emit_debug_code()) {
149 Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
151 Push(Immediate(static_cast<int32_t>(address)));
154 LoadAddress(kScratchRegister, source);
155 Push(kScratchRegister);
159 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
160 DCHECK(root_array_available_);
161 movp(destination, Operand(kRootRegister,
162 (index << kPointerSizeLog2) - kRootRegisterBias));
166 void MacroAssembler::LoadRootIndexed(Register destination,
167 Register variable_offset,
169 DCHECK(root_array_available_);
171 Operand(kRootRegister,
172 variable_offset, times_pointer_size,
173 (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
177 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
178 DCHECK(root_array_available_);
179 movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
184 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
185 DCHECK(root_array_available_);
186 Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
190 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
191 DCHECK(root_array_available_);
192 cmpp(with, Operand(kRootRegister,
193 (index << kPointerSizeLog2) - kRootRegisterBias));
197 void MacroAssembler::CompareRoot(const Operand& with,
198 Heap::RootListIndex index) {
199 DCHECK(root_array_available_);
200 DCHECK(!with.AddressUsesRegister(kScratchRegister));
201 LoadRoot(kScratchRegister, index);
202 cmpp(with, kScratchRegister);
206 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
209 SaveFPRegsMode save_fp,
210 RememberedSetFinalAction and_then) {
211 if (emit_debug_code()) {
213 JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
217 // Load store buffer top.
218 LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
219 // Store pointer to buffer.
220 movp(Operand(scratch, 0), addr);
221 // Increment buffer top.
222 addp(scratch, Immediate(kPointerSize));
223 // Write back new top of buffer.
224 StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
225 // Call stub on end of buffer.
227 // Check for end of buffer.
228 testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
229 if (and_then == kReturnAtEnd) {
230 Label buffer_overflowed;
231 j(not_equal, &buffer_overflowed, Label::kNear);
233 bind(&buffer_overflowed);
235 DCHECK(and_then == kFallThroughAtEnd);
236 j(equal, &done, Label::kNear);
238 StoreBufferOverflowStub store_buffer_overflow =
239 StoreBufferOverflowStub(isolate(), save_fp);
240 CallStub(&store_buffer_overflow);
241 if (and_then == kReturnAtEnd) {
244 DCHECK(and_then == kFallThroughAtEnd);
250 void MacroAssembler::InNewSpace(Register object,
254 Label::Distance distance) {
255 if (serializer_enabled()) {
256 // Can't do arithmetic on external references if it might get serialized.
257 // The mask isn't really an address. We load it as an external reference in
258 // case the size of the new space is different between the snapshot maker
259 // and the running system.
260 if (scratch.is(object)) {
261 Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
262 andp(scratch, kScratchRegister);
264 Move(scratch, ExternalReference::new_space_mask(isolate()));
265 andp(scratch, object);
267 Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
268 cmpp(scratch, kScratchRegister);
269 j(cc, branch, distance);
271 DCHECK(kPointerSize == kInt64Size
272 ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
273 : kPointerSize == kInt32Size);
274 intptr_t new_space_start =
275 reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
276 Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
277 Assembler::RelocInfoNone());
278 if (scratch.is(object)) {
279 addp(scratch, kScratchRegister);
281 leap(scratch, Operand(object, kScratchRegister, times_1, 0));
284 Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
285 j(cc, branch, distance);
290 void MacroAssembler::RecordWriteField(
295 SaveFPRegsMode save_fp,
296 RememberedSetAction remembered_set_action,
298 PointersToHereCheck pointers_to_here_check_for_value) {
299 // First, check if a write barrier is even needed. The tests below
300 // catch stores of Smis.
303 // Skip barrier if writing a smi.
304 if (smi_check == INLINE_SMI_CHECK) {
305 JumpIfSmi(value, &done);
308 // Although the object register is tagged, the offset is relative to the start
309 // of the object, so so offset must be a multiple of kPointerSize.
310 DCHECK(IsAligned(offset, kPointerSize));
312 leap(dst, FieldOperand(object, offset));
313 if (emit_debug_code()) {
315 testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
316 j(zero, &ok, Label::kNear);
321 RecordWrite(object, dst, value, save_fp, remembered_set_action,
322 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
326 // Clobber clobbered input registers when running with the debug-code flag
327 // turned on to provoke errors.
328 if (emit_debug_code()) {
329 Move(value, kZapValue, Assembler::RelocInfoNone());
330 Move(dst, kZapValue, Assembler::RelocInfoNone());
335 void MacroAssembler::RecordWriteArray(
339 SaveFPRegsMode save_fp,
340 RememberedSetAction remembered_set_action,
342 PointersToHereCheck pointers_to_here_check_for_value) {
343 // First, check if a write barrier is even needed. The tests below
344 // catch stores of Smis.
347 // Skip barrier if writing a smi.
348 if (smi_check == INLINE_SMI_CHECK) {
349 JumpIfSmi(value, &done);
352 // Array access: calculate the destination address. Index is not a smi.
353 Register dst = index;
354 leap(dst, Operand(object, index, times_pointer_size,
355 FixedArray::kHeaderSize - kHeapObjectTag));
357 RecordWrite(object, dst, value, save_fp, remembered_set_action,
358 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
362 // Clobber clobbered input registers when running with the debug-code flag
363 // turned on to provoke errors.
364 if (emit_debug_code()) {
365 Move(value, kZapValue, Assembler::RelocInfoNone());
366 Move(index, kZapValue, Assembler::RelocInfoNone());
371 void MacroAssembler::RecordWriteForMap(Register object,
374 SaveFPRegsMode fp_mode) {
375 DCHECK(!object.is(kScratchRegister));
376 DCHECK(!object.is(map));
377 DCHECK(!object.is(dst));
378 DCHECK(!map.is(dst));
379 AssertNotSmi(object);
381 if (emit_debug_code()) {
383 if (map.is(kScratchRegister)) pushq(map);
384 CompareMap(map, isolate()->factory()->meta_map());
385 if (map.is(kScratchRegister)) popq(map);
386 j(equal, &ok, Label::kNear);
391 if (!FLAG_incremental_marking) {
395 if (emit_debug_code()) {
397 if (map.is(kScratchRegister)) pushq(map);
398 cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
399 if (map.is(kScratchRegister)) popq(map);
400 j(equal, &ok, Label::kNear);
405 // Compute the address.
406 leap(dst, FieldOperand(object, HeapObject::kMapOffset));
408 // First, check if a write barrier is even needed. The tests below
409 // catch stores of smis and stores into the young generation.
412 // A single check of the map's pages interesting flag suffices, since it is
413 // only set during incremental collection, and then it's also guaranteed that
414 // the from object's page's interesting flag is also set. This optimization
415 // relies on the fact that maps can never be in new space.
417 map, // Used as scratch.
418 MemoryChunk::kPointersToHereAreInterestingMask,
423 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
429 // Count number of write barriers in generated code.
430 isolate()->counters()->write_barriers_static()->Increment();
431 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
433 // Clobber clobbered registers when running with the debug-code flag
434 // turned on to provoke errors.
435 if (emit_debug_code()) {
436 Move(dst, kZapValue, Assembler::RelocInfoNone());
437 Move(map, kZapValue, Assembler::RelocInfoNone());
442 void MacroAssembler::RecordWrite(
446 SaveFPRegsMode fp_mode,
447 RememberedSetAction remembered_set_action,
449 PointersToHereCheck pointers_to_here_check_for_value) {
450 DCHECK(!object.is(value));
451 DCHECK(!object.is(address));
452 DCHECK(!value.is(address));
453 AssertNotSmi(object);
455 if (remembered_set_action == OMIT_REMEMBERED_SET &&
456 !FLAG_incremental_marking) {
460 if (emit_debug_code()) {
462 cmpp(value, Operand(address, 0));
463 j(equal, &ok, Label::kNear);
468 // First, check if a write barrier is even needed. The tests below
469 // catch stores of smis and stores into the young generation.
472 if (smi_check == INLINE_SMI_CHECK) {
473 // Skip barrier if writing a smi.
474 JumpIfSmi(value, &done);
477 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
479 value, // Used as scratch.
480 MemoryChunk::kPointersToHereAreInterestingMask,
486 CheckPageFlag(object,
487 value, // Used as scratch.
488 MemoryChunk::kPointersFromHereAreInterestingMask,
493 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
499 // Count number of write barriers in generated code.
500 isolate()->counters()->write_barriers_static()->Increment();
501 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
503 // Clobber clobbered registers when running with the debug-code flag
504 // turned on to provoke errors.
505 if (emit_debug_code()) {
506 Move(address, kZapValue, Assembler::RelocInfoNone());
507 Move(value, kZapValue, Assembler::RelocInfoNone());
512 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
513 if (emit_debug_code()) Check(cc, reason);
517 void MacroAssembler::AssertFastElements(Register elements) {
518 if (emit_debug_code()) {
520 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
521 Heap::kFixedArrayMapRootIndex);
522 j(equal, &ok, Label::kNear);
523 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
524 Heap::kFixedDoubleArrayMapRootIndex);
525 j(equal, &ok, Label::kNear);
526 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
527 Heap::kFixedCOWArrayMapRootIndex);
528 j(equal, &ok, Label::kNear);
529 Abort(kJSObjectWithFastElementsMapHasSlowElements);
535 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
537 j(cc, &L, Label::kNear);
539 // Control will not return here.
544 void MacroAssembler::CheckStackAlignment() {
545 int frame_alignment = base::OS::ActivationFrameAlignment();
546 int frame_alignment_mask = frame_alignment - 1;
547 if (frame_alignment > kPointerSize) {
548 DCHECK(IsPowerOf2(frame_alignment));
549 Label alignment_as_expected;
550 testp(rsp, Immediate(frame_alignment_mask));
551 j(zero, &alignment_as_expected, Label::kNear);
552 // Abort if stack is not aligned.
554 bind(&alignment_as_expected);
559 void MacroAssembler::NegativeZeroTest(Register result,
563 testl(result, result);
564 j(not_zero, &ok, Label::kNear);
571 void MacroAssembler::Abort(BailoutReason reason) {
573 const char* msg = GetBailoutReason(reason);
575 RecordComment("Abort message: ");
579 if (FLAG_trap_on_abort) {
585 Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
586 Assembler::RelocInfoNone());
587 Push(kScratchRegister);
590 // We don't actually want to generate a pile of code for this, so just
591 // claim there is a stack frame, without generating one.
592 FrameScope scope(this, StackFrame::NONE);
593 CallRuntime(Runtime::kAbort, 1);
595 CallRuntime(Runtime::kAbort, 1);
597 // Control will not return here.
602 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
603 DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
604 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
608 void MacroAssembler::TailCallStub(CodeStub* stub) {
609 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
613 void MacroAssembler::StubReturn(int argc) {
614 DCHECK(argc >= 1 && generating_stub());
615 ret((argc - 1) * kPointerSize);
619 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
620 return has_frame_ || !stub->SometimesSetsUpAFrame();
624 void MacroAssembler::IndexFromHash(Register hash, Register index) {
625 // The assert checks that the constants for the maximum number of digits
626 // for an array index cached in the hash field and the number of bits
627 // reserved for it does not conflict.
628 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
629 (1 << String::kArrayIndexValueBits));
630 if (!hash.is(index)) {
633 DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
637 void MacroAssembler::CallRuntime(const Runtime::Function* f,
639 SaveFPRegsMode save_doubles) {
640 // If the expected number of arguments of the runtime function is
641 // constant, we check that the actual number of arguments match the
643 CHECK(f->nargs < 0 || f->nargs == num_arguments);
645 // TODO(1236192): Most runtime routines don't need the number of
646 // arguments passed in because it is constant. At some point we
647 // should remove this need and make the runtime routine entry code
649 Set(rax, num_arguments);
650 LoadAddress(rbx, ExternalReference(f, isolate()));
651 CEntryStub ces(isolate(), f->result_size, save_doubles);
656 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
658 Set(rax, num_arguments);
659 LoadAddress(rbx, ext);
661 CEntryStub stub(isolate(), 1);
666 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
669 // ----------- S t a t e -------------
670 // -- rsp[0] : return address
671 // -- rsp[8] : argument num_arguments - 1
673 // -- rsp[8 * num_arguments] : argument 0 (receiver)
674 // -----------------------------------
676 // TODO(1236192): Most runtime routines don't need the number of
677 // arguments passed in because it is constant. At some point we
678 // should remove this need and make the runtime routine entry code
680 Set(rax, num_arguments);
681 JumpToExternalReference(ext, result_size);
685 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
688 TailCallExternalReference(ExternalReference(fid, isolate()),
694 static int Offset(ExternalReference ref0, ExternalReference ref1) {
695 int64_t offset = (ref0.address() - ref1.address());
696 // Check that fits into int.
697 DCHECK(static_cast<int>(offset) == offset);
698 return static_cast<int>(offset);
702 void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
703 EnterApiExitFrame(arg_stack_space);
707 void MacroAssembler::CallApiFunctionAndReturn(
708 Register function_address,
709 ExternalReference thunk_ref,
710 Register thunk_last_arg,
712 Operand return_value_operand,
713 Operand* context_restore_operand) {
715 Label promote_scheduled_exception;
716 Label exception_handled;
717 Label delete_allocated_handles;
718 Label leave_exit_frame;
721 Factory* factory = isolate()->factory();
722 ExternalReference next_address =
723 ExternalReference::handle_scope_next_address(isolate());
724 const int kNextOffset = 0;
725 const int kLimitOffset = Offset(
726 ExternalReference::handle_scope_limit_address(isolate()),
728 const int kLevelOffset = Offset(
729 ExternalReference::handle_scope_level_address(isolate()),
731 ExternalReference scheduled_exception_address =
732 ExternalReference::scheduled_exception_address(isolate());
734 DCHECK(rdx.is(function_address) || r8.is(function_address));
735 // Allocate HandleScope in callee-save registers.
736 Register prev_next_address_reg = r14;
737 Register prev_limit_reg = rbx;
738 Register base_reg = r15;
739 Move(base_reg, next_address);
740 movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
741 movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
742 addl(Operand(base_reg, kLevelOffset), Immediate(1));
744 if (FLAG_log_timer_events) {
745 FrameScope frame(this, StackFrame::MANUAL);
746 PushSafepointRegisters();
747 PrepareCallCFunction(1);
748 LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
749 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
750 PopSafepointRegisters();
754 Label profiler_disabled;
755 Label end_profiler_check;
756 Move(rax, ExternalReference::is_profiling_address(isolate()));
757 cmpb(Operand(rax, 0), Immediate(0));
758 j(zero, &profiler_disabled);
760 // Third parameter is the address of the actual getter function.
761 Move(thunk_last_arg, function_address);
762 Move(rax, thunk_ref);
763 jmp(&end_profiler_check);
765 bind(&profiler_disabled);
766 // Call the api function!
767 Move(rax, function_address);
769 bind(&end_profiler_check);
771 // Call the api function!
774 if (FLAG_log_timer_events) {
775 FrameScope frame(this, StackFrame::MANUAL);
776 PushSafepointRegisters();
777 PrepareCallCFunction(1);
778 LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
779 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
780 PopSafepointRegisters();
783 // Load the value from ReturnValue
784 movp(rax, return_value_operand);
787 // No more valid handles (the result handle was the last one). Restore
788 // previous handle scope.
789 subl(Operand(base_reg, kLevelOffset), Immediate(1));
790 movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
791 cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
792 j(not_equal, &delete_allocated_handles);
793 bind(&leave_exit_frame);
795 // Check if the function scheduled an exception.
796 Move(rsi, scheduled_exception_address);
797 Cmp(Operand(rsi, 0), factory->the_hole_value());
798 j(not_equal, &promote_scheduled_exception);
799 bind(&exception_handled);
801 #if ENABLE_EXTRA_CHECKS
802 // Check if the function returned a valid JavaScript value.
804 Register return_value = rax;
807 JumpIfSmi(return_value, &ok, Label::kNear);
808 movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
810 CmpInstanceType(map, FIRST_NONSTRING_TYPE);
811 j(below, &ok, Label::kNear);
813 CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
814 j(above_equal, &ok, Label::kNear);
816 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
817 j(equal, &ok, Label::kNear);
819 CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
820 j(equal, &ok, Label::kNear);
822 CompareRoot(return_value, Heap::kTrueValueRootIndex);
823 j(equal, &ok, Label::kNear);
825 CompareRoot(return_value, Heap::kFalseValueRootIndex);
826 j(equal, &ok, Label::kNear);
828 CompareRoot(return_value, Heap::kNullValueRootIndex);
829 j(equal, &ok, Label::kNear);
831 Abort(kAPICallReturnedInvalidObject);
836 bool restore_context = context_restore_operand != NULL;
837 if (restore_context) {
838 movp(rsi, *context_restore_operand);
840 LeaveApiExitFrame(!restore_context);
841 ret(stack_space * kPointerSize);
843 bind(&promote_scheduled_exception);
845 FrameScope frame(this, StackFrame::INTERNAL);
846 CallRuntime(Runtime::kPromoteScheduledException, 0);
848 jmp(&exception_handled);
850 // HandleScope limit has changed. Delete allocated extensions.
851 bind(&delete_allocated_handles);
852 movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
853 movp(prev_limit_reg, rax);
854 LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
856 ExternalReference::delete_handle_scope_extensions(isolate()));
858 movp(rax, prev_limit_reg);
859 jmp(&leave_exit_frame);
863 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
865 // Set the entry point and jump to the C entry runtime stub.
866 LoadAddress(rbx, ext);
867 CEntryStub ces(isolate(), result_size);
868 jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
872 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
874 const CallWrapper& call_wrapper) {
875 // You can't call a builtin without a valid frame.
876 DCHECK(flag == JUMP_FUNCTION || has_frame());
878 // Rely on the assertion to check that the number of provided
879 // arguments match the expected number of arguments. Fake a
880 // parameter count to avoid emitting code to do the check.
881 ParameterCount expected(0);
882 GetBuiltinEntry(rdx, id);
883 InvokeCode(rdx, expected, expected, flag, call_wrapper);
887 void MacroAssembler::GetBuiltinFunction(Register target,
888 Builtins::JavaScript id) {
889 // Load the builtins object into target register.
890 movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
891 movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
892 movp(target, FieldOperand(target,
893 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
897 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
898 DCHECK(!target.is(rdi));
899 // Load the JavaScript builtin function from the builtins object.
900 GetBuiltinFunction(rdi, id);
901 movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
905 #define REG(Name) { kRegister_ ## Name ## _Code }
907 static const Register saved_regs[] = {
908 REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
909 REG(r9), REG(r10), REG(r11)
914 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
917 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
920 Register exclusion3) {
921 // We don't allow a GC during a store buffer overflow so there is no need to
922 // store the registers in any particular way, but we do have to store and
924 for (int i = 0; i < kNumberOfSavedRegs; i++) {
925 Register reg = saved_regs[i];
926 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
930 // R12 to r15 are callee save on all platforms.
931 if (fp_mode == kSaveFPRegs) {
932 subp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
933 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
934 XMMRegister reg = XMMRegister::from_code(i);
935 movups(Operand(rsp, i * kSIMD128Size), reg);
941 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
944 Register exclusion3) {
945 if (fp_mode == kSaveFPRegs) {
946 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
947 XMMRegister reg = XMMRegister::from_code(i);
948 movups(reg, Operand(rsp, i * kSIMD128Size));
950 addp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
952 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
953 Register reg = saved_regs[i];
954 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
961 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
967 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
973 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
974 DCHECK(!r.IsDouble());
975 if (r.IsInteger8()) {
977 } else if (r.IsUInteger8()) {
979 } else if (r.IsInteger16()) {
981 } else if (r.IsUInteger16()) {
983 } else if (r.IsInteger32()) {
991 void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
992 DCHECK(!r.IsDouble());
993 if (r.IsInteger8() || r.IsUInteger8()) {
995 } else if (r.IsInteger16() || r.IsUInteger16()) {
997 } else if (r.IsInteger32()) {
1000 if (r.IsHeapObject()) {
1002 } else if (r.IsSmi()) {
1010 void MacroAssembler::Set(Register dst, int64_t x) {
1013 } else if (is_uint32(x)) {
1014 movl(dst, Immediate(static_cast<uint32_t>(x)));
1015 } else if (is_int32(x)) {
1016 movq(dst, Immediate(static_cast<int32_t>(x)));
1023 void MacroAssembler::Set(const Operand& dst, intptr_t x) {
1024 if (kPointerSize == kInt64Size) {
1026 movp(dst, Immediate(static_cast<int32_t>(x)));
1028 Set(kScratchRegister, x);
1029 movp(dst, kScratchRegister);
1032 movp(dst, Immediate(static_cast<int32_t>(x)));
1037 // ----------------------------------------------------------------------------
1038 // Smi tagging, untagging and tag detection.
1040 bool MacroAssembler::IsUnsafeInt(const int32_t x) {
1041 static const int kMaxBits = 17;
1042 return !is_intn(x, kMaxBits);
1046 void MacroAssembler::SafeMove(Register dst, Smi* src) {
1047 DCHECK(!dst.is(kScratchRegister));
1048 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1049 if (SmiValuesAre32Bits()) {
1050 // JIT cookie can be converted to Smi.
1051 Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
1052 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1053 xorp(dst, kScratchRegister);
1055 DCHECK(SmiValuesAre31Bits());
1056 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
1057 movp(dst, Immediate(value ^ jit_cookie()));
1058 xorp(dst, Immediate(jit_cookie()));
1066 void MacroAssembler::SafePush(Smi* src) {
1067 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1068 if (SmiValuesAre32Bits()) {
1069 // JIT cookie can be converted to Smi.
1070 Push(Smi::FromInt(src->value() ^ jit_cookie()));
1071 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1072 xorp(Operand(rsp, 0), kScratchRegister);
1074 DCHECK(SmiValuesAre31Bits());
1075 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
1076 Push(Immediate(value ^ jit_cookie()));
1077 xorp(Operand(rsp, 0), Immediate(jit_cookie()));
1085 Register MacroAssembler::GetSmiConstant(Smi* source) {
1086 int value = source->value();
1088 xorl(kScratchRegister, kScratchRegister);
1089 return kScratchRegister;
1092 return kSmiConstantRegister;
1094 LoadSmiConstant(kScratchRegister, source);
1095 return kScratchRegister;
1099 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
1100 if (emit_debug_code()) {
1101 Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
1102 Assembler::RelocInfoNone());
1103 cmpp(dst, kSmiConstantRegister);
1104 Assert(equal, kUninitializedKSmiConstantRegister);
1106 int value = source->value();
1111 bool negative = value < 0;
1112 unsigned int uvalue = negative ? -value : value;
1117 Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
1121 leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
1125 leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
1129 Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
1133 Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
1137 Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
1140 movp(dst, kSmiConstantRegister);
1146 Move(dst, source, Assembler::RelocInfoNone());
1155 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
1156 STATIC_ASSERT(kSmiTag == 0);
1160 shlp(dst, Immediate(kSmiShift));
1164 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
1165 if (emit_debug_code()) {
1166 testb(dst, Immediate(0x01));
1168 j(zero, &ok, Label::kNear);
1169 Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
1173 if (SmiValuesAre32Bits()) {
1174 DCHECK(kSmiShift % kBitsPerByte == 0);
1175 movl(Operand(dst, kSmiShift / kBitsPerByte), src);
1177 DCHECK(SmiValuesAre31Bits());
1178 Integer32ToSmi(kScratchRegister, src);
1179 movp(dst, kScratchRegister);
1184 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
1188 addl(dst, Immediate(constant));
1190 leal(dst, Operand(src, constant));
1192 shlp(dst, Immediate(kSmiShift));
1196 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
1197 STATIC_ASSERT(kSmiTag == 0);
1202 if (SmiValuesAre32Bits()) {
1203 shrp(dst, Immediate(kSmiShift));
1205 DCHECK(SmiValuesAre31Bits());
1206 sarl(dst, Immediate(kSmiShift));
1211 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
1212 if (SmiValuesAre32Bits()) {
1213 movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1215 DCHECK(SmiValuesAre31Bits());
1217 sarl(dst, Immediate(kSmiShift));
1222 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
1223 STATIC_ASSERT(kSmiTag == 0);
1227 sarp(dst, Immediate(kSmiShift));
1228 if (kPointerSize == kInt32Size) {
1229 // Sign extend to 64-bit.
1235 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
1236 if (SmiValuesAre32Bits()) {
1237 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
1239 DCHECK(SmiValuesAre31Bits());
1241 SmiToInteger64(dst, dst);
1246 void MacroAssembler::SmiTest(Register src) {
1252 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1259 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
1265 void MacroAssembler::Cmp(Register dst, Smi* src) {
1266 DCHECK(!dst.is(kScratchRegister));
1267 if (src->value() == 0) {
1270 Register constant_reg = GetSmiConstant(src);
1271 cmpp(dst, constant_reg);
1276 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
1283 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
1290 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
1292 if (SmiValuesAre32Bits()) {
1293 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1295 DCHECK(SmiValuesAre31Bits());
1296 cmpl(dst, Immediate(src));
1301 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
1302 // The Operand cannot use the smi register.
1303 Register smi_reg = GetSmiConstant(src);
1304 DCHECK(!dst.AddressUsesRegister(smi_reg));
1309 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
1310 if (SmiValuesAre32Bits()) {
1311 cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
1313 DCHECK(SmiValuesAre31Bits());
1314 SmiToInteger32(kScratchRegister, dst);
1315 cmpl(kScratchRegister, src);
1320 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
1326 SmiToInteger64(dst, src);
1332 if (power < kSmiShift) {
1333 sarp(dst, Immediate(kSmiShift - power));
1334 } else if (power > kSmiShift) {
1335 shlp(dst, Immediate(power - kSmiShift));
1340 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
1343 DCHECK((0 <= power) && (power < 32));
1345 shrp(dst, Immediate(power + kSmiShift));
1347 UNIMPLEMENTED(); // Not used.
1352 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
1354 Label::Distance near_jump) {
1355 if (dst.is(src1) || dst.is(src2)) {
1356 DCHECK(!src1.is(kScratchRegister));
1357 DCHECK(!src2.is(kScratchRegister));
1358 movp(kScratchRegister, src1);
1359 orp(kScratchRegister, src2);
1360 JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
1361 movp(dst, kScratchRegister);
1365 JumpIfNotSmi(dst, on_not_smis, near_jump);
1370 Condition MacroAssembler::CheckSmi(Register src) {
1371 STATIC_ASSERT(kSmiTag == 0);
1372 testb(src, Immediate(kSmiTagMask));
1377 Condition MacroAssembler::CheckSmi(const Operand& src) {
1378 STATIC_ASSERT(kSmiTag == 0);
1379 testb(src, Immediate(kSmiTagMask));
1384 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
1385 STATIC_ASSERT(kSmiTag == 0);
1386 // Test that both bits of the mask 0x8000000000000001 are zero.
1387 movp(kScratchRegister, src);
1388 rolp(kScratchRegister, Immediate(1));
1389 testb(kScratchRegister, Immediate(3));
1394 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
1395 if (first.is(second)) {
1396 return CheckSmi(first);
1398 STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
1399 if (SmiValuesAre32Bits()) {
1400 leal(kScratchRegister, Operand(first, second, times_1, 0));
1401 testb(kScratchRegister, Immediate(0x03));
1403 DCHECK(SmiValuesAre31Bits());
1404 movl(kScratchRegister, first);
1405 orl(kScratchRegister, second);
1406 testb(kScratchRegister, Immediate(kSmiTagMask));
1412 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
1414 if (first.is(second)) {
1415 return CheckNonNegativeSmi(first);
1417 movp(kScratchRegister, first);
1418 orp(kScratchRegister, second);
1419 rolp(kScratchRegister, Immediate(1));
1420 testl(kScratchRegister, Immediate(3));
1425 Condition MacroAssembler::CheckEitherSmi(Register first,
1428 if (first.is(second)) {
1429 return CheckSmi(first);
1431 if (scratch.is(second)) {
1432 andl(scratch, first);
1434 if (!scratch.is(first)) {
1435 movl(scratch, first);
1437 andl(scratch, second);
1439 testb(scratch, Immediate(kSmiTagMask));
1444 Condition MacroAssembler::CheckIsMinSmi(Register src) {
1445 DCHECK(!src.is(kScratchRegister));
1446 // If we overflow by subtracting one, it's the minimal smi value.
1447 cmpp(src, kSmiConstantRegister);
1452 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1453 if (SmiValuesAre32Bits()) {
1454 // A 32-bit integer value can always be converted to a smi.
1457 DCHECK(SmiValuesAre31Bits());
1458 cmpl(src, Immediate(0xc0000000));
1464 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1465 if (SmiValuesAre32Bits()) {
1466 // An unsigned 32-bit integer value is valid as long as the high bit
1471 DCHECK(SmiValuesAre31Bits());
1472 testl(src, Immediate(0xc0000000));
1478 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1480 andl(dst, Immediate(kSmiTagMask));
1482 movl(dst, Immediate(kSmiTagMask));
1488 void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1489 if (!(src.AddressUsesRegister(dst))) {
1490 movl(dst, Immediate(kSmiTagMask));
1494 andl(dst, Immediate(kSmiTagMask));
1499 void MacroAssembler::JumpIfValidSmiValue(Register src,
1501 Label::Distance near_jump) {
1502 Condition is_valid = CheckInteger32ValidSmiValue(src);
1503 j(is_valid, on_valid, near_jump);
1507 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1509 Label::Distance near_jump) {
1510 Condition is_valid = CheckInteger32ValidSmiValue(src);
1511 j(NegateCondition(is_valid), on_invalid, near_jump);
1515 void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
1517 Label::Distance near_jump) {
1518 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1519 j(is_valid, on_valid, near_jump);
1523 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1525 Label::Distance near_jump) {
1526 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1527 j(NegateCondition(is_valid), on_invalid, near_jump);
1531 void MacroAssembler::JumpIfSmi(Register src,
1533 Label::Distance near_jump) {
1534 Condition smi = CheckSmi(src);
1535 j(smi, on_smi, near_jump);
1539 void MacroAssembler::JumpIfNotSmi(Register src,
1541 Label::Distance near_jump) {
1542 Condition smi = CheckSmi(src);
1543 j(NegateCondition(smi), on_not_smi, near_jump);
1547 void MacroAssembler::JumpUnlessNonNegativeSmi(
1548 Register src, Label* on_not_smi_or_negative,
1549 Label::Distance near_jump) {
1550 Condition non_negative_smi = CheckNonNegativeSmi(src);
1551 j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1555 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1558 Label::Distance near_jump) {
1559 SmiCompare(src, constant);
1560 j(equal, on_equals, near_jump);
1564 void MacroAssembler::JumpIfNotBothSmi(Register src1,
1566 Label* on_not_both_smi,
1567 Label::Distance near_jump) {
1568 Condition both_smi = CheckBothSmi(src1, src2);
1569 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1573 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1575 Label* on_not_both_smi,
1576 Label::Distance near_jump) {
1577 Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1578 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1582 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1583 if (constant->value() == 0) {
1588 } else if (dst.is(src)) {
1589 DCHECK(!dst.is(kScratchRegister));
1590 switch (constant->value()) {
1592 addp(dst, kSmiConstantRegister);
1595 leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1598 leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1601 leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1604 Register constant_reg = GetSmiConstant(constant);
1605 addp(dst, constant_reg);
1609 switch (constant->value()) {
1611 leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
1614 leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1617 leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1620 leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1623 LoadSmiConstant(dst, constant);
1631 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1632 if (constant->value() != 0) {
1633 if (SmiValuesAre32Bits()) {
1634 addl(Operand(dst, kSmiShift / kBitsPerByte),
1635 Immediate(constant->value()));
1637 DCHECK(SmiValuesAre31Bits());
1638 addp(dst, Immediate(constant));
1644 void MacroAssembler::SmiAddConstant(Register dst,
1647 SmiOperationExecutionMode mode,
1648 Label* bailout_label,
1649 Label::Distance near_jump) {
1650 if (constant->value() == 0) {
1654 } else if (dst.is(src)) {
1655 DCHECK(!dst.is(kScratchRegister));
1656 LoadSmiConstant(kScratchRegister, constant);
1657 addp(dst, kScratchRegister);
1658 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
1659 j(no_overflow, bailout_label, near_jump);
1660 DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
1661 subp(dst, kScratchRegister);
1662 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
1663 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
1665 j(no_overflow, &done, Label::kNear);
1666 subp(dst, kScratchRegister);
1667 jmp(bailout_label, near_jump);
1670 // Bailout if overflow without reserving src.
1671 j(overflow, bailout_label, near_jump);
1674 CHECK(mode.IsEmpty());
1677 DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
1678 DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
1679 LoadSmiConstant(dst, constant);
1681 j(overflow, bailout_label, near_jump);
1686 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1687 if (constant->value() == 0) {
1691 } else if (dst.is(src)) {
1692 DCHECK(!dst.is(kScratchRegister));
1693 Register constant_reg = GetSmiConstant(constant);
1694 subp(dst, constant_reg);
1696 if (constant->value() == Smi::kMinValue) {
1697 LoadSmiConstant(dst, constant);
1698 // Adding and subtracting the min-value gives the same result, it only
1699 // differs on the overflow bit, which we don't check here.
1702 // Subtract by adding the negation.
1703 LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
1710 void MacroAssembler::SmiSubConstant(Register dst,
1713 SmiOperationExecutionMode mode,
1714 Label* bailout_label,
1715 Label::Distance near_jump) {
1716 if (constant->value() == 0) {
1720 } else if (dst.is(src)) {
1721 DCHECK(!dst.is(kScratchRegister));
1722 LoadSmiConstant(kScratchRegister, constant);
1723 subp(dst, kScratchRegister);
1724 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
1725 j(no_overflow, bailout_label, near_jump);
1726 DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
1727 addp(dst, kScratchRegister);
1728 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
1729 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
1731 j(no_overflow, &done, Label::kNear);
1732 addp(dst, kScratchRegister);
1733 jmp(bailout_label, near_jump);
1736 // Bailout if overflow without reserving src.
1737 j(overflow, bailout_label, near_jump);
1740 CHECK(mode.IsEmpty());
1743 DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
1744 DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
1745 if (constant->value() == Smi::kMinValue) {
1746 DCHECK(!dst.is(kScratchRegister));
1748 LoadSmiConstant(kScratchRegister, constant);
1749 subp(dst, kScratchRegister);
1750 j(overflow, bailout_label, near_jump);
1752 // Subtract by adding the negation.
1753 LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1755 j(overflow, bailout_label, near_jump);
1761 void MacroAssembler::SmiNeg(Register dst,
1763 Label* on_smi_result,
1764 Label::Distance near_jump) {
1766 DCHECK(!dst.is(kScratchRegister));
1767 movp(kScratchRegister, src);
1768 negp(dst); // Low 32 bits are retained as zero by negation.
1769 // Test if result is zero or Smi::kMinValue.
1770 cmpp(dst, kScratchRegister);
1771 j(not_equal, on_smi_result, near_jump);
1772 movp(src, kScratchRegister);
1777 // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1778 j(not_equal, on_smi_result, near_jump);
1784 static void SmiAddHelper(MacroAssembler* masm,
1788 Label* on_not_smi_result,
1789 Label::Distance near_jump) {
1792 masm->addp(dst, src2);
1793 masm->j(no_overflow, &done, Label::kNear);
1795 masm->subp(dst, src2);
1796 masm->jmp(on_not_smi_result, near_jump);
1799 masm->movp(dst, src1);
1800 masm->addp(dst, src2);
1801 masm->j(overflow, on_not_smi_result, near_jump);
1806 void MacroAssembler::SmiAdd(Register dst,
1809 Label* on_not_smi_result,
1810 Label::Distance near_jump) {
1811 DCHECK_NOT_NULL(on_not_smi_result);
1812 DCHECK(!dst.is(src2));
1813 SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1817 void MacroAssembler::SmiAdd(Register dst,
1819 const Operand& src2,
1820 Label* on_not_smi_result,
1821 Label::Distance near_jump) {
1822 DCHECK_NOT_NULL(on_not_smi_result);
1823 DCHECK(!src2.AddressUsesRegister(dst));
1824 SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1828 void MacroAssembler::SmiAdd(Register dst,
1831 // No overflow checking. Use only when it's known that
1832 // overflowing is impossible.
1833 if (!dst.is(src1)) {
1834 if (emit_debug_code()) {
1835 movp(kScratchRegister, src1);
1836 addp(kScratchRegister, src2);
1837 Check(no_overflow, kSmiAdditionOverflow);
1839 leap(dst, Operand(src1, src2, times_1, 0));
1842 Assert(no_overflow, kSmiAdditionOverflow);
1848 static void SmiSubHelper(MacroAssembler* masm,
1852 Label* on_not_smi_result,
1853 Label::Distance near_jump) {
1856 masm->subp(dst, src2);
1857 masm->j(no_overflow, &done, Label::kNear);
1859 masm->addp(dst, src2);
1860 masm->jmp(on_not_smi_result, near_jump);
1863 masm->movp(dst, src1);
1864 masm->subp(dst, src2);
1865 masm->j(overflow, on_not_smi_result, near_jump);
1870 void MacroAssembler::SmiSub(Register dst,
1873 Label* on_not_smi_result,
1874 Label::Distance near_jump) {
1875 DCHECK_NOT_NULL(on_not_smi_result);
1876 DCHECK(!dst.is(src2));
1877 SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1881 void MacroAssembler::SmiSub(Register dst,
1883 const Operand& src2,
1884 Label* on_not_smi_result,
1885 Label::Distance near_jump) {
1886 DCHECK_NOT_NULL(on_not_smi_result);
1887 DCHECK(!src2.AddressUsesRegister(dst));
1888 SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1893 static void SmiSubNoOverflowHelper(MacroAssembler* masm,
1897 // No overflow checking. Use only when it's known that
1898 // overflowing is impossible (e.g., subtracting two positive smis).
1899 if (!dst.is(src1)) {
1900 masm->movp(dst, src1);
1902 masm->subp(dst, src2);
1903 masm->Assert(no_overflow, kSmiSubtractionOverflow);
1907 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1908 DCHECK(!dst.is(src2));
1909 SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
1913 void MacroAssembler::SmiSub(Register dst,
1915 const Operand& src2) {
1916 SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
1920 void MacroAssembler::SmiMul(Register dst,
1923 Label* on_not_smi_result,
1924 Label::Distance near_jump) {
1925 DCHECK(!dst.is(src2));
1926 DCHECK(!dst.is(kScratchRegister));
1927 DCHECK(!src1.is(kScratchRegister));
1928 DCHECK(!src2.is(kScratchRegister));
1931 Label failure, zero_correct_result;
1932 movp(kScratchRegister, src1); // Create backup for later testing.
1933 SmiToInteger64(dst, src1);
1935 j(overflow, &failure, Label::kNear);
1937 // Check for negative zero result. If product is zero, and one
1938 // argument is negative, go to slow case.
1939 Label correct_result;
1941 j(not_zero, &correct_result, Label::kNear);
1943 movp(dst, kScratchRegister);
1945 // Result was positive zero.
1946 j(positive, &zero_correct_result, Label::kNear);
1948 bind(&failure); // Reused failure exit, restores src1.
1949 movp(src1, kScratchRegister);
1950 jmp(on_not_smi_result, near_jump);
1952 bind(&zero_correct_result);
1955 bind(&correct_result);
1957 SmiToInteger64(dst, src1);
1959 j(overflow, on_not_smi_result, near_jump);
1960 // Check for negative zero result. If product is zero, and one
1961 // argument is negative, go to slow case.
1962 Label correct_result;
1964 j(not_zero, &correct_result, Label::kNear);
1965 // One of src1 and src2 is zero, the check whether the other is
1967 movp(kScratchRegister, src1);
1968 xorp(kScratchRegister, src2);
1969 j(negative, on_not_smi_result, near_jump);
1970 bind(&correct_result);
1975 void MacroAssembler::SmiDiv(Register dst,
1978 Label* on_not_smi_result,
1979 Label::Distance near_jump) {
1980 DCHECK(!src1.is(kScratchRegister));
1981 DCHECK(!src2.is(kScratchRegister));
1982 DCHECK(!dst.is(kScratchRegister));
1983 DCHECK(!src2.is(rax));
1984 DCHECK(!src2.is(rdx));
1985 DCHECK(!src1.is(rdx));
1987 // Check for 0 divisor (result is +/-Infinity).
1989 j(zero, on_not_smi_result, near_jump);
1992 movp(kScratchRegister, src1);
1994 SmiToInteger32(rax, src1);
1995 // We need to rule out dividing Smi::kMinValue by -1, since that would
1996 // overflow in idiv and raise an exception.
1997 // We combine this with negative zero test (negative zero only happens
1998 // when dividing zero by a negative number).
2000 // We overshoot a little and go to slow case if we divide min-value
2001 // by any negative value, not just -1.
2003 testl(rax, Immediate(~Smi::kMinValue));
2004 j(not_zero, &safe_div, Label::kNear);
2007 j(positive, &safe_div, Label::kNear);
2008 movp(src1, kScratchRegister);
2009 jmp(on_not_smi_result, near_jump);
2011 j(negative, on_not_smi_result, near_jump);
2015 SmiToInteger32(src2, src2);
2016 // Sign extend src1 into edx:eax.
2019 Integer32ToSmi(src2, src2);
2020 // Check that the remainder is zero.
2024 j(zero, &smi_result, Label::kNear);
2025 movp(src1, kScratchRegister);
2026 jmp(on_not_smi_result, near_jump);
2029 j(not_zero, on_not_smi_result, near_jump);
2031 if (!dst.is(src1) && src1.is(rax)) {
2032 movp(src1, kScratchRegister);
2034 Integer32ToSmi(dst, rax);
2038 void MacroAssembler::SmiMod(Register dst,
2041 Label* on_not_smi_result,
2042 Label::Distance near_jump) {
2043 DCHECK(!dst.is(kScratchRegister));
2044 DCHECK(!src1.is(kScratchRegister));
2045 DCHECK(!src2.is(kScratchRegister));
2046 DCHECK(!src2.is(rax));
2047 DCHECK(!src2.is(rdx));
2048 DCHECK(!src1.is(rdx));
2049 DCHECK(!src1.is(src2));
2052 j(zero, on_not_smi_result, near_jump);
2055 movp(kScratchRegister, src1);
2057 SmiToInteger32(rax, src1);
2058 SmiToInteger32(src2, src2);
2060 // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
2062 cmpl(rax, Immediate(Smi::kMinValue));
2063 j(not_equal, &safe_div, Label::kNear);
2064 cmpl(src2, Immediate(-1));
2065 j(not_equal, &safe_div, Label::kNear);
2066 // Retag inputs and go slow case.
2067 Integer32ToSmi(src2, src2);
2069 movp(src1, kScratchRegister);
2071 jmp(on_not_smi_result, near_jump);
2074 // Sign extend eax into edx:eax.
2077 // Restore smi tags on inputs.
2078 Integer32ToSmi(src2, src2);
2080 movp(src1, kScratchRegister);
2082 // Check for a negative zero result. If the result is zero, and the
2083 // dividend is negative, go slow to return a floating point negative zero.
2086 j(not_zero, &smi_result, Label::kNear);
2088 j(negative, on_not_smi_result, near_jump);
2090 Integer32ToSmi(dst, rdx);
2094 void MacroAssembler::SmiNot(Register dst, Register src) {
2095 DCHECK(!dst.is(kScratchRegister));
2096 DCHECK(!src.is(kScratchRegister));
2097 if (SmiValuesAre32Bits()) {
2098 // Set tag and padding bits before negating, so that they are zero
2100 movl(kScratchRegister, Immediate(~0));
2102 DCHECK(SmiValuesAre31Bits());
2103 movl(kScratchRegister, Immediate(1));
2106 xorp(dst, kScratchRegister);
2108 leap(dst, Operand(src, kScratchRegister, times_1, 0));
2114 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
2115 DCHECK(!dst.is(src2));
2116 if (!dst.is(src1)) {
2123 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
2124 if (constant->value() == 0) {
2126 } else if (dst.is(src)) {
2127 DCHECK(!dst.is(kScratchRegister));
2128 Register constant_reg = GetSmiConstant(constant);
2129 andp(dst, constant_reg);
2131 LoadSmiConstant(dst, constant);
2137 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
2138 if (!dst.is(src1)) {
2139 DCHECK(!src1.is(src2));
2146 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
2148 DCHECK(!dst.is(kScratchRegister));
2149 Register constant_reg = GetSmiConstant(constant);
2150 orp(dst, constant_reg);
2152 LoadSmiConstant(dst, constant);
2158 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
2159 if (!dst.is(src1)) {
2160 DCHECK(!src1.is(src2));
2167 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
2169 DCHECK(!dst.is(kScratchRegister));
2170 Register constant_reg = GetSmiConstant(constant);
2171 xorp(dst, constant_reg);
2173 LoadSmiConstant(dst, constant);
2179 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
2182 DCHECK(is_uint5(shift_value));
2183 if (shift_value > 0) {
2185 sarp(dst, Immediate(shift_value + kSmiShift));
2186 shlp(dst, Immediate(kSmiShift));
2188 UNIMPLEMENTED(); // Not used.
2194 void MacroAssembler::SmiShiftLeftConstant(Register dst,
2197 Label* on_not_smi_result,
2198 Label::Distance near_jump) {
2199 if (SmiValuesAre32Bits()) {
2203 if (shift_value > 0) {
2204 // Shift amount specified by lower 5 bits, not six as the shl opcode.
2205 shlq(dst, Immediate(shift_value & 0x1f));
2208 DCHECK(SmiValuesAre31Bits());
2210 UNIMPLEMENTED(); // Not used.
2212 SmiToInteger32(dst, src);
2213 shll(dst, Immediate(shift_value));
2214 JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
2215 Integer32ToSmi(dst, dst);
2221 void MacroAssembler::SmiShiftLogicalRightConstant(
2222 Register dst, Register src, int shift_value,
2223 Label* on_not_smi_result, Label::Distance near_jump) {
2224 // Logic right shift interprets its result as an *unsigned* number.
2226 UNIMPLEMENTED(); // Not used.
2228 if (shift_value == 0) {
2230 j(negative, on_not_smi_result, near_jump);
2232 if (SmiValuesAre32Bits()) {
2234 shrp(dst, Immediate(shift_value + kSmiShift));
2235 shlp(dst, Immediate(kSmiShift));
2237 DCHECK(SmiValuesAre31Bits());
2238 SmiToInteger32(dst, src);
2239 shrp(dst, Immediate(shift_value));
2240 JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
2241 Integer32ToSmi(dst, dst);
2247 void MacroAssembler::SmiShiftLeft(Register dst,
2250 Label* on_not_smi_result,
2251 Label::Distance near_jump) {
2252 if (SmiValuesAre32Bits()) {
2253 DCHECK(!dst.is(rcx));
2254 if (!dst.is(src1)) {
2257 // Untag shift amount.
2258 SmiToInteger32(rcx, src2);
2259 // Shift amount specified by lower 5 bits, not six as the shl opcode.
2260 andp(rcx, Immediate(0x1f));
2263 DCHECK(SmiValuesAre31Bits());
2264 DCHECK(!dst.is(kScratchRegister));
2265 DCHECK(!src1.is(kScratchRegister));
2266 DCHECK(!src2.is(kScratchRegister));
2267 DCHECK(!dst.is(src2));
2268 DCHECK(!dst.is(rcx));
2270 if (src1.is(rcx) || src2.is(rcx)) {
2271 movq(kScratchRegister, rcx);
2274 UNIMPLEMENTED(); // Not used.
2277 SmiToInteger32(dst, src1);
2278 SmiToInteger32(rcx, src2);
2280 JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
2281 // As src1 or src2 could not be dst, we do not need to restore them for
2283 if (src1.is(rcx) || src2.is(rcx)) {
2285 movq(src1, kScratchRegister);
2287 movq(src2, kScratchRegister);
2290 jmp(on_not_smi_result, near_jump);
2291 bind(&valid_result);
2292 Integer32ToSmi(dst, dst);
2298 void MacroAssembler::SmiShiftLogicalRight(Register dst,
2301 Label* on_not_smi_result,
2302 Label::Distance near_jump) {
2303 DCHECK(!dst.is(kScratchRegister));
2304 DCHECK(!src1.is(kScratchRegister));
2305 DCHECK(!src2.is(kScratchRegister));
2306 DCHECK(!dst.is(src2));
2307 DCHECK(!dst.is(rcx));
2308 if (src1.is(rcx) || src2.is(rcx)) {
2309 movq(kScratchRegister, rcx);
2312 UNIMPLEMENTED(); // Not used.
2315 SmiToInteger32(dst, src1);
2316 SmiToInteger32(rcx, src2);
2318 JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
2319 // As src1 or src2 could not be dst, we do not need to restore them for
2321 if (src1.is(rcx) || src2.is(rcx)) {
2323 movq(src1, kScratchRegister);
2325 movq(src2, kScratchRegister);
2328 jmp(on_not_smi_result, near_jump);
2329 bind(&valid_result);
2330 Integer32ToSmi(dst, dst);
2335 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2338 DCHECK(!dst.is(kScratchRegister));
2339 DCHECK(!src1.is(kScratchRegister));
2340 DCHECK(!src2.is(kScratchRegister));
2341 DCHECK(!dst.is(rcx));
2343 SmiToInteger32(rcx, src2);
2344 if (!dst.is(src1)) {
2347 SmiToInteger32(dst, dst);
2349 Integer32ToSmi(dst, dst);
2353 void MacroAssembler::SelectNonSmi(Register dst,
2357 Label::Distance near_jump) {
2358 DCHECK(!dst.is(kScratchRegister));
2359 DCHECK(!src1.is(kScratchRegister));
2360 DCHECK(!src2.is(kScratchRegister));
2361 DCHECK(!dst.is(src1));
2362 DCHECK(!dst.is(src2));
2363 // Both operands must not be smis.
2365 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2366 Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
2368 STATIC_ASSERT(kSmiTag == 0);
2369 DCHECK_EQ(0, Smi::FromInt(0));
2370 movl(kScratchRegister, Immediate(kSmiTagMask));
2371 andp(kScratchRegister, src1);
2372 testl(kScratchRegister, src2);
2373 // If non-zero then both are smis.
2374 j(not_zero, on_not_smis, near_jump);
2376 // Exactly one operand is a smi.
2377 DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
2378 // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
2379 subp(kScratchRegister, Immediate(1));
2380 // If src1 is a smi, then scratch register all 1s, else it is all 0s.
2383 andp(dst, kScratchRegister);
2384 // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2386 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
2390 SmiIndex MacroAssembler::SmiToIndex(Register dst,
2393 if (SmiValuesAre32Bits()) {
2394 DCHECK(is_uint6(shift));
2395 // There is a possible optimization if shift is in the range 60-63, but that
2396 // will (and must) never happen.
2400 if (shift < kSmiShift) {
2401 sarp(dst, Immediate(kSmiShift - shift));
2403 shlp(dst, Immediate(shift - kSmiShift));
2405 return SmiIndex(dst, times_1);
2407 DCHECK(SmiValuesAre31Bits());
2408 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2412 // We have to sign extend the index register to 64-bit as the SMI might
2415 if (shift == times_1) {
2416 sarq(dst, Immediate(kSmiShift));
2417 return SmiIndex(dst, times_1);
2419 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2424 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2427 if (SmiValuesAre32Bits()) {
2428 // Register src holds a positive smi.
2429 DCHECK(is_uint6(shift));
2434 if (shift < kSmiShift) {
2435 sarp(dst, Immediate(kSmiShift - shift));
2437 shlp(dst, Immediate(shift - kSmiShift));
2439 return SmiIndex(dst, times_1);
2441 DCHECK(SmiValuesAre31Bits());
2442 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2447 if (shift == times_1) {
2448 sarq(dst, Immediate(kSmiShift));
2449 return SmiIndex(dst, times_1);
2451 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2456 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2457 if (SmiValuesAre32Bits()) {
2458 DCHECK_EQ(0, kSmiShift % kBitsPerByte);
2459 addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2461 DCHECK(SmiValuesAre31Bits());
2462 SmiToInteger32(kScratchRegister, src);
2463 addl(dst, kScratchRegister);
2468 void MacroAssembler::Push(Smi* source) {
2469 intptr_t smi = reinterpret_cast<intptr_t>(source);
2470 if (is_int32(smi)) {
2471 Push(Immediate(static_cast<int32_t>(smi)));
2473 Register constant = GetSmiConstant(source);
2479 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
2480 DCHECK(!src.is(scratch));
2483 shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2484 shlp(src, Immediate(kSmiShift));
2487 shlp(scratch, Immediate(kSmiShift));
2492 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
2493 DCHECK(!dst.is(scratch));
2496 shrp(scratch, Immediate(kSmiShift));
2498 shrp(dst, Immediate(kSmiShift));
2500 shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2505 void MacroAssembler::Test(const Operand& src, Smi* source) {
2506 if (SmiValuesAre32Bits()) {
2507 testl(Operand(src, kIntSize), Immediate(source->value()));
2509 DCHECK(SmiValuesAre31Bits());
2510 testl(src, Immediate(source));
2515 // ----------------------------------------------------------------------------
2518 void MacroAssembler::LookupNumberStringCache(Register object,
2523 // Use of registers. Register result is used as a temporary.
2524 Register number_string_cache = result;
2525 Register mask = scratch1;
2526 Register scratch = scratch2;
2528 // Load the number string cache.
2529 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2531 // Make the hash mask from the length of the number string cache. It
2532 // contains two elements (number and string) for each cache entry.
2534 mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2535 shrl(mask, Immediate(1));
2536 subp(mask, Immediate(1)); // Make mask.
2538 // Calculate the entry in the number string cache. The hash value in the
2539 // number string cache for smis is just the smi value, and the hash for
2540 // doubles is the xor of the upper and lower words. See
2541 // Heap::GetNumberStringCache.
2543 Label load_result_from_cache;
2544 JumpIfSmi(object, &is_smi);
2546 isolate()->factory()->heap_number_map(),
2550 STATIC_ASSERT(8 == kDoubleSize);
2551 movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2552 xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2553 andp(scratch, mask);
2554 // Each entry in string cache consists of two pointer sized fields,
2555 // but times_twice_pointer_size (multiplication by 16) scale factor
2556 // is not supported by addrmode on x64 platform.
2557 // So we have to premultiply entry index before lookup.
2558 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2560 Register index = scratch;
2561 Register probe = mask;
2563 FieldOperand(number_string_cache,
2566 FixedArray::kHeaderSize));
2567 JumpIfSmi(probe, not_found);
2568 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2569 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
2570 j(parity_even, not_found); // Bail out if NaN is involved.
2571 j(not_equal, not_found); // The cache did not contain this value.
2572 jmp(&load_result_from_cache);
2575 SmiToInteger32(scratch, object);
2576 andp(scratch, mask);
2577 // Each entry in string cache consists of two pointer sized fields,
2578 // but times_twice_pointer_size (multiplication by 16) scale factor
2579 // is not supported by addrmode on x64 platform.
2580 // So we have to premultiply entry index before lookup.
2581 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2583 // Check if the entry is the smi we are looking for.
2585 FieldOperand(number_string_cache,
2588 FixedArray::kHeaderSize));
2589 j(not_equal, not_found);
2591 // Get the result from the cache.
2592 bind(&load_result_from_cache);
2594 FieldOperand(number_string_cache,
2597 FixedArray::kHeaderSize + kPointerSize));
2598 IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
2602 void MacroAssembler::absps(XMMRegister dst) {
2603 static const struct V8_ALIGNED(16) {
2608 } float_absolute_constant =
2609 { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
2610 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_absolute_constant));
2611 andps(dst, Operand(kScratchRegister, 0));
2615 void MacroAssembler::abspd(XMMRegister dst) {
2616 static const struct V8_ALIGNED(16) {
2619 } double_absolute_constant =
2620 { V8_UINT64_C(0x7FFFFFFFFFFFFFFF), V8_UINT64_C(0x7FFFFFFFFFFFFFFF) };
2621 Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_absolute_constant));
2622 andpd(dst, Operand(kScratchRegister, 0));
2626 void MacroAssembler::negateps(XMMRegister dst) {
2627 static const struct V8_ALIGNED(16) {
2632 } float_negate_constant =
2633 { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
2634 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_negate_constant));
2635 xorps(dst, Operand(kScratchRegister, 0));
2639 void MacroAssembler::negatepd(XMMRegister dst) {
2640 static const struct V8_ALIGNED(16) {
2643 } double_absolute_constant =
2644 { V8_UINT64_C(0x8000000000000000), V8_UINT64_C(0x8000000000000000) };
2645 Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_absolute_constant));
2646 xorpd(dst, Operand(kScratchRegister, 0));
2650 void MacroAssembler::notps(XMMRegister dst) {
2651 static const struct V8_ALIGNED(16) {
2656 } float_not_constant =
2657 { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
2658 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_not_constant));
2659 xorps(dst, Operand(kScratchRegister, 0));
2663 void MacroAssembler::pnegd(XMMRegister dst) {
2664 static const struct V8_ALIGNED(16) {
2669 } int32_one_constant = { 0x1, 0x1, 0x1, 0x1 };
2671 Set(kScratchRegister, reinterpret_cast<intptr_t>(&int32_one_constant));
2672 paddd(dst, Operand(kScratchRegister, 0));
2677 void MacroAssembler::JumpIfNotString(Register object,
2678 Register object_map,
2680 Label::Distance near_jump) {
2681 Condition is_smi = CheckSmi(object);
2682 j(is_smi, not_string, near_jump);
2683 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2684 j(above_equal, not_string, near_jump);
2688 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
2689 Register first_object,
2690 Register second_object,
2694 Label::Distance near_jump) {
2695 // Check that both objects are not smis.
2696 Condition either_smi = CheckEitherSmi(first_object, second_object);
2697 j(either_smi, on_fail, near_jump);
2699 // Load instance type for both strings.
2700 movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2701 movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2702 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2703 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2705 // Check that both are flat ASCII strings.
2706 DCHECK(kNotStringTag != 0);
2707 const int kFlatAsciiStringMask =
2708 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2709 const int kFlatAsciiStringTag =
2710 kStringTag | kOneByteStringTag | kSeqStringTag;
2712 andl(scratch1, Immediate(kFlatAsciiStringMask));
2713 andl(scratch2, Immediate(kFlatAsciiStringMask));
2714 // Interleave the bits to check both scratch1 and scratch2 in one test.
2715 DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2716 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2718 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2719 j(not_equal, on_fail, near_jump);
2723 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
2724 Register instance_type,
2727 Label::Distance near_jump) {
2728 if (!scratch.is(instance_type)) {
2729 movl(scratch, instance_type);
2732 const int kFlatAsciiStringMask =
2733 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2735 andl(scratch, Immediate(kFlatAsciiStringMask));
2736 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2737 j(not_equal, failure, near_jump);
2741 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2742 Register first_object_instance_type,
2743 Register second_object_instance_type,
2747 Label::Distance near_jump) {
2748 // Load instance type for both strings.
2749 movp(scratch1, first_object_instance_type);
2750 movp(scratch2, second_object_instance_type);
2752 // Check that both are flat ASCII strings.
2753 DCHECK(kNotStringTag != 0);
2754 const int kFlatAsciiStringMask =
2755 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2756 const int kFlatAsciiStringTag =
2757 kStringTag | kOneByteStringTag | kSeqStringTag;
2759 andl(scratch1, Immediate(kFlatAsciiStringMask));
2760 andl(scratch2, Immediate(kFlatAsciiStringMask));
2761 // Interleave the bits to check both scratch1 and scratch2 in one test.
2762 DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2763 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2765 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2766 j(not_equal, on_fail, near_jump);
2771 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2772 T operand_or_register,
2773 Label* not_unique_name,
2774 Label::Distance distance) {
2775 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2777 masm->testb(operand_or_register,
2778 Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2779 masm->j(zero, &succeed, Label::kNear);
2780 masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2781 masm->j(not_equal, not_unique_name, distance);
2783 masm->bind(&succeed);
2787 void MacroAssembler::JumpIfNotUniqueName(Operand operand,
2788 Label* not_unique_name,
2789 Label::Distance distance) {
2790 JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2794 void MacroAssembler::JumpIfNotUniqueName(Register reg,
2795 Label* not_unique_name,
2796 Label::Distance distance) {
2797 JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2801 void MacroAssembler::Move(Register dst, Register src) {
2808 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2809 AllowDeferredHandleDereference smi_check;
2810 if (source->IsSmi()) {
2811 Move(dst, Smi::cast(*source));
2813 MoveHeapObject(dst, source);
2818 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2819 AllowDeferredHandleDereference smi_check;
2820 if (source->IsSmi()) {
2821 Move(dst, Smi::cast(*source));
2823 MoveHeapObject(kScratchRegister, source);
2824 movp(dst, kScratchRegister);
2829 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2830 AllowDeferredHandleDereference smi_check;
2831 if (source->IsSmi()) {
2832 Cmp(dst, Smi::cast(*source));
2834 MoveHeapObject(kScratchRegister, source);
2835 cmpp(dst, kScratchRegister);
2840 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2841 AllowDeferredHandleDereference smi_check;
2842 if (source->IsSmi()) {
2843 Cmp(dst, Smi::cast(*source));
2845 MoveHeapObject(kScratchRegister, source);
2846 cmpp(dst, kScratchRegister);
2851 void MacroAssembler::Push(Handle<Object> source) {
2852 AllowDeferredHandleDereference smi_check;
2853 if (source->IsSmi()) {
2854 Push(Smi::cast(*source));
2856 MoveHeapObject(kScratchRegister, source);
2857 Push(kScratchRegister);
2862 void MacroAssembler::MoveHeapObject(Register result,
2863 Handle<Object> object) {
2864 AllowDeferredHandleDereference using_raw_address;
2865 DCHECK(object->IsHeapObject());
2866 if (isolate()->heap()->InNewSpace(*object)) {
2867 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2868 Move(result, cell, RelocInfo::CELL);
2869 movp(result, Operand(result, 0));
2871 Move(result, object, RelocInfo::EMBEDDED_OBJECT);
2876 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
2878 AllowDeferredHandleDereference embedding_raw_address;
2879 load_rax(cell.location(), RelocInfo::CELL);
2881 Move(dst, cell, RelocInfo::CELL);
2882 movp(dst, Operand(dst, 0));
2887 void MacroAssembler::Drop(int stack_elements) {
2888 if (stack_elements > 0) {
2889 addp(rsp, Immediate(stack_elements * kPointerSize));
2894 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
2896 DCHECK(stack_elements > 0);
2897 if (kPointerSize == kInt64Size && stack_elements == 1) {
2898 popq(MemOperand(rsp, 0));
2902 PopReturnAddressTo(scratch);
2903 Drop(stack_elements);
2904 PushReturnAddressFrom(scratch);
2908 void MacroAssembler::Push(Register src) {
2909 if (kPointerSize == kInt64Size) {
2912 // x32 uses 64-bit push for rbp in the prologue.
2913 DCHECK(src.code() != rbp.code());
2914 leal(rsp, Operand(rsp, -4));
2915 movp(Operand(rsp, 0), src);
2920 void MacroAssembler::Push(const Operand& src) {
2921 if (kPointerSize == kInt64Size) {
2924 movp(kScratchRegister, src);
2925 leal(rsp, Operand(rsp, -4));
2926 movp(Operand(rsp, 0), kScratchRegister);
2931 void MacroAssembler::PushQuad(const Operand& src) {
2932 if (kPointerSize == kInt64Size) {
2935 movp(kScratchRegister, src);
2936 pushq(kScratchRegister);
2941 void MacroAssembler::Push(Immediate value) {
2942 if (kPointerSize == kInt64Size) {
2945 leal(rsp, Operand(rsp, -4));
2946 movp(Operand(rsp, 0), value);
2951 void MacroAssembler::PushImm32(int32_t imm32) {
2952 if (kPointerSize == kInt64Size) {
2955 leal(rsp, Operand(rsp, -4));
2956 movp(Operand(rsp, 0), Immediate(imm32));
2961 void MacroAssembler::Pop(Register dst) {
2962 if (kPointerSize == kInt64Size) {
2965 // x32 uses 64-bit pop for rbp in the epilogue.
2966 DCHECK(dst.code() != rbp.code());
2967 movp(dst, Operand(rsp, 0));
2968 leal(rsp, Operand(rsp, 4));
2973 void MacroAssembler::Pop(const Operand& dst) {
2974 if (kPointerSize == kInt64Size) {
2977 Register scratch = dst.AddressUsesRegister(kScratchRegister)
2978 ? kSmiConstantRegister : kScratchRegister;
2979 movp(scratch, Operand(rsp, 0));
2981 leal(rsp, Operand(rsp, 4));
2982 if (scratch.is(kSmiConstantRegister)) {
2983 // Restore kSmiConstantRegister.
2984 movp(kSmiConstantRegister,
2985 reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
2986 Assembler::RelocInfoNone());
2992 void MacroAssembler::PopQuad(const Operand& dst) {
2993 if (kPointerSize == kInt64Size) {
2996 popq(kScratchRegister);
2997 movp(dst, kScratchRegister);
3002 void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
3005 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
3006 offset <= SharedFunctionInfo::kSize &&
3007 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
3008 if (kPointerSize == kInt64Size) {
3009 movsxlq(dst, FieldOperand(base, offset));
3011 movp(dst, FieldOperand(base, offset));
3012 SmiToInteger32(dst, dst);
3017 void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
3020 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
3021 offset <= SharedFunctionInfo::kSize &&
3022 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
3023 if (kPointerSize == kInt32Size) {
3024 // On x32, this field is represented by SMI.
3027 int byte_offset = bits / kBitsPerByte;
3028 int bit_in_byte = bits & (kBitsPerByte - 1);
3029 testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
3033 void MacroAssembler::Jump(ExternalReference ext) {
3034 LoadAddress(kScratchRegister, ext);
3035 jmp(kScratchRegister);
3039 void MacroAssembler::Jump(const Operand& op) {
3040 if (kPointerSize == kInt64Size) {
3043 movp(kScratchRegister, op);
3044 jmp(kScratchRegister);
3049 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
3050 Move(kScratchRegister, destination, rmode);
3051 jmp(kScratchRegister);
3055 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
3056 // TODO(X64): Inline this
3057 jmp(code_object, rmode);
3061 int MacroAssembler::CallSize(ExternalReference ext) {
3062 // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
3063 return LoadAddressSize(ext) +
3064 Assembler::kCallScratchRegisterInstructionLength;
3068 void MacroAssembler::Call(ExternalReference ext) {
3070 int end_position = pc_offset() + CallSize(ext);
3072 LoadAddress(kScratchRegister, ext);
3073 call(kScratchRegister);
3075 CHECK_EQ(end_position, pc_offset());
3080 void MacroAssembler::Call(const Operand& op) {
3081 if (kPointerSize == kInt64Size) {
3084 movp(kScratchRegister, op);
3085 call(kScratchRegister);
3090 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
3092 int end_position = pc_offset() + CallSize(destination);
3094 Move(kScratchRegister, destination, rmode);
3095 call(kScratchRegister);
3097 CHECK_EQ(pc_offset(), end_position);
3102 void MacroAssembler::Call(Handle<Code> code_object,
3103 RelocInfo::Mode rmode,
3104 TypeFeedbackId ast_id) {
3106 int end_position = pc_offset() + CallSize(code_object);
3108 DCHECK(RelocInfo::IsCodeTarget(rmode) ||
3109 rmode == RelocInfo::CODE_AGE_SEQUENCE);
3110 call(code_object, rmode, ast_id);
3112 CHECK_EQ(end_position, pc_offset());
3117 void MacroAssembler::Pushad() {
3122 // Not pushing rsp or rbp.
3127 // r10 is kScratchRegister.
3129 // r12 is kSmiConstantRegister.
3130 // r13 is kRootRegister.
3133 STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
3134 // Use lea for symmetry with Popad.
3136 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3137 leap(rsp, Operand(rsp, -sp_delta));
3141 void MacroAssembler::Popad() {
3142 // Popad must not change the flags, so use lea instead of addq.
3144 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3145 leap(rsp, Operand(rsp, sp_delta));
3160 void MacroAssembler::Dropad() {
3161 addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
3165 // Order general registers are pushed by Pushad:
3166 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
3168 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
3188 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
3189 const Immediate& imm) {
3190 movp(SafepointRegisterSlot(dst), imm);
3194 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
3195 movp(SafepointRegisterSlot(dst), src);
3199 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
3200 movp(dst, SafepointRegisterSlot(src));
3204 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
3205 return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
3209 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3210 int handler_index) {
3211 // Adjust this code if not the case.
3212 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3214 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3215 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3216 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3217 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3218 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3220 // We will build up the handler from the bottom by pushing on the stack.
3221 // First push the frame pointer and context.
3222 if (kind == StackHandler::JS_ENTRY) {
3223 // The frame pointer does not point to a JS frame so we save NULL for
3224 // rbp. We expect the code throwing an exception to check rbp before
3225 // dereferencing it to restore the context.
3226 pushq(Immediate(0)); // NULL frame pointer.
3227 Push(Smi::FromInt(0)); // No context.
3233 // Push the state and the code object.
3235 StackHandler::IndexField::encode(handler_index) |
3236 StackHandler::KindField::encode(kind);
3237 Push(Immediate(state));
3240 // Link the current handler as the next handler.
3241 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3242 Push(ExternalOperand(handler_address));
3243 // Set this new handler as the current one.
3244 movp(ExternalOperand(handler_address), rsp);
3248 void MacroAssembler::PopTryHandler() {
3249 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3250 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3251 Pop(ExternalOperand(handler_address));
3252 addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
3256 void MacroAssembler::JumpToHandlerEntry() {
3257 // Compute the handler entry address and jump to it. The handler table is
3258 // a fixed array of (smi-tagged) code offsets.
3259 // rax = exception, rdi = code object, rdx = state.
3260 movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
3261 shrp(rdx, Immediate(StackHandler::kKindWidth));
3263 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
3264 SmiToInteger64(rdx, rdx);
3265 leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
3270 void MacroAssembler::Throw(Register value) {
3271 // Adjust this code if not the case.
3272 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3274 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3275 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3276 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3277 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3278 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3280 // The exception is expected in rax.
3281 if (!value.is(rax)) {
3284 // Drop the stack pointer to the top of the top handler.
3285 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3286 movp(rsp, ExternalOperand(handler_address));
3287 // Restore the next handler.
3288 Pop(ExternalOperand(handler_address));
3290 // Remove the code object and state, compute the handler address in rdi.
3291 Pop(rdi); // Code object.
3292 Pop(rdx); // Offset and state.
3294 // Restore the context and frame pointer.
3295 Pop(rsi); // Context.
3296 popq(rbp); // Frame pointer.
3298 // If the handler is a JS frame, restore the context to the frame.
3299 // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
3303 j(zero, &skip, Label::kNear);
3304 movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
3307 JumpToHandlerEntry();
3311 void MacroAssembler::ThrowUncatchable(Register value) {
3312 // Adjust this code if not the case.
3313 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3315 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3316 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3317 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3318 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3319 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3321 // The exception is expected in rax.
3322 if (!value.is(rax)) {
3325 // Drop the stack pointer to the top of the top stack handler.
3326 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3327 Load(rsp, handler_address);
3329 // Unwind the handlers until the top ENTRY handler is found.
3330 Label fetch_next, check_kind;
3331 jmp(&check_kind, Label::kNear);
3333 movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
3336 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3337 testl(Operand(rsp, StackHandlerConstants::kStateOffset),
3338 Immediate(StackHandler::KindField::kMask));
3339 j(not_zero, &fetch_next);
3341 // Set the top handler address to next handler past the top ENTRY handler.
3342 Pop(ExternalOperand(handler_address));
3344 // Remove the code object and state, compute the handler address in rdi.
3345 Pop(rdi); // Code object.
3346 Pop(rdx); // Offset and state.
3348 // Clear the context pointer and frame pointer (0 was saved in the handler).
3352 JumpToHandlerEntry();
3356 void MacroAssembler::Ret() {
3361 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
3362 if (is_uint16(bytes_dropped)) {
3365 PopReturnAddressTo(scratch);
3366 addp(rsp, Immediate(bytes_dropped));
3367 PushReturnAddressFrom(scratch);
3373 void MacroAssembler::FCmp() {
3379 void MacroAssembler::CmpObjectType(Register heap_object,
3382 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3383 CmpInstanceType(map, type);
3387 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3388 cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
3389 Immediate(static_cast<int8_t>(type)));
3393 void MacroAssembler::CheckFastElements(Register map,
3395 Label::Distance distance) {
3396 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3397 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3398 STATIC_ASSERT(FAST_ELEMENTS == 2);
3399 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3400 cmpb(FieldOperand(map, Map::kBitField2Offset),
3401 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3402 j(above, fail, distance);
3406 void MacroAssembler::CheckFastObjectElements(Register map,
3408 Label::Distance distance) {
3409 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3410 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3411 STATIC_ASSERT(FAST_ELEMENTS == 2);
3412 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3413 cmpb(FieldOperand(map, Map::kBitField2Offset),
3414 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3415 j(below_equal, fail, distance);
3416 cmpb(FieldOperand(map, Map::kBitField2Offset),
3417 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3418 j(above, fail, distance);
3422 void MacroAssembler::CheckFastSmiElements(Register map,
3424 Label::Distance distance) {
3425 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3426 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3427 cmpb(FieldOperand(map, Map::kBitField2Offset),
3428 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3429 j(above, fail, distance);
3433 void MacroAssembler::StoreNumberToDoubleElements(
3434 Register maybe_number,
3437 XMMRegister xmm_scratch,
3439 int elements_offset) {
3440 Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
3442 JumpIfSmi(maybe_number, &smi_value, Label::kNear);
3444 CheckMap(maybe_number,
3445 isolate()->factory()->heap_number_map(),
3449 // Double value, canonicalize NaN.
3450 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
3451 cmpl(FieldOperand(maybe_number, offset),
3452 Immediate(kNaNOrInfinityLowerBoundUpper32));
3453 j(greater_equal, &maybe_nan, Label::kNear);
3456 movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
3457 bind(&have_double_value);
3458 movsd(FieldOperand(elements, index, times_8,
3459 FixedDoubleArray::kHeaderSize - elements_offset),
3464 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3465 // it's an Infinity, and the non-NaN code path applies.
3466 j(greater, &is_nan, Label::kNear);
3467 cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
3470 // Convert all NaNs to the same canonical NaN value when they are stored in
3471 // the double array.
3472 Set(kScratchRegister, BitCast<uint64_t>(
3473 FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
3474 movq(xmm_scratch, kScratchRegister);
3475 jmp(&have_double_value, Label::kNear);
3478 // Value is a smi. convert to a double and store.
3479 // Preserve original value.
3480 SmiToInteger32(kScratchRegister, maybe_number);
3481 Cvtlsi2sd(xmm_scratch, kScratchRegister);
3482 movsd(FieldOperand(elements, index, times_8,
3483 FixedDoubleArray::kHeaderSize - elements_offset),
3489 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3490 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3494 void MacroAssembler::CheckMap(Register obj,
3497 SmiCheckType smi_check_type) {
3498 if (smi_check_type == DO_SMI_CHECK) {
3499 JumpIfSmi(obj, fail);
3502 CompareMap(obj, map);
3507 void MacroAssembler::ClampUint8(Register reg) {
3509 testl(reg, Immediate(0xFFFFFF00));
3510 j(zero, &done, Label::kNear);
3511 setcc(negative, reg); // 1 if negative, 0 if positive.
3512 decb(reg); // 0 if negative, 255 if positive.
3517 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3518 XMMRegister temp_xmm_reg,
3519 Register result_reg) {
3522 xorps(temp_xmm_reg, temp_xmm_reg);
3523 cvtsd2si(result_reg, input_reg);
3524 testl(result_reg, Immediate(0xFFFFFF00));
3525 j(zero, &done, Label::kNear);
3526 cmpl(result_reg, Immediate(1));
3527 j(overflow, &conv_failure, Label::kNear);
3528 movl(result_reg, Immediate(0));
3529 setcc(sign, result_reg);
3530 subl(result_reg, Immediate(1));
3531 andl(result_reg, Immediate(255));
3532 jmp(&done, Label::kNear);
3533 bind(&conv_failure);
3535 ucomisd(input_reg, temp_xmm_reg);
3536 j(below, &done, Label::kNear);
3537 Set(result_reg, 255);
3542 void MacroAssembler::LoadUint32(XMMRegister dst,
3544 if (FLAG_debug_code) {
3545 cmpq(src, Immediate(0xffffffff));
3546 Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3548 cvtqsi2sd(dst, src);
3552 void MacroAssembler::SlowTruncateToI(Register result_reg,
3555 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
3556 call(stub.GetCode(), RelocInfo::CODE_TARGET);
3560 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3561 Register input_reg) {
3563 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3564 cvttsd2siq(result_reg, xmm0);
3565 cmpq(result_reg, Immediate(1));
3566 j(no_overflow, &done, Label::kNear);
3569 if (input_reg.is(result_reg)) {
3570 subp(rsp, Immediate(kDoubleSize));
3571 movsd(MemOperand(rsp, 0), xmm0);
3572 SlowTruncateToI(result_reg, rsp, 0);
3573 addp(rsp, Immediate(kDoubleSize));
3575 SlowTruncateToI(result_reg, input_reg);
3579 // Keep our invariant that the upper 32 bits are zero.
3580 movl(result_reg, result_reg);
3584 void MacroAssembler::TruncateDoubleToI(Register result_reg,
3585 XMMRegister input_reg) {
3587 cvttsd2siq(result_reg, input_reg);
3588 cmpq(result_reg, Immediate(1));
3589 j(no_overflow, &done, Label::kNear);
3591 subp(rsp, Immediate(kDoubleSize));
3592 movsd(MemOperand(rsp, 0), input_reg);
3593 SlowTruncateToI(result_reg, rsp, 0);
3594 addp(rsp, Immediate(kDoubleSize));
3597 // Keep our invariant that the upper 32 bits are zero.
3598 movl(result_reg, result_reg);
3602 void MacroAssembler::DoubleToI(Register result_reg,
3603 XMMRegister input_reg,
3604 XMMRegister scratch,
3605 MinusZeroMode minus_zero_mode,
3606 Label* conversion_failed,
3607 Label::Distance dst) {
3608 cvttsd2si(result_reg, input_reg);
3609 Cvtlsi2sd(xmm0, result_reg);
3610 ucomisd(xmm0, input_reg);
3611 j(not_equal, conversion_failed, dst);
3612 j(parity_even, conversion_failed, dst); // NaN.
3613 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3615 // The integer converted back is equal to the original. We
3616 // only have to test if we got -0 as an input.
3617 testl(result_reg, result_reg);
3618 j(not_zero, &done, Label::kNear);
3619 movmskpd(result_reg, input_reg);
3620 // Bit 0 contains the sign of the double in input_reg.
3621 // If input was positive, we are ok and return 0, otherwise
3622 // jump to conversion_failed.
3623 andl(result_reg, Immediate(1));
3624 j(not_zero, conversion_failed, dst);
3630 void MacroAssembler::TaggedToI(Register result_reg,
3633 MinusZeroMode minus_zero_mode,
3634 Label* lost_precision,
3635 Label::Distance dst) {
3637 DCHECK(!temp.is(xmm0));
3639 // Heap number map check.
3640 CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3641 Heap::kHeapNumberMapRootIndex);
3642 j(not_equal, lost_precision, dst);
3644 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3645 cvttsd2si(result_reg, xmm0);
3646 Cvtlsi2sd(temp, result_reg);
3647 ucomisd(xmm0, temp);
3648 RecordComment("Deferred TaggedToI: lost precision");
3649 j(not_equal, lost_precision, dst);
3650 RecordComment("Deferred TaggedToI: NaN");
3651 j(parity_even, lost_precision, dst); // NaN.
3652 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3653 testl(result_reg, result_reg);
3654 j(not_zero, &done, Label::kNear);
3655 movmskpd(result_reg, xmm0);
3656 andl(result_reg, Immediate(1));
3657 j(not_zero, lost_precision, dst);
3663 void MacroAssembler::LoadInstanceDescriptors(Register map,
3664 Register descriptors) {
3665 movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3669 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3670 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3671 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3675 void MacroAssembler::EnumLength(Register dst, Register map) {
3676 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3677 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3678 andl(dst, Immediate(Map::EnumLengthBits::kMask));
3679 Integer32ToSmi(dst, dst);
3683 void MacroAssembler::DispatchMap(Register obj,
3686 Handle<Code> success,
3687 SmiCheckType smi_check_type) {
3689 if (smi_check_type == DO_SMI_CHECK) {
3690 JumpIfSmi(obj, &fail);
3692 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3693 j(equal, success, RelocInfo::CODE_TARGET);
3699 void MacroAssembler::AssertNumber(Register object) {
3700 if (emit_debug_code()) {
3702 Condition is_smi = CheckSmi(object);
3703 j(is_smi, &ok, Label::kNear);
3704 Cmp(FieldOperand(object, HeapObject::kMapOffset),
3705 isolate()->factory()->heap_number_map());
3706 Check(equal, kOperandIsNotANumber);
3712 void MacroAssembler::AssertNotSmi(Register object) {
3713 if (emit_debug_code()) {
3714 Condition is_smi = CheckSmi(object);
3715 Check(NegateCondition(is_smi), kOperandIsASmi);
3720 void MacroAssembler::AssertSmi(Register object) {
3721 if (emit_debug_code()) {
3722 Condition is_smi = CheckSmi(object);
3723 Check(is_smi, kOperandIsNotASmi);
3728 void MacroAssembler::AssertSmi(const Operand& object) {
3729 if (emit_debug_code()) {
3730 Condition is_smi = CheckSmi(object);
3731 Check(is_smi, kOperandIsNotASmi);
3736 void MacroAssembler::AssertZeroExtended(Register int32_register) {
3737 if (emit_debug_code()) {
3738 DCHECK(!int32_register.is(kScratchRegister));
3739 movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3740 cmpq(kScratchRegister, int32_register);
3741 Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3746 void MacroAssembler::AssertString(Register object) {
3747 if (emit_debug_code()) {
3748 testb(object, Immediate(kSmiTagMask));
3749 Check(not_equal, kOperandIsASmiAndNotAString);
3751 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3752 CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3754 Check(below, kOperandIsNotAString);
3759 void MacroAssembler::AssertName(Register object) {
3760 if (emit_debug_code()) {
3761 testb(object, Immediate(kSmiTagMask));
3762 Check(not_equal, kOperandIsASmiAndNotAName);
3764 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3765 CmpInstanceType(object, LAST_NAME_TYPE);
3767 Check(below_equal, kOperandIsNotAName);
3772 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3773 if (emit_debug_code()) {
3774 Label done_checking;
3775 AssertNotSmi(object);
3776 Cmp(object, isolate()->factory()->undefined_value());
3777 j(equal, &done_checking);
3778 Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3779 Assert(equal, kExpectedUndefinedOrCell);
3780 bind(&done_checking);
3785 void MacroAssembler::AssertRootValue(Register src,
3786 Heap::RootListIndex root_value_index,
3787 BailoutReason reason) {
3788 if (emit_debug_code()) {
3789 DCHECK(!src.is(kScratchRegister));
3790 LoadRoot(kScratchRegister, root_value_index);
3791 cmpp(src, kScratchRegister);
3792 Check(equal, reason);
3798 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3800 Register instance_type) {
3801 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3802 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3803 STATIC_ASSERT(kNotStringTag != 0);
3804 testb(instance_type, Immediate(kIsNotStringMask));
3809 Condition MacroAssembler::IsObjectNameType(Register heap_object,
3811 Register instance_type) {
3812 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3813 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3814 cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
3819 void MacroAssembler::TryGetFunctionPrototype(Register function,
3822 bool miss_on_bound_function) {
3824 if (miss_on_bound_function) {
3825 // Check that the receiver isn't a smi.
3826 testl(function, Immediate(kSmiTagMask));
3829 // Check that the function really is a function.
3830 CmpObjectType(function, JS_FUNCTION_TYPE, result);
3833 movp(kScratchRegister,
3834 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3835 // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
3837 TestBitSharedFunctionInfoSpecialField(kScratchRegister,
3838 SharedFunctionInfo::kCompilerHintsOffset,
3839 SharedFunctionInfo::kBoundFunction);
3842 // Make sure that the function has an instance prototype.
3843 testb(FieldOperand(result, Map::kBitFieldOffset),
3844 Immediate(1 << Map::kHasNonInstancePrototype));
3845 j(not_zero, &non_instance, Label::kNear);
3848 // Get the prototype or initial map from the function.
3850 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3852 // If the prototype or initial map is the hole, don't return it and
3853 // simply miss the cache instead. This will allow us to allocate a
3854 // prototype object on-demand in the runtime system.
3855 CompareRoot(result, Heap::kTheHoleValueRootIndex);
3858 // If the function does not have an initial map, we're done.
3860 CmpObjectType(result, MAP_TYPE, kScratchRegister);
3861 j(not_equal, &done, Label::kNear);
3863 // Get the prototype from the initial map.
3864 movp(result, FieldOperand(result, Map::kPrototypeOffset));
3866 if (miss_on_bound_function) {
3867 jmp(&done, Label::kNear);
3869 // Non-instance prototype: Fetch prototype from constructor field
3871 bind(&non_instance);
3872 movp(result, FieldOperand(result, Map::kConstructorOffset));
3880 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3881 if (FLAG_native_code_counters && counter->Enabled()) {
3882 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3883 movl(counter_operand, Immediate(value));
3888 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3890 if (FLAG_native_code_counters && counter->Enabled()) {
3891 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3893 incl(counter_operand);
3895 addl(counter_operand, Immediate(value));
3901 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3903 if (FLAG_native_code_counters && counter->Enabled()) {
3904 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3906 decl(counter_operand);
3908 subl(counter_operand, Immediate(value));
3914 void MacroAssembler::DebugBreak() {
3915 Set(rax, 0); // No arguments.
3916 LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3917 CEntryStub ces(isolate(), 1);
3918 DCHECK(AllowThisStubCall(&ces));
3919 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3923 void MacroAssembler::InvokeCode(Register code,
3924 const ParameterCount& expected,
3925 const ParameterCount& actual,
3927 const CallWrapper& call_wrapper) {
3928 // You can't call a function without a valid frame.
3929 DCHECK(flag == JUMP_FUNCTION || has_frame());
3932 bool definitely_mismatches = false;
3933 InvokePrologue(expected,
3935 Handle<Code>::null(),
3938 &definitely_mismatches,
3942 if (!definitely_mismatches) {
3943 if (flag == CALL_FUNCTION) {
3944 call_wrapper.BeforeCall(CallSize(code));
3946 call_wrapper.AfterCall();
3948 DCHECK(flag == JUMP_FUNCTION);
3956 void MacroAssembler::InvokeFunction(Register function,
3957 const ParameterCount& actual,
3959 const CallWrapper& call_wrapper) {
3960 // You can't call a function without a valid frame.
3961 DCHECK(flag == JUMP_FUNCTION || has_frame());
3963 DCHECK(function.is(rdi));
3964 movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3965 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3966 LoadSharedFunctionInfoSpecialField(rbx, rdx,
3967 SharedFunctionInfo::kFormalParameterCountOffset);
3968 // Advances rdx to the end of the Code object header, to the start of
3969 // the executable code.
3970 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3972 ParameterCount expected(rbx);
3973 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3977 void MacroAssembler::InvokeFunction(Register function,
3978 const ParameterCount& expected,
3979 const ParameterCount& actual,
3981 const CallWrapper& call_wrapper) {
3982 // You can't call a function without a valid frame.
3983 DCHECK(flag == JUMP_FUNCTION || has_frame());
3985 DCHECK(function.is(rdi));
3986 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3987 // Advances rdx to the end of the Code object header, to the start of
3988 // the executable code.
3989 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3991 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3995 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3996 const ParameterCount& expected,
3997 const ParameterCount& actual,
3999 const CallWrapper& call_wrapper) {
4000 Move(rdi, function);
4001 InvokeFunction(rdi, expected, actual, flag, call_wrapper);
4005 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4006 const ParameterCount& actual,
4007 Handle<Code> code_constant,
4008 Register code_register,
4010 bool* definitely_mismatches,
4012 Label::Distance near_jump,
4013 const CallWrapper& call_wrapper) {
4014 bool definitely_matches = false;
4015 *definitely_mismatches = false;
4017 if (expected.is_immediate()) {
4018 DCHECK(actual.is_immediate());
4019 if (expected.immediate() == actual.immediate()) {
4020 definitely_matches = true;
4022 Set(rax, actual.immediate());
4023 if (expected.immediate() ==
4024 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
4025 // Don't worry about adapting arguments for built-ins that
4026 // don't want that done. Skip adaption code by making it look
4027 // like we have a match between expected and actual number of
4029 definitely_matches = true;
4031 *definitely_mismatches = true;
4032 Set(rbx, expected.immediate());
4036 if (actual.is_immediate()) {
4037 // Expected is in register, actual is immediate. This is the
4038 // case when we invoke function values without going through the
4040 cmpp(expected.reg(), Immediate(actual.immediate()));
4041 j(equal, &invoke, Label::kNear);
4042 DCHECK(expected.reg().is(rbx));
4043 Set(rax, actual.immediate());
4044 } else if (!expected.reg().is(actual.reg())) {
4045 // Both expected and actual are in (different) registers. This
4046 // is the case when we invoke functions using call and apply.
4047 cmpp(expected.reg(), actual.reg());
4048 j(equal, &invoke, Label::kNear);
4049 DCHECK(actual.reg().is(rax));
4050 DCHECK(expected.reg().is(rbx));
4054 if (!definitely_matches) {
4055 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
4056 if (!code_constant.is_null()) {
4057 Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
4058 addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
4059 } else if (!code_register.is(rdx)) {
4060 movp(rdx, code_register);
4063 if (flag == CALL_FUNCTION) {
4064 call_wrapper.BeforeCall(CallSize(adaptor));
4065 Call(adaptor, RelocInfo::CODE_TARGET);
4066 call_wrapper.AfterCall();
4067 if (!*definitely_mismatches) {
4068 jmp(done, near_jump);
4071 Jump(adaptor, RelocInfo::CODE_TARGET);
4078 void MacroAssembler::StubPrologue() {
4079 pushq(rbp); // Caller's frame pointer.
4081 Push(rsi); // Callee's context.
4082 Push(Smi::FromInt(StackFrame::STUB));
4086 void MacroAssembler::Prologue(bool code_pre_aging) {
4087 PredictableCodeSizeScope predictible_code_size_scope(this,
4088 kNoCodeAgeSequenceLength);
4089 if (code_pre_aging) {
4090 // Pre-age the code.
4091 Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
4092 RelocInfo::CODE_AGE_SEQUENCE);
4093 Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
4095 pushq(rbp); // Caller's frame pointer.
4097 Push(rsi); // Callee's context.
4098 Push(rdi); // Callee's JS function.
4103 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4106 Push(rsi); // Context.
4107 Push(Smi::FromInt(type));
4108 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4109 Push(kScratchRegister);
4110 if (emit_debug_code()) {
4111 Move(kScratchRegister,
4112 isolate()->factory()->undefined_value(),
4113 RelocInfo::EMBEDDED_OBJECT);
4114 cmpp(Operand(rsp, 0), kScratchRegister);
4115 Check(not_equal, kCodeObjectNotProperlyPatched);
4120 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4121 if (emit_debug_code()) {
4122 Move(kScratchRegister, Smi::FromInt(type));
4123 cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
4124 Check(equal, kStackFrameTypesMustMatch);
4131 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
4132 // Set up the frame structure on the stack.
4133 // All constants are relative to the frame pointer of the exit frame.
4134 DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
4135 kFPOnStackSize + kPCOnStackSize);
4136 DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
4137 DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
4141 // Reserve room for entry stack pointer and push the code object.
4142 DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
4143 Push(Immediate(0)); // Saved entry sp, patched before call.
4144 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4145 Push(kScratchRegister); // Accessed from EditFrame::code_slot.
4147 // Save the frame pointer and the context in top.
4149 movp(r14, rax); // Backup rax in callee-save register.
4152 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
4153 Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
4157 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
4158 bool save_doubles) {
4160 const int kShadowSpace = 4;
4161 arg_stack_space += kShadowSpace;
4163 // Optionally save all XMM registers.
4165 int space = XMMRegister::kMaxNumAllocatableRegisters * kSIMD128Size +
4166 arg_stack_space * kRegisterSize;
4167 subp(rsp, Immediate(space));
4168 int offset = -2 * kPointerSize;
4169 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4170 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4171 movups(Operand(rbp, offset - ((i + 1) * kSIMD128Size)), reg);
4173 } else if (arg_stack_space > 0) {
4174 subp(rsp, Immediate(arg_stack_space * kRegisterSize));
4177 // Get the required frame alignment for the OS.
4178 const int kFrameAlignment = base::OS::ActivationFrameAlignment();
4179 if (kFrameAlignment > 0) {
4180 DCHECK(IsPowerOf2(kFrameAlignment));
4181 DCHECK(is_int8(kFrameAlignment));
4182 andp(rsp, Immediate(-kFrameAlignment));
4185 // Patch the saved entry sp.
4186 movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
4190 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
4191 EnterExitFramePrologue(true);
4193 // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
4194 // so it must be retained across the C-call.
4195 int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
4196 leap(r15, Operand(rbp, r14, times_pointer_size, offset));
4198 EnterExitFrameEpilogue(arg_stack_space, save_doubles);
4202 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
4203 EnterExitFramePrologue(false);
4204 EnterExitFrameEpilogue(arg_stack_space, false);
4208 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
4212 int offset = -2 * kPointerSize;
4213 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4214 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4215 movups(reg, Operand(rbp, offset - ((i + 1) * kSIMD128Size)));
4218 // Get the return address from the stack and restore the frame pointer.
4219 movp(rcx, Operand(rbp, kFPOnStackSize));
4220 movp(rbp, Operand(rbp, 0 * kPointerSize));
4222 // Drop everything up to and including the arguments and the receiver
4223 // from the caller stack.
4224 leap(rsp, Operand(r15, 1 * kPointerSize));
4226 PushReturnAddressFrom(rcx);
4228 LeaveExitFrameEpilogue(true);
4232 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
4236 LeaveExitFrameEpilogue(restore_context);
4240 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
4241 // Restore current context from top and clear it in debug mode.
4242 ExternalReference context_address(Isolate::kContextAddress, isolate());
4243 Operand context_operand = ExternalOperand(context_address);
4244 if (restore_context) {
4245 movp(rsi, context_operand);
4248 movp(context_operand, Immediate(0));
4251 // Clear the top frame.
4252 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
4254 Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
4255 movp(c_entry_fp_operand, Immediate(0));
4259 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
4262 Label same_contexts;
4264 DCHECK(!holder_reg.is(scratch));
4265 DCHECK(!scratch.is(kScratchRegister));
4266 // Load current lexical context from the stack frame.
4267 movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
4269 // When generating debug code, make sure the lexical context is set.
4270 if (emit_debug_code()) {
4271 cmpp(scratch, Immediate(0));
4272 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
4274 // Load the native context of the current context.
4276 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
4277 movp(scratch, FieldOperand(scratch, offset));
4278 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4280 // Check the context is a native context.
4281 if (emit_debug_code()) {
4282 Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
4283 isolate()->factory()->native_context_map());
4284 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4287 // Check if both contexts are the same.
4288 cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4289 j(equal, &same_contexts);
4291 // Compare security tokens.
4292 // Check that the security token in the calling global object is
4293 // compatible with the security token in the receiving global
4296 // Check the context is a native context.
4297 if (emit_debug_code()) {
4298 // Preserve original value of holder_reg.
4301 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4302 CompareRoot(holder_reg, Heap::kNullValueRootIndex);
4303 Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
4305 // Read the first word and compare to native_context_map(),
4306 movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
4307 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
4308 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4312 movp(kScratchRegister,
4313 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4315 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
4316 movp(scratch, FieldOperand(scratch, token_offset));
4317 cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
4320 bind(&same_contexts);
4324 // Compute the hash code from the untagged key. This must be kept in sync with
4325 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
4326 // code-stub-hydrogen.cc
4327 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
4328 // First of all we assign the hash seed to scratch.
4329 LoadRoot(scratch, Heap::kHashSeedRootIndex);
4330 SmiToInteger32(scratch, scratch);
4332 // Xor original key with a seed.
4335 // Compute the hash code from the untagged key. This must be kept in sync
4336 // with ComputeIntegerHash in utils.h.
4338 // hash = ~hash + (hash << 15);
4341 shll(scratch, Immediate(15));
4343 // hash = hash ^ (hash >> 12);
4345 shrl(scratch, Immediate(12));
4347 // hash = hash + (hash << 2);
4348 leal(r0, Operand(r0, r0, times_4, 0));
4349 // hash = hash ^ (hash >> 4);
4351 shrl(scratch, Immediate(4));
4353 // hash = hash * 2057;
4354 imull(r0, r0, Immediate(2057));
4355 // hash = hash ^ (hash >> 16);
4357 shrl(scratch, Immediate(16));
4363 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
4372 // elements - holds the slow-case elements of the receiver on entry.
4373 // Unchanged unless 'result' is the same register.
4375 // key - holds the smi key on entry.
4376 // Unchanged unless 'result' is the same register.
4378 // Scratch registers:
4380 // r0 - holds the untagged key on entry and holds the hash once computed.
4382 // r1 - used to hold the capacity mask of the dictionary
4384 // r2 - used for the index into the dictionary.
4386 // result - holds the result on exit if the load succeeded.
4387 // Allowed to be the same as 'key' or 'result'.
4388 // Unchanged on bailout so 'key' or 'result' can be used
4389 // in further computation.
4393 GetNumberHash(r0, r1);
4395 // Compute capacity mask.
4396 SmiToInteger32(r1, FieldOperand(elements,
4397 SeededNumberDictionary::kCapacityOffset));
4400 // Generate an unrolled loop that performs a few probes before giving up.
4401 for (int i = 0; i < kNumberDictionaryProbes; i++) {
4402 // Use r2 for index calculations and keep the hash intact in r0.
4404 // Compute the masked index: (hash + i + i * i) & mask.
4406 addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4410 // Scale the index by multiplying by the entry size.
4411 DCHECK(SeededNumberDictionary::kEntrySize == 3);
4412 leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
4414 // Check if the key matches.
4415 cmpp(key, FieldOperand(elements,
4418 SeededNumberDictionary::kElementsStartOffset));
4419 if (i != (kNumberDictionaryProbes - 1)) {
4427 // Check that the value is a normal propety.
4428 const int kDetailsOffset =
4429 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4430 DCHECK_EQ(NORMAL, 0);
4431 Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4432 Smi::FromInt(PropertyDetails::TypeField::kMask));
4435 // Get the value at the masked, scaled index.
4436 const int kValueOffset =
4437 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4438 movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
4442 void MacroAssembler::LoadAllocationTopHelper(Register result,
4444 AllocationFlags flags) {
4445 ExternalReference allocation_top =
4446 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4448 // Just return if allocation top is already known.
4449 if ((flags & RESULT_CONTAINS_TOP) != 0) {
4450 // No use of scratch if allocation top is provided.
4451 DCHECK(!scratch.is_valid());
4453 // Assert that result actually contains top on entry.
4454 Operand top_operand = ExternalOperand(allocation_top);
4455 cmpp(result, top_operand);
4456 Check(equal, kUnexpectedAllocationTop);
4461 // Move address of new object to result. Use scratch register if available,
4462 // and keep address in scratch until call to UpdateAllocationTopHelper.
4463 if (scratch.is_valid()) {
4464 LoadAddress(scratch, allocation_top);
4465 movp(result, Operand(scratch, 0));
4467 Load(result, allocation_top);
4472 void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
4475 AllocationFlags flags) {
4476 if (kPointerSize == kDoubleSize) {
4477 if (FLAG_debug_code) {
4478 testl(result, Immediate(kDoubleAlignmentMask));
4479 Check(zero, kAllocationIsNotDoubleAligned);
4482 // Align the next allocation. Storing the filler map without checking top
4483 // is safe in new-space because the limit of the heap is aligned there.
4484 DCHECK(kPointerSize * 2 == kDoubleSize);
4485 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
4486 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4487 // Make sure scratch is not clobbered by this function as it might be
4488 // used in UpdateAllocationTopHelper later.
4489 DCHECK(!scratch.is(kScratchRegister));
4491 testl(result, Immediate(kDoubleAlignmentMask));
4492 j(zero, &aligned, Label::kNear);
4493 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
4494 ExternalReference allocation_limit =
4495 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4496 cmpp(result, ExternalOperand(allocation_limit));
4497 j(above_equal, gc_required);
4499 LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
4500 movp(Operand(result, 0), kScratchRegister);
4501 addp(result, Immediate(kDoubleSize / 2));
4507 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
4509 AllocationFlags flags) {
4510 if (emit_debug_code()) {
4511 testp(result_end, Immediate(kObjectAlignmentMask));
4512 Check(zero, kUnalignedAllocationInNewSpace);
4515 ExternalReference allocation_top =
4516 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4519 if (scratch.is_valid()) {
4520 // Scratch already contains address of allocation top.
4521 movp(Operand(scratch, 0), result_end);
4523 Store(allocation_top, result_end);
4528 void MacroAssembler::Allocate(int object_size,
4530 Register result_end,
4533 AllocationFlags flags) {
4534 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
4535 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4536 if (!FLAG_inline_new) {
4537 if (emit_debug_code()) {
4538 // Trash the registers to simulate an allocation failure.
4539 movl(result, Immediate(0x7091));
4540 if (result_end.is_valid()) {
4541 movl(result_end, Immediate(0x7191));
4543 if (scratch.is_valid()) {
4544 movl(scratch, Immediate(0x7291));
4550 DCHECK(!result.is(result_end));
4552 // Load address of new object into result.
4553 LoadAllocationTopHelper(result, scratch, flags);
4555 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4556 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4559 // Calculate new top and bail out if new space is exhausted.
4560 ExternalReference allocation_limit =
4561 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4563 Register top_reg = result_end.is_valid() ? result_end : result;
4565 if (!top_reg.is(result)) {
4566 movp(top_reg, result);
4568 addp(top_reg, Immediate(object_size));
4569 j(carry, gc_required);
4570 Operand limit_operand = ExternalOperand(allocation_limit);
4571 cmpp(top_reg, limit_operand);
4572 j(above, gc_required);
4574 // Update allocation top.
4575 UpdateAllocationTopHelper(top_reg, scratch, flags);
4577 bool tag_result = (flags & TAG_OBJECT) != 0;
4578 if (top_reg.is(result)) {
4580 subp(result, Immediate(object_size - kHeapObjectTag));
4582 subp(result, Immediate(object_size));
4584 } else if (tag_result) {
4585 // Tag the result if requested.
4586 DCHECK(kHeapObjectTag == 1);
4592 void MacroAssembler::Allocate(int header_size,
4593 ScaleFactor element_size,
4594 Register element_count,
4596 Register result_end,
4599 AllocationFlags flags) {
4600 DCHECK((flags & SIZE_IN_WORDS) == 0);
4601 leap(result_end, Operand(element_count, element_size, header_size));
4602 Allocate(result_end, result, result_end, scratch, gc_required, flags);
4606 void MacroAssembler::Allocate(Register object_size,
4608 Register result_end,
4611 AllocationFlags flags) {
4612 DCHECK((flags & SIZE_IN_WORDS) == 0);
4613 if (!FLAG_inline_new) {
4614 if (emit_debug_code()) {
4615 // Trash the registers to simulate an allocation failure.
4616 movl(result, Immediate(0x7091));
4617 movl(result_end, Immediate(0x7191));
4618 if (scratch.is_valid()) {
4619 movl(scratch, Immediate(0x7291));
4621 // object_size is left unchanged by this function.
4626 DCHECK(!result.is(result_end));
4628 // Load address of new object into result.
4629 LoadAllocationTopHelper(result, scratch, flags);
4631 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4632 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4635 // Calculate new top and bail out if new space is exhausted.
4636 ExternalReference allocation_limit =
4637 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4638 if (!object_size.is(result_end)) {
4639 movp(result_end, object_size);
4641 addp(result_end, result);
4642 j(carry, gc_required);
4643 Operand limit_operand = ExternalOperand(allocation_limit);
4644 cmpp(result_end, limit_operand);
4645 j(above, gc_required);
4647 // Update allocation top.
4648 UpdateAllocationTopHelper(result_end, scratch, flags);
4650 // Tag the result if requested.
4651 if ((flags & TAG_OBJECT) != 0) {
4652 addp(result, Immediate(kHeapObjectTag));
4657 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
4658 ExternalReference new_space_allocation_top =
4659 ExternalReference::new_space_allocation_top_address(isolate());
4661 // Make sure the object has no tag before resetting top.
4662 andp(object, Immediate(~kHeapObjectTagMask));
4663 Operand top_operand = ExternalOperand(new_space_allocation_top);
4665 cmpp(object, top_operand);
4666 Check(below, kUndoAllocationOfNonAllocatedMemory);
4668 movp(top_operand, object);
4672 void MacroAssembler::AllocateHeapNumber(Register result,
4676 // Allocate heap number in new space.
4677 Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4679 Heap::RootListIndex map_index = mode == MUTABLE
4680 ? Heap::kMutableHeapNumberMapRootIndex
4681 : Heap::kHeapNumberMapRootIndex;
4684 LoadRoot(kScratchRegister, map_index);
4685 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4689 #define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
4690 V(Float32x4, float32x4, FLOAT32x4) \
4691 V(Float64x2, float64x2, FLOAT64x2) \
4692 V(Int32x4, int32x4, INT32x4)
4694 #define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(Type, type, TYPE) \
4695 void MacroAssembler::Allocate##Type(Register result, \
4696 Register scratch1, \
4697 Register scratch2, \
4698 Register scratch3, \
4699 Label* gc_required) { \
4700 /* Allocate SIMD128 object. */ \
4701 Allocate(Type::kSize, result, scratch1, no_reg, gc_required, TAG_OBJECT);\
4702 /* Load the initial map and assign to new allocated object. */ \
4703 movp(scratch1, Operand(rbp, StandardFrameConstants::kContextOffset)); \
4706 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); \
4708 FieldOperand(scratch1, GlobalObject::kNativeContextOffset)); \
4711 Context::SlotOffset(Context::TYPE##_FUNCTION_INDEX))); \
4712 LoadGlobalFunctionInitialMap(scratch1, scratch1); \
4713 movp(FieldOperand(result, JSObject::kMapOffset), \
4715 /* Initialize the properties and elements. */ \
4716 MoveHeapObject(kScratchRegister, \
4717 isolate()->factory()->empty_fixed_array()); \
4718 movp(FieldOperand(result, JSObject::kPropertiesOffset), \
4719 kScratchRegister); \
4720 movp(FieldOperand(result, JSObject::kElementsOffset), \
4721 kScratchRegister); \
4722 /* Allocate FixedTypedArray object. */ \
4723 Allocate(FixedTypedArrayBase::kDataOffset + k##Type##Size, \
4724 scratch1, scratch2, no_reg, gc_required, TAG_OBJECT); \
4725 MoveHeapObject(kScratchRegister, \
4726 isolate()->factory()->fixed_##type##_array_map()); \
4727 movp(FieldOperand(scratch1, FixedTypedArrayBase::kMapOffset), \
4728 kScratchRegister); \
4729 movp(scratch3, Immediate(1)); \
4730 Integer32ToSmi(scratch2, scratch3); \
4731 movp(FieldOperand(scratch1, FixedTypedArrayBase::kLengthOffset), \
4733 /* Assign FixedTypedArray object to SIMD128 object. */ \
4734 movp(FieldOperand(result, Type::kValueOffset), scratch1); \
4737 SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
4740 void MacroAssembler::AllocateTwoByteString(Register result,
4745 Label* gc_required) {
4746 // Calculate the number of bytes needed for the characters in the string while
4747 // observing object alignment.
4748 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
4749 kObjectAlignmentMask;
4750 DCHECK(kShortSize == 2);
4751 // scratch1 = length * 2 + kObjectAlignmentMask.
4752 leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
4754 andp(scratch1, Immediate(~kObjectAlignmentMask));
4755 if (kHeaderAlignment > 0) {
4756 subp(scratch1, Immediate(kHeaderAlignment));
4759 // Allocate two byte string in new space.
4760 Allocate(SeqTwoByteString::kHeaderSize,
4769 // Set the map, length and hash field.
4770 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
4771 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4772 Integer32ToSmi(scratch1, length);
4773 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4774 movp(FieldOperand(result, String::kHashFieldOffset),
4775 Immediate(String::kEmptyHashField));
4779 void MacroAssembler::AllocateAsciiString(Register result,
4784 Label* gc_required) {
4785 // Calculate the number of bytes needed for the characters in the string while
4786 // observing object alignment.
4787 const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
4788 kObjectAlignmentMask;
4789 movl(scratch1, length);
4790 DCHECK(kCharSize == 1);
4791 addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
4792 andp(scratch1, Immediate(~kObjectAlignmentMask));
4793 if (kHeaderAlignment > 0) {
4794 subp(scratch1, Immediate(kHeaderAlignment));
4797 // Allocate ASCII string in new space.
4798 Allocate(SeqOneByteString::kHeaderSize,
4807 // Set the map, length and hash field.
4808 LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
4809 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4810 Integer32ToSmi(scratch1, length);
4811 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4812 movp(FieldOperand(result, String::kHashFieldOffset),
4813 Immediate(String::kEmptyHashField));
4817 void MacroAssembler::AllocateTwoByteConsString(Register result,
4820 Label* gc_required) {
4821 // Allocate heap number in new space.
4822 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4825 // Set the map. The other fields are left uninitialized.
4826 LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
4827 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4831 void MacroAssembler::AllocateAsciiConsString(Register result,
4834 Label* gc_required) {
4835 Allocate(ConsString::kSize,
4842 // Set the map. The other fields are left uninitialized.
4843 LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
4844 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4848 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4851 Label* gc_required) {
4852 // Allocate heap number in new space.
4853 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4856 // Set the map. The other fields are left uninitialized.
4857 LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
4858 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4862 void MacroAssembler::AllocateAsciiSlicedString(Register result,
4865 Label* gc_required) {
4866 // Allocate heap number in new space.
4867 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4870 // Set the map. The other fields are left uninitialized.
4871 LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
4872 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4876 // Copy memory, byte-by-byte, from source to destination. Not optimized for
4877 // long or aligned copies. The contents of scratch and length are destroyed.
4878 // Destination is incremented by length, source, length and scratch are
4880 // A simpler loop is faster on small copies, but slower on large ones.
4881 // The cld() instruction must have been emitted, to set the direction flag(),
4882 // before calling this function.
4883 void MacroAssembler::CopyBytes(Register destination,
4888 DCHECK(min_length >= 0);
4889 if (emit_debug_code()) {
4890 cmpl(length, Immediate(min_length));
4891 Assert(greater_equal, kInvalidMinLength);
4893 Label short_loop, len8, len16, len24, done, short_string;
4895 const int kLongStringLimit = 4 * kPointerSize;
4896 if (min_length <= kLongStringLimit) {
4897 cmpl(length, Immediate(kPointerSize));
4898 j(below, &short_string, Label::kNear);
4901 DCHECK(source.is(rsi));
4902 DCHECK(destination.is(rdi));
4903 DCHECK(length.is(rcx));
4905 if (min_length <= kLongStringLimit) {
4906 cmpl(length, Immediate(2 * kPointerSize));
4907 j(below_equal, &len8, Label::kNear);
4908 cmpl(length, Immediate(3 * kPointerSize));
4909 j(below_equal, &len16, Label::kNear);
4910 cmpl(length, Immediate(4 * kPointerSize));
4911 j(below_equal, &len24, Label::kNear);
4914 // Because source is 8-byte aligned in our uses of this function,
4915 // we keep source aligned for the rep movs operation by copying the odd bytes
4916 // at the end of the ranges.
4917 movp(scratch, length);
4918 shrl(length, Immediate(kPointerSizeLog2));
4920 // Move remaining bytes of length.
4921 andl(scratch, Immediate(kPointerSize - 1));
4922 movp(length, Operand(source, scratch, times_1, -kPointerSize));
4923 movp(Operand(destination, scratch, times_1, -kPointerSize), length);
4924 addp(destination, scratch);
4926 if (min_length <= kLongStringLimit) {
4927 jmp(&done, Label::kNear);
4929 movp(scratch, Operand(source, 2 * kPointerSize));
4930 movp(Operand(destination, 2 * kPointerSize), scratch);
4932 movp(scratch, Operand(source, kPointerSize));
4933 movp(Operand(destination, kPointerSize), scratch);
4935 movp(scratch, Operand(source, 0));
4936 movp(Operand(destination, 0), scratch);
4937 // Move remaining bytes of length.
4938 movp(scratch, Operand(source, length, times_1, -kPointerSize));
4939 movp(Operand(destination, length, times_1, -kPointerSize), scratch);
4940 addp(destination, length);
4941 jmp(&done, Label::kNear);
4943 bind(&short_string);
4944 if (min_length == 0) {
4945 testl(length, length);
4946 j(zero, &done, Label::kNear);
4950 movb(scratch, Operand(source, 0));
4951 movb(Operand(destination, 0), scratch);
4955 j(not_zero, &short_loop);
4962 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
4963 Register end_offset,
4968 movp(Operand(start_offset, 0), filler);
4969 addp(start_offset, Immediate(kPointerSize));
4971 cmpp(start_offset, end_offset);
4976 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4977 if (context_chain_length > 0) {
4978 // Move up the chain of contexts to the context containing the slot.
4979 movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4980 for (int i = 1; i < context_chain_length; i++) {
4981 movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4984 // Slot is in the current function context. Move it into the
4985 // destination register in case we store into it (the write barrier
4986 // cannot be allowed to destroy the context in rsi).
4990 // We should not have found a with context by walking the context
4991 // chain (i.e., the static scope chain and runtime context chain do
4992 // not agree). A variable occurring in such a scope should have
4993 // slot type LOOKUP and not CONTEXT.
4994 if (emit_debug_code()) {
4995 CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
4996 Heap::kWithContextMapRootIndex);
4997 Check(not_equal, kVariableResolvedToWithContext);
5002 void MacroAssembler::LoadTransitionedArrayMapConditional(
5003 ElementsKind expected_kind,
5004 ElementsKind transitioned_kind,
5005 Register map_in_out,
5007 Label* no_map_match) {
5008 // Load the global or builtins object from the current context.
5010 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
5011 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
5013 // Check that the function's map is the same as the expected cached map.
5014 movp(scratch, Operand(scratch,
5015 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
5017 int offset = expected_kind * kPointerSize +
5018 FixedArrayBase::kHeaderSize;
5019 cmpp(map_in_out, FieldOperand(scratch, offset));
5020 j(not_equal, no_map_match);
5022 // Use the transitioned cached map.
5023 offset = transitioned_kind * kPointerSize +
5024 FixedArrayBase::kHeaderSize;
5025 movp(map_in_out, FieldOperand(scratch, offset));
5030 static const int kRegisterPassedArguments = 4;
5032 static const int kRegisterPassedArguments = 6;
5035 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
5036 // Load the global or builtins object from the current context.
5038 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
5039 // Load the native context from the global or builtins object.
5040 movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
5041 // Load the function from the native context.
5042 movp(function, Operand(function, Context::SlotOffset(index)));
5046 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
5048 // Load the initial map. The global functions all have initial maps.
5049 movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5050 if (emit_debug_code()) {
5052 CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
5055 Abort(kGlobalFunctionsMustHaveInitialMap);
5061 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
5062 // On Windows 64 stack slots are reserved by the caller for all arguments
5063 // including the ones passed in registers, and space is always allocated for
5064 // the four register arguments even if the function takes fewer than four
5066 // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
5067 // and the caller does not reserve stack slots for them.
5068 DCHECK(num_arguments >= 0);
5070 const int kMinimumStackSlots = kRegisterPassedArguments;
5071 if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
5072 return num_arguments;
5074 if (num_arguments < kRegisterPassedArguments) return 0;
5075 return num_arguments - kRegisterPassedArguments;
5080 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5083 uint32_t encoding_mask) {
5085 JumpIfNotSmi(string, &is_object);
5090 movp(value, FieldOperand(string, HeapObject::kMapOffset));
5091 movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
5093 andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
5094 cmpp(value, Immediate(encoding_mask));
5096 Check(equal, kUnexpectedStringType);
5098 // The index is assumed to be untagged coming in, tag it to compare with the
5099 // string length without using a temp register, it is restored at the end of
5101 Integer32ToSmi(index, index);
5102 SmiCompare(index, FieldOperand(string, String::kLengthOffset));
5103 Check(less, kIndexIsTooLarge);
5105 SmiCompare(index, Smi::FromInt(0));
5106 Check(greater_equal, kIndexIsNegative);
5108 // Restore the index
5109 SmiToInteger32(index, index);
5113 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
5114 int frame_alignment = base::OS::ActivationFrameAlignment();
5115 DCHECK(frame_alignment != 0);
5116 DCHECK(num_arguments >= 0);
5118 // Make stack end at alignment and allocate space for arguments and old rsp.
5119 movp(kScratchRegister, rsp);
5120 DCHECK(IsPowerOf2(frame_alignment));
5121 int argument_slots_on_stack =
5122 ArgumentStackSlotsForCFunctionCall(num_arguments);
5123 subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
5124 andp(rsp, Immediate(-frame_alignment));
5125 movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
5129 void MacroAssembler::CallCFunction(ExternalReference function,
5130 int num_arguments) {
5131 LoadAddress(rax, function);
5132 CallCFunction(rax, num_arguments);
5136 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
5137 DCHECK(has_frame());
5138 // Check stack alignment.
5139 if (emit_debug_code()) {
5140 CheckStackAlignment();
5144 DCHECK(base::OS::ActivationFrameAlignment() != 0);
5145 DCHECK(num_arguments >= 0);
5146 int argument_slots_on_stack =
5147 ArgumentStackSlotsForCFunctionCall(num_arguments);
5148 movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
5153 bool AreAliased(Register reg1,
5161 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
5162 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
5163 reg7.is_valid() + reg8.is_valid();
5166 if (reg1.is_valid()) regs |= reg1.bit();
5167 if (reg2.is_valid()) regs |= reg2.bit();
5168 if (reg3.is_valid()) regs |= reg3.bit();
5169 if (reg4.is_valid()) regs |= reg4.bit();
5170 if (reg5.is_valid()) regs |= reg5.bit();
5171 if (reg6.is_valid()) regs |= reg6.bit();
5172 if (reg7.is_valid()) regs |= reg7.bit();
5173 if (reg8.is_valid()) regs |= reg8.bit();
5174 int n_of_non_aliasing_regs = NumRegs(regs);
5176 return n_of_valid_regs != n_of_non_aliasing_regs;
5181 CodePatcher::CodePatcher(byte* address, int size)
5182 : address_(address),
5184 masm_(NULL, address, size + Assembler::kGap) {
5185 // Create a new macro assembler pointing to the address of the code to patch.
5186 // The size is adjusted with kGap on order for the assembler to generate size
5187 // bytes of instructions without failing with buffer size constraints.
5188 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5192 CodePatcher::~CodePatcher() {
5193 // Indicate that code has changed.
5194 CpuFeatures::FlushICache(address_, size_);
5196 // Check that the code was patched as expected.
5197 DCHECK(masm_.pc_ == address_ + size_);
5198 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5202 void MacroAssembler::CheckPageFlag(
5207 Label* condition_met,
5208 Label::Distance condition_met_distance) {
5209 DCHECK(cc == zero || cc == not_zero);
5210 if (scratch.is(object)) {
5211 andp(scratch, Immediate(~Page::kPageAlignmentMask));
5213 movp(scratch, Immediate(~Page::kPageAlignmentMask));
5214 andp(scratch, object);
5216 if (mask < (1 << kBitsPerByte)) {
5217 testb(Operand(scratch, MemoryChunk::kFlagsOffset),
5218 Immediate(static_cast<uint8_t>(mask)));
5220 testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
5222 j(cc, condition_met, condition_met_distance);
5226 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5228 Label* if_deprecated) {
5229 if (map->CanBeDeprecated()) {
5231 movl(scratch, FieldOperand(scratch, Map::kBitField3Offset));
5232 andl(scratch, Immediate(Map::Deprecated::kMask));
5233 j(not_zero, if_deprecated);
5238 void MacroAssembler::JumpIfBlack(Register object,
5239 Register bitmap_scratch,
5240 Register mask_scratch,
5242 Label::Distance on_black_distance) {
5243 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
5244 GetMarkBits(object, bitmap_scratch, mask_scratch);
5246 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5247 // The mask_scratch register contains a 1 at the position of the first bit
5248 // and a 0 at all other positions, including the position of the second bit.
5249 movp(rcx, mask_scratch);
5250 // Make rcx into a mask that covers both marking bits using the operation
5251 // rcx = mask | (mask << 1).
5252 leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
5253 // Note that we are using a 4-byte aligned 8-byte load.
5254 andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
5255 cmpp(mask_scratch, rcx);
5256 j(equal, on_black, on_black_distance);
5260 // Detect some, but not all, common pointer-free objects. This is used by the
5261 // incremental write barrier which doesn't care about oddballs (they are always
5262 // marked black immediately so this code is not hit).
5263 void MacroAssembler::JumpIfDataObject(
5266 Label* not_data_object,
5267 Label::Distance not_data_object_distance) {
5268 Label is_data_object;
5269 movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
5270 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5271 j(equal, &is_data_object, Label::kNear);
5272 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5273 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5274 // If it's a string and it's not a cons string then it's an object containing
5276 testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
5277 Immediate(kIsIndirectStringMask | kIsNotStringMask));
5278 j(not_zero, not_data_object, not_data_object_distance);
5279 bind(&is_data_object);
5283 void MacroAssembler::GetMarkBits(Register addr_reg,
5284 Register bitmap_reg,
5285 Register mask_reg) {
5286 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
5287 movp(bitmap_reg, addr_reg);
5288 // Sign extended 32 bit immediate.
5289 andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
5290 movp(rcx, addr_reg);
5292 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
5293 shrl(rcx, Immediate(shift));
5295 Immediate((Page::kPageAlignmentMask >> shift) &
5296 ~(Bitmap::kBytesPerCell - 1)));
5298 addp(bitmap_reg, rcx);
5299 movp(rcx, addr_reg);
5300 shrl(rcx, Immediate(kPointerSizeLog2));
5301 andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
5302 movl(mask_reg, Immediate(1));
5307 void MacroAssembler::EnsureNotWhite(
5309 Register bitmap_scratch,
5310 Register mask_scratch,
5311 Label* value_is_white_and_not_data,
5312 Label::Distance distance) {
5313 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
5314 GetMarkBits(value, bitmap_scratch, mask_scratch);
5316 // If the value is black or grey we don't need to do anything.
5317 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5318 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5319 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5320 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5324 // Since both black and grey have a 1 in the first position and white does
5325 // not have a 1 there we only need to check one bit.
5326 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5327 j(not_zero, &done, Label::kNear);
5329 if (emit_debug_code()) {
5330 // Check for impossible bit pattern.
5333 // shl. May overflow making the check conservative.
5334 addp(mask_scratch, mask_scratch);
5335 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5336 j(zero, &ok, Label::kNear);
5342 // Value is white. We check whether it is data that doesn't need scanning.
5343 // Currently only checks for HeapNumber and non-cons strings.
5344 Register map = rcx; // Holds map while checking type.
5345 Register length = rcx; // Holds length of object after checking type.
5346 Label not_heap_number;
5347 Label is_data_object;
5349 // Check for heap-number
5350 movp(map, FieldOperand(value, HeapObject::kMapOffset));
5351 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
5352 j(not_equal, ¬_heap_number, Label::kNear);
5353 movp(length, Immediate(HeapNumber::kSize));
5354 jmp(&is_data_object, Label::kNear);
5356 bind(¬_heap_number);
5357 // Check for strings.
5358 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5359 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5360 // If it's a string and it's not a cons string then it's an object containing
5362 Register instance_type = rcx;
5363 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
5364 testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
5365 j(not_zero, value_is_white_and_not_data);
5366 // It's a non-indirect (non-cons and non-slice) string.
5367 // If it's external, the length is just ExternalString::kSize.
5368 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5370 // External strings are the only ones with the kExternalStringTag bit
5372 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
5373 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
5374 testb(instance_type, Immediate(kExternalStringTag));
5375 j(zero, ¬_external, Label::kNear);
5376 movp(length, Immediate(ExternalString::kSize));
5377 jmp(&is_data_object, Label::kNear);
5379 bind(¬_external);
5380 // Sequential string, either ASCII or UC16.
5381 DCHECK(kOneByteStringTag == 0x04);
5382 andp(length, Immediate(kStringEncodingMask));
5383 xorp(length, Immediate(kStringEncodingMask));
5384 addp(length, Immediate(0x04));
5385 // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
5386 imulp(length, FieldOperand(value, String::kLengthOffset));
5387 shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
5388 addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
5389 andp(length, Immediate(~kObjectAlignmentMask));
5391 bind(&is_data_object);
5392 // Value is a data object, and it is white. Mark it black. Since we know
5393 // that the object is white we can make it black by flipping one bit.
5394 orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5396 andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
5397 addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
5403 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5405 Register empty_fixed_array_value = r8;
5406 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5409 // Check if the enum length field is properly initialized, indicating that
5410 // there is an enum cache.
5411 movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
5413 EnumLength(rdx, rbx);
5414 Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
5415 j(equal, call_runtime);
5421 movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
5423 // For all objects but the receiver, check that the cache is empty.
5424 EnumLength(rdx, rbx);
5425 Cmp(rdx, Smi::FromInt(0));
5426 j(not_equal, call_runtime);
5430 // Check that there are no elements. Register rcx contains the current JS
5431 // object we've reached through the prototype chain.
5433 cmpp(empty_fixed_array_value,
5434 FieldOperand(rcx, JSObject::kElementsOffset));
5435 j(equal, &no_elements);
5437 // Second chance, the object may be using the empty slow element dictionary.
5438 LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
5439 cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
5440 j(not_equal, call_runtime);
5443 movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
5444 cmpp(rcx, null_value);
5445 j(not_equal, &next);
5448 void MacroAssembler::TestJSArrayForAllocationMemento(
5449 Register receiver_reg,
5450 Register scratch_reg,
5451 Label* no_memento_found) {
5452 ExternalReference new_space_start =
5453 ExternalReference::new_space_start(isolate());
5454 ExternalReference new_space_allocation_top =
5455 ExternalReference::new_space_allocation_top_address(isolate());
5457 leap(scratch_reg, Operand(receiver_reg,
5458 JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5459 Move(kScratchRegister, new_space_start);
5460 cmpp(scratch_reg, kScratchRegister);
5461 j(less, no_memento_found);
5462 cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
5463 j(greater, no_memento_found);
5464 CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
5465 Heap::kAllocationMementoMapRootIndex);
5469 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5474 DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
5475 DCHECK(!scratch1.is(scratch0));
5476 Register current = scratch0;
5479 movp(current, object);
5481 // Loop based on the map going up the prototype chain.
5483 movp(current, FieldOperand(current, HeapObject::kMapOffset));
5484 movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
5485 DecodeField<Map::ElementsKindBits>(scratch1);
5486 cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
5488 movp(current, FieldOperand(current, Map::kPrototypeOffset));
5489 CompareRoot(current, Heap::kNullValueRootIndex);
5490 j(not_equal, &loop_again);
5494 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
5495 DCHECK(!dividend.is(rax));
5496 DCHECK(!dividend.is(rdx));
5497 MultiplierAndShift ms(divisor);
5498 movl(rax, Immediate(ms.multiplier()));
5500 if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
5501 if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
5502 if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
5503 movl(rax, dividend);
5504 shrl(rax, Immediate(31));
5509 } } // namespace v8::internal
5511 #endif // V8_TARGET_ARCH_X64