// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "bootstrapper.h"
#include "cpu-profiler.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h"
#include "serialize.h"
#include "isolate-inl.h"

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      root_array_available_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),

static const int64_t kInvalidRootRegisterDelta = -1;

int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
  if (predictable_code_size() &&
      (other.address() < reinterpret_cast<Address>(isolate()) ||
       other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
    return kInvalidRootRegisterDelta;
  Address roots_register_value = kRootRegisterBias +
      reinterpret_cast<Address>(isolate()->heap()->roots_array_start());

  int64_t delta = kInvalidRootRegisterDelta;  // Bogus initialization.
  if (kPointerSize == kInt64Size) {
    delta = other.address() - roots_register_value;
    // For x32, zero extend the address to 64-bit and calculate the delta.
    uint64_t o = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(other.address()));
    uint64_t r = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(roots_register_value));
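
// Example of the two paths above: on x64 the delta is a plain 64-bit
// subtraction; on x32 both 32-bit addresses are zero-extended first so the
// subtraction is done on well-defined unsigned 64-bit values (for instance
// 0x80001000 - 0x80000100 == 0xF00). Callers only use the delta after it
// also passes an is_int32() check.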

Operand MacroAssembler::ExternalOperand(ExternalReference target,
  if (root_array_available_ && !Serializer::enabled(isolate())) {
    int64_t delta = RootRegisterDelta(target);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      return Operand(kRootRegister, static_cast<int32_t>(delta));
  Move(scratch, target);
  return Operand(scratch, 0);

void MacroAssembler::Load(Register destination, ExternalReference source) {
  if (root_array_available_ && !Serializer::enabled(isolate())) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
  if (destination.is(rax)) {
    Move(kScratchRegister, source);
    movp(destination, Operand(kScratchRegister, 0));

void MacroAssembler::Store(ExternalReference destination, Register source) {
  if (root_array_available_ && !Serializer::enabled(isolate())) {
    int64_t delta = RootRegisterDelta(destination);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
  if (source.is(rax)) {
    store_rax(destination);
    Move(kScratchRegister, destination);
    movp(Operand(kScratchRegister, 0), source);

void MacroAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  if (root_array_available_ && !Serializer::enabled(isolate())) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
  Move(destination, source);

int MacroAssembler::LoadAddressSize(ExternalReference source) {
  if (root_array_available_ && !Serializer::enabled(isolate())) {
    // This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
    // instruction below.
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      // Operand is leap(scratch, Operand(kRootRegister, delta));
      // Opcodes: REX.W 8D ModRM Disp8/Disp32 - 4 or 7 bytes.
      if (!is_int8(static_cast<int32_t>(delta))) {
        size += 3;  // Need full four-byte displacement in lea.
  // Size of movp(destination, src);
  return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
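
// Worked example for the size computed above: leap(scratch,
// Operand(kRootRegister, delta)) encodes as REX.W + 8D + ModRM + disp8,
// i.e. 4 bytes, when the delta fits in a signed byte; a 32-bit displacement
// replaces the single displacement byte and adds 3 more, giving 7. The
// fallback is a movp of the full address: REX.W B8+r followed by a
// kPointerSize immediate, which is what
// kMoveAddressIntoScratchRegisterInstructionLength accounts for.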

void MacroAssembler::PushAddress(ExternalReference source) {
  int64_t address = reinterpret_cast<int64_t>(source.address());
  if (is_int32(address) && !Serializer::enabled(isolate())) {
    if (emit_debug_code()) {
      Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
    Push(Immediate(static_cast<int32_t>(address)));
  LoadAddress(kScratchRegister, source);
  Push(kScratchRegister);

void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  movp(destination, Operand(kRootRegister,
                            (index << kPointerSizeLog2) - kRootRegisterBias));
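
// The bias exists to make better use of the signed 8-bit displacement:
// assuming the x64 value kRootRegisterBias == 128, root indices 0..31 map
// to displacements in [-128, 120] and encode with a one-byte offset,
// instead of only the first 16 roots fitting below 128.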

void MacroAssembler::LoadRootIndexed(Register destination,
                                     Register variable_offset,
  ASSERT(root_array_available_);
       Operand(kRootRegister,
               variable_offset, times_pointer_size,
               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));

void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),

void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));

void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  cmpp(with, Operand(kRootRegister,
                     (index << kPointerSizeLog2) - kRootRegisterBias));

void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  ASSERT(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  cmpp(with, kScratchRegister);

void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         SaveFPRegsMode save_fp,
                                         RememberedSetFinalAction and_then) {
  if (emit_debug_code()) {
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
  // Load store buffer top.
  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Store pointer to buffer.
  movp(Operand(scratch, 0), addr);
  // Increment buffer top.
  addp(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Call stub on end of buffer.
  // Check for end of buffer.
  testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    bind(&buffer_overflowed);
    ASSERT(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(isolate(), save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ASSERT(and_then == kFallThroughAtEnd);

void MacroAssembler::InNewSpace(Register object,
                                Label::Distance distance) {
  if (Serializer::enabled(isolate())) {
    // Can't do arithmetic on external references if it might get serialized.
    // The mask isn't really an address. We load it as an external reference in
    // case the size of the new space is different between the snapshot maker
    // and the running system.
    if (scratch.is(object)) {
      Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
      andp(scratch, kScratchRegister);
      Move(scratch, ExternalReference::new_space_mask(isolate()));
      andp(scratch, object);
    Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
    cmpp(scratch, kScratchRegister);
    j(cc, branch, distance);
    ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
    intptr_t new_space_start =
        reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
    Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
         Assembler::RelocInfoNone());
    if (scratch.is(object)) {
      addp(scratch, kScratchRegister);
      leap(scratch, Operand(object, kScratchRegister, times_1, 0));
         Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
    j(cc, branch, distance);
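
// The non-serializer path above computes object - new_space_start and then
// masks it with NewSpaceMask(): the masked difference is zero exactly when
// the address lies inside the new-space reservation, so the same condition
// code passed by the caller works for both paths.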

void MacroAssembler::RecordWriteField(
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  leap(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
    j(zero, &ok, Label::kNear);

      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, Assembler::RelocInfoNone());
    Move(dst, kZapValue, Assembler::RelocInfoNone());

void MacroAssembler::RecordWriteArray(Register object,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);

  // Array access: calculate the destination address. Index is not a smi.
  Register dst = index;
  leap(dst, Operand(object, index, times_pointer_size,
                    FixedArray::kHeaderSize - kHeapObjectTag));

      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, Assembler::RelocInfoNone());
    Move(index, kZapValue, Assembler::RelocInfoNone());

void MacroAssembler::RecordWrite(Register object,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  ASSERT(!object.is(value));
  ASSERT(!object.is(address));
  ASSERT(!value.is(address));
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {

  if (emit_debug_code()) {
    cmpp(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done);

                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,

  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,

  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(address, kZapValue, Assembler::RelocInfoNone());
    Move(value, kZapValue, Assembler::RelocInfoNone());
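
// Typical use (an illustrative sketch, not code from this file): after a
// store such as movp(FieldOperand(object, offset), value), the caller
// materializes the slot address and invokes the barrier:
//   leap(address, FieldOperand(object, offset));
//   RecordWrite(object, address, value, kDontSaveFPRegs);
// RecordWriteField above packages exactly this pattern.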

void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
  if (emit_debug_code()) Check(cc, reason);

void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedDoubleArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedCOWArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);

void MacroAssembler::Check(Condition cc, BailoutReason reason) {
  j(cc, &L, Label::kNear);
  // Control will not return here.

void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    ASSERT(IsPowerOf2(frame_alignment));
    Label alignment_as_expected;
    testp(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    bind(&alignment_as_expected);

void MacroAssembler::NegativeZeroTest(Register result,
  testl(result, result);
  j(not_zero, &ok, Label::kNear);

void MacroAssembler::Abort(BailoutReason reason) {
  const char* msg = GetBailoutReason(reason);
  RecordComment("Abort message: ");

  if (FLAG_trap_on_abort) {

  Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
       Assembler::RelocInfoNone());
  Push(kScratchRegister);

    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 1);
    CallRuntime(Runtime::kAbort, 1);
  // Control will not return here.

void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);

void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);

void MacroAssembler::StubReturn(int argc) {
  ASSERT(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);

bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();

void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    addp(rsp, Immediate(num_arguments * kPointerSize));
  LoadRoot(rax, Heap::kUndefinedValueRootIndex);

void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  // We want the smi-tagged index in key. Even if we subsequently go to
  // the slow case, converting the key to a smi is always valid.
  // hash: key's hash field, including its array index value.
  andp(hash, Immediate(String::kArrayIndexValueMask));
  shrp(hash, Immediate(String::kHashShift));
  // Here we actually clobber the key, which will be used if calling into
  // runtime later. However, as the new key is the numeric value of a string
  // key, there is no difference in using either key.
  Integer32ToSmi(index, hash);
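
// Example: if the hash field's array-index bits encode 42, the andp/shrp
// pair above leaves the untagged integer 42 in hash, and Integer32ToSmi
// then stores Smi::FromInt(42) in index.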

void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  LoadAddress(rbx, ExternalReference(f, isolate()));
  CEntryStub ces(isolate(), f->result_size, save_doubles);

void MacroAssembler::CallExternalReference(const ExternalReference& ext,
  Set(rax, num_arguments);
  LoadAddress(rbx, ext);

  CEntryStub stub(isolate(), 1);

void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
  // ----------- S t a t e -------------
  //  -- rsp[0]                 : return address
  //  -- rsp[8]                 : argument num_arguments - 1
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  // -----------------------------------
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  JumpToExternalReference(ext, result_size);

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
  TailCallExternalReference(ExternalReference(fid, isolate()),

static int Offset(ExternalReference ref0, ExternalReference ref1) {
  int64_t offset = (ref0.address() - ref1.address());
  // Check that the offset fits into an int.
  ASSERT(static_cast<int>(offset) == offset);
  return static_cast<int>(offset);

void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
  EnterApiExitFrame(arg_stack_space);

void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
    Register thunk_last_arg,
    Operand return_value_operand,
    Operand* context_restore_operand) {
  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;

  Factory* factory = isolate()->factory();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = Offset(
      ExternalReference::handle_scope_limit_address(isolate()),
  const int kLevelOffset = Offset(
      ExternalReference::handle_scope_level_address(isolate()),
  ExternalReference scheduled_exception_address =
      ExternalReference::scheduled_exception_address(isolate());

  ASSERT(rdx.is(function_address) || r8.is(function_address));
  // Allocate HandleScope in callee-save registers.
  Register prev_next_address_reg = r14;
  Register prev_limit_reg = rbx;
  Register base_reg = r15;
  Move(base_reg, next_address);
  movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
  movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
  addl(Operand(base_reg, kLevelOffset), Immediate(1));

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1);
    LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();

  Label profiler_disabled;
  Label end_profiler_check;
  Move(rax, ExternalReference::is_profiling_address(isolate()));
  cmpb(Operand(rax, 0), Immediate(0));
  j(zero, &profiler_disabled);

  // Third parameter is the address of the actual getter function.
  Move(thunk_last_arg, function_address);
  Move(rax, thunk_ref);
  jmp(&end_profiler_check);

  bind(&profiler_disabled);
  // Call the api function!
  Move(rax, function_address);

  bind(&end_profiler_check);

  // Call the api function!

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1);
    LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();

  // Load the value from ReturnValue
  movp(rax, return_value_operand);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  subl(Operand(base_reg, kLevelOffset), Immediate(1));
  movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
  cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
  j(not_equal, &delete_allocated_handles);
  bind(&leave_exit_frame);

  // Check if the function scheduled an exception.
  Move(rsi, scheduled_exception_address);
  Cmp(Operand(rsi, 0), factory->the_hole_value());
  j(not_equal, &promote_scheduled_exception);
  bind(&exception_handled);

#if ENABLE_EXTRA_CHECKS
  // Check if the function returned a valid JavaScript value.
  Register return_value = rax;
  JumpIfSmi(return_value, &ok, Label::kNear);
  movp(map, FieldOperand(return_value, HeapObject::kMapOffset));

  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
  j(below, &ok, Label::kNear);

  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
  j(above_equal, &ok, Label::kNear);

  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kTrueValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kFalseValueRootIndex);
  j(equal, &ok, Label::kNear);

  CompareRoot(return_value, Heap::kNullValueRootIndex);
  j(equal, &ok, Label::kNear);

  Abort(kAPICallReturnedInvalidObject);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    movp(rsi, *context_restore_operand);
  LeaveApiExitFrame(!restore_context);
  ret(stack_space * kPointerSize);

  bind(&promote_scheduled_exception);
    FrameScope frame(this, StackFrame::INTERNAL);
    CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
  jmp(&exception_handled);

  // HandleScope limit has changed. Delete allocated extensions.
  bind(&delete_allocated_handles);
  movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
  movp(prev_limit_reg, rax);
  LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
                ExternalReference::delete_handle_scope_extensions(isolate()));
  movp(rax, prev_limit_reg);
  jmp(&leave_exit_frame);

void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  CEntryStub ces(isolate(), result_size);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);

void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinEntry(rdx, id);
  InvokeCode(rdx, expected, expected, flag, call_wrapper);

void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  movp(target, FieldOperand(target,
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));

void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(rdi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(rdi, id);
  movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));

#define REG(Name) { kRegister_ ## Name ## _Code }

static const Register saved_regs[] = {
  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
  REG(r9), REG(r10), REG(r11)

static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);

void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                     Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
  // r12 to r15 are callee-save on all platforms.
  if (fp_mode == kSaveFPRegs) {
    subp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movups(Operand(rsp, i * kSIMD128Size), reg);

void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
                                    Register exclusion3) {
  if (fp_mode == kSaveFPRegs) {
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movups(reg, Operand(rsp, i * kSIMD128Size));
    addp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {

void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {

void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {

void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
  ASSERT(!r.IsDouble());
  if (r.IsInteger8()) {
  } else if (r.IsUInteger8()) {
  } else if (r.IsInteger16()) {
  } else if (r.IsUInteger16()) {
  } else if (r.IsInteger32()) {

void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
  ASSERT(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
  } else if (r.IsInteger16() || r.IsUInteger16()) {
  } else if (r.IsInteger32()) {

void MacroAssembler::Set(Register dst, int64_t x) {
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));

void MacroAssembler::Set(const Operand& dst, intptr_t x) {
  if (kPointerSize == kInt64Size) {
      movp(dst, Immediate(static_cast<int32_t>(x)));
      Set(kScratchRegister, x);
      movp(dst, kScratchRegister);
    movp(dst, Immediate(static_cast<int32_t>(x)));

// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.

bool MacroAssembler::IsUnsafeInt(const int32_t x) {
  static const int kMaxBits = 17;
  return !is_intn(x, kMaxBits);

void MacroAssembler::SafeMove(Register dst, Smi* src) {
  ASSERT(!dst.is(kScratchRegister));
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(dst, kScratchRegister);
      ASSERT(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      movp(dst, Immediate(value ^ jit_cookie()));
      xorp(dst, Immediate(jit_cookie()));

void MacroAssembler::SafePush(Smi* src) {
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Push(Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(Operand(rsp, 0), kScratchRegister);
      ASSERT(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      Push(Immediate(value ^ jit_cookie()));
      xorp(Operand(rsp, 0), Immediate(jit_cookie()));
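
// Both SafeMove and SafePush split an attacker-visible constant into
// (value ^ jit_cookie()) plus a runtime xor with jit_cookie(), so the raw
// value never appears verbatim in the instruction stream. Example: for
// value 0x1234 and cookie 0xABCD the emitted immediates are 0xB9F9 and
// 0xABCD, and only executing 0xB9F9 ^ 0xABCD == 0x1234 recreates the value.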

Register MacroAssembler::GetSmiConstant(Smi* source) {
  int value = source->value();
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
    return kSmiConstantRegister;
  LoadSmiConstant(kScratchRegister, source);
  return kScratchRegister;

void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
  if (emit_debug_code()) {
    Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
         Assembler::RelocInfoNone());
    cmpq(dst, kSmiConstantRegister);
    Assert(equal, kUninitializedKSmiConstantRegister);

  int value = source->value();

  bool negative = value < 0;
  unsigned int uvalue = negative ? -value : value;

           Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
      leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
      leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
      movp(dst, kSmiConstantRegister);
  Move(dst, source, Assembler::RelocInfoNone());

void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  shlp(dst, Immediate(kSmiShift));
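
// Example: with 32-bit smi payloads (kSmiShift == 32) the value lives in
// the upper half of the word, so Integer32ToSmi turns 5 into
// 0x0000000500000000; with 31-bit payloads (kSmiShift == 1) it is the
// usual value << 1 tagging, so 5 becomes 0xA.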

void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    testb(dst, Immediate(0x01));
    j(zero, &ok, Label::kNear);
    Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);

  if (SmiValuesAre32Bits()) {
    ASSERT(kSmiShift % kBitsPerByte == 0);
    movl(Operand(dst, kSmiShift / kBitsPerByte), src);
    ASSERT(SmiValuesAre31Bits());
    Integer32ToSmi(kScratchRegister, src);
    movp(dst, kScratchRegister);

void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
    addl(dst, Immediate(constant));
    leal(dst, Operand(src, constant));
  shlp(dst, Immediate(kSmiShift));

void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (SmiValuesAre32Bits()) {
    shrp(dst, Immediate(kSmiShift));
    ASSERT(SmiValuesAre31Bits());
    sarl(dst, Immediate(kSmiShift));

void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movl(dst, Operand(src, kSmiShift / kBitsPerByte));
    ASSERT(SmiValuesAre31Bits());
    sarl(dst, Immediate(kSmiShift));

void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  sarp(dst, Immediate(kSmiShift));
  if (kPointerSize == kInt32Size) {
    // Sign extend to 64-bit.

void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
    ASSERT(SmiValuesAre31Bits());
    SmiToInteger64(dst, dst);

void MacroAssembler::SmiTest(Register src) {

void MacroAssembler::SmiCompare(Register smi1, Register smi2) {

void MacroAssembler::SmiCompare(Register dst, Smi* src) {

void MacroAssembler::Cmp(Register dst, Smi* src) {
  ASSERT(!dst.is(kScratchRegister));
  if (src->value() == 0) {
    Register constant_reg = GetSmiConstant(src);
    cmpp(dst, constant_reg);

void MacroAssembler::SmiCompare(Register dst, const Operand& src) {

void MacroAssembler::SmiCompare(const Operand& dst, Register src) {

void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
    ASSERT(SmiValuesAre31Bits());
    cmpl(dst, Immediate(src));

void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  ASSERT(!dst.AddressUsesRegister(smi_reg));

void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
    ASSERT(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, dst);
    cmpl(kScratchRegister, src);

void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
    SmiToInteger64(dst, src);
  if (power < kSmiShift) {
    sarp(dst, Immediate(kSmiShift - power));
  } else if (power > kSmiShift) {
    shlp(dst, Immediate(power - kSmiShift));

void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
  ASSERT((0 <= power) && (power < 32));
    shrp(dst, Immediate(power + kSmiShift));
    UNIMPLEMENTED();  // Not used.

void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 Label::Distance near_jump) {
  if (dst.is(src1) || dst.is(src2)) {
    ASSERT(!src1.is(kScratchRegister));
    ASSERT(!src2.is(kScratchRegister));
    movp(kScratchRegister, src1);
    orp(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
    movp(dst, kScratchRegister);
  JumpIfNotSmi(dst, on_not_smis, near_jump);

Condition MacroAssembler::CheckSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));

Condition MacroAssembler::CheckSmi(const Operand& src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));

Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  // Test that both bits of the mask 0x8000000000000001 are zero.
  movp(kScratchRegister, src);
  rolp(kScratchRegister, Immediate(1));
  testb(kScratchRegister, Immediate(3));
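  // Example: for 0x8000000000000001 (sign and tag bit set) the rotation
  // yields 0x3, so the testb above is non-zero; only a non-negative smi
  // leaves both of the low bits clear.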

Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
  if (SmiValuesAre32Bits()) {
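    // With 32-bit payloads the low 32 bits of a smi are all zero while heap
    // object pointers end in binary 01, so the low two bits of first+second
    // are 00 only if both values are smis; the leal/testb pair below checks
    // this without a branch.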
    leal(kScratchRegister, Operand(first, second, times_1, 0));
    testb(kScratchRegister, Immediate(0x03));
    ASSERT(SmiValuesAre31Bits());
    movl(kScratchRegister, first);
    orl(kScratchRegister, second);
    testb(kScratchRegister, Immediate(kSmiTagMask));

Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
  if (first.is(second)) {
    return CheckNonNegativeSmi(first);
  movp(kScratchRegister, first);
  orp(kScratchRegister, second);
  rolp(kScratchRegister, Immediate(1));
  testl(kScratchRegister, Immediate(3));

Condition MacroAssembler::CheckEitherSmi(Register first,
  if (first.is(second)) {
    return CheckSmi(first);
  if (scratch.is(second)) {
    andl(scratch, first);
    if (!scratch.is(first)) {
      movl(scratch, first);
    andl(scratch, second);
  testb(scratch, Immediate(kSmiTagMask));

Condition MacroAssembler::CheckIsMinSmi(Register src) {
  ASSERT(!src.is(kScratchRegister));
  // If we overflow by subtracting one, it's the minimal smi value.
  cmpp(src, kSmiConstantRegister);

Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // A 32-bit integer value can always be converted to a smi.
    ASSERT(SmiValuesAre31Bits());
    cmpl(src, Immediate(0xc0000000));

Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // An unsigned 32-bit integer value is valid as long as the high bit
    // is not set.
    ASSERT(SmiValuesAre31Bits());
    testl(src, Immediate(0xc0000000));

void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
    andl(dst, Immediate(kSmiTagMask));
    movl(dst, Immediate(kSmiTagMask));

void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
  if (!(src.AddressUsesRegister(dst))) {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, Immediate(kSmiTagMask));

void MacroAssembler::JumpIfNotValidSmiValue(Register src,
                                            Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);

void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
                                                Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);

void MacroAssembler::JumpIfSmi(Register src,
                               Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(smi, on_smi, near_jump);

void MacroAssembler::JumpIfNotSmi(Register src,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);

void MacroAssembler::JumpUnlessNonNegativeSmi(
    Register src, Label* on_not_smi_or_negative,
    Label::Distance near_jump) {
  Condition non_negative_smi = CheckNonNegativeSmi(src);
  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);

void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             Label::Distance near_jump) {
  SmiCompare(src, constant);
  j(equal, on_equals, near_jump);

void MacroAssembler::JumpIfNotBothSmi(Register src1,
                                      Label* on_not_both_smi,
                                      Label::Distance near_jump) {
  Condition both_smi = CheckBothSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);

void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
                                                  Label* on_not_both_smi,
                                                  Label::Distance near_jump) {
  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);

void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    switch (constant->value()) {
        addp(dst, kSmiConstantRegister);
        leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        Register constant_reg = GetSmiConstant(constant);
        addp(dst, constant_reg);
    switch (constant->value()) {
        leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
        leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        LoadSmiConstant(dst, constant);

void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
  if (constant->value() != 0) {
    if (SmiValuesAre32Bits()) {
      addl(Operand(dst, kSmiShift / kBitsPerByte),
           Immediate(constant->value()));
      ASSERT(SmiValuesAre31Bits());
      addp(dst, Immediate(constant));

void MacroAssembler::SmiAddConstant(Register dst,
                                    SmiOperationExecutionMode mode,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    addp(dst, kScratchRegister);
    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
      j(no_overflow, bailout_label, near_jump);
      ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
      subp(dst, kScratchRegister);
    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
        j(no_overflow, &done, Label::kNear);
        subp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        // Bailout on overflow without preserving src.
        j(overflow, bailout_label, near_jump);
      CHECK(mode.IsEmpty());
    ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
    ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
    LoadSmiConstant(dst, constant);
    j(overflow, bailout_label, near_jump);

void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    subp(dst, constant_reg);
    if (constant->value() == Smi::kMinValue) {
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result; it only
      // differs on the overflow bit, which we don't check here.
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));

void MacroAssembler::SmiSubConstant(Register dst,
                                    SmiOperationExecutionMode mode,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    subp(dst, kScratchRegister);
    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
      j(no_overflow, bailout_label, near_jump);
      ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
      addp(dst, kScratchRegister);
    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
        j(no_overflow, &done, Label::kNear);
        addp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        // Bailout on overflow without preserving src.
        j(overflow, bailout_label, near_jump);
      CHECK(mode.IsEmpty());
    ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
    ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
    if (constant->value() == Smi::kMinValue) {
      ASSERT(!dst.is(kScratchRegister));
      LoadSmiConstant(kScratchRegister, constant);
      subp(dst, kScratchRegister);
      j(overflow, bailout_label, near_jump);
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
      j(overflow, bailout_label, near_jump);

void MacroAssembler::SmiNeg(Register dst,
                            Label* on_smi_result,
                            Label::Distance near_jump) {
    ASSERT(!dst.is(kScratchRegister));
    movp(kScratchRegister, src);
    negp(dst);  // Low 32 bits are retained as zero by negation.
    // Test if result is zero or Smi::kMinValue.
    cmpp(dst, kScratchRegister);
    j(not_equal, on_smi_result, near_jump);
    movp(src, kScratchRegister);
    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
    j(not_equal, on_smi_result, near_jump);

static void SmiAddHelper(MacroAssembler* masm,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
    masm->addp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    masm->subp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->movp(dst, src1);
    masm->addp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);
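
// Note the recovery pattern used when dst aliases src1 in this helper and
// in SmiSubHelper below: perform the operation, and on overflow apply the
// inverse operation so src1 still holds its original value when the jump
// to on_not_smi_result is taken.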

void MacroAssembler::SmiAdd(Register dst,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);

void MacroAssembler::SmiAdd(Register dst,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!src2.AddressUsesRegister(dst));
  SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);

void MacroAssembler::SmiAdd(Register dst,
  // No overflow checking. Use only when it's known that
  // overflowing is impossible.
  if (!dst.is(src1)) {
    if (emit_debug_code()) {
      movp(kScratchRegister, src1);
      addp(kScratchRegister, src2);
      Check(no_overflow, kSmiAdditionOverflow);
    leap(dst, Operand(src1, src2, times_1, 0));
    Assert(no_overflow, kSmiAdditionOverflow);

static void SmiSubHelper(MacroAssembler* masm,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
    masm->subp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    masm->addp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->movp(dst, src1);
    masm->subp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);

void MacroAssembler::SmiSub(Register dst,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);

void MacroAssembler::SmiSub(Register dst,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!src2.AddressUsesRegister(dst));
  SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);

static void SmiSubNoOverflowHelper(MacroAssembler* masm,
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  if (!dst.is(src1)) {
    masm->movp(dst, src1);
  masm->subp(dst, src2);
  masm->Assert(no_overflow, kSmiSubtractionOverflow);

void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
  ASSERT(!dst.is(src2));
  SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);

void MacroAssembler::SmiSub(Register dst,
                            const Operand& src2) {
  SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);

void MacroAssembler::SmiMul(Register dst,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!dst.is(src2));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));

    Label failure, zero_correct_result;
    movp(kScratchRegister, src1);  // Create backup for later testing.
    SmiToInteger64(dst, src1);
    j(overflow, &failure, Label::kNear);

    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    j(not_zero, &correct_result, Label::kNear);

    movp(dst, kScratchRegister);
    // Result was positive zero.
    j(positive, &zero_correct_result, Label::kNear);

    bind(&failure);  // Reused failure exit, restores src1.
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);

    bind(&zero_correct_result);
    bind(&correct_result);
    SmiToInteger64(dst, src1);
    j(overflow, on_not_smi_result, near_jump);
    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    j(not_zero, &correct_result, Label::kNear);
    // One of src1 and src2 is zero; check whether the other is negative.
    movp(kScratchRegister, src1);
    xorp(kScratchRegister, src2);
    j(negative, on_not_smi_result, near_jump);
    bind(&correct_result);

void MacroAssembler::SmiDiv(Register dst,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  j(zero, on_not_smi_result, near_jump);

  movp(kScratchRegister, src1);
  SmiToInteger32(rax, src1);
  // We need to rule out dividing Smi::kMinValue by -1, since that would
  // overflow in idiv and raise an exception.
  // We combine this with negative zero test (negative zero only happens
  // when dividing zero by a negative number).
  // We overshoot a little and go to slow case if we divide min-value
  // by any negative value, not just -1.
  testl(rax, Immediate(0x7fffffff));
  j(not_zero, &safe_div, Label::kNear);
    j(positive, &safe_div, Label::kNear);
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
    j(negative, on_not_smi_result, near_jump);

  SmiToInteger32(src2, src2);
  // Sign extend src1 into edx:eax.
  Integer32ToSmi(src2, src2);
  // Check that the remainder is zero.
    j(zero, &smi_result, Label::kNear);
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
    j(not_zero, on_not_smi_result, near_jump);

  if (!dst.is(src1) && src1.is(rax)) {
    movp(src1, kScratchRegister);
  Integer32ToSmi(dst, rax);

void MacroAssembler::SmiMod(Register dst,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));
  ASSERT(!src1.is(src2));

  j(zero, on_not_smi_result, near_jump);

  movp(kScratchRegister, src1);
  SmiToInteger32(rax, src1);
  SmiToInteger32(src2, src2);

  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
  cmpl(rax, Immediate(Smi::kMinValue));
  j(not_equal, &safe_div, Label::kNear);
  cmpl(src2, Immediate(-1));
  j(not_equal, &safe_div, Label::kNear);
  // Retag inputs and go slow case.
  Integer32ToSmi(src2, src2);
    movp(src1, kScratchRegister);
  jmp(on_not_smi_result, near_jump);

  // Sign extend eax into edx:eax.
  // Restore smi tags on inputs.
  Integer32ToSmi(src2, src2);
    movp(src1, kScratchRegister);
  // Check for a negative zero result. If the result is zero, and the
  // dividend is negative, go slow to return a floating point negative zero.
  j(not_zero, &smi_result, Label::kNear);
  j(negative, on_not_smi_result, near_jump);
  Integer32ToSmi(dst, rdx);

void MacroAssembler::SmiNot(Register dst, Register src) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src.is(kScratchRegister));
  if (SmiValuesAre32Bits()) {
    // Set tag and padding bits before negating, so that they are zero
    // afterwards.
    movl(kScratchRegister, Immediate(~0));
    ASSERT(SmiValuesAre31Bits());
    movl(kScratchRegister, Immediate(1));
    xorp(dst, kScratchRegister);
    leap(dst, Operand(src, kScratchRegister, times_1, 0));

void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
  ASSERT(!dst.is(src2));
  if (!dst.is(src1)) {

void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    andp(dst, constant_reg);
    LoadSmiConstant(dst, constant);

void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    ASSERT(!src1.is(src2));

void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    orp(dst, constant_reg);
    LoadSmiConstant(dst, constant);

void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    ASSERT(!src1.is(src2));

void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    xorp(dst, constant_reg);
    LoadSmiConstant(dst, constant);

void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
  ASSERT(is_uint5(shift_value));
  if (shift_value > 0) {
      sarp(dst, Immediate(shift_value + kSmiShift));
      shlp(dst, Immediate(kSmiShift));
      UNIMPLEMENTED();  // Not used.

void MacroAssembler::SmiShiftLeftConstant(Register dst,
  if (shift_value > 0) {
      shlp(dst, Immediate(shift_value));

void MacroAssembler::SmiShiftLogicalRightConstant(
    Register dst, Register src, int shift_value,
    Label* on_not_smi_result, Label::Distance near_jump) {
  // Logical right shift interprets its result as an *unsigned* number.
    UNIMPLEMENTED();  // Not used.
    if (shift_value == 0) {
      j(negative, on_not_smi_result, near_jump);
    shrq(dst, Immediate(shift_value + kSmiShift));
    shlq(dst, Immediate(kSmiShift));

void MacroAssembler::SmiShiftLeft(Register dst,
  ASSERT(!dst.is(rcx));
  // Untag shift amount.
  if (!dst.is(src1)) {
  SmiToInteger32(rcx, src2);
  // The shift amount is specified by the lower 5 bits, not six as for the
  // shl opcode.
  andq(rcx, Immediate(0x1f));

void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(rcx));
  // dst and src1 can be the same, because the one case that bails out
  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
  if (src1.is(rcx) || src2.is(rcx)) {
    movq(kScratchRegister, rcx);
  if (!dst.is(src1)) {
  SmiToInteger32(rcx, src2);
  orl(rcx, Immediate(kSmiShift));
  shrq_cl(dst);  // Shift is (rcx & 0x1f) + 32.
  shlq(dst, Immediate(kSmiShift));
  if (src1.is(rcx) || src2.is(rcx)) {
    Label positive_result;
    j(positive, &positive_result, Label::kNear);
    movq(src1, kScratchRegister);
    movq(src2, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
    bind(&positive_result);
    // src2 was zero and src1 negative.
    j(negative, on_not_smi_result, near_jump);

void MacroAssembler::SmiShiftArithmeticRight(Register dst,
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(rcx));
    movp(kScratchRegister, src1);
  } else if (src2.is(rcx)) {
    movp(kScratchRegister, src2);
  if (!dst.is(src1)) {
  SmiToInteger32(rcx, src2);
  orl(rcx, Immediate(kSmiShift));
  sarp_cl(dst);  // Shift is (original rcx & 0x1f) + 32.
  shlp(dst, Immediate(kSmiShift));
    movp(src1, kScratchRegister);
  } else if (src2.is(rcx)) {
    movp(src2, kScratchRegister);

void MacroAssembler::SelectNonSmi(Register dst,
                                  Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(src1));
  ASSERT(!dst.is(src2));
  // Both operands must not be smis.
  Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
  Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);

  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(0, Smi::FromInt(0));
  movl(kScratchRegister, Immediate(kSmiTagMask));
  andp(kScratchRegister, src1);
  testl(kScratchRegister, src2);
  // If non-zero, both operands are heap objects, i.e. neither is a smi.
  j(not_zero, on_not_smis, near_jump);

  // Exactly one operand is a smi.
  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
  subp(kScratchRegister, Immediate(1));
  // If src1 is a smi, then the scratch register is all 1s, else it is all 0s.
  andp(dst, kScratchRegister);
  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
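  // In short, with mask = (src1 is a smi) ? ~0 : 0 the three instructions
  // compute dst = src1 ^ ((src1 ^ src2) & mask), a branchless select that
  // yields src2 when src1 is the smi and src1 otherwise.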

SmiIndex MacroAssembler::SmiToIndex(Register dst,
  if (SmiValuesAre32Bits()) {
    ASSERT(is_uint6(shift));
    // There is a possible optimization if shift is in the range 60-63, but
    // that will (and must) never happen.
    if (shift < kSmiShift) {
      sarp(dst, Immediate(kSmiShift - shift));
      shlp(dst, Immediate(shift - kSmiShift));
    return SmiIndex(dst, times_1);
    ASSERT(SmiValuesAre31Bits());
    ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
    // We have to sign extend the index register to 64-bit as the SMI might
    // be negative.
    if (shift == times_1) {
      sarq(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));

SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
  if (SmiValuesAre32Bits()) {
    // Register src holds a positive smi.
    ASSERT(is_uint6(shift));
    if (shift < kSmiShift) {
      sarp(dst, Immediate(kSmiShift - shift));
      shlp(dst, Immediate(shift - kSmiShift));
    return SmiIndex(dst, times_1);
    ASSERT(SmiValuesAre31Bits());
    ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
    if (shift == times_1) {
      sarq(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));

void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    ASSERT_EQ(0, kSmiShift % kBitsPerByte);
    addl(dst, Operand(src, kSmiShift / kBitsPerByte));
    ASSERT(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, src);
    addl(dst, kScratchRegister);

void MacroAssembler::Push(Smi* source) {
  intptr_t smi = reinterpret_cast<intptr_t>(source);
  if (is_int32(smi)) {
    Push(Immediate(static_cast<int32_t>(smi)));
    Register constant = GetSmiConstant(source);

void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
  ASSERT(!src.is(scratch));
  shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
  shlp(src, Immediate(kSmiShift));
  shlp(scratch, Immediate(kSmiShift));

void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
  ASSERT(!dst.is(scratch));
  shrp(scratch, Immediate(kSmiShift));
  shrp(dst, Immediate(kSmiShift));
  shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
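
// Example of the encoding used by this pair of functions: the 64-bit word
// 0x1122334455667788 is pushed as two smis carrying the high half
// 0x11223344 and the low half 0x55667788; the pop sequence shifts the
// halves back down and recombines them into the original word.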
2374 void MacroAssembler::Test(const Operand& src, Smi* source) {
2375 if (SmiValuesAre32Bits()) {
2376 testl(Operand(src, kIntSize), Immediate(source->value()));
2377 } else {
2378 ASSERT(SmiValuesAre31Bits());
2379 testl(src, Immediate(source));
2380 }
2381 }
2384 // ----------------------------------------------------------------------------
2387 void MacroAssembler::LookupNumberStringCache(Register object,
2388 Register result,
2389 Register scratch1,
2390 Register scratch2,
2391 Label* not_found) {
2392 // Use of registers. Register result is used as a temporary.
2393 Register number_string_cache = result;
2394 Register mask = scratch1;
2395 Register scratch = scratch2;
2397 // Load the number string cache.
2398 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2400 // Make the hash mask from the length of the number string cache. It
2401 // contains two elements (number and string) for each cache entry.
2402 SmiToInteger32(
2403 mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2404 shrl(mask, Immediate(1));
2405 subp(mask, Immediate(1)); // Make mask.
2407 // Calculate the entry in the number string cache. The hash value in the
2408 // number string cache for smis is just the smi value, and the hash for
2409 // doubles is the xor of the upper and lower words. See
2410 // Heap::GetNumberStringCache.
2411 Label is_smi;
2412 Label load_result_from_cache;
2413 JumpIfSmi(object, &is_smi);
2414 CheckMap(object,
2415 isolate()->factory()->heap_number_map(),
2416 not_found,
2417 DONT_DO_SMI_CHECK);
2419 STATIC_ASSERT(8 == kDoubleSize);
2420 movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2421 xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2422 andp(scratch, mask);
2423 // Each entry in string cache consists of two pointer sized fields,
2424 // but times_twice_pointer_size (multiplication by 16) scale factor
2425 // is not supported by addrmode on x64 platform.
2426 // So we have to premultiply entry index before lookup.
2427 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2429 Register index = scratch;
2430 Register probe = mask;
2431 movp(probe,
2432 FieldOperand(number_string_cache,
2433 index,
2434 times_1,
2435 FixedArray::kHeaderSize));
2436 JumpIfSmi(probe, not_found);
2437 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2438 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
2439 j(parity_even, not_found); // Bail out if NaN is involved.
2440 j(not_equal, not_found); // The cache did not contain this value.
2441 jmp(&load_result_from_cache);
2443 bind(&is_smi);
2444 SmiToInteger32(scratch, object);
2445 andp(scratch, mask);
2446 // Each entry in string cache consists of two pointer sized fields,
2447 // but times_twice_pointer_size (multiplication by 16) scale factor
2448 // is not supported by addrmode on x64 platform.
2449 // So we have to premultiply entry index before lookup.
2450 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2452 // Check if the entry is the smi we are looking for.
2453 cmpp(object,
2454 FieldOperand(number_string_cache,
2455 index,
2456 times_1,
2457 FixedArray::kHeaderSize));
2458 j(not_equal, not_found);
2460 // Get the result from the cache.
2461 bind(&load_result_from_cache);
2462 movp(result,
2463 FieldOperand(number_string_cache,
2464 index,
2465 times_1,
2466 FixedArray::kHeaderSize + kPointerSize));
2467 IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
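// Cache layout sketch (ours, mirroring Heap::GetNumberStringCache): the
// cache is a FixedArray of (number, string) pairs, so for hash h and mask m
// the entry begins at element 2 * (h & m):
//
//   index  = (h & m) << (kPointerSizeLog2 + 1);  // premultiplied, as above
//   number = cache[index];                       // compared against the key
//   string = cache[index + 1];                   // the cached result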
2471 void MacroAssembler::absps(XMMRegister dst) {
2472 static const struct V8_ALIGNED(16) {
2473 uint32_t a;
2474 uint32_t b;
2475 uint32_t c;
2476 uint32_t d;
2477 } float_absolute_constant =
2478 { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
2479 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_absolute_constant));
2480 andps(dst, Operand(kScratchRegister, 0));
2481 }
2484 void MacroAssembler::abspd(XMMRegister dst) {
2485 static const struct V8_ALIGNED(16) {
2486 uint64_t a;
2487 uint64_t b;
2488 } double_absolute_constant =
2489 { V8_UINT64_C(0x7FFFFFFFFFFFFFFF), V8_UINT64_C(0x7FFFFFFFFFFFFFFF) };
2490 Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_absolute_constant));
2491 andpd(dst, Operand(kScratchRegister, 0));
2492 }
2495 void MacroAssembler::negateps(XMMRegister dst) {
2496 static const struct V8_ALIGNED(16) {
2497 uint32_t a;
2498 uint32_t b;
2499 uint32_t c;
2500 uint32_t d;
2501 } float_negate_constant =
2502 { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
2503 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_negate_constant));
2504 xorps(dst, Operand(kScratchRegister, 0));
2505 }
2508 void MacroAssembler::negatepd(XMMRegister dst) {
2509 static const struct V8_ALIGNED(16) {
2510 uint64_t a;
2511 uint64_t b;
2512 } double_negate_constant =
2513 { V8_UINT64_C(0x8000000000000000), V8_UINT64_C(0x8000000000000000) };
2514 Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_negate_constant));
2515 xorpd(dst, Operand(kScratchRegister, 0));
2516 }
2519 void MacroAssembler::notps(XMMRegister dst) {
2520 static const struct V8_ALIGNED(16) {
2521 uint32_t a;
2522 uint32_t b;
2523 uint32_t c;
2524 uint32_t d;
2525 } float_not_constant =
2526 { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
2527 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_not_constant));
2528 xorps(dst, Operand(kScratchRegister, 0));
2529 }
2532 void MacroAssembler::pnegd(XMMRegister dst) {
2533 static const struct V8_ALIGNED(16) {
2534 uint32_t a;
2535 uint32_t b;
2536 uint32_t c;
2537 uint32_t d;
2538 } int32_one_constant = { 0x1, 0x1, 0x1, 0x1 };
2539 notps(dst);
2540 Set(kScratchRegister, reinterpret_cast<intptr_t>(&int32_one_constant));
2541 paddd(dst, Operand(kScratchRegister, 0));
2542 }
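// The constants above are the standard IEEE-754 bit masks; a scalar sketch:
//
//   fabs(float)  : bits & 0x7FFFFFFF           (clear sign bit - absps)
//   fabs(double) : bits & 0x7FFFFFFFFFFFFFFF   (abspd)
//   -x           : bits ^ 0x80000000 / 0x8000000000000000 (negateps/negatepd)
//   ~x           : bits ^ 0xFFFFFFFF           (notps)
//   -n (integer) : ~n + 1 per 32-bit lane      (pnegd: notps, then paddd 1)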
2546 void MacroAssembler::JumpIfNotString(Register object,
2547 Register object_map,
2548 Label* not_string,
2549 Label::Distance near_jump) {
2550 Condition is_smi = CheckSmi(object);
2551 j(is_smi, not_string, near_jump);
2552 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2553 j(above_equal, not_string, near_jump);
2557 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
2558 Register first_object,
2559 Register second_object,
2560 Register scratch1,
2561 Register scratch2,
2562 Label* on_fail,
2563 Label::Distance near_jump) {
2564 // Check that both objects are not smis.
2565 Condition either_smi = CheckEitherSmi(first_object, second_object);
2566 j(either_smi, on_fail, near_jump);
2568 // Load instance type for both strings.
2569 movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2570 movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2571 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2572 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2574 // Check that both are flat ASCII strings.
2575 ASSERT(kNotStringTag != 0);
2576 const int kFlatAsciiStringMask =
2577 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2578 const int kFlatAsciiStringTag =
2579 kStringTag | kOneByteStringTag | kSeqStringTag;
2581 andl(scratch1, Immediate(kFlatAsciiStringMask));
2582 andl(scratch2, Immediate(kFlatAsciiStringMask));
2583 // Interleave the bits to check both scratch1 and scratch2 in one test.
2584 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2585 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2586 cmpl(scratch1,
2587 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2588 j(not_equal, on_fail, near_jump);
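// Sketch of the interleaved test (ours): with mask m and tag t, after both
// instance types are masked, the single comparison
//
//   (type1 & m) + ((type2 & m) << 3) == t + (t << 3)
//
// holds iff both types equal t; the << 3 keeps the two fields disjoint, as
// the ASSERT on the mask above verifies.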
2592 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
2593 Register instance_type,
2594 Register scratch,
2595 Label* failure,
2596 Label::Distance near_jump) {
2597 if (!scratch.is(instance_type)) {
2598 movl(scratch, instance_type);
2599 }
2601 const int kFlatAsciiStringMask =
2602 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2604 andl(scratch, Immediate(kFlatAsciiStringMask));
2605 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2606 j(not_equal, failure, near_jump);
2610 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2611 Register first_object_instance_type,
2612 Register second_object_instance_type,
2613 Register scratch1,
2614 Register scratch2,
2615 Label* on_fail,
2616 Label::Distance near_jump) {
2617 // Load instance type for both strings.
2618 movp(scratch1, first_object_instance_type);
2619 movp(scratch2, second_object_instance_type);
2621 // Check that both are flat ASCII strings.
2622 ASSERT(kNotStringTag != 0);
2623 const int kFlatAsciiStringMask =
2624 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2625 const int kFlatAsciiStringTag =
2626 kStringTag | kOneByteStringTag | kSeqStringTag;
2628 andl(scratch1, Immediate(kFlatAsciiStringMask));
2629 andl(scratch2, Immediate(kFlatAsciiStringMask));
2630 // Interleave the bits to check both scratch1 and scratch2 in one test.
2631 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2632 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2633 cmpl(scratch1,
2634 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2635 j(not_equal, on_fail, near_jump);
2639 template <typename T>
2640 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2641 T operand_or_register,
2642 Label* not_unique_name,
2643 Label::Distance distance) {
2644 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2645 Label succeed;
2646 masm->testb(operand_or_register,
2647 Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2648 masm->j(zero, &succeed, Label::kNear);
2649 masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2650 masm->j(not_equal, not_unique_name, distance);
2652 masm->bind(&succeed);
2656 void MacroAssembler::JumpIfNotUniqueName(Operand operand,
2657 Label* not_unique_name,
2658 Label::Distance distance) {
2659 JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2663 void MacroAssembler::JumpIfNotUniqueName(Register reg,
2664 Label* not_unique_name,
2665 Label::Distance distance) {
2666 JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2670 void MacroAssembler::Move(Register dst, Register src) {
2671 if (!dst.is(src)) {
2672 movp(dst, src);
2673 }
2674 }
2677 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2678 AllowDeferredHandleDereference smi_check;
2679 if (source->IsSmi()) {
2680 Move(dst, Smi::cast(*source));
2681 } else {
2682 MoveHeapObject(dst, source);
2683 }
2684 }
2687 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2688 AllowDeferredHandleDereference smi_check;
2689 if (source->IsSmi()) {
2690 Move(dst, Smi::cast(*source));
2691 } else {
2692 MoveHeapObject(kScratchRegister, source);
2693 movp(dst, kScratchRegister);
2694 }
2695 }
2698 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2699 AllowDeferredHandleDereference smi_check;
2700 if (source->IsSmi()) {
2701 Cmp(dst, Smi::cast(*source));
2702 } else {
2703 MoveHeapObject(kScratchRegister, source);
2704 cmpp(dst, kScratchRegister);
2705 }
2706 }
2709 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2710 AllowDeferredHandleDereference smi_check;
2711 if (source->IsSmi()) {
2712 Cmp(dst, Smi::cast(*source));
2713 } else {
2714 MoveHeapObject(kScratchRegister, source);
2715 cmpp(dst, kScratchRegister);
2716 }
2717 }
2720 void MacroAssembler::Push(Handle<Object> source) {
2721 AllowDeferredHandleDereference smi_check;
2722 if (source->IsSmi()) {
2723 Push(Smi::cast(*source));
2724 } else {
2725 MoveHeapObject(kScratchRegister, source);
2726 Push(kScratchRegister);
2727 }
2728 }
2731 void MacroAssembler::MoveHeapObject(Register result,
2732 Handle<Object> object) {
2733 AllowDeferredHandleDereference using_raw_address;
2734 ASSERT(object->IsHeapObject());
2735 if (isolate()->heap()->InNewSpace(*object)) {
2736 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2737 Move(result, cell, RelocInfo::CELL);
2738 movp(result, Operand(result, 0));
2739 } else {
2740 Move(result, object, RelocInfo::EMBEDDED_OBJECT);
2741 }
2742 }
2745 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
2746 if (dst.is(rax)) {
2747 AllowDeferredHandleDereference embedding_raw_address;
2748 load_rax(cell.location(), RelocInfo::CELL);
2749 } else {
2750 Move(dst, cell, RelocInfo::CELL);
2751 movp(dst, Operand(dst, 0));
2752 }
2753 }
2756 void MacroAssembler::Drop(int stack_elements) {
2757 if (stack_elements > 0) {
2758 addp(rsp, Immediate(stack_elements * kPointerSize));
2763 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
2764 Register scratch) {
2765 ASSERT(stack_elements > 0);
2766 if (kPointerSize == kInt64Size && stack_elements == 1) {
2767 popq(MemOperand(rsp, 0));
2768 return;
2769 }
2771 PopReturnAddressTo(scratch);
2772 Drop(stack_elements);
2773 PushReturnAddressFrom(scratch);
2777 void MacroAssembler::Push(Register src) {
2778 if (kPointerSize == kInt64Size) {
2779 pushq(src);
2780 } else {
2781 // x32 uses 64-bit push for rbp in the prologue.
2782 ASSERT(src.code() != rbp.code());
2783 leal(rsp, Operand(rsp, -4));
2784 movp(Operand(rsp, 0), src);
2785 }
2786 }
2789 void MacroAssembler::Push(const Operand& src) {
2790 if (kPointerSize == kInt64Size) {
2791 pushq(src);
2792 } else {
2793 movp(kScratchRegister, src);
2794 leal(rsp, Operand(rsp, -4));
2795 movp(Operand(rsp, 0), kScratchRegister);
2796 }
2797 }
2800 void MacroAssembler::PushQuad(const Operand& src) {
2801 if (kPointerSize == kInt64Size) {
2802 pushq(src);
2803 } else {
2804 movp(kScratchRegister, src);
2805 pushq(kScratchRegister);
2806 }
2807 }
2810 void MacroAssembler::Push(Immediate value) {
2811 if (kPointerSize == kInt64Size) {
2812 pushq(value);
2813 } else {
2814 leal(rsp, Operand(rsp, -4));
2815 movp(Operand(rsp, 0), value);
2816 }
2817 }
2820 void MacroAssembler::PushImm32(int32_t imm32) {
2821 if (kPointerSize == kInt64Size) {
2822 pushq_imm32(imm32);
2823 } else {
2824 leal(rsp, Operand(rsp, -4));
2825 movp(Operand(rsp, 0), Immediate(imm32));
2826 }
2827 }
2830 void MacroAssembler::Pop(Register dst) {
2831 if (kPointerSize == kInt64Size) {
2832 popq(dst);
2833 } else {
2834 // x32 uses 64-bit pop for rbp in the epilogue.
2835 ASSERT(dst.code() != rbp.code());
2836 movp(dst, Operand(rsp, 0));
2837 leal(rsp, Operand(rsp, 4));
2838 }
2839 }
2842 void MacroAssembler::Pop(const Operand& dst) {
2843 if (kPointerSize == kInt64Size) {
2844 popq(dst);
2845 } else {
2846 Register scratch = dst.AddressUsesRegister(kScratchRegister)
2847 ? kSmiConstantRegister : kScratchRegister;
2848 movp(scratch, Operand(rsp, 0));
2849 movp(dst, scratch);
2850 leal(rsp, Operand(rsp, 4));
2851 if (scratch.is(kSmiConstantRegister)) {
2852 // Restore kSmiConstantRegister.
2853 movp(kSmiConstantRegister,
2854 reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
2855 Assembler::RelocInfoNone());
2856 }
2857 }
2858 }
2861 void MacroAssembler::PopQuad(const Operand& dst) {
2862 if (kPointerSize == kInt64Size) {
2863 popq(dst);
2864 } else {
2865 popq(kScratchRegister);
2866 movp(dst, kScratchRegister);
2867 }
2868 }
2871 void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
2872 Register base,
2873 int offset) {
2874 ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
2875 offset <= SharedFunctionInfo::kSize &&
2876 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
2877 if (kPointerSize == kInt64Size) {
2878 movsxlq(dst, FieldOperand(base, offset));
2879 } else {
2880 movp(dst, FieldOperand(base, offset));
2881 SmiToInteger32(dst, dst);
2882 }
2883 }
2886 void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
2887 int offset,
2888 int bits) {
2889 ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
2890 offset <= SharedFunctionInfo::kSize &&
2891 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
2892 if (kPointerSize == kInt32Size) {
2893 // On x32, this field is represented by SMI.
2894 bits += kSmiShift;
2895 }
2896 int byte_offset = bits / kBitsPerByte;
2897 int bit_in_byte = bits & (kBitsPerByte - 1);
2898 testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
2899 }
2902 void MacroAssembler::Jump(ExternalReference ext) {
2903 LoadAddress(kScratchRegister, ext);
2904 jmp(kScratchRegister);
2908 void MacroAssembler::Jump(const Operand& op) {
2909 if (kPointerSize == kInt64Size) {
2910 jmp(op);
2911 } else {
2912 movp(kScratchRegister, op);
2913 jmp(kScratchRegister);
2914 }
2915 }
2918 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
2919 Move(kScratchRegister, destination, rmode);
2920 jmp(kScratchRegister);
2924 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
2925 // TODO(X64): Inline this
2926 jmp(code_object, rmode);
2930 int MacroAssembler::CallSize(ExternalReference ext) {
2931 // Opcode for call kScratchRegister is: Rex.B FF D2 (three bytes).
2932 return LoadAddressSize(ext) +
2933 Assembler::kCallScratchRegisterInstructionLength;
2937 void MacroAssembler::Call(ExternalReference ext) {
2939 int end_position = pc_offset() + CallSize(ext);
2941 LoadAddress(kScratchRegister, ext);
2942 call(kScratchRegister);
2944 CHECK_EQ(end_position, pc_offset());
2949 void MacroAssembler::Call(const Operand& op) {
2950 if (kPointerSize == kInt64Size) {
2951 call(op);
2952 } else {
2953 movp(kScratchRegister, op);
2954 call(kScratchRegister);
2955 }
2956 }
2959 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
2961 int end_position = pc_offset() + CallSize(destination);
2963 Move(kScratchRegister, destination, rmode);
2964 call(kScratchRegister);
2966 CHECK_EQ(pc_offset(), end_position);
2971 void MacroAssembler::Call(Handle<Code> code_object,
2972 RelocInfo::Mode rmode,
2973 TypeFeedbackId ast_id) {
2975 int end_position = pc_offset() + CallSize(code_object);
2977 ASSERT(RelocInfo::IsCodeTarget(rmode) ||
2978 rmode == RelocInfo::CODE_AGE_SEQUENCE);
2979 call(code_object, rmode, ast_id);
2981 CHECK_EQ(end_position, pc_offset());
2986 void MacroAssembler::Pushad() {
2991 // Not pushing rsp or rbp.
2996 // r10 is kScratchRegister.
2998 // r12 is kSmiConstantRegister.
2999 // r13 is kRootRegister.
3002 STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
3003 // Use lea for symmetry with Popad.
3005 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3006 leap(rsp, Operand(rsp, -sp_delta));
3010 void MacroAssembler::Popad() {
3011 // Popad must not change the flags, so use lea instead of addq.
3013 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3014 leap(rsp, Operand(rsp, sp_delta));
3029 void MacroAssembler::Dropad() {
3030 addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
3034 // Order general registers are pushed by Pushad:
3035 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
3037 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
3057 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
3058 const Immediate& imm) {
3059 movp(SafepointRegisterSlot(dst), imm);
3063 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
3064 movp(SafepointRegisterSlot(dst), src);
3068 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
3069 movp(dst, SafepointRegisterSlot(src));
3073 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
3074 return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
3078 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3079 int handler_index) {
3080 // Adjust this code if not the case.
3081 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3082 kFPOnStackSize);
3083 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3084 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3085 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3086 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3087 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3089 // We will build up the handler from the bottom by pushing on the stack.
3090 // First push the frame pointer and context.
3091 if (kind == StackHandler::JS_ENTRY) {
3092 // The frame pointer does not point to a JS frame so we save NULL for
3093 // rbp. We expect the code throwing an exception to check rbp before
3094 // dereferencing it to restore the context.
3095 pushq(Immediate(0)); // NULL frame pointer.
3096 Push(Smi::FromInt(0)); // No context.
3097 } else {
3098 pushq(rbp);
3099 Push(rsi);
3100 }
3102 // Push the state and the code object.
3103 unsigned state =
3104 StackHandler::IndexField::encode(handler_index) |
3105 StackHandler::KindField::encode(kind);
3106 Push(Immediate(state));
3107 Push(CodeObject());
3109 // Link the current handler as the next handler.
3110 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3111 Push(ExternalOperand(handler_address));
3112 // Set this new handler as the current one.
3113 movp(ExternalOperand(handler_address), rsp);
3117 void MacroAssembler::PopTryHandler() {
3118 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3119 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3120 Pop(ExternalOperand(handler_address));
3121 addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
3125 void MacroAssembler::JumpToHandlerEntry() {
3126 // Compute the handler entry address and jump to it. The handler table is
3127 // a fixed array of (smi-tagged) code offsets.
3128 // rax = exception, rdi = code object, rdx = state.
3129 movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
3130 shrp(rdx, Immediate(StackHandler::kKindWidth));
3131 movp(rdx,
3132 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
3133 SmiToInteger64(rdx, rdx);
3134 leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
3135 jmp(rdi);
3136 }
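// Index computation sketch (ours): the handler table is a FixedArray of
// smi-tagged code offsets, indexed by the handler-index bits of the state
// word:
//
//   index  = state >> StackHandler::kKindWidth;  // shrp above
//   offset = Smi::value(table[index]);           // SmiToInteger64 above
//   entry  = code + Code::kHeaderSize - kHeapObjectTag + offset;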
3139 void MacroAssembler::Throw(Register value) {
3140 // Adjust this code if not the case.
3141 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3142 kFPOnStackSize);
3143 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3144 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3145 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3146 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3147 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3149 // The exception is expected in rax.
3150 if (!value.is(rax)) {
3151 movp(rax, value);
3152 }
3153 // Drop the stack pointer to the top of the top handler.
3154 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3155 movp(rsp, ExternalOperand(handler_address));
3156 // Restore the next handler.
3157 Pop(ExternalOperand(handler_address));
3159 // Remove the code object and state, compute the handler address in rdi.
3160 Pop(rdi); // Code object.
3161 Pop(rdx); // Offset and state.
3163 // Restore the context and frame pointer.
3164 Pop(rsi); // Context.
3165 popq(rbp); // Frame pointer.
3167 // If the handler is a JS frame, restore the context to the frame.
3168 // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
3169 // rbp or rsi.
3170 Label skip;
3171 testp(rbp, rbp);
3172 j(zero, &skip, Label::kNear);
3173 movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
3174 bind(&skip);
3176 JumpToHandlerEntry();
3180 void MacroAssembler::ThrowUncatchable(Register value) {
3181 // Adjust this code if not the case.
3182 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3183 kFPOnStackSize);
3184 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3185 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3186 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3187 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3188 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3190 // The exception is expected in rax.
3191 if (!value.is(rax)) {
3192 movp(rax, value);
3193 }
3194 // Drop the stack pointer to the top of the top stack handler.
3195 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3196 Load(rsp, handler_address);
3198 // Unwind the handlers until the top ENTRY handler is found.
3199 Label fetch_next, check_kind;
3200 jmp(&check_kind, Label::kNear);
3201 bind(&fetch_next);
3202 movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
3204 bind(&check_kind);
3205 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3206 testl(Operand(rsp, StackHandlerConstants::kStateOffset),
3207 Immediate(StackHandler::KindField::kMask));
3208 j(not_zero, &fetch_next);
3210 // Set the top handler address to next handler past the top ENTRY handler.
3211 Pop(ExternalOperand(handler_address));
3213 // Remove the code object and state, compute the handler address in rdi.
3214 Pop(rdi); // Code object.
3215 Pop(rdx); // Offset and state.
3217 // Clear the context pointer and frame pointer (0 was saved in the handler).
3218 Pop(rsi);
3219 popq(rbp);
3221 JumpToHandlerEntry();
3225 void MacroAssembler::Ret() {
3226 ret(0);
3227 }
3230 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
3231 if (is_uint16(bytes_dropped)) {
3232 ret(bytes_dropped);
3233 } else {
3234 PopReturnAddressTo(scratch);
3235 addp(rsp, Immediate(bytes_dropped));
3236 PushReturnAddressFrom(scratch);
3237 ret(0);
3238 }
3239 }
3242 void MacroAssembler::FCmp() {
3243 fucomip();
3244 fstp(0);
3245 }
3248 void MacroAssembler::CmpObjectType(Register heap_object,
3249 InstanceType type,
3250 Register map) {
3251 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3252 CmpInstanceType(map, type);
3256 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3257 cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
3258 Immediate(static_cast<int8_t>(type)));
3262 void MacroAssembler::CheckFastElements(Register map,
3263 Label* fail,
3264 Label::Distance distance) {
3265 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3266 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3267 STATIC_ASSERT(FAST_ELEMENTS == 2);
3268 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3269 cmpb(FieldOperand(map, Map::kBitField2Offset),
3270 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3271 j(above, fail, distance);
3275 void MacroAssembler::CheckFastObjectElements(Register map,
3276 Label* fail,
3277 Label::Distance distance) {
3278 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3279 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3280 STATIC_ASSERT(FAST_ELEMENTS == 2);
3281 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3282 cmpb(FieldOperand(map, Map::kBitField2Offset),
3283 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3284 j(below_equal, fail, distance);
3285 cmpb(FieldOperand(map, Map::kBitField2Offset),
3286 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3287 j(above, fail, distance);
3291 void MacroAssembler::CheckFastSmiElements(Register map,
3292 Label* fail,
3293 Label::Distance distance) {
3294 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3295 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3296 cmpb(FieldOperand(map, Map::kBitField2Offset),
3297 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3298 j(above, fail, distance);
3302 void MacroAssembler::StoreNumberToDoubleElements(
3303 Register maybe_number,
3304 Register elements,
3305 Register index,
3306 XMMRegister xmm_scratch,
3307 Label* fail,
3308 int elements_offset) {
3309 Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
3311 JumpIfSmi(maybe_number, &smi_value, Label::kNear);
3313 CheckMap(maybe_number,
3314 isolate()->factory()->heap_number_map(),
3315 fail,
3316 DONT_DO_SMI_CHECK);
3318 // Double value, canonicalize NaN.
3319 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
3320 cmpl(FieldOperand(maybe_number, offset),
3321 Immediate(kNaNOrInfinityLowerBoundUpper32));
3322 j(greater_equal, &maybe_nan, Label::kNear);
3324 bind(&not_nan);
3325 movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
3326 bind(&have_double_value);
3327 movsd(FieldOperand(elements, index, times_8,
3328 FixedDoubleArray::kHeaderSize - elements_offset),
3329 xmm_scratch);
3330 jmp(&done, Label::kNear);
3332 bind(&maybe_nan);
3333 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3334 // it's an Infinity, and the non-NaN code path applies.
3335 j(greater, &is_nan, Label::kNear);
3336 cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
3337 j(zero, &not_nan);
3338 bind(&is_nan);
3339 // Convert all NaNs to the same canonical NaN value when they are stored in
3340 // the double array.
3341 Set(kScratchRegister, BitCast<uint64_t>(
3342 FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
3343 movq(xmm_scratch, kScratchRegister);
3344 jmp(&have_double_value, Label::kNear);
3346 bind(&smi_value);
3347 // Value is a smi. Convert to a double and store.
3348 // Preserve original value.
3349 SmiToInteger32(kScratchRegister, maybe_number);
3350 Cvtlsi2sd(xmm_scratch, kScratchRegister);
3351 movsd(FieldOperand(elements, index, times_8,
3352 FixedDoubleArray::kHeaderSize - elements_offset),
3353 xmm_scratch);
3354 bind(&done);
3355 }
3358 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3359 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3363 void MacroAssembler::CheckMap(Register obj,
3364 Handle<Map> map,
3365 Label* fail,
3366 SmiCheckType smi_check_type) {
3367 if (smi_check_type == DO_SMI_CHECK) {
3368 JumpIfSmi(obj, fail);
3369 }
3371 CompareMap(obj, map);
3372 j(not_equal, fail);
3373 }
3376 void MacroAssembler::ClampUint8(Register reg) {
3377 Label done;
3378 testl(reg, Immediate(0xFFFFFF00));
3379 j(zero, &done, Label::kNear);
3380 setcc(negative, reg); // 1 if negative, 0 if positive.
3381 decb(reg); // 0 if negative, 255 if positive.
3382 bind(&done);
3383 }
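// Branch-free clamp sketch (ours): when the range test fails, reg is either
// negative or greater than 255:
//
//   reg = (reg < 0) ? 1 : 0;  // setcc(negative)
//   reg = reg - 1;            // decb: 0x00 if negative, 0xFF (255) otherwise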
3386 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3387 XMMRegister temp_xmm_reg,
3388 Register result_reg) {
3390 Label conv_failure, done;
3391 xorps(temp_xmm_reg, temp_xmm_reg);
3392 cvtsd2si(result_reg, input_reg);
3393 testl(result_reg, Immediate(0xFFFFFF00));
3394 j(zero, &done, Label::kNear);
3395 cmpl(result_reg, Immediate(1));
3396 j(overflow, &conv_failure, Label::kNear);
3397 movl(result_reg, Immediate(0));
3398 setcc(sign, result_reg);
3399 subl(result_reg, Immediate(1));
3400 andl(result_reg, Immediate(255));
3401 jmp(&done, Label::kNear);
3402 bind(&conv_failure);
3403 Set(result_reg, 0);
3404 ucomisd(input_reg, temp_xmm_reg);
3405 j(below, &done, Label::kNear);
3406 Set(result_reg, 255);
3407 bind(&done);
3408 }
3411 void MacroAssembler::LoadUint32(XMMRegister dst,
3412 Register src,
3413 XMMRegister scratch) {
3414 if (FLAG_debug_code) {
3415 cmpq(src, Immediate(0xffffffff));
3416 Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3417 }
3418 cvtqsi2sd(dst, src);
3419 }
3422 void MacroAssembler::SlowTruncateToI(Register result_reg,
3423 Register input_reg,
3424 int offset) {
3425 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
3426 call(stub.GetCode(), RelocInfo::CODE_TARGET);
3427 }
3430 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3431 Register input_reg) {
3432 Label done;
3433 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3434 cvttsd2siq(result_reg, xmm0);
3435 cmpq(result_reg, Immediate(1));
3436 j(no_overflow, &done, Label::kNear);
3439 if (input_reg.is(result_reg)) {
3440 subp(rsp, Immediate(kDoubleSize));
3441 movsd(MemOperand(rsp, 0), xmm0);
3442 SlowTruncateToI(result_reg, rsp, 0);
3443 addp(rsp, Immediate(kDoubleSize));
3444 } else {
3445 SlowTruncateToI(result_reg, input_reg);
3446 }
3448 bind(&done);
3449 // Keep our invariant that the upper 32 bits are zero.
3450 movl(result_reg, result_reg);
3454 void MacroAssembler::TruncateDoubleToI(Register result_reg,
3455 XMMRegister input_reg) {
3456 Label done;
3457 cvttsd2siq(result_reg, input_reg);
3458 cmpq(result_reg, Immediate(1));
3459 j(no_overflow, &done, Label::kNear);
3461 subp(rsp, Immediate(kDoubleSize));
3462 movsd(MemOperand(rsp, 0), input_reg);
3463 SlowTruncateToI(result_reg, rsp, 0);
3464 addp(rsp, Immediate(kDoubleSize));
3466 bind(&done);
3467 // Keep our invariant that the upper 32 bits are zero.
3468 movl(result_reg, result_reg);
3472 void MacroAssembler::DoubleToI(Register result_reg,
3473 XMMRegister input_reg,
3474 XMMRegister scratch,
3475 MinusZeroMode minus_zero_mode,
3476 Label* conversion_failed,
3477 Label::Distance dst) {
3478 cvttsd2si(result_reg, input_reg);
3479 Cvtlsi2sd(xmm0, result_reg);
3480 ucomisd(xmm0, input_reg);
3481 j(not_equal, conversion_failed, dst);
3482 j(parity_even, conversion_failed, dst); // NaN.
3483 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3484 Label done;
3485 // The integer converted back is equal to the original. We
3486 // only have to test if we got -0 as an input.
3487 testl(result_reg, result_reg);
3488 j(not_zero, &done, Label::kNear);
3489 movmskpd(result_reg, input_reg);
3490 // Bit 0 contains the sign of the double in input_reg.
3491 // If input was positive, we are ok and return 0, otherwise
3492 // jump to conversion_failed.
3493 andl(result_reg, Immediate(1));
3494 j(not_zero, conversion_failed, dst);
3495 bind(&done);
3496 }
3497 }
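// Conversion-check sketch (ours): a lossless double->int32 conversion must
// round-trip, and -0 has to be rejected separately because it truncates to 0:
//
//   int32_t i = static_cast<int32_t>(d);      // cvttsd2si
//   bool ok = (static_cast<double>(i) == d);  // Cvtlsi2sd + ucomisd
//   if (i == 0) ok = ok && !std::signbit(d);  // movmskpd bit 0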
3500 void MacroAssembler::TaggedToI(Register result_reg,
3501 Register input_reg,
3502 XMMRegister temp,
3503 MinusZeroMode minus_zero_mode,
3504 Label* lost_precision,
3505 Label::Distance dst) {
3506 Label done;
3507 ASSERT(!temp.is(xmm0));
3509 // Heap number map check.
3510 CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3511 Heap::kHeapNumberMapRootIndex);
3512 j(not_equal, lost_precision, dst);
3514 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3515 cvttsd2si(result_reg, xmm0);
3516 Cvtlsi2sd(temp, result_reg);
3517 ucomisd(xmm0, temp);
3518 RecordComment("Deferred TaggedToI: lost precision");
3519 j(not_equal, lost_precision, dst);
3520 RecordComment("Deferred TaggedToI: NaN");
3521 j(parity_even, lost_precision, dst); // NaN.
3522 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3523 testl(result_reg, result_reg);
3524 j(not_zero, &done, Label::kNear);
3525 movmskpd(result_reg, xmm0);
3526 andl(result_reg, Immediate(1));
3527 j(not_zero, lost_precision, dst);
3528 }
3529 bind(&done);
3530 }
3533 void MacroAssembler::Throw(BailoutReason reason) {
3534 #ifdef DEBUG
3535 const char* msg = GetBailoutReason(reason);
3536 if (msg != NULL) {
3537 RecordComment("Throw message: ");
3538 RecordComment(msg);
3539 }
3540 #endif
3542 Push(rax);
3543 Push(Smi::FromInt(reason));
3544 if (!has_frame_) {
3545 // We don't actually want to generate a pile of code for this, so just
3546 // claim there is a stack frame, without generating one.
3547 FrameScope scope(this, StackFrame::NONE);
3548 CallRuntime(Runtime::kHiddenThrowMessage, 1);
3549 } else {
3550 CallRuntime(Runtime::kHiddenThrowMessage, 1);
3551 }
3552 // Control will not return here.
3553 int3();
3554 }
3557 void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
3558 Label L;
3559 j(NegateCondition(cc), &L);
3560 Throw(reason);
3561 // will not return here
3562 bind(&L);
3563 }
3566 void MacroAssembler::LoadInstanceDescriptors(Register map,
3567 Register descriptors) {
3568 movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3572 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3573 movp(dst, FieldOperand(map, Map::kBitField3Offset));
3574 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3578 void MacroAssembler::EnumLength(Register dst, Register map) {
3579 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3580 movp(dst, FieldOperand(map, Map::kBitField3Offset));
3581 Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
3582 andp(dst, kScratchRegister);
3586 void MacroAssembler::DispatchMap(Register obj,
3587 Register unused,
3588 Handle<Map> map,
3589 Handle<Code> success,
3590 SmiCheckType smi_check_type) {
3591 Label fail;
3592 if (smi_check_type == DO_SMI_CHECK) {
3593 JumpIfSmi(obj, &fail);
3594 }
3595 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3596 j(equal, success, RelocInfo::CODE_TARGET);
3597 bind(&fail);
3598 }
3602 void MacroAssembler::AssertNumber(Register object) {
3603 if (emit_debug_code()) {
3604 Label ok;
3605 Condition is_smi = CheckSmi(object);
3606 j(is_smi, &ok, Label::kNear);
3607 Cmp(FieldOperand(object, HeapObject::kMapOffset),
3608 isolate()->factory()->heap_number_map());
3609 Check(equal, kOperandIsNotANumber);
3610 bind(&ok);
3611 }
3612 }
3615 void MacroAssembler::AssertNotSmi(Register object) {
3616 if (emit_debug_code()) {
3617 Condition is_smi = CheckSmi(object);
3618 Check(NegateCondition(is_smi), kOperandIsASmi);
3623 void MacroAssembler::AssertSmi(Register object) {
3624 if (emit_debug_code()) {
3625 Condition is_smi = CheckSmi(object);
3626 Check(is_smi, kOperandIsNotASmi);
3631 void MacroAssembler::AssertSmi(const Operand& object) {
3632 if (emit_debug_code()) {
3633 Condition is_smi = CheckSmi(object);
3634 Check(is_smi, kOperandIsNotASmi);
3639 void MacroAssembler::AssertZeroExtended(Register int32_register) {
3640 if (emit_debug_code()) {
3641 ASSERT(!int32_register.is(kScratchRegister));
3642 movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3643 cmpq(kScratchRegister, int32_register);
3644 Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3649 void MacroAssembler::AssertString(Register object) {
3650 if (emit_debug_code()) {
3651 testb(object, Immediate(kSmiTagMask));
3652 Check(not_equal, kOperandIsASmiAndNotAString);
3653 Push(object);
3654 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3655 CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3656 Pop(object);
3657 Check(below, kOperandIsNotAString);
3662 void MacroAssembler::AssertName(Register object) {
3663 if (emit_debug_code()) {
3664 testb(object, Immediate(kSmiTagMask));
3665 Check(not_equal, kOperandIsASmiAndNotAName);
3666 Push(object);
3667 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3668 CmpInstanceType(object, LAST_NAME_TYPE);
3669 Pop(object);
3670 Check(below_equal, kOperandIsNotAName);
3675 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3676 if (emit_debug_code()) {
3677 Label done_checking;
3678 AssertNotSmi(object);
3679 Cmp(object, isolate()->factory()->undefined_value());
3680 j(equal, &done_checking);
3681 Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3682 Assert(equal, kExpectedUndefinedOrCell);
3683 bind(&done_checking);
3688 void MacroAssembler::AssertRootValue(Register src,
3689 Heap::RootListIndex root_value_index,
3690 BailoutReason reason) {
3691 if (emit_debug_code()) {
3692 ASSERT(!src.is(kScratchRegister));
3693 LoadRoot(kScratchRegister, root_value_index);
3694 cmpp(src, kScratchRegister);
3695 Check(equal, reason);
3701 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3702 Register map,
3703 Register instance_type) {
3704 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3705 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3706 STATIC_ASSERT(kNotStringTag != 0);
3707 testb(instance_type, Immediate(kIsNotStringMask));
3708 return zero;
3709 }
3712 Condition MacroAssembler::IsObjectNameType(Register heap_object,
3713 Register map,
3714 Register instance_type) {
3715 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3716 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3717 cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
3718 return below_equal;
3719 }
3722 void MacroAssembler::TryGetFunctionPrototype(Register function,
3723 Register result,
3724 Label* miss,
3725 bool miss_on_bound_function) {
3726 // Check that the receiver isn't a smi.
3727 testl(function, Immediate(kSmiTagMask));
3728 j(zero, miss);
3730 // Check that the function really is a function.
3731 CmpObjectType(function, JS_FUNCTION_TYPE, result);
3732 j(not_equal, miss);
3734 if (miss_on_bound_function) {
3735 movp(kScratchRegister,
3736 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3737 // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
3738 // field).
3739 TestBitSharedFunctionInfoSpecialField(kScratchRegister,
3740 SharedFunctionInfo::kCompilerHintsOffset,
3741 SharedFunctionInfo::kBoundFunction);
3742 j(not_zero, miss);
3743 }
3745 // Make sure that the function has an instance prototype.
3746 Label non_instance;
3747 testb(FieldOperand(result, Map::kBitFieldOffset),
3748 Immediate(1 << Map::kHasNonInstancePrototype));
3749 j(not_zero, &non_instance, Label::kNear);
3751 // Get the prototype or initial map from the function.
3752 movp(result,
3753 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3755 // If the prototype or initial map is the hole, don't return it and
3756 // simply miss the cache instead. This will allow us to allocate a
3757 // prototype object on-demand in the runtime system.
3758 CompareRoot(result, Heap::kTheHoleValueRootIndex);
3759 j(equal, miss);
3761 // If the function does not have an initial map, we're done.
3762 Label done;
3763 CmpObjectType(result, MAP_TYPE, kScratchRegister);
3764 j(not_equal, &done, Label::kNear);
3766 // Get the prototype from the initial map.
3767 movp(result, FieldOperand(result, Map::kPrototypeOffset));
3768 jmp(&done, Label::kNear);
3770 // Non-instance prototype: Fetch prototype from constructor field
3771 // in initial map.
3772 bind(&non_instance);
3773 movp(result, FieldOperand(result, Map::kConstructorOffset));
3775 // All done.
3776 bind(&done);
3777 }
3780 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3781 if (FLAG_native_code_counters && counter->Enabled()) {
3782 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3783 movl(counter_operand, Immediate(value));
3788 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3789 ASSERT(value > 0);
3790 if (FLAG_native_code_counters && counter->Enabled()) {
3791 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3792 if (value == 1) {
3793 incl(counter_operand);
3794 } else {
3795 addl(counter_operand, Immediate(value));
3796 }
3797 }
3798 }
3801 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3802 ASSERT(value > 0);
3803 if (FLAG_native_code_counters && counter->Enabled()) {
3804 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3805 if (value == 1) {
3806 decl(counter_operand);
3807 } else {
3808 subl(counter_operand, Immediate(value));
3809 }
3810 }
3811 }
3814 void MacroAssembler::DebugBreak() {
3815 Set(rax, 0); // No arguments.
3816 LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3817 CEntryStub ces(isolate(), 1);
3818 ASSERT(AllowThisStubCall(&ces));
3819 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3823 void MacroAssembler::InvokeCode(Register code,
3824 const ParameterCount& expected,
3825 const ParameterCount& actual,
3826 InvokeFlag flag,
3827 const CallWrapper& call_wrapper) {
3828 // You can't call a function without a valid frame.
3829 ASSERT(flag == JUMP_FUNCTION || has_frame());
3831 Label done;
3832 bool definitely_mismatches = false;
3833 InvokePrologue(expected,
3834 actual,
3835 Handle<Code>::null(),
3836 code,
3837 &done,
3838 &definitely_mismatches,
3839 flag,
3840 Label::kNear,
3841 call_wrapper);
3842 if (!definitely_mismatches) {
3843 if (flag == CALL_FUNCTION) {
3844 call_wrapper.BeforeCall(CallSize(code));
3845 call(code);
3846 call_wrapper.AfterCall();
3847 } else {
3848 ASSERT(flag == JUMP_FUNCTION);
3849 jmp(code);
3850 }
3851 bind(&done);
3852 }
3853 }
3856 void MacroAssembler::InvokeFunction(Register function,
3857 const ParameterCount& actual,
3858 InvokeFlag flag,
3859 const CallWrapper& call_wrapper) {
3860 // You can't call a function without a valid frame.
3861 ASSERT(flag == JUMP_FUNCTION || has_frame());
3863 ASSERT(function.is(rdi));
3864 movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3865 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3866 LoadSharedFunctionInfoSpecialField(rbx, rdx,
3867 SharedFunctionInfo::kFormalParameterCountOffset);
3868 // Advances rdx to the end of the Code object header, to the start of
3869 // the executable code.
3870 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3872 ParameterCount expected(rbx);
3873 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3877 void MacroAssembler::InvokeFunction(Register function,
3878 const ParameterCount& expected,
3879 const ParameterCount& actual,
3880 InvokeFlag flag,
3881 const CallWrapper& call_wrapper) {
3882 // You can't call a function without a valid frame.
3883 ASSERT(flag == JUMP_FUNCTION || has_frame());
3885 ASSERT(function.is(rdi));
3886 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3887 // Advances rdx to the end of the Code object header, to the start of
3888 // the executable code.
3889 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3891 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3895 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3896 const ParameterCount& expected,
3897 const ParameterCount& actual,
3898 InvokeFlag flag,
3899 const CallWrapper& call_wrapper) {
3900 Move(rdi, function);
3901 InvokeFunction(rdi, expected, actual, flag, call_wrapper);
3905 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3906 const ParameterCount& actual,
3907 Handle<Code> code_constant,
3908 Register code_register,
3909 Label* done,
3910 bool* definitely_mismatches,
3911 InvokeFlag flag,
3912 Label::Distance near_jump,
3913 const CallWrapper& call_wrapper) {
3914 bool definitely_matches = false;
3915 *definitely_mismatches = false;
3916 Label invoke;
3917 if (expected.is_immediate()) {
3918 ASSERT(actual.is_immediate());
3919 if (expected.immediate() == actual.immediate()) {
3920 definitely_matches = true;
3921 } else {
3922 Set(rax, actual.immediate());
3923 if (expected.immediate() ==
3924 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3925 // Don't worry about adapting arguments for built-ins that
3926 // don't want that done. Skip adaption code by making it look
3927 // like we have a match between expected and actual number of
3928 // arguments.
3929 definitely_matches = true;
3930 } else {
3931 *definitely_mismatches = true;
3932 Set(rbx, expected.immediate());
3933 }
3934 }
3935 } else {
3936 if (actual.is_immediate()) {
3937 // Expected is in register, actual is immediate. This is the
3938 // case when we invoke function values without going through the
3940 cmpp(expected.reg(), Immediate(actual.immediate()));
3941 j(equal, &invoke, Label::kNear);
3942 ASSERT(expected.reg().is(rbx));
3943 Set(rax, actual.immediate());
3944 } else if (!expected.reg().is(actual.reg())) {
3945 // Both expected and actual are in (different) registers. This
3946 // is the case when we invoke functions using call and apply.
3947 cmpp(expected.reg(), actual.reg());
3948 j(equal, &invoke, Label::kNear);
3949 ASSERT(actual.reg().is(rax));
3950 ASSERT(expected.reg().is(rbx));
3951 }
3952 }
3954 if (!definitely_matches) {
3955 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
3956 if (!code_constant.is_null()) {
3957 Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
3958 addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3959 } else if (!code_register.is(rdx)) {
3960 movp(rdx, code_register);
3963 if (flag == CALL_FUNCTION) {
3964 call_wrapper.BeforeCall(CallSize(adaptor));
3965 Call(adaptor, RelocInfo::CODE_TARGET);
3966 call_wrapper.AfterCall();
3967 if (!*definitely_mismatches) {
3968 jmp(done, near_jump);
3969 }
3970 } else {
3971 Jump(adaptor, RelocInfo::CODE_TARGET);
3972 }
3973 bind(&invoke);
3974 }
3975 }
3978 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
3979 if (frame_mode == BUILD_STUB_FRAME) {
3980 pushq(rbp); // Caller's frame pointer.
3981 movp(rbp, rsp);
3982 Push(rsi); // Callee's context.
3983 Push(Smi::FromInt(StackFrame::STUB));
3984 } else {
3985 PredictableCodeSizeScope predictible_code_size_scope(this,
3986 kNoCodeAgeSequenceLength);
3987 if (isolate()->IsCodePreAgingActive()) {
3988 // Pre-age the code.
3989 Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
3990 RelocInfo::CODE_AGE_SEQUENCE);
3991 Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
3992 } else {
3993 pushq(rbp); // Caller's frame pointer.
3994 movp(rbp, rsp);
3995 Push(rsi); // Callee's context.
3996 Push(rdi); // Callee's JS function.
3997 }
3998 }
3999 }
4002 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4003 pushq(rbp);
4004 movp(rbp, rsp);
4005 Push(rsi); // Context.
4006 Push(Smi::FromInt(type));
4007 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4008 Push(kScratchRegister);
4009 if (emit_debug_code()) {
4010 Move(kScratchRegister,
4011 isolate()->factory()->undefined_value(),
4012 RelocInfo::EMBEDDED_OBJECT);
4013 cmpp(Operand(rsp, 0), kScratchRegister);
4014 Check(not_equal, kCodeObjectNotProperlyPatched);
4019 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4020 if (emit_debug_code()) {
4021 Move(kScratchRegister, Smi::FromInt(type));
4022 cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
4023 Check(equal, kStackFrameTypesMustMatch);
4024 }
4025 movp(rsp, rbp);
4026 popq(rbp);
4027 }
4030 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
4031 // Set up the frame structure on the stack.
4032 // All constants are relative to the frame pointer of the exit frame.
4033 ASSERT(ExitFrameConstants::kCallerSPDisplacement ==
4034 kFPOnStackSize + kPCOnStackSize);
4035 ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
4036 ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
4038 pushq(rbp);
4039 movp(rbp, rsp);
4040 // Reserve room for entry stack pointer and push the code object.
4041 ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
4042 Push(Immediate(0)); // Saved entry sp, patched before call.
4043 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4044 Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
4046 // Save the frame pointer and the context in top.
4047 if (save_rax) {
4048 movp(r14, rax); // Backup rax in callee-save register.
4049 }
4051 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
4052 Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
4056 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
4057 bool save_doubles) {
4058 #ifdef _WIN64
4059 const int kShadowSpace = 4;
4060 arg_stack_space += kShadowSpace;
4061 #endif
4062 // Optionally save all XMM registers.
4063 if (save_doubles) {
4064 int space = XMMRegister::kMaxNumAllocatableRegisters * kSIMD128Size +
4065 arg_stack_space * kRegisterSize;
4066 subp(rsp, Immediate(space));
4067 int offset = -2 * kPointerSize;
4068 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4069 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4070 movups(Operand(rbp, offset - ((i + 1) * kSIMD128Size)), reg);
4071 }
4072 } else if (arg_stack_space > 0) {
4073 subp(rsp, Immediate(arg_stack_space * kRegisterSize));
4076 // Get the required frame alignment for the OS.
4077 const int kFrameAlignment = OS::ActivationFrameAlignment();
4078 if (kFrameAlignment > 0) {
4079 ASSERT(IsPowerOf2(kFrameAlignment));
4080 ASSERT(is_int8(kFrameAlignment));
4081 andp(rsp, Immediate(-kFrameAlignment));
4084 // Patch the saved entry sp.
4085 movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
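// Alignment sketch (ours): rounding rsp down to the OS frame alignment with
// a two's-complement mask, as done above:
//
//   rsp &= -kFrameAlignment;  // e.g. andp(rsp, Immediate(-16))
//
// This only works when kFrameAlignment is a power of two, which the
// IsPowerOf2 assert guarantees.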
4089 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
4090 EnterExitFramePrologue(true);
4092 // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
4093 // so it must be retained across the C-call.
4094 int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
4095 leap(r15, Operand(rbp, r14, times_pointer_size, offset));
4097 EnterExitFrameEpilogue(arg_stack_space, save_doubles);
4101 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
4102 EnterExitFramePrologue(false);
4103 EnterExitFrameEpilogue(arg_stack_space, false);
4107 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
4110 if (save_doubles) {
4111 int offset = -2 * kPointerSize;
4112 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4113 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4114 movups(reg, Operand(rbp, offset - ((i + 1) * kSIMD128Size)));
4115 }
4116 }
4117 // Get the return address from the stack and restore the frame pointer.
4118 movp(rcx, Operand(rbp, kFPOnStackSize));
4119 movp(rbp, Operand(rbp, 0 * kPointerSize));
4121 // Drop everything up to and including the arguments and the receiver
4122 // from the caller stack.
4123 leap(rsp, Operand(r15, 1 * kPointerSize));
4125 PushReturnAddressFrom(rcx);
4127 LeaveExitFrameEpilogue(true);
4131 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
4132 movp(rsp, rbp);
4133 popq(rbp);
4135 LeaveExitFrameEpilogue(restore_context);
4136 }
4139 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
4140 // Restore current context from top and clear it in debug mode.
4141 ExternalReference context_address(Isolate::kContextAddress, isolate());
4142 Operand context_operand = ExternalOperand(context_address);
4143 if (restore_context) {
4144 movp(rsi, context_operand);
4145 }
4146 #ifdef DEBUG
4147 movp(context_operand, Immediate(0));
4148 #endif
4150 // Clear the top frame.
4151 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
4153 Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
4154 movp(c_entry_fp_operand, Immediate(0));
4158 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
4159 Register scratch,
4160 Label* miss) {
4161 Label same_contexts;
4163 ASSERT(!holder_reg.is(scratch));
4164 ASSERT(!scratch.is(kScratchRegister));
4165 // Load current lexical context from the stack frame.
4166 movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
4168 // When generating debug code, make sure the lexical context is set.
4169 if (emit_debug_code()) {
4170 cmpp(scratch, Immediate(0));
4171 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
4173 // Load the native context of the current context.
4174 int offset =
4175 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
4176 movp(scratch, FieldOperand(scratch, offset));
4177 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4179 // Check the context is a native context.
4180 if (emit_debug_code()) {
4181 Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
4182 isolate()->factory()->native_context_map());
4183 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4186 // Check if both contexts are the same.
4187 cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4188 j(equal, &same_contexts);
4190 // Compare security tokens.
4191 // Check that the security token in the calling global object is
4192 // compatible with the security token in the receiving global
4195 // Check the context is a native context.
4196 if (emit_debug_code()) {
4197 // Preserve original value of holder_reg.
4198 Push(holder_reg);
4199 movp(holder_reg,
4200 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4201 CompareRoot(holder_reg, Heap::kNullValueRootIndex);
4202 Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
4204 // Read the first word and compare to native_context_map(),
4205 movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
4206 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
4207 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4208 // Restore holder_reg.
4209 Pop(holder_reg);
4210 }
4211 movp(kScratchRegister,
4212 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4213 int token_offset =
4214 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
4215 movp(scratch, FieldOperand(scratch, token_offset));
4216 cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
4217 j(not_equal, miss);
4219 bind(&same_contexts);
4223 // Compute the hash code from the untagged key. This must be kept in sync with
4224 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
4225 // code-stub-hydrogen.cc
4226 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
4227 // First of all we assign the hash seed to scratch.
4228 LoadRoot(scratch, Heap::kHashSeedRootIndex);
4229 SmiToInteger32(scratch, scratch);
4231 // Xor original key with a seed.
4232 xorl(r0, scratch);
4234 // Compute the hash code from the untagged key. This must be kept in sync
4235 // with ComputeIntegerHash in utils.h.
4237 // hash = ~hash + (hash << 15);
4238 movl(scratch, r0);
4239 notl(r0);
4240 shll(scratch, Immediate(15));
4241 addl(r0, scratch);
4242 // hash = hash ^ (hash >> 12);
4243 movl(scratch, r0);
4244 shrl(scratch, Immediate(12));
4245 xorl(r0, scratch);
4246 // hash = hash + (hash << 2);
4247 leal(r0, Operand(r0, r0, times_4, 0));
4248 // hash = hash ^ (hash >> 4);
4249 movl(scratch, r0);
4250 shrl(scratch, Immediate(4));
4251 xorl(r0, scratch);
4252 // hash = hash * 2057;
4253 imull(r0, r0, Immediate(2057));
4254 // hash = hash ^ (hash >> 16);
4255 movl(scratch, r0);
4256 shrl(scratch, Immediate(16));
4257 xorl(r0, scratch);
4258 }
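// Plain C++ reference for the scrambling above (must stay in sync with
// ComputeIntegerHash in utils.h):
//
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;
//   hash = hash ^ (hash >> 16);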
4262 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
4263 Register elements,
4264 Register key,
4265 Register r0,
4266 Register r1,
4267 Register r2,
4268 Register result) {
4271 // elements - holds the slow-case elements of the receiver on entry.
4272 // Unchanged unless 'result' is the same register.
4274 // key - holds the smi key on entry.
4275 // Unchanged unless 'result' is the same register.
4277 // Scratch registers:
4279 // r0 - holds the untagged key on entry and holds the hash once computed.
4281 // r1 - used to hold the capacity mask of the dictionary
4283 // r2 - used for the index into the dictionary.
4285 // result - holds the result on exit if the load succeeded.
4286 // Allowed to be the same as 'key' or 'result'.
4287 // Unchanged on bailout so 'key' or 'result' can be used
4288 // in further computation.
4290 Label done;
4292 GetNumberHash(r0, r1);
4294 // Compute capacity mask.
4295 SmiToInteger32(r1, FieldOperand(elements,
4296 SeededNumberDictionary::kCapacityOffset));
4297 decl(r1);
4299 // Generate an unrolled loop that performs a few probes before giving up.
4300 for (int i = 0; i < kNumberDictionaryProbes; i++) {
4301 // Use r2 for index calculations and keep the hash intact in r0.
4302 movp(r2, r0);
4303 // Compute the masked index: (hash + i + i * i) & mask.
4304 if (i > 0) {
4305 addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4306 }
4307 andp(r2, r1);
4309 // Scale the index by multiplying by the entry size.
4310 ASSERT(SeededNumberDictionary::kEntrySize == 3);
4311 leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
4313 // Check if the key matches.
4314 cmpp(key, FieldOperand(elements,
4315 r2,
4316 times_pointer_size,
4317 SeededNumberDictionary::kElementsStartOffset));
4318 if (i != (kNumberDictionaryProbes - 1)) {
4319 j(equal, &done);
4320 } else {
4321 j(not_equal, miss);
4322 }
4323 }
4325 bind(&done);
4326 // Check that the value is a normal property.
4327 const int kDetailsOffset =
4328 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4329 ASSERT_EQ(NORMAL, 0);
4330 Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4331 Smi::FromInt(PropertyDetails::TypeField::kMask));
4332 j(not_zero, miss);
4334 // Get the value at the masked, scaled index.
4335 const int kValueOffset =
4336 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4337 movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
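// Probe sketch (ours): with capacity a power of two, probe i inspects
//
//   slot  = (hash + GetProbeOffset(i)) & (capacity - 1);
//   key_i = elements[kElementsStartIndex + slot * kEntrySize];
//
// where kEntrySize == 3 (key, value, details) - hence the r2 = r2 * 3
// scaling above.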
4341 void MacroAssembler::LoadAllocationTopHelper(Register result,
4342 Register scratch,
4343 AllocationFlags flags) {
4344 ExternalReference allocation_top =
4345 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4347 // Just return if allocation top is already known.
4348 if ((flags & RESULT_CONTAINS_TOP) != 0) {
4349 // No use of scratch if allocation top is provided.
4350 ASSERT(!scratch.is_valid());
4351 #ifdef DEBUG
4352 // Assert that result actually contains top on entry.
4353 Operand top_operand = ExternalOperand(allocation_top);
4354 cmpp(result, top_operand);
4355 Check(equal, kUnexpectedAllocationTop);
4356 #endif
4357 return;
4358 }
4360 // Move address of new object to result. Use scratch register if available,
4361 // and keep address in scratch until call to UpdateAllocationTopHelper.
4362 if (scratch.is_valid()) {
4363 LoadAddress(scratch, allocation_top);
4364 movp(result, Operand(scratch, 0));
4365 } else {
4366 Load(result, allocation_top);
4367 }
4368 }
4371 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
4372 Register scratch,
4373 AllocationFlags flags) {
4374 if (emit_debug_code()) {
4375 testp(result_end, Immediate(kObjectAlignmentMask));
4376 Check(zero, kUnalignedAllocationInNewSpace);
4379 ExternalReference allocation_top =
4380 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4383 if (scratch.is_valid()) {
4384 // Scratch already contains address of allocation top.
4385 movp(Operand(scratch, 0), result_end);
4386 } else {
4387 Store(allocation_top, result_end);
4388 }
4389 }
4392 void MacroAssembler::Allocate(int object_size,
4393 Register result,
4394 Register result_end,
4395 Register scratch,
4396 Label* gc_required,
4397 AllocationFlags flags) {
4398 ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
4399 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
4400 if (!FLAG_inline_new) {
4401 if (emit_debug_code()) {
4402 // Trash the registers to simulate an allocation failure.
4403 movl(result, Immediate(0x7091));
4404 if (result_end.is_valid()) {
4405 movl(result_end, Immediate(0x7191));
4406 }
4407 if (scratch.is_valid()) {
4408 movl(scratch, Immediate(0x7291));
4409 }
4410 }
4411 jmp(gc_required);
4412 return;
4413 }
4414 ASSERT(!result.is(result_end));
4416 // Load address of new object into result.
4417 LoadAllocationTopHelper(result, scratch, flags);
4419 // Align the next allocation. Storing the filler map without checking top is
4420 // safe in new-space because the limit of the heap is aligned there.
4421 if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
4422 testq(result, Immediate(kDoubleAlignmentMask));
4423 Check(zero, kAllocationIsNotDoubleAligned);
4426 // Calculate new top and bail out if new space is exhausted.
4427 ExternalReference allocation_limit =
4428 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4430 Register top_reg = result_end.is_valid() ? result_end : result;
4432 if (!top_reg.is(result)) {
4433 movp(top_reg, result);
4435 addp(top_reg, Immediate(object_size));
4436 j(carry, gc_required);
4437 Operand limit_operand = ExternalOperand(allocation_limit);
4438 cmpp(top_reg, limit_operand);
4439 j(above, gc_required);
4441 // Update allocation top.
4442 UpdateAllocationTopHelper(top_reg, scratch, flags);
4444 bool tag_result = (flags & TAG_OBJECT) != 0;
4445 if (top_reg.is(result)) {
4447 subp(result, Immediate(object_size - kHeapObjectTag));
4449 subp(result, Immediate(object_size));
4451 } else if (tag_result) {
4452 // Tag the result if requested.
4453 ASSERT(kHeapObjectTag == 1);
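// Note on the subtraction above: when top_reg aliases result, result holds
// the new top, so the object start is result - object_size; with
// kHeapObjectTag == 1 the tag is folded into the same instruction, since
// result - (object_size - kHeapObjectTag) is already the tagged pointer.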
4459 void MacroAssembler::Allocate(int header_size,
4460 ScaleFactor element_size,
4461 Register element_count,
4463 Register result_end,
4466 AllocationFlags flags) {
4467 ASSERT((flags & SIZE_IN_WORDS) == 0);
4468 leap(result_end, Operand(element_count, element_size, header_size));
4469 Allocate(result_end, result, result_end, scratch, gc_required, flags);
4473 void MacroAssembler::Allocate(Register object_size,
4475 Register result_end,
4478 AllocationFlags flags) {
4479 ASSERT((flags & SIZE_IN_WORDS) == 0);
4480 if (!FLAG_inline_new) {
4481 if (emit_debug_code()) {
4482 // Trash the registers to simulate an allocation failure.
4483 movl(result, Immediate(0x7091));
4484 movl(result_end, Immediate(0x7191));
4485 if (scratch.is_valid()) {
4486 movl(scratch, Immediate(0x7291));
4488 // object_size is left unchanged by this function.
4493 ASSERT(!result.is(result_end));
4495 // Load address of new object into result.
4496 LoadAllocationTopHelper(result, scratch, flags);
4498 // Align the next allocation. Storing the filler map without checking top is
4499 // safe in new-space because the limit of the heap is aligned there.
4500 if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
4501 testq(result, Immediate(kDoubleAlignmentMask));
4502 Check(zero, kAllocationIsNotDoubleAligned);
4505 // Calculate new top and bail out if new space is exhausted.
4506 ExternalReference allocation_limit =
4507 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4508 if (!object_size.is(result_end)) {
4509 movp(result_end, object_size);
4511 addp(result_end, result);
4512 j(carry, gc_required);
4513 Operand limit_operand = ExternalOperand(allocation_limit);
4514 cmpp(result_end, limit_operand);
4515 j(above, gc_required);
4517 // Update allocation top.
4518 UpdateAllocationTopHelper(result_end, scratch, flags);
4520 // Tag the result if requested.
4521 if ((flags & TAG_OBJECT) != 0) {
4522 addp(result, Immediate(kHeapObjectTag));
4527 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
4528 ExternalReference new_space_allocation_top =
4529 ExternalReference::new_space_allocation_top_address(isolate());
4531 // Make sure the object has no tag before resetting top.
4532 andp(object, Immediate(~kHeapObjectTagMask));
4533 Operand top_operand = ExternalOperand(new_space_allocation_top);
4535 cmpp(object, top_operand);
4536 Check(below, kUndoAllocationOfNonAllocatedMemory);
4538 movp(top_operand, object);
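// The effect, as a sketch (assumes object was the most recent allocation):
//
//   object &= ~kHeapObjectTagMask;           // strip the heap-object tag
//   ASSERT(object < *new_space_allocation_top);
//   *new_space_allocation_top = object;      // rewind the bump pointer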
4542 void MacroAssembler::AllocateHeapNumber(Register result,
4544 Label* gc_required) {
4545 // Allocate heap number in new space.
4546 Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4549 LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
4550 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4554 void MacroAssembler::AllocateSIMDHeapObject(int size,
4558 Heap::RootListIndex map_index) {
4559 Allocate(size, result, scratch, no_reg, gc_required, TAG_OBJECT);
4562 LoadRoot(kScratchRegister, map_index);
4563 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4567 void MacroAssembler::AllocateTwoByteString(Register result,
4572 Label* gc_required) {
4573 // Calculate the number of bytes needed for the characters in the string while
4574 // observing object alignment.
4575 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
4576 kObjectAlignmentMask;
4577 ASSERT(kShortSize == 2);
4578 // scratch1 = length * 2 + kObjectAlignmentMask.
4579 leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
4580 kHeaderAlignment));
4581 andp(scratch1, Immediate(~kObjectAlignmentMask));
4582 if (kHeaderAlignment > 0) {
4583 subp(scratch1, Immediate(kHeaderAlignment));
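// Worked example, assuming 8-byte alignment (kObjectAlignmentMask == 7)
// and an already-aligned header (kHeaderAlignment == 0): for length == 5,
// scratch1 = (5 * 2 + 7) & ~7 == 16, so the allocation below reserves
// kHeaderSize plus 16 bytes, i.e. the 10 payload bytes rounded up to an
// aligned object size.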
4586 // Allocate a two-byte string in new space.
4587 Allocate(SeqTwoByteString::kHeaderSize,
4596 // Set the map, length and hash field.
4597 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
4598 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4599 Integer32ToSmi(scratch1, length);
4600 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4601 movp(FieldOperand(result, String::kHashFieldOffset),
4602 Immediate(String::kEmptyHashField));
4606 void MacroAssembler::AllocateAsciiString(Register result,
4611 Label* gc_required) {
4612 // Calculate the number of bytes needed for the characters in the string while
4613 // observing object alignment.
4614 const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
4615 kObjectAlignmentMask;
4616 movl(scratch1, length);
4617 ASSERT(kCharSize == 1);
4618 addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
4619 andp(scratch1, Immediate(~kObjectAlignmentMask));
4620 if (kHeaderAlignment > 0) {
4621 subp(scratch1, Immediate(kHeaderAlignment));
4624 // Allocate ASCII string in new space.
4625 Allocate(SeqOneByteString::kHeaderSize,
4634 // Set the map, length and hash field.
4635 LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
4636 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4637 Integer32ToSmi(scratch1, length);
4638 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4639 movp(FieldOperand(result, String::kHashFieldOffset),
4640 Immediate(String::kEmptyHashField));
4644 void MacroAssembler::AllocateTwoByteConsString(Register result,
4647 Label* gc_required) {
4648 // Allocate a cons string object in new space.
4649 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4652 // Set the map. The other fields are left uninitialized.
4653 LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
4654 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4658 void MacroAssembler::AllocateAsciiConsString(Register result,
4661 Label* gc_required) {
4662 Label allocate_new_space, install_map;
4663 AllocationFlags flags = TAG_OBJECT;
4665 ExternalReference high_promotion_mode = ExternalReference::
4666 new_space_high_promotion_mode_active_address(isolate());
4668 Load(scratch1, high_promotion_mode);
4669 testb(scratch1, Immediate(1));
4670 j(zero, &allocate_new_space);
4671 Allocate(ConsString::kSize,
4676 static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
4680 bind(&allocate_new_space);
4681 Allocate(ConsString::kSize,
4690 // Set the map. The other fields are left uninitialized.
4691 LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
4692 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4696 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4699 Label* gc_required) {
4700 // Allocate a two-byte sliced string object in new space.
4701 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4704 // Set the map. The other fields are left uninitialized.
4705 LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
4706 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4710 void MacroAssembler::AllocateAsciiSlicedString(Register result,
4713 Label* gc_required) {
4714 // Allocate an ASCII sliced string object in new space.
4715 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4718 // Set the map. The other fields are left uninitialized.
4719 LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
4720 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4724 // Copy memory, byte-by-byte, from source to destination. Not optimized for
4725 // long or aligned copies. The contents of scratch and length are destroyed.
4726 // Destination is incremented by length; source, length and scratch are
4727 // clobbered.
4728 // A simpler loop is faster on small copies, but slower on large ones.
4729 // The cld() instruction must have been emitted, to clear the direction flag,
4730 // before calling this function.
4731 void MacroAssembler::CopyBytes(Register destination,
4736 ASSERT(min_length >= 0);
4737 if (emit_debug_code()) {
4738 cmpl(length, Immediate(min_length));
4739 Assert(greater_equal, kInvalidMinLength);
4741 Label short_loop, len8, len16, len24, done, short_string;
4743 const int kLongStringLimit = 4 * kPointerSize;
4744 if (min_length <= kLongStringLimit) {
4745 cmpl(length, Immediate(kPointerSize));
4746 j(below, &short_string, Label::kNear);
4749 ASSERT(source.is(rsi));
4750 ASSERT(destination.is(rdi));
4751 ASSERT(length.is(rcx));
4753 if (min_length <= kLongStringLimit) {
4754 cmpl(length, Immediate(2 * kPointerSize));
4755 j(below_equal, &len8, Label::kNear);
4756 cmpl(length, Immediate(3 * kPointerSize));
4757 j(below_equal, &len16, Label::kNear);
4758 cmpl(length, Immediate(4 * kPointerSize));
4759 j(below_equal, &len24, Label::kNear);
4762 // Because source is 8-byte aligned in our uses of this function,
4763 // we keep source aligned for the rep movs operation by copying the odd bytes
4764 // at the end of the ranges.
4765 movp(scratch, length);
4766 shrl(length, Immediate(kPointerSizeLog2));
4768 // Move remaining bytes of length.
4769 andl(scratch, Immediate(kPointerSize - 1));
4770 movp(length, Operand(source, scratch, times_1, -kPointerSize));
4771 movp(Operand(destination, scratch, times_1, -kPointerSize), length);
4772 addp(destination, scratch);
4774 if (min_length <= kLongStringLimit) {
4775 jmp(&done, Label::kNear);
4777 movp(scratch, Operand(source, 2 * kPointerSize));
4778 movp(Operand(destination, 2 * kPointerSize), scratch);
4780 movp(scratch, Operand(source, kPointerSize));
4781 movp(Operand(destination, kPointerSize), scratch);
4783 movp(scratch, Operand(source, 0));
4784 movp(Operand(destination, 0), scratch);
4785 // Move remaining bytes of length.
4786 movp(scratch, Operand(source, length, times_1, -kPointerSize));
4787 movp(Operand(destination, length, times_1, -kPointerSize), scratch);
4788 addp(destination, length);
4789 jmp(&done, Label::kNear);
4791 bind(&short_string);
4792 if (min_length == 0) {
4793 testl(length, length);
4794 j(zero, &done, Label::kNear);
4798 movb(scratch, Operand(source, 0));
4799 movb(Operand(destination, 0), scratch);
4803 j(not_zero, &short_loop);
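// A C-style sketch of the long-copy strategy above (illustrative only;
// rep_movs/load_word/store_word are hypothetical stand-ins for the
// word-sized moves):
//
//   size_t words = length >> kPointerSizeLog2;   // bulk words, rep movs
//   size_t rest = length & (kPointerSize - 1);   // 0..7 trailing bytes
//   rep_movs(&dst, &src, words);                 // advances dst and src
//   // One overlapping word copy covers the tail without a byte loop:
//   store_word(dst + rest - kPointerSize,
//              load_word(src + rest - kPointerSize));
//   dst += rest;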
4810 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
4811 Register end_offset,
4816 movp(Operand(start_offset, 0), filler);
4817 addp(start_offset, Immediate(kPointerSize));
4819 cmpp(start_offset, end_offset);
4824 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4825 if (context_chain_length > 0) {
4826 // Move up the chain of contexts to the context containing the slot.
4827 movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4828 for (int i = 1; i < context_chain_length; i++) {
4829 movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4832 // Slot is in the current function context. Move it into the
4833 // destination register in case we store into it (the write barrier
4834 // cannot be allowed to destroy the context in rsi).
4838 // We should not have found a with context by walking the context
4839 // chain (i.e., the static scope chain and runtime context chain do
4840 // not agree). A variable occurring in such a scope should have
4841 // slot type LOOKUP and not CONTEXT.
4842 if (emit_debug_code()) {
4843 CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
4844 Heap::kWithContextMapRootIndex);
4845 Check(not_equal, kVariableResolvedToWithContext);
4850 void MacroAssembler::LoadTransitionedArrayMapConditional(
4851 ElementsKind expected_kind,
4852 ElementsKind transitioned_kind,
4853 Register map_in_out,
4855 Label* no_map_match) {
4856 // Load the global or builtins object from the current context.
4857 movp(scratch,
4858 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4859 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4861 // Check that the function's map is the same as the expected cached map.
4862 movp(scratch, Operand(scratch,
4863 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4865 int offset = expected_kind * kPointerSize +
4866 FixedArrayBase::kHeaderSize;
4867 cmpp(map_in_out, FieldOperand(scratch, offset));
4868 j(not_equal, no_map_match);
4870 // Use the transitioned cached map.
4871 offset = transitioned_kind * kPointerSize +
4872 FixedArrayBase::kHeaderSize;
4873 movp(map_in_out, FieldOperand(scratch, offset));
4877 #ifdef _WIN64
4878 static const int kRegisterPassedArguments = 4;
4879 #else
4880 static const int kRegisterPassedArguments = 6;
4881 #endif
4883 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4884 // Load the global or builtins object from the current context.
4885 movp(function,
4886 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4887 // Load the native context from the global or builtins object.
4888 movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
4889 // Load the function from the native context.
4890 movp(function, Operand(function, Context::SlotOffset(index)));
4894 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4896 // Load the initial map. The global functions all have initial maps.
4897 movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4898 if (emit_debug_code()) {
4900 CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
4903 Abort(kGlobalFunctionsMustHaveInitialMap);
4909 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
4910 // On Windows 64 stack slots are reserved by the caller for all arguments
4911 // including the ones passed in registers, and space is always allocated for
4912 // the four register arguments even if the function takes fewer than four
4913 // arguments.
4914 // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
4915 // and the caller does not reserve stack slots for them.
4916 ASSERT(num_arguments >= 0);
4917 #ifdef _WIN64
4918 const int kMinimumStackSlots = kRegisterPassedArguments;
4919 if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
4920 return num_arguments;
4921 #else
4922 if (num_arguments < kRegisterPassedArguments) return 0;
4923 return num_arguments - kRegisterPassedArguments;
4924 #endif
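// Worked examples of the two conventions: with six arguments Windows
// reserves six slots (four shadow slots plus two stack arguments) while
// the AMD64 ABI needs none, all six being register-passed; with eight
// arguments the counts are eight and two respectively.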
4928 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
4931 uint32_t encoding_mask) {
4933 JumpIfNotSmi(string, &is_object);
4938 movp(value, FieldOperand(string, HeapObject::kMapOffset));
4939 movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
4941 andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
4942 cmpp(value, Immediate(encoding_mask));
4944 Check(equal, kUnexpectedStringType);
4946 // The index is assumed to be untagged coming in; tag it to compare with the
4947 // string length without using a temp register. It is restored at the end of
4948 // this function.
4949 Integer32ToSmi(index, index);
4950 SmiCompare(index, FieldOperand(string, String::kLengthOffset));
4951 Check(less, kIndexIsTooLarge);
4953 SmiCompare(index, Smi::FromInt(0));
4954 Check(greater_equal, kIndexIsNegative);
4956 // Restore the index.
4957 SmiToInteger32(index, index);
4961 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
4962 int frame_alignment = OS::ActivationFrameAlignment();
4963 ASSERT(frame_alignment != 0);
4964 ASSERT(num_arguments >= 0);
4966 // Make stack end at alignment and allocate space for arguments and old rsp.
4967 movp(kScratchRegister, rsp);
4968 ASSERT(IsPowerOf2(frame_alignment));
4969 int argument_slots_on_stack =
4970 ArgumentStackSlotsForCFunctionCall(num_arguments);
4971 subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
4972 andp(rsp, Immediate(-frame_alignment));
4973 movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
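// Stack layout after PrepareCallCFunction(n), as a sketch, where slots ==
// ArgumentStackSlotsForCFunctionCall(n):
//
//   [rsp + slots * kRegisterSize]                 saved original rsp
//   [rsp .. rsp + (slots - 1) * kRegisterSize]    stack argument slots, if any
//   rsp itself aligned to OS::ActivationFrameAlignment()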
4977 void MacroAssembler::CallCFunction(ExternalReference function,
4978 int num_arguments) {
4979 LoadAddress(rax, function);
4980 CallCFunction(rax, num_arguments);
4984 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
4985 ASSERT(has_frame());
4986 // Check stack alignment.
4987 if (emit_debug_code()) {
4988 CheckStackAlignment();
4992 ASSERT(OS::ActivationFrameAlignment() != 0);
4993 ASSERT(num_arguments >= 0);
4994 int argument_slots_on_stack =
4995 ArgumentStackSlotsForCFunctionCall(num_arguments);
4996 movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
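// The movp above reloads the rsp saved by PrepareCallCFunction. Typical
// usage, as a sketch (the external reference some_function is a
// placeholder, not a real declaration):
//
//   PrepareCallCFunction(2);
//   // ... load the two arguments into the ABI argument registers ...
//   CallCFunction(ExternalReference::some_function(isolate()), 2);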
5000 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5001 if (r1.is(r2)) return true;
5002 if (r1.is(r3)) return true;
5003 if (r1.is(r4)) return true;
5004 if (r2.is(r3)) return true;
5005 if (r2.is(r4)) return true;
5006 if (r3.is(r4)) return true;
5011 CodePatcher::CodePatcher(byte* address, int size)
5012 : address_(address),
5013 size_(size),
5014 masm_(NULL, address, size + Assembler::kGap) {
5015 // Create a new macro assembler pointing to the address of the code to patch.
5016 // The size is adjusted with kGap in order for the assembler to generate size
5017 // bytes of instructions without failing with buffer size constraints.
5018 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5022 CodePatcher::~CodePatcher() {
5023 // Indicate that code has changed.
5024 CPU::FlushICache(address_, size_);
5026 // Check that the code was patched as expected.
5027 ASSERT(masm_.pc_ == address_ + size_);
5028 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5032 void MacroAssembler::CheckPageFlag(
5037 Label* condition_met,
5038 Label::Distance condition_met_distance) {
5039 ASSERT(cc == zero || cc == not_zero);
5040 if (scratch.is(object)) {
5041 andp(scratch, Immediate(~Page::kPageAlignmentMask));
5043 movp(scratch, Immediate(~Page::kPageAlignmentMask));
5044 andp(scratch, object);
5046 if (mask < (1 << kBitsPerByte)) {
5047 testb(Operand(scratch, MemoryChunk::kFlagsOffset),
5048 Immediate(static_cast<uint8_t>(mask)));
5050 testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
5052 j(cc, condition_met, condition_met_distance);
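// In C terms (a sketch; pages are power-of-two aligned allocations, so
// masking any interior pointer recovers the MemoryChunk header):
//
//   uintptr_t chunk = object_address & ~Page::kPageAlignmentMask;
//   uint32_t flags =
//       *reinterpret_cast<uint32_t*>(chunk + MemoryChunk::kFlagsOffset);
//   // branch to condition_met when (flags & mask) is zero or non-zero,
//   // as selected by cc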
5056 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5058 Label* if_deprecated) {
5059 if (map->CanBeDeprecated()) {
5061 movp(scratch, FieldOperand(scratch, Map::kBitField3Offset));
5062 SmiToInteger32(scratch, scratch);
5063 andp(scratch, Immediate(Map::Deprecated::kMask));
5064 j(not_zero, if_deprecated);
5069 void MacroAssembler::JumpIfBlack(Register object,
5070 Register bitmap_scratch,
5071 Register mask_scratch,
5073 Label::Distance on_black_distance) {
5074 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
5075 GetMarkBits(object, bitmap_scratch, mask_scratch);
5077 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5078 // The mask_scratch register contains a 1 at the position of the first bit
5079 // and a 0 at all other positions, including the position of the second bit.
5080 movp(rcx, mask_scratch);
5081 // Make rcx into a mask that covers both marking bits using the operation
5082 // rcx = mask | (mask << 1).
5083 leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
5084 // Note that we are using a 4-byte aligned 8-byte load.
5085 andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
5086 cmpp(mask_scratch, rcx);
5087 j(equal, on_black, on_black_distance);
5091 // Detect some, but not all, common pointer-free objects. This is used by the
5092 // incremental write barrier which doesn't care about oddballs (they are always
5093 // marked black immediately so this code is not hit).
5094 void MacroAssembler::JumpIfDataObject(
5097 Label* not_data_object,
5098 Label::Distance not_data_object_distance) {
5099 Label is_data_object;
5100 movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
5101 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5102 j(equal, &is_data_object, Label::kNear);
5103 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5104 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5105 // If it's a string and it's not a cons string then it's an object containing
5106 // no GC pointers.
5107 testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
5108 Immediate(kIsIndirectStringMask | kIsNotStringMask));
5109 j(not_zero, not_data_object, not_data_object_distance);
5110 bind(&is_data_object);
5114 void MacroAssembler::GetMarkBits(Register addr_reg,
5115 Register bitmap_reg,
5116 Register mask_reg) {
5117 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
5118 movp(bitmap_reg, addr_reg);
5119 // Sign extended 32 bit immediate.
5120 andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
5121 movp(rcx, addr_reg);
5122 int shift =
5123 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
5124 shrl(rcx, Immediate(shift));
5125 andp(rcx,
5126 Immediate((Page::kPageAlignmentMask >> shift) &
5127 ~(Bitmap::kBytesPerCell - 1)));
5129 addp(bitmap_reg, rcx);
5130 movp(rcx, addr_reg);
5131 shrl(rcx, Immediate(kPointerSizeLog2));
5132 andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
5133 movl(mask_reg, Immediate(1));
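// Worked example of the bit math above, assuming 64-bit pointers
// (kPointerSizeLog2 == 3) and 32-bit cells (kBitsPerCellLog2 == 5,
// kBytesPerCellLog2 == 2), hence shift == 6: for an object at page offset
// 0x1040 the cell byte offset is (0x1040 >> 6) & ~3 == 0x40 and the bit
// index is (0x1040 >> 3) & 31 == 8, so the final mask is 1 << 8.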
5138 void MacroAssembler::EnsureNotWhite(
5140 Register bitmap_scratch,
5141 Register mask_scratch,
5142 Label* value_is_white_and_not_data,
5143 Label::Distance distance) {
5144 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
5145 GetMarkBits(value, bitmap_scratch, mask_scratch);
5147 // If the value is black or grey we don't need to do anything.
5148 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5149 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5150 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5151 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5155 // Since both black and grey have a 1 in the first position and white does
5156 // not have a 1 there we only need to check one bit.
5157 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5158 j(not_zero, &done, Label::kNear);
5160 if (emit_debug_code()) {
5161 // Check for impossible bit pattern.
5164 // Shift the mask to the second mark bit (via the add below); the shift
// may overflow, making the check conservative.
5165 addp(mask_scratch, mask_scratch);
5166 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5167 j(zero, &ok, Label::kNear);
5173 // Value is white. We check whether it is data that doesn't need scanning.
5174 // Currently only checks for HeapNumber and non-cons strings.
5175 Register map = rcx; // Holds map while checking type.
5176 Register length = rcx; // Holds length of object after checking type.
5177 Label not_heap_number;
5178 Label is_data_object;
5180 // Check for heap-number
5181 movp(map, FieldOperand(value, HeapObject::kMapOffset));
5182 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
5183 j(not_equal, &not_heap_number, Label::kNear);
5184 movp(length, Immediate(HeapNumber::kSize));
5185 jmp(&is_data_object, Label::kNear);
5187 bind(&not_heap_number);
5188 // Check for strings.
5189 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5190 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5191 // If it's a string and it's not a cons string then it's an object containing
5192 // no GC pointers.
5193 Register instance_type = rcx;
5194 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
5195 testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
5196 j(not_zero, value_is_white_and_not_data);
5197 // It's a non-indirect (non-cons and non-slice) string.
5198 // If it's external, the length is just ExternalString::kSize.
5199 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5201 // External strings are the only ones with the kExternalStringTag bit
5202 // set.
5203 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
5204 ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
5205 testb(instance_type, Immediate(kExternalStringTag));
5206 j(zero, &not_external, Label::kNear);
5207 movp(length, Immediate(ExternalString::kSize));
5208 jmp(&is_data_object, Label::kNear);
5210 bind(&not_external);
5211 // Sequential string, either ASCII or UC16.
5212 ASSERT(kOneByteStringTag == 0x04);
5213 andp(length, Immediate(kStringEncodingMask));
5214 xorp(length, Immediate(kStringEncodingMask));
5215 addp(length, Immediate(0x04));
5216 // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
5217 imulp(length, FieldOperand(value, String::kLengthOffset));
5218 shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
5219 addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
5220 andp(length, Immediate(~kObjectAlignmentMask));
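// Worked example of the encoding trick above: the masked encoding bit is
// mapped to 4 (ASCII) or 8 (two-byte), i.e. char size times 4. For a
// two-byte string of length 3 the multiply gives 8 * 3 (scaled up by the
// smi shift), the shift right by 2 plus the smi bits yields 6 payload
// bytes, and the addp/andp above round header plus payload up to an
// aligned object size.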
5222 bind(&is_data_object);
5223 // Value is a data object, and it is white. Mark it black. Since we know
5224 // that the object is white we can make it black by flipping one bit.
5225 orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5227 andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
5228 addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
5234 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5236 Register empty_fixed_array_value = r8;
5237 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5240 // Check if the enum length field is properly initialized, indicating that
5241 // there is an enum cache.
5242 movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
5244 EnumLength(rdx, rbx);
5245 Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
5246 j(equal, call_runtime);
5252 movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
5254 // For all objects but the receiver, check that the cache is empty.
5255 EnumLength(rdx, rbx);
5256 Cmp(rdx, Smi::FromInt(0));
5257 j(not_equal, call_runtime);
5261 // Check that there are no elements. Register rcx contains the current JS
5262 // object we've reached through the prototype chain.
5264 cmpp(empty_fixed_array_value,
5265 FieldOperand(rcx, JSObject::kElementsOffset));
5266 j(equal, &no_elements);
5268 // Second chance: the object may be using the empty slow element dictionary.
5269 LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
5270 cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
5271 j(not_equal, call_runtime);
5274 movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
5275 cmpp(rcx, null_value);
5276 j(not_equal, &next);
5279 void MacroAssembler::TestJSArrayForAllocationMemento(
5280 Register receiver_reg,
5281 Register scratch_reg,
5282 Label* no_memento_found) {
5283 ExternalReference new_space_start =
5284 ExternalReference::new_space_start(isolate());
5285 ExternalReference new_space_allocation_top =
5286 ExternalReference::new_space_allocation_top_address(isolate());
5288 leap(scratch_reg, Operand(receiver_reg,
5289 JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5290 Move(kScratchRegister, new_space_start);
5291 cmpp(scratch_reg, kScratchRegister);
5292 j(less, no_memento_found);
5293 cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
5294 j(greater, no_memento_found);
5295 CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
5296 Heap::kAllocationMementoMapRootIndex);
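// Layout assumed above, as a sketch: an AllocationMemento, when present,
// sits immediately after the JSArray, and scratch_reg is left pointing at
// the memento's end:
//
//   | JSArray (JSArray::kSize) | AllocationMemento (kSize) |
//   ^ receiver (untagged)      ^ memento map word          ^ scratch_reg
//
// The new-space range check guards the unconditional map read above.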
5300 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5305 ASSERT(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
5306 ASSERT(!scratch1.is(scratch0));
5307 Register current = scratch0;
5310 movp(current, object);
5312 // Loop based on the map going up the prototype chain.
5314 movp(current, FieldOperand(current, HeapObject::kMapOffset));
5315 movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
5316 andp(scratch1, Immediate(Map::kElementsKindMask));
5317 shrp(scratch1, Immediate(Map::kElementsKindShift));
5318 cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
5320 movp(current, FieldOperand(current, Map::kPrototypeOffset));
5321 CompareRoot(current, Heap::kNullValueRootIndex);
5322 j(not_equal, &loop_again);
5326 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
5327 ASSERT(!dividend.is(rax));
5328 ASSERT(!dividend.is(rdx));
5329 MultiplierAndShift ms(divisor);
5330 movl(rax, Immediate(ms.multiplier()));
5332 if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
5333 if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
5334 if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
5335 movl(rax, dividend);
5336 shrl(rax, Immediate(31));
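// Worked example of the multiply-and-shift division above: for divisor 3
// the magic multiplier is 0x55555556 with shift 0, so for dividend 100 the
// high 32 bits of 0x55555556 * 100 are 33, and adding the sign bit
// (dividend >> 31 == 0 here) leaves 33 == 100 / 3, truncated.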
5341 } } // namespace v8::internal
5343 #endif // V8_TARGET_ARCH_X64