// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
#include "src/serialize.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false),
      root_array_available_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


static const int64_t kInvalidRootRegisterDelta = -1;


int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
  if (predictable_code_size() &&
      (other.address() < reinterpret_cast<Address>(isolate()) ||
       other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
    return kInvalidRootRegisterDelta;
  }
  Address roots_register_value = kRootRegisterBias +
      reinterpret_cast<Address>(isolate()->heap()->roots_array_start());

  int64_t delta = kInvalidRootRegisterDelta;  // Bogus initialization.
  if (kPointerSize == kInt64Size) {
    delta = other.address() - roots_register_value;
  } else {
    // For x32, zero extend the address to 64-bit and calculate the delta.
    uint64_t o = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(other.address()));
    uint64_t r = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(roots_register_value));
    delta = o - r;
  }
  return delta;
}

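
// When the root array is available and we are not serializing, an external
// reference near the roots array can be addressed as a displacement off
// kRootRegister. This avoids materializing a full 64-bit address in a
// scratch register (a 10-byte movq) for every access.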
Operand MacroAssembler::ExternalOperand(ExternalReference target,
                                        Register scratch) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(target);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      return Operand(kRootRegister, static_cast<int32_t>(delta));
    }
  }
  Move(scratch, target);
  return Operand(scratch, 0);
}


void MacroAssembler::Load(Register destination, ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  if (destination.is(rax)) {
    load_rax(source);
  } else {
    Move(kScratchRegister, source);
    movp(destination, Operand(kScratchRegister, 0));
  }
}


void MacroAssembler::Store(ExternalReference destination, Register source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(destination);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
      return;
    }
  }
  // Safe code.
  if (source.is(rax)) {
    store_rax(destination);
  } else {
    Move(kScratchRegister, destination);
    movp(Operand(kScratchRegister, 0), source);
  }
}


void MacroAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  Move(destination, source);
}

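
// The size computed below mirrors the code emitted by LoadAddress: the
// root-relative form is a REX.W-prefixed lea, i.e. 1 byte REX + 1 byte
// opcode (8D) + 1 byte ModRM, plus a 1-byte or 4-byte displacement, for a
// total of 4 or 7 bytes.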
int MacroAssembler::LoadAddressSize(ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    // This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
    // instruction below.
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      // Operand is leap(scratch, Operand(kRootRegister, delta));
      // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
      int size = 4;
      if (!is_int8(static_cast<int32_t>(delta))) {
        size += 3;  // Need full four-byte displacement in lea.
      }
      return size;
    }
  }
  // Size of movp(destination, src);
  return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
}


void MacroAssembler::PushAddress(ExternalReference source) {
  int64_t address = reinterpret_cast<int64_t>(source.address());
  if (is_int32(address) && !serializer_enabled()) {
    if (emit_debug_code()) {
      Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
    }
    Push(Immediate(static_cast<int32_t>(address)));
    return;
  }
  LoadAddress(kScratchRegister, source);
  Push(kScratchRegister);
}

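
// Root accesses are addressed relative to kRootRegister, which points
// kRootRegisterBias bytes past the start of the root array so that more of
// the array is reachable with a short one-byte displacement.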
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  movp(destination, Operand(kRootRegister,
                            (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::LoadRootIndexed(Register destination,
                                     Register variable_offset,
                                     int fixed_offset) {
  DCHECK(root_array_available_);
  movp(destination,
       Operand(kRootRegister,
               variable_offset, times_pointer_size,
               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
       source);
}


void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  cmpp(with, Operand(kRootRegister,
                     (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  DCHECK(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  cmpp(with, kScratchRegister);
}

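
// The store buffer records addresses of slots that may hold old-to-new
// pointers. The helper below appends a slot address to the buffer and,
// when the buffer-top overflow bit becomes set, calls the
// StoreBufferOverflowStub to process and compact the buffer.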
void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register addr,
                                         Register scratch,
                                         SaveFPRegsMode save_fp,
                                         RememberedSetFinalAction and_then) {
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // Load store buffer top.
  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Store pointer to buffer.
  movp(Operand(scratch, 0), addr);
  // Increment buffer top.
  addp(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Call stub on end of buffer.
  Label done;
  // Check for end of buffer.
  testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    ret(0);
    bind(&buffer_overflowed);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  }
  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ret(0);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    bind(&done);
  }
}

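
// The non-serializing path below relies on the new space being a single
// contiguous region: adding the negated new-space start to the object and
// masking leaves zero exactly for new-space addresses, so the zero flag
// selects the branch.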
void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch,
                                Label::Distance distance) {
  if (serializer_enabled()) {
    // Can't do arithmetic on external references if it might get serialized.
    // The mask isn't really an address. We load it as an external reference in
    // case the size of the new space is different between the snapshot maker
    // and the running system.
    if (scratch.is(object)) {
      Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
      andp(scratch, kScratchRegister);
    } else {
      Move(scratch, ExternalReference::new_space_mask(isolate()));
      andp(scratch, object);
    }
    Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
    cmpp(scratch, kScratchRegister);
    j(cc, branch, distance);
  } else {
    DCHECK(kPointerSize == kInt64Size
        ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
        : kPointerSize == kInt32Size);
    intptr_t new_space_start =
        reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
    Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
         Assembler::RelocInfoNone());
    if (scratch.is(object)) {
      addp(scratch, kScratchRegister);
    } else {
      leap(scratch, Operand(object, kScratchRegister, times_1, 0));
    }
    andp(scratch,
         Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
    j(cc, branch, distance);
  }
}

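
// RecordWriteField and RecordWriteArray compute the slot address for a
// tagged field or array element and then defer to RecordWrite, which
// performs the generational and incremental-marking checks before calling
// the record-write stub.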
void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  leap(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    Label ok;
    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, Assembler::RelocInfoNone());
    Move(dst, kZapValue, Assembler::RelocInfoNone());
  }
}


void MacroAssembler::RecordWriteArray(
    Register object,
    Register value,
    Register index,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Array access: calculate the destination address. Index is not a smi.
  Register dst = index;
  leap(dst, Operand(object, index, times_pointer_size,
                    FixedArray::kHeaderSize - kHeapObjectTag));

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, Assembler::RelocInfoNone());
    Move(index, kZapValue, Assembler::RelocInfoNone());
  }
}

void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       SaveFPRegsMode fp_mode) {
  DCHECK(!object.is(kScratchRegister));
  DCHECK(!object.is(map));
  DCHECK(!object.is(dst));
  DCHECK(!map.is(dst));
  AssertNotSmi(object);

  if (emit_debug_code()) {
    Label ok;
    if (map.is(kScratchRegister)) pushq(map);
    CompareMap(map, isolate()->factory()->meta_map());
    if (map.is(kScratchRegister)) popq(map);
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    if (map.is(kScratchRegister)) pushq(map);
    cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
    if (map.is(kScratchRegister)) popq(map);
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // Compute the address.
  leap(dst, FieldOperand(object, HeapObject::kMapOffset));

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  // A single check of the map's page's interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set. This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(dst, kZapValue, Assembler::RelocInfoNone());
    Move(map, kZapValue, Assembler::RelocInfoNone());
  }
}

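
// Two page flags gate the barrier in RecordWrite:
// kPointersToHereAreInterestingMask on the value's page (is the target
// worth recording?) and kPointersFromHereAreInterestingMask on the object's
// page (does this page need scanning?). If either flag is clear, the stub
// call is skipped.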
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  DCHECK(!object.is(address));
  DCHECK(!value.is(address));
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    cmpp(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  zero,
                  &done,
                  Label::kNear);
  }

  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(address, kZapValue, Assembler::RelocInfoNone());
    Move(value, kZapValue, Assembler::RelocInfoNone());
  }
}

void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Label ok;
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedDoubleArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedCOWArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
  }
}


void MacroAssembler::Check(Condition cc, BailoutReason reason) {
  Label L;
  j(cc, &L, Label::kNear);
  Abort(reason);
  // Control will not return here.
  bind(&L);
}

void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    Label alignment_as_expected;
    testp(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}


void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  testl(result, result);
  j(not_zero, &ok, Label::kNear);
  testl(op, op);
  j(sign, then_label);
  bind(&ok);
}

void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    int3();
    return;
  }
#endif

  Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
       Assembler::RelocInfoNone());
  Push(kScratchRegister);

  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 1);
  } else {
    CallRuntime(Runtime::kAbort, 1);
  }
  // Control will not return here.
  int3();
}

void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::StubReturn(int argc) {
  DCHECK(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();
}


void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  if (!hash.is(index)) {
    movl(index, hash);
  }
  DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
}

void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  LoadAddress(rbx, ExternalReference(f, isolate()));
  CEntryStub ces(isolate(), f->result_size, save_doubles);
  CallStub(&ces);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Set(rax, num_arguments);
  LoadAddress(rbx, ext);

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // ----------- S t a t e -------------
  //  -- rsp[0]                 : return address
  //  -- rsp[8]                 : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  // -----------------------------------

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  JumpToExternalReference(ext, result_size);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             int result_size) {
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  CEntryStub ces(isolate(), result_size);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}

void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinEntry(rdx, id);
  InvokeCode(rdx, expected, expected, flag, call_wrapper);
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  movp(target, FieldOperand(target,
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  DCHECK(!target.is(rdi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(rdi, id);
  movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}

#define REG(Name) { kRegister_ ## Name ## _Code }

static const Register saved_regs[] = {
  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
  REG(r9), REG(r10), REG(r11)
};

#undef REG

static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);


void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                     Register exclusion1,
                                     Register exclusion2,
                                     Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      pushq(reg);
    }
  }
  // R12 to r15 are callee-save on all platforms.
  if (fp_mode == kSaveFPRegs) {
    subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(rsp, i * kDoubleSize), reg);
    }
  }
}


void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
                                    Register exclusion1,
                                    Register exclusion2,
                                    Register exclusion3) {
  if (fp_mode == kSaveFPRegs) {
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(rsp, i * kDoubleSize));
    }
    addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
  }
  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      popq(reg);
    }
  }
}

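
// cvtlsi2sd leaves the upper bits of the destination XMM register
// unchanged, which creates a false dependence on its previous contents;
// the xorps below breaks that dependence.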
void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
  xorps(dst, dst);
  cvtlsi2sd(dst, src);
}


void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
  xorps(dst, dst);
  cvtlsi2sd(dst, src);
}


void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    movsxbq(dst, src);
  } else if (r.IsUInteger8()) {
    movzxbl(dst, src);
  } else if (r.IsInteger16()) {
    movsxwq(dst, src);
  } else if (r.IsUInteger16()) {
    movzxwl(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);
  } else {
    movp(dst, src);
  }
}


void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    movb(dst, src);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    movw(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    movp(dst, src);
  }
}

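
// Set picks the shortest encoding for the constant: xorl for zero, a
// zero-extending movl for values that fit an unsigned 32-bit immediate, a
// sign-extending 32-bit movq immediate otherwise, and a full 64-bit movq
// as a last resort.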
void MacroAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(dst, x);
  }
}


void MacroAssembler::Set(const Operand& dst, intptr_t x) {
  if (kPointerSize == kInt64Size) {
    if (is_int32(x)) {
      movp(dst, Immediate(static_cast<int32_t>(x)));
    } else {
      Set(kScratchRegister, x);
      movp(dst, kScratchRegister);
    }
  } else {
    movp(dst, Immediate(static_cast<int32_t>(x)));
  }
}

// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.

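// With 32-bit smis (SmiValuesAre32Bits), the value lives in the upper 32
// bits of the word and kSmiShift is 32; with 31-bit smis the value is
// shifted left by one. kSmiTag is 0 in both schemes, so the low bit
// distinguishes smis from heap object pointers.
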
bool MacroAssembler::IsUnsafeInt(const int32_t x) {
  static const int kMaxBits = 17;
  return !is_intn(x, kMaxBits);
}

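
// Untrusted immediates wider than kMaxBits are XORed with the JIT cookie
// when emitted and un-XORed at run time, so attacker-chosen constants never
// appear verbatim in generated code (a JIT-spraying mitigation).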
void MacroAssembler::SafeMove(Register dst, Smi* src) {
  DCHECK(!dst.is(kScratchRegister));
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(dst, kScratchRegister);
    } else {
      DCHECK(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      movp(dst, Immediate(value ^ jit_cookie()));
      xorp(dst, Immediate(jit_cookie()));
    }
  } else {
    Move(dst, src);
  }
}


void MacroAssembler::SafePush(Smi* src) {
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Push(Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(Operand(rsp, 0), kScratchRegister);
    } else {
      DCHECK(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      Push(Immediate(value ^ jit_cookie()));
      xorp(Operand(rsp, 0), Immediate(jit_cookie()));
    }
  } else {
    Push(src);
  }
}

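
// kSmiConstantRegister permanently holds Smi::FromInt(1). GetSmiConstant
// and LoadSmiConstant exploit this to synthesize small smi constants with
// short lea/xor sequences instead of 10-byte movq immediates.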
Register MacroAssembler::GetSmiConstant(Smi* source) {
  int value = source->value();
  if (value == 0) {
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
  }
  if (value == 1) {
    return kSmiConstantRegister;
  }
  LoadSmiConstant(kScratchRegister, source);
  return kScratchRegister;
}


void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
  if (emit_debug_code()) {
    Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
         Assembler::RelocInfoNone());
    cmpp(dst, kSmiConstantRegister);
    Assert(equal, kUninitializedKSmiConstantRegister);
  }
  int value = source->value();
  if (value == 0) {
    xorl(dst, dst);
    return;
  }
  bool negative = value < 0;
  unsigned int uvalue = negative ? -value : value;

  switch (uvalue) {
    case 9:
      leap(dst,
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
      break;
    case 8:
      xorl(dst, dst);
      leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
      break;
    case 4:
      xorl(dst, dst);
      leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
      break;
    case 5:
      leap(dst,
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
      break;
    case 3:
      leap(dst,
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
      break;
    case 2:
      leap(dst,
           Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
      break;
    case 1:
      movp(dst, kSmiConstantRegister);
      break;
    case 0:
      UNREACHABLE();
      return;
    default:
      Move(dst, source, Assembler::RelocInfoNone());
      return;
  }
  if (negative) {
    negp(dst);
  }
}

void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  shlp(dst, Immediate(kSmiShift));
}


void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    testb(dst, Immediate(0x01));
    Label ok;
    j(zero, &ok, Label::kNear);
    Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
    bind(&ok);
  }

  if (SmiValuesAre32Bits()) {
    DCHECK(kSmiShift % kBitsPerByte == 0);
    movl(Operand(dst, kSmiShift / kBitsPerByte), src);
  } else {
    DCHECK(SmiValuesAre31Bits());
    Integer32ToSmi(kScratchRegister, src);
    movp(dst, kScratchRegister);
  }
}


void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                Register src,
                                                int constant) {
  if (dst.is(src)) {
    addl(dst, Immediate(constant));
  } else {
    leal(dst, Operand(src, constant));
  }
  shlp(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movp(dst, src);
  }

  if (SmiValuesAre32Bits()) {
    shrp(dst, Immediate(kSmiShift));
  } else {
    DCHECK(SmiValuesAre31Bits());
    sarl(dst, Immediate(kSmiShift));
  }
}


void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movl(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movl(dst, src);
    sarl(dst, Immediate(kSmiShift));
  }
}


void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movp(dst, src);
  }
  sarp(dst, Immediate(kSmiShift));
  if (kPointerSize == kInt32Size) {
    // Sign extend to 64-bit.
    movsxlq(dst, dst);
  }
}


void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movp(dst, src);
    SmiToInteger64(dst, dst);
  }
}

void MacroAssembler::SmiTest(Register src) {
  AssertSmi(src);
  testp(src, src);
}


void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
  AssertSmi(smi1);
  AssertSmi(smi2);
  cmpp(smi1, smi2);
}


void MacroAssembler::SmiCompare(Register dst, Smi* src) {
  AssertSmi(dst);
  Cmp(dst, src);
}


void MacroAssembler::Cmp(Register dst, Smi* src) {
  DCHECK(!dst.is(kScratchRegister));
  if (src->value() == 0) {
    testp(dst, dst);
  } else {
    Register constant_reg = GetSmiConstant(src);
    cmpp(dst, constant_reg);
  }
}


void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
  AssertSmi(dst);
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
  } else {
    DCHECK(SmiValuesAre31Bits());
    cmpl(dst, Immediate(src));
  }
}


void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  DCHECK(!dst.AddressUsesRegister(smi_reg));
  cmpp(dst, smi_reg);
}


void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
  } else {
    DCHECK(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, dst);
    cmpl(kScratchRegister, src);
  }
}

void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                           Register src,
                                                           int power) {
  DCHECK(power >= 0);
  DCHECK(power < 64);
  if (power == 0) {
    SmiToInteger64(dst, src);
    return;
  }
  if (!dst.is(src)) {
    movp(dst, src);
  }
  if (power < kSmiShift) {
    sarp(dst, Immediate(kSmiShift - power));
  } else if (power > kSmiShift) {
    shlp(dst, Immediate(power - kSmiShift));
  }
}


void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                         Register src,
                                                         int power) {
  DCHECK((0 <= power) && (power < 32));
  if (dst.is(src)) {
    shrp(dst, Immediate(power + kSmiShift));
  } else {
    UNIMPLEMENTED();  // Not used.
  }
}


void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 Label* on_not_smis,
                                 Label::Distance near_jump) {
  if (dst.is(src1) || dst.is(src2)) {
    DCHECK(!src1.is(kScratchRegister));
    DCHECK(!src2.is(kScratchRegister));
    movp(kScratchRegister, src1);
    orp(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
    movp(dst, kScratchRegister);
  } else {
    movp(dst, src1);
    orp(dst, src2);
    JumpIfNotSmi(dst, on_not_smis, near_jump);
  }
}

Condition MacroAssembler::CheckSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckSmi(const Operand& src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}

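
// Rotating left by one moves the sign bit into bit 0 and the smi tag into
// bit 1, so a single testb against 3 checks "is a smi" and "is
// non-negative" at once.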
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  // Test that both bits of the mask 0x8000000000000001 are zero.
  movp(kScratchRegister, src);
  rolp(kScratchRegister, Immediate(1));
  testb(kScratchRegister, Immediate(3));
  return zero;
}


Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
  if (SmiValuesAre32Bits()) {
    leal(kScratchRegister, Operand(first, second, times_1, 0));
    testb(kScratchRegister, Immediate(0x03));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movl(kScratchRegister, first);
    orl(kScratchRegister, second);
    testb(kScratchRegister, Immediate(kSmiTagMask));
  }
  return zero;
}


Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
                                                  Register second) {
  if (first.is(second)) {
    return CheckNonNegativeSmi(first);
  }
  movp(kScratchRegister, first);
  orp(kScratchRegister, second);
  rolp(kScratchRegister, Immediate(1));
  testl(kScratchRegister, Immediate(3));
  return zero;
}


Condition MacroAssembler::CheckEitherSmi(Register first,
                                         Register second,
                                         Register scratch) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  if (scratch.is(second)) {
    andl(scratch, first);
  } else {
    if (!scratch.is(first)) {
      movl(scratch, first);
    }
    andl(scratch, second);
  }
  testb(scratch, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckIsMinSmi(Register src) {
  DCHECK(!src.is(kScratchRegister));
  // If we overflow by subtracting one, it's the minimal smi value.
  cmpp(src, kSmiConstantRegister);
  return overflow;
}


Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // A 32-bit integer value can always be converted to a smi.
    return always;
  } else {
    DCHECK(SmiValuesAre31Bits());
    cmpl(src, Immediate(0xc0000000));
    return positive;
  }
}


Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // An unsigned 32-bit integer value is valid as long as the high bit
    // is not set.
    testl(src, src);
    return positive;
  } else {
    DCHECK(SmiValuesAre31Bits());
    testl(src, Immediate(0xc0000000));
    return zero;
  }
}


void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
  if (dst.is(src)) {
    andl(dst, Immediate(kSmiTagMask));
  } else {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  }
}


void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
  if (!(src.AddressUsesRegister(dst))) {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  } else {
    movl(dst, src);
    andl(dst, Immediate(kSmiTagMask));
  }
}

void MacroAssembler::JumpIfValidSmiValue(Register src,
                                         Label* on_valid,
                                         Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(is_valid, on_valid, near_jump);
}


void MacroAssembler::JumpIfNotValidSmiValue(Register src,
                                            Label* on_invalid,
                                            Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}


void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
                                             Label* on_valid,
                                             Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(is_valid, on_valid, near_jump);
}


void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
                                                Label* on_invalid,
                                                Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}


void MacroAssembler::JumpIfSmi(Register src,
                               Label* on_smi,
                               Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(smi, on_smi, near_jump);
}


void MacroAssembler::JumpIfNotSmi(Register src,
                                  Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}


void MacroAssembler::JumpUnlessNonNegativeSmi(
    Register src, Label* on_not_smi_or_negative,
    Label::Distance near_jump) {
  Condition non_negative_smi = CheckNonNegativeSmi(src);
  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
}


void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             Smi* constant,
                                             Label* on_equals,
                                             Label::Distance near_jump) {
  SmiCompare(src, constant);
  j(equal, on_equals, near_jump);
}


void MacroAssembler::JumpIfNotBothSmi(Register src1,
                                      Register src2,
                                      Label* on_not_both_smi,
                                      Label::Distance near_jump) {
  Condition both_smi = CheckBothSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}


void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
                                                  Register src2,
                                                  Label* on_not_both_smi,
                                                  Label::Distance near_jump) {
  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}

void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    switch (constant->value()) {
      case 1:
        addp(dst, kSmiConstantRegister);
        return;
      case 2:
        leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        return;
      case 4:
        leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        return;
      case 8:
        leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        return;
      default: {
        Register constant_reg = GetSmiConstant(constant);
        addp(dst, constant_reg);
        return;
      }
    }
  } else {
    switch (constant->value()) {
      case 1:
        leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
        return;
      case 2:
        leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        return;
      case 4:
        leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        return;
      case 8:
        leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        return;
      default:
        LoadSmiConstant(dst, constant);
        addp(dst, src);
        return;
    }
  }
}

void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
  if (constant->value() != 0) {
    if (SmiValuesAre32Bits()) {
      addl(Operand(dst, kSmiShift / kBitsPerByte),
           Immediate(constant->value()));
    } else {
      DCHECK(SmiValuesAre31Bits());
      addp(dst, Immediate(constant));
    }
  }
}

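
// The mode-taking variants below handle overflow in-line: when the source
// register must be preserved, the addition (or subtraction) is undone by
// the inverse operation before jumping to the bailout label.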
void MacroAssembler::SmiAddConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    SmiOperationExecutionMode mode,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    addp(dst, kScratchRegister);
    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
      j(no_overflow, bailout_label, near_jump);
      DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
      subp(dst, kScratchRegister);
    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
        Label done;
        j(no_overflow, &done, Label::kNear);
        subp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        bind(&done);
      } else {
        // Bailout if overflow without preserving src.
        j(overflow, bailout_label, near_jump);
      }
    } else {
      CHECK(mode.IsEmpty());
    }
  } else {
    DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
    DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
    LoadSmiConstant(dst, constant);
    addp(dst, src);
    j(overflow, bailout_label, near_jump);
  }
}

void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    subp(dst, constant_reg);
  } else {
    if (constant->value() == Smi::kMinValue) {
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result; it only
      // differs in the overflow flag, which we don't check here.
      addp(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
      addp(dst, src);
    }
  }
}


void MacroAssembler::SmiSubConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    SmiOperationExecutionMode mode,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    subp(dst, kScratchRegister);
    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
      j(no_overflow, bailout_label, near_jump);
      DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
      addp(dst, kScratchRegister);
    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
        Label done;
        j(no_overflow, &done, Label::kNear);
        addp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        bind(&done);
      } else {
        // Bailout if overflow without preserving src.
        j(overflow, bailout_label, near_jump);
      }
    } else {
      CHECK(mode.IsEmpty());
    }
  } else {
    DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
    DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
    if (constant->value() == Smi::kMinValue) {
      DCHECK(!dst.is(kScratchRegister));
      movp(dst, src);
      LoadSmiConstant(kScratchRegister, constant);
      subp(dst, kScratchRegister);
      j(overflow, bailout_label, near_jump);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
      addp(dst, src);
      j(overflow, bailout_label, near_jump);
    }
  }
}

void MacroAssembler::SmiNeg(Register dst,
                            Register src,
                            Label* on_smi_result,
                            Label::Distance near_jump) {
  if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    movp(kScratchRegister, src);
    negp(dst);  // Low 32 bits are retained as zero by negation.
    // Test if result is zero or Smi::kMinValue.
    cmpp(dst, kScratchRegister);
    j(not_equal, on_smi_result, near_jump);
    movp(src, kScratchRegister);
  } else {
    movp(dst, src);
    negp(dst);
    cmpp(dst, src);
    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
    j(not_equal, on_smi_result, near_jump);
  }
}


template<class T>
static void SmiAddHelper(MacroAssembler* masm,
                         Register dst,
                         Register src1,
                         T src2,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
  if (dst.is(src1)) {
    Label done;
    masm->addp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    // Restore src1.
    masm->subp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->bind(&done);
  } else {
    masm->movp(dst, src1);
    masm->addp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!dst.is(src2));
  SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!src2.AddressUsesRegister(dst));
  SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible.
  if (!dst.is(src1)) {
    if (emit_debug_code()) {
      movp(kScratchRegister, src1);
      addp(kScratchRegister, src2);
      Check(no_overflow, kSmiAdditionOverflow);
    }
    leap(dst, Operand(src1, src2, times_1, 0));
  } else {
    addp(dst, src2);
    Assert(no_overflow, kSmiAdditionOverflow);
  }
}

template<class T>
static void SmiSubHelper(MacroAssembler* masm,
                         Register dst,
                         Register src1,
                         T src2,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
  if (dst.is(src1)) {
    Label done;
    masm->subp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    // Restore src1.
    masm->addp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->bind(&done);
  } else {
    masm->movp(dst, src1);
    masm->subp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!dst.is(src2));
  SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!src2.AddressUsesRegister(dst));
  SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


template<class T>
static void SmiSubNoOverflowHelper(MacroAssembler* masm,
                                   Register dst,
                                   Register src1,
                                   T src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  if (!dst.is(src1)) {
    masm->movp(dst, src1);
  }
  masm->subp(dst, src2);
  masm->Assert(no_overflow, kSmiSubtractionOverflow);
}


void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
  DCHECK(!dst.is(src2));
  SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2) {
  SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
}

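
// Multiplying an untagged value by a still-tagged one yields a correctly
// tagged product: a * (b << kSmiShift) == (a * b) << kSmiShift. SmiMul
// therefore untags only one operand before the imul.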
void MacroAssembler::SmiMul(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!dst.is(src2));
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));

  if (dst.is(src1)) {
    Label failure, zero_correct_result;
    movp(kScratchRegister, src1);  // Create backup for later testing.
    SmiToInteger64(dst, src1);
    imulp(dst, src2);
    j(overflow, &failure, Label::kNear);

    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testp(dst, dst);
    j(not_zero, &correct_result, Label::kNear);

    movp(dst, kScratchRegister);
    xorp(dst, src2);
    // Result was positive zero.
    j(positive, &zero_correct_result, Label::kNear);

    bind(&failure);  // Reused failure exit, restores src1.
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);

    bind(&zero_correct_result);
    Set(dst, 0);

    bind(&correct_result);
  } else {
    SmiToInteger64(dst, src1);
    imulp(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testp(dst, dst);
    j(not_zero, &correct_result, Label::kNear);
    // One of src1 and src2 is zero; check whether the other is negative.
    movp(kScratchRegister, src1);
    xorp(kScratchRegister, src2);
    j(negative, on_not_smi_result, near_jump);
    bind(&correct_result);
  }
}

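
// Division uses the fixed x64 idiv convention: the dividend is
// sign-extended from eax into edx:eax by cdq, and idivl leaves the quotient
// in eax and the remainder in edx. That is why rax and rdx are reserved by
// the DCHECKs below.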
void MacroAssembler::SmiDiv(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src2.is(rax));
  DCHECK(!src2.is(rdx));
  DCHECK(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  testp(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movp(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  // We need to rule out dividing Smi::kMinValue by -1, since that would
  // overflow in idiv and raise an exception.
  // We combine this with negative zero test (negative zero only happens
  // when dividing zero by a negative number).

  // We overshoot a little and go to slow case if we divide min-value
  // by any negative value, not just -1.
  Label safe_div;
  testl(rax, Immediate(~Smi::kMinValue));
  j(not_zero, &safe_div, Label::kNear);
  testp(src2, src2);
  if (src1.is(rax)) {
    j(positive, &safe_div, Label::kNear);
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
  } else {
    j(negative, on_not_smi_result, near_jump);
  }
  bind(&safe_div);

  SmiToInteger32(src2, src2);
  // Sign extend src1 into edx:eax.
  cdq();
  idivl(src2);
  Integer32ToSmi(src2, src2);
  // Check that the remainder is zero.
  testl(rdx, rdx);
  if (src1.is(rax)) {
    Label smi_result;
    j(zero, &smi_result, Label::kNear);
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
    bind(&smi_result);
  } else {
    j(not_zero, on_not_smi_result, near_jump);
  }
  if (!dst.is(src1) && src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  Integer32ToSmi(dst, rax);
}

void MacroAssembler::SmiMod(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!src2.is(rax));
  DCHECK(!src2.is(rdx));
  DCHECK(!src1.is(rdx));
  DCHECK(!src1.is(src2));

  testp(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movp(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  SmiToInteger32(src2, src2);

  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
  Label safe_div;
  cmpl(rax, Immediate(Smi::kMinValue));
  j(not_equal, &safe_div, Label::kNear);
  cmpl(src2, Immediate(-1));
  j(not_equal, &safe_div, Label::kNear);
  // Retag inputs and go slow case.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  jmp(on_not_smi_result, near_jump);
  bind(&safe_div);

  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  // Restore smi tags on inputs.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  // Check for a negative zero result. If the result is zero, and the
  // dividend is negative, go slow to return a floating point negative zero.
  Label smi_result;
  testl(rdx, rdx);
  j(not_zero, &smi_result, Label::kNear);
  testp(src1, src1);
  j(negative, on_not_smi_result, near_jump);
  bind(&smi_result);
  Integer32ToSmi(dst, rdx);
}

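
// A plain NOT would flip the tag bits as well. SmiNot first sets exactly
// the bits that the NOT must leave zero (the tag/padding bits), so the
// final notp produces the smi for ~value with a valid tag.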
void MacroAssembler::SmiNot(Register dst, Register src) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src.is(kScratchRegister));
  if (SmiValuesAre32Bits()) {
    // Set tag and padding bits before negating, so that they are zero
    // afterwards.
    movl(kScratchRegister, Immediate(~0));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movl(kScratchRegister, Immediate(1));
  }
  if (dst.is(src)) {
    xorp(dst, kScratchRegister);
  } else {
    leap(dst, Operand(src, kScratchRegister, times_1, 0));
  }
  notp(dst);
}


void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
  DCHECK(!dst.is(src2));
  if (!dst.is(src1)) {
    movp(dst, src1);
  }
  andp(dst, src2);
}


void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    Set(dst, 0);
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    andp(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    andp(dst, src);
  }
}


void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    DCHECK(!src1.is(src2));
    movp(dst, src1);
  }
  orp(dst, src2);
}


void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    orp(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    orp(dst, src);
  }
}


void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    DCHECK(!src1.is(src2));
    movp(dst, src1);
  }
  xorp(dst, src2);
}


void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    xorp(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    xorp(dst, src);
  }
}

void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                     Register src,
                                                     int shift_value) {
  DCHECK(is_uint5(shift_value));
  if (shift_value > 0) {
    if (dst.is(src)) {
      sarp(dst, Immediate(shift_value + kSmiShift));
      shlp(dst, Immediate(kSmiShift));
    } else {
      UNIMPLEMENTED();  // Not used.
    }
  }
}

void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                          Register src,
                                          int shift_value,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  if (SmiValuesAre32Bits()) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
    if (shift_value > 0) {
      // The shift amount is specified by the lower 5 bits, not six as in the
      // shl opcode.
      shlq(dst, Immediate(shift_value & 0x1f));
    }
  } else {
    DCHECK(SmiValuesAre31Bits());
    if (dst.is(src)) {
      UNIMPLEMENTED();  // Not used.
    } else {
      SmiToInteger32(dst, src);
      shll(dst, Immediate(shift_value));
      JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
      Integer32ToSmi(dst, dst);
    }
  }
}


void MacroAssembler::SmiShiftLogicalRightConstant(
    Register dst, Register src, int shift_value,
    Label* on_not_smi_result, Label::Distance near_jump) {
  // Logical right shift interprets its result as an *unsigned* number.
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    if (shift_value == 0) {
      testp(src, src);
      j(negative, on_not_smi_result, near_jump);
    }
    if (SmiValuesAre32Bits()) {
      movp(dst, src);
      shrp(dst, Immediate(shift_value + kSmiShift));
      shlp(dst, Immediate(kSmiShift));
    } else {
      DCHECK(SmiValuesAre31Bits());
      SmiToInteger32(dst, src);
      shrp(dst, Immediate(shift_value));
      JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
      Integer32ToSmi(dst, dst);
    }
  }
}

void MacroAssembler::SmiShiftLeft(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smi_result,
                                  Label::Distance near_jump) {
  if (SmiValuesAre32Bits()) {
    DCHECK(!dst.is(rcx));
    if (!dst.is(src1)) {
      movp(dst, src1);
    }
    // Untag shift amount.
    SmiToInteger32(rcx, src2);
    // The shift amount is specified by the lower 5 bits, not six as in the
    // shl opcode.
    andp(rcx, Immediate(0x1f));
    shlq_cl(dst);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DCHECK(!dst.is(kScratchRegister));
    DCHECK(!src1.is(kScratchRegister));
    DCHECK(!src2.is(kScratchRegister));
    DCHECK(!dst.is(src2));
    DCHECK(!dst.is(rcx));

    if (src1.is(rcx) || src2.is(rcx)) {
      movq(kScratchRegister, rcx);
    }
    if (dst.is(src1)) {
      UNIMPLEMENTED();  // Not used.
    } else {
      Label valid_result;
      SmiToInteger32(dst, src1);
      SmiToInteger32(rcx, src2);
      shll_cl(dst);
      JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
      // Since neither src1 nor src2 can be dst, we do not need to restore
      // them on bailout; only rcx (the shift count) may need restoring.
      if (src1.is(rcx) || src2.is(rcx)) {
        if (src1.is(rcx)) {
          movq(src1, kScratchRegister);
        } else {
          movq(src2, kScratchRegister);
        }
      }
      jmp(on_not_smi_result, near_jump);
      bind(&valid_result);
      Integer32ToSmi(dst, dst);
    }
  }
}


void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Register src1,
                                          Register src2,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(src2));
  DCHECK(!dst.is(rcx));
  if (src1.is(rcx) || src2.is(rcx)) {
    movq(kScratchRegister, rcx);
  }
  if (dst.is(src1)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    Label valid_result;
    SmiToInteger32(dst, src1);
    SmiToInteger32(rcx, src2);
    shrl_cl(dst);
    JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
    // Since neither src1 nor src2 can be dst, we do not need to restore
    // them on bailout; only rcx (the shift count) may need restoring.
    if (src1.is(rcx) || src2.is(rcx)) {
      if (src1.is(rcx)) {
        movq(src1, kScratchRegister);
      } else {
        movq(src2, kScratchRegister);
      }
    }
    jmp(on_not_smi_result, near_jump);
    bind(&valid_result);
    Integer32ToSmi(dst, dst);
  }
}


void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                             Register src1,
                                             Register src2) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(rcx));

  SmiToInteger32(rcx, src2);
  if (!dst.is(src1)) {
    movp(dst, src1);
  }
  SmiToInteger32(dst, dst);
  sarl_cl(dst);
  Integer32ToSmi(dst, dst);
}

void MacroAssembler::SelectNonSmi(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smis,
                                  Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(src1));
  DCHECK(!dst.is(src2));
  // Both operands must not be smis.
#ifdef DEBUG
  Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
  Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
#endif
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
  movl(kScratchRegister, Immediate(kSmiTagMask));
  andp(kScratchRegister, src1);
  testl(kScratchRegister, src2);
  // If non-zero, both src1 and src2 are not smis.
  j(not_zero, on_not_smis, near_jump);

  // Exactly one operand is a smi.
  DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
  subp(kScratchRegister, Immediate(1));
  // If src1 is a smi, then the scratch register is all 1s, else it is all 0s.
  movp(dst, src1);
  xorp(dst, src2);
  andp(dst, kScratchRegister);
  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
  xorp(dst, src2);
  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}

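
// SmiIndex folds smi untagging into a subsequent memory operand: instead
// of fully untagging, the value is shifted so that the remaining factor
// can be expressed as the operand's ScaleFactor.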
SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                    Register src,
                                    int shift) {
  if (SmiValuesAre32Bits()) {
    DCHECK(is_uint6(shift));
    // There is a possible optimization if shift is in the range 60-63, but that
    // will (and must) never happen.
    if (!dst.is(src)) {
      movp(dst, src);
    }
    if (shift < kSmiShift) {
      sarp(dst, Immediate(kSmiShift - shift));
    } else {
      shlp(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    // We have to sign extend the index register to 64-bit as the SMI might
    // be negative.
    movsxlq(dst, dst);
    if (shift == times_1) {
      sarq(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    }
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
  }
}


SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                            Register src,
                                            int shift) {
  if (SmiValuesAre32Bits()) {
    // Register src holds a positive smi.
    DCHECK(is_uint6(shift));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    negp(dst);
    if (shift < kSmiShift) {
      sarp(dst, Immediate(kSmiShift - shift));
    } else {
      shlp(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    negq(dst);
    if (shift == times_1) {
      sarq(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    }
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
  }
}


void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    DCHECK_EQ(0, kSmiShift % kBitsPerByte);
    addl(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, src);
    addl(dst, kScratchRegister);
  }
}

void MacroAssembler::Push(Smi* source) {
  intptr_t smi = reinterpret_cast<intptr_t>(source);
  if (is_int32(smi)) {
    Push(Immediate(static_cast<int32_t>(smi)));
  } else {
    Register constant = GetSmiConstant(source);
    Push(constant);
  }
}

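
// Splitting a raw word into two smi halves keeps untagged data disguised as
// smis while it lives on the stack, so a GC walking the stack never
// misinterprets it as a heap pointer.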
void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
  DCHECK(!src.is(scratch));
  movp(scratch, src);
  // High bits.
  shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
  shlp(src, Immediate(kSmiShift));
  Push(src);
  // Low bits.
  shlp(scratch, Immediate(kSmiShift));
  Push(scratch);
}


void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
  DCHECK(!dst.is(scratch));
  Pop(scratch);
  // Low bits.
  shrp(scratch, Immediate(kSmiShift));
  Pop(dst);
  shrp(dst, Immediate(kSmiShift));
  // High bits.
  shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
  orp(dst, scratch);
}


void MacroAssembler::Test(const Operand& src, Smi* source) {
  if (SmiValuesAre32Bits()) {
    testl(Operand(src, kIntSize), Immediate(source->value()));
  } else {
    DCHECK(SmiValuesAre31Bits());
    testl(src, Immediate(source));
  }
}

2347 // ----------------------------------------------------------------------------
2350 void MacroAssembler::LookupNumberStringCache(Register object,
2355 // Use of registers. Register result is used as a temporary.
2356 Register number_string_cache = result;
2357 Register mask = scratch1;
2358 Register scratch = scratch2;
2360 // Load the number string cache.
2361 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2363 // Make the hash mask from the length of the number string cache. It
2364 // contains two elements (number and string) for each cache entry.
2366 mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2367 shrl(mask, Immediate(1));
2368 subp(mask, Immediate(1)); // Make mask.
2370 // Calculate the entry in the number string cache. The hash value in the
2371 // number string cache for smis is just the smi value, and the hash for
2372 // doubles is the xor of the upper and lower words. See
2373 // Heap::GetNumberStringCache.
2375 Label load_result_from_cache;
2376 JumpIfSmi(object, &is_smi);
2378 isolate()->factory()->heap_number_map(),
2382 STATIC_ASSERT(8 == kDoubleSize);
2383 movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2384 xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2385 andp(scratch, mask);
2386 // Each entry in string cache consists of two pointer sized fields,
2387 // but times_twice_pointer_size (multiplication by 16) scale factor
2388 // is not supported by addrmode on x64 platform.
2389 // So we have to premultiply entry index before lookup.
2390 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2392 Register index = scratch;
2393 Register probe = mask;
2395 FieldOperand(number_string_cache,
2398 FixedArray::kHeaderSize));
2399 JumpIfSmi(probe, not_found);
2400 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2401 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
2402 j(parity_even, not_found); // Bail out if NaN is involved.
2403 j(not_equal, not_found); // The cache did not contain this value.
2404 jmp(&load_result_from_cache);
2406 bind(&is_smi);
2407 SmiToInteger32(scratch, object);
2408 andp(scratch, mask);
2409 // Each entry in the string cache consists of two pointer-sized fields,
2410 // but the times_twice_pointer_size (multiply by 16) scale factor is not
2411 // supported by the x64 addressing modes, so the entry index has to be
2412 // premultiplied before the lookup.
2413 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2415 // Check if the entry is the smi we are looking for.
2416 cmpp(object,
2417 FieldOperand(number_string_cache,
2418 index,
2419 times_1,
2420 FixedArray::kHeaderSize));
2421 j(not_equal, not_found);
2423 // Get the result from the cache.
2424 bind(&load_result_from_cache);
2425 movp(result,
2426 FieldOperand(number_string_cache,
2427 index,
2428 times_1,
2429 FixedArray::kHeaderSize + kPointerSize));
2430 IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
2431 }
2434 void MacroAssembler::JumpIfNotString(Register object,
2435 Register object_map,
2436 Label* not_string,
2437 Label::Distance near_jump) {
2438 Condition is_smi = CheckSmi(object);
2439 j(is_smi, not_string, near_jump);
2440 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2441 j(above_equal, not_string, near_jump);
2442 }
2445 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
2446 Register first_object, Register second_object, Register scratch1,
2447 Register scratch2, Label* on_fail, Label::Distance near_jump) {
2448 // Check that both objects are not smis.
2449 Condition either_smi = CheckEitherSmi(first_object, second_object);
2450 j(either_smi, on_fail, near_jump);
2452 // Load instance type for both strings.
2453 movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2454 movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2455 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2456 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2458 // Check that both are flat one-byte strings.
2459 DCHECK(kNotStringTag != 0);
2460 const int kFlatOneByteStringMask =
2461 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2462 const int kFlatOneByteStringTag =
2463 kStringTag | kOneByteStringTag | kSeqStringTag;
2465 andl(scratch1, Immediate(kFlatOneByteStringMask));
2466 andl(scratch2, Immediate(kFlatOneByteStringMask));
2467 // Interleave the bits to check both scratch1 and scratch2 in one test.
2468 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2469 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2470 cmpl(scratch1,
2471 Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2472 j(not_equal, on_fail, near_jump);
2473 }
2476 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
2477 Register instance_type, Register scratch, Label* failure,
2478 Label::Distance near_jump) {
2479 if (!scratch.is(instance_type)) {
2480 movl(scratch, instance_type);
2481 }
2483 const int kFlatOneByteStringMask =
2484 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2486 andl(scratch, Immediate(kFlatOneByteStringMask));
2487 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2488 j(not_equal, failure, near_jump);
2489 }
2492 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2493 Register first_object_instance_type, Register second_object_instance_type,
2494 Register scratch1, Register scratch2, Label* on_fail,
2495 Label::Distance near_jump) {
2496 // Load instance type for both strings.
2497 movp(scratch1, first_object_instance_type);
2498 movp(scratch2, second_object_instance_type);
2500 // Check that both are flat one-byte strings.
2501 DCHECK(kNotStringTag != 0);
2502 const int kFlatOneByteStringMask =
2503 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2504 const int kFlatOneByteStringTag =
2505 kStringTag | kOneByteStringTag | kSeqStringTag;
2507 andl(scratch1, Immediate(kFlatOneByteStringMask));
2508 andl(scratch2, Immediate(kFlatOneByteStringMask));
2509 // Interleave the bits to check both scratch1 and scratch2 in one test.
2510 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2511 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2512 cmpl(scratch1,
2513 Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2514 j(not_equal, on_fail, near_jump);
2515 }
2518 template <typename T>
2519 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2520 T operand_or_register,
2521 Label* not_unique_name,
2522 Label::Distance distance) {
2523 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2524 Label succeed;
2525 masm->testb(operand_or_register,
2526 Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2527 masm->j(zero, &succeed, Label::kNear);
2528 masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2529 masm->j(not_equal, not_unique_name, distance);
2531 masm->bind(&succeed);
2532 }
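2533 // Unique names are internalized strings and symbols: the only name kinds
2534 // that can safely be compared by pointer identity.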
2535 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
2536 Label* not_unique_name,
2537 Label::Distance distance) {
2538 JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2539 }
2542 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2543 Label* not_unique_name,
2544 Label::Distance distance) {
2545 JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2546 }
2549 void MacroAssembler::Move(Register dst, Register src) {
2550 if (!dst.is(src)) {
2551 movp(dst, src);
2552 }
2553 }
2556 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2557 AllowDeferredHandleDereference smi_check;
2558 if (source->IsSmi()) {
2559 Move(dst, Smi::cast(*source));
2560 } else {
2561 MoveHeapObject(dst, source);
2562 }
2563 }
2566 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2567 AllowDeferredHandleDereference smi_check;
2568 if (source->IsSmi()) {
2569 Move(dst, Smi::cast(*source));
2570 } else {
2571 MoveHeapObject(kScratchRegister, source);
2572 movp(dst, kScratchRegister);
2573 }
2574 }
2577 void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
2578 if (src == 0) {
2579 xorps(dst, dst);
2580 } else {
2581 movl(kScratchRegister, Immediate(src));
2582 movq(dst, kScratchRegister);
2583 }
2584 }
2587 void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
2588 uint32_t lower = static_cast<uint32_t>(src);
2589 uint32_t upper = static_cast<uint32_t>(src >> 32);
2590 if (upper == 0) {
2591 Move(dst, lower);
2592 } else {
2597 movq(kScratchRegister, src);
2598 movq(dst, kScratchRegister);
2599 }
2600 }
2604 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2605 AllowDeferredHandleDereference smi_check;
2606 if (source->IsSmi()) {
2607 Cmp(dst, Smi::cast(*source));
2608 } else {
2609 MoveHeapObject(kScratchRegister, source);
2610 cmpp(dst, kScratchRegister);
2611 }
2612 }
2615 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2616 AllowDeferredHandleDereference smi_check;
2617 if (source->IsSmi()) {
2618 Cmp(dst, Smi::cast(*source));
2619 } else {
2620 MoveHeapObject(kScratchRegister, source);
2621 cmpp(dst, kScratchRegister);
2622 }
2623 }
2626 void MacroAssembler::Push(Handle<Object> source) {
2627 AllowDeferredHandleDereference smi_check;
2628 if (source->IsSmi()) {
2629 Push(Smi::cast(*source));
2630 } else {
2631 MoveHeapObject(kScratchRegister, source);
2632 Push(kScratchRegister);
2633 }
2634 }
2637 void MacroAssembler::MoveHeapObject(Register result,
2638 Handle<Object> object) {
2639 AllowDeferredHandleDereference using_raw_address;
2640 DCHECK(object->IsHeapObject());
2641 if (isolate()->heap()->InNewSpace(*object)) {
2642 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2643 Move(result, cell, RelocInfo::CELL);
2644 movp(result, Operand(result, 0));
2645 } else {
2646 Move(result, object, RelocInfo::EMBEDDED_OBJECT);
2647 }
2648 }
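2649 // New-space objects can move during GC, so they are referenced indirectly
2650 // through a cell; old-space pointers are embedded and fixed up by relocation.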
2651 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
2652 if (dst.is(rax)) {
2653 AllowDeferredHandleDereference embedding_raw_address;
2654 load_rax(cell.location(), RelocInfo::CELL);
2655 } else {
2656 Move(dst, cell, RelocInfo::CELL);
2657 movp(dst, Operand(dst, 0));
2658 }
2659 }
2662 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2663 Register scratch) {
2664 Move(scratch, cell, RelocInfo::EMBEDDED_OBJECT);
2665 cmpp(value, FieldOperand(scratch, WeakCell::kValueOffset));
2666 }
2669 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2670 Move(value, cell, RelocInfo::EMBEDDED_OBJECT);
2671 movp(value, FieldOperand(value, WeakCell::kValueOffset));
2672 }
2675 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2676 Label* miss) {
2677 GetWeakValue(value, cell);
2678 JumpIfSmi(value, miss);
2679 }
2682 void MacroAssembler::Drop(int stack_elements) {
2683 if (stack_elements > 0) {
2684 addp(rsp, Immediate(stack_elements * kPointerSize));
2685 }
2686 }
2689 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
2690 Register scratch) {
2691 DCHECK(stack_elements > 0);
2692 if (kPointerSize == kInt64Size && stack_elements == 1) {
2693 popq(MemOperand(rsp, 0));
2694 return;
2695 }
2697 PopReturnAddressTo(scratch);
2698 Drop(stack_elements);
2699 PushReturnAddressFrom(scratch);
2700 }
2703 void MacroAssembler::Push(Register src) {
2704 if (kPointerSize == kInt64Size) {
2705 pushq(src);
2706 } else {
2707 // x32 uses 64-bit push for rbp in the prologue.
2708 DCHECK(src.code() != rbp.code());
2709 leal(rsp, Operand(rsp, -4));
2710 movp(Operand(rsp, 0), src);
2711 }
2712 }
2715 void MacroAssembler::Push(const Operand& src) {
2716 if (kPointerSize == kInt64Size) {
2717 pushq(src);
2718 } else {
2719 movp(kScratchRegister, src);
2720 leal(rsp, Operand(rsp, -4));
2721 movp(Operand(rsp, 0), kScratchRegister);
2722 }
2723 }
2726 void MacroAssembler::PushQuad(const Operand& src) {
2727 if (kPointerSize == kInt64Size) {
2728 pushq(src);
2729 } else {
2730 movp(kScratchRegister, src);
2731 pushq(kScratchRegister);
2732 }
2733 }
2736 void MacroAssembler::Push(Immediate value) {
2737 if (kPointerSize == kInt64Size) {
2738 pushq(value);
2739 } else {
2740 leal(rsp, Operand(rsp, -4));
2741 movp(Operand(rsp, 0), value);
2742 }
2743 }
2746 void MacroAssembler::PushImm32(int32_t imm32) {
2747 if (kPointerSize == kInt64Size) {
2748 pushq_imm32(imm32);
2749 } else {
2750 leal(rsp, Operand(rsp, -4));
2751 movp(Operand(rsp, 0), Immediate(imm32));
2752 }
2753 }
2756 void MacroAssembler::Pop(Register dst) {
2757 if (kPointerSize == kInt64Size) {
2758 popq(dst);
2759 } else {
2760 // x32 uses 64-bit pop for rbp in the epilogue.
2761 DCHECK(dst.code() != rbp.code());
2762 movp(dst, Operand(rsp, 0));
2763 leal(rsp, Operand(rsp, 4));
2764 }
2765 }
2768 void MacroAssembler::Pop(const Operand& dst) {
2769 if (kPointerSize == kInt64Size) {
2770 popq(dst);
2771 } else {
2772 Register scratch = dst.AddressUsesRegister(kScratchRegister)
2773 ? kSmiConstantRegister : kScratchRegister;
2774 movp(scratch, Operand(rsp, 0));
2775 movp(dst, scratch);
2776 leal(rsp, Operand(rsp, 4));
2777 if (scratch.is(kSmiConstantRegister)) {
2778 // Restore kSmiConstantRegister.
2779 movp(kSmiConstantRegister,
2780 reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
2781 Assembler::RelocInfoNone());
2782 }
2783 }
2784 }
2787 void MacroAssembler::PopQuad(const Operand& dst) {
2788 if (kPointerSize == kInt64Size) {
2789 popq(dst);
2790 } else {
2791 popq(kScratchRegister);
2792 movp(dst, kScratchRegister);
2793 }
2794 }
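2795 // On x32 (kPointerSize == kInt32Size) pointer slots are four bytes while the
2796 // hardware push/pop always move eight, hence the explicit leal/movp sequences.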
2797 void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
2798 Register base,
2799 int offset) {
2800 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
2801 offset <= SharedFunctionInfo::kSize &&
2802 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
2803 if (kPointerSize == kInt64Size) {
2804 movsxlq(dst, FieldOperand(base, offset));
2805 } else {
2806 movp(dst, FieldOperand(base, offset));
2807 SmiToInteger32(dst, dst);
2808 }
2809 }
2812 void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
2813 int offset,
2814 int bits) {
2815 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
2816 offset <= SharedFunctionInfo::kSize &&
2817 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
2818 if (kPointerSize == kInt32Size) {
2819 // On x32, this field is represented as a smi.
2820 bits += kSmiTagSize + kSmiShiftSize;
2821 }
2822 int byte_offset = bits / kBitsPerByte;
2823 int bit_in_byte = bits & (kBitsPerByte - 1);
2824 testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
2825 }
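2826 // Lets callers (e.g. TryGetFunctionPrototype below) test one compiler-hints
2827 // bit straight from memory, with no untagging.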
2828 void MacroAssembler::Jump(ExternalReference ext) {
2829 LoadAddress(kScratchRegister, ext);
2830 jmp(kScratchRegister);
2831 }
2834 void MacroAssembler::Jump(const Operand& op) {
2835 if (kPointerSize == kInt64Size) {
2836 jmp(op);
2837 } else {
2838 movp(kScratchRegister, op);
2839 jmp(kScratchRegister);
2840 }
2841 }
2844 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
2845 Move(kScratchRegister, destination, rmode);
2846 jmp(kScratchRegister);
2847 }
2850 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
2851 // TODO(X64): Inline this
2852 jmp(code_object, rmode);
2853 }
2856 int MacroAssembler::CallSize(ExternalReference ext) {
2857 // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
2858 return LoadAddressSize(ext) +
2859 Assembler::kCallScratchRegisterInstructionLength;
2860 }
2863 void MacroAssembler::Call(ExternalReference ext) {
2864 #ifdef DEBUG
2865 int end_position = pc_offset() + CallSize(ext);
2866 #endif
2867 LoadAddress(kScratchRegister, ext);
2868 call(kScratchRegister);
2869 #ifdef DEBUG
2870 CHECK_EQ(end_position, pc_offset());
2871 #endif
2872 }
2875 void MacroAssembler::Call(const Operand& op) {
2876 if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
2877 call(op);
2878 } else {
2879 movp(kScratchRegister, op);
2880 call(kScratchRegister);
2881 }
2882 }
2885 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
2886 #ifdef DEBUG
2887 int end_position = pc_offset() + CallSize(destination);
2888 #endif
2889 Move(kScratchRegister, destination, rmode);
2890 call(kScratchRegister);
2891 #ifdef DEBUG
2892 CHECK_EQ(pc_offset(), end_position);
2893 #endif
2894 }
2897 void MacroAssembler::Call(Handle<Code> code_object,
2898 RelocInfo::Mode rmode,
2899 TypeFeedbackId ast_id) {
2900 #ifdef DEBUG
2901 int end_position = pc_offset() + CallSize(code_object);
2902 #endif
2903 DCHECK(RelocInfo::IsCodeTarget(rmode) ||
2904 rmode == RelocInfo::CODE_AGE_SEQUENCE);
2905 call(code_object, rmode, ast_id);
2906 #ifdef DEBUG
2907 CHECK_EQ(end_position, pc_offset());
2908 #endif
2909 }
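2910 // The DEBUG-only end_position checks keep CallSize() in sync with the bytes
2911 // these Call helpers actually emit.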
2912 void MacroAssembler::Pushad() {
2913 pushq(rax);
2914 pushq(rcx);
2915 pushq(rdx);
2916 pushq(rbx);
2917 // Not pushing rsp or rbp.
2918 pushq(rsi);
2919 pushq(rdi);
2920 pushq(r8);
2921 pushq(r9);
2922 // r10 is kScratchRegister.
2923 pushq(r11);
2924 // r12 is kSmiConstantRegister.
2925 // r13 is kRootRegister.
2926 pushq(r14);
2927 pushq(r15);
2928 STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
2929 // Use lea for symmetry with Popad.
2930 int sp_delta =
2931 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2932 leap(rsp, Operand(rsp, -sp_delta));
2933 }
2936 void MacroAssembler::Popad() {
2937 // Popad must not change the flags, so use lea instead of addq.
2938 int sp_delta =
2939 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2940 leap(rsp, Operand(rsp, sp_delta));
2941 popq(r15);
2942 popq(r14);
2943 popq(r11);
2944 popq(r9);
2945 popq(r8);
2946 popq(rdi);
2947 popq(rsi);
2948 popq(rbx);
2949 popq(rdx);
2950 popq(rcx);
2951 popq(rax);
2952 }
2955 void MacroAssembler::Dropad() {
2956 addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
2957 }
2960 // Order general registers are pushed by Pushad:
2961 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
2962 const int
2963 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
2964 0,
2965 1,
2966 2,
2967 3,
2968 -1,
2969 -1,
2970 4,
2971 5,
2972 6,
2973 7,
2974 -1,
2975 8,
2976 -1,
2977 -1,
2978 9,
2979 10
2980 };
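2981 // -1 marks registers Pushad does not save: rsp, rbp, r10 (kScratchRegister),
2982 // r12 (kSmiConstantRegister) and r13 (kRootRegister).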
2983 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
2984 const Immediate& imm) {
2985 movp(SafepointRegisterSlot(dst), imm);
2986 }
2989 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2990 movp(SafepointRegisterSlot(dst), src);
2991 }
2994 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2995 movp(dst, SafepointRegisterSlot(src));
2996 }
2999 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
3000 return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
3001 }
3004 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3005 int handler_index) {
3006 // Adjust this code if not the case.
3007 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3008 kFPOnStackSize);
3009 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3010 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3011 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3012 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3013 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3015 // We will build up the handler from the bottom by pushing on the stack.
3016 // First push the frame pointer and context.
3017 if (kind == StackHandler::JS_ENTRY) {
3018 // The frame pointer does not point to a JS frame so we save NULL for
3019 // rbp. We expect the code throwing an exception to check rbp before
3020 // dereferencing it to restore the context.
3021 pushq(Immediate(0)); // NULL frame pointer.
3022 Push(Smi::FromInt(0)); // No context.
3023 } else {
3024 pushq(rbp);
3025 Push(rsi);
3026 }
3028 // Push the state and the code object.
3029 unsigned state =
3030 StackHandler::IndexField::encode(handler_index) |
3031 StackHandler::KindField::encode(kind);
3032 Push(Immediate(state));
3033 Push(CodeObject());
3035 // Link the current handler as the next handler.
3036 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3037 Push(ExternalOperand(handler_address));
3038 // Set this new handler as the current one.
3039 movp(ExternalOperand(handler_address), rsp);
3040 }
3043 void MacroAssembler::PopTryHandler() {
3044 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3045 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3046 Pop(ExternalOperand(handler_address));
3047 addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
3048 }
3051 void MacroAssembler::JumpToHandlerEntry() {
3052 // Compute the handler entry address and jump to it. The handler table is
3053 // a fixed array of (smi-tagged) code offsets.
3054 // rax = exception, rdi = code object, rdx = state.
3055 movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
3056 shrp(rdx, Immediate(StackHandler::kKindWidth));
3057 movp(rdx,
3058 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
3059 SmiToInteger64(rdx, rdx);
3060 leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
3061 jmp(rdi);
3062 }
3065 void MacroAssembler::Throw(Register value) {
3066 // Adjust this code if not the case.
3067 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3068 kFPOnStackSize);
3069 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3070 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3071 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3072 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3073 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3075 // The exception is expected in rax.
3076 if (!value.is(rax)) {
3077 movp(rax, value);
3078 }
3079 // Drop the stack pointer to the top of the top handler.
3080 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3081 movp(rsp, ExternalOperand(handler_address));
3082 // Restore the next handler.
3083 Pop(ExternalOperand(handler_address));
3085 // Remove the code object and state, compute the handler address in rdi.
3086 Pop(rdi); // Code object.
3087 Pop(rdx); // Offset and state.
3089 // Restore the context and frame pointer.
3090 Pop(rsi); // Context.
3091 popq(rbp); // Frame pointer.
3093 // If the handler is a JS frame, restore the context to the frame.
3094 // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
3095 // rbp or rsi.
3096 Label skip;
3097 testp(rsi, rsi);
3098 j(zero, &skip, Label::kNear);
3099 movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
3100 bind(&skip);
3102 JumpToHandlerEntry();
3103 }
3106 void MacroAssembler::ThrowUncatchable(Register value) {
3107 // Adjust this code if not the case.
3108 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3109 kFPOnStackSize);
3110 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3111 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3112 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3113 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3114 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3116 // The exception is expected in rax.
3117 if (!value.is(rax)) {
3118 movp(rax, value);
3119 }
3120 // Drop the stack pointer to the top of the top stack handler.
3121 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3122 Load(rsp, handler_address);
3124 // Unwind the handlers until the top ENTRY handler is found.
3125 Label fetch_next, check_kind;
3126 jmp(&check_kind, Label::kNear);
3127 bind(&fetch_next);
3128 movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
3130 bind(&check_kind);
3131 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3132 testl(Operand(rsp, StackHandlerConstants::kStateOffset),
3133 Immediate(StackHandler::KindField::kMask));
3134 j(not_zero, &fetch_next);
3136 // Set the top handler address to next handler past the top ENTRY handler.
3137 Pop(ExternalOperand(handler_address));
3139 // Remove the code object and state, compute the handler address in rdi.
3140 Pop(rdi); // Code object.
3141 Pop(rdx); // Offset and state.
3143 // Clear the context pointer and frame pointer (0 was saved in the handler).
3144 Pop(rsi);
3145 popq(rbp);
3147 JumpToHandlerEntry();
3148 }
3151 void MacroAssembler::Ret() {
3152 ret(0);
3153 }
3156 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
3157 if (is_uint16(bytes_dropped)) {
3158 ret(bytes_dropped);
3159 } else {
3160 PopReturnAddressTo(scratch);
3161 addp(rsp, Immediate(bytes_dropped));
3162 PushReturnAddressFrom(scratch);
3163 ret(0);
3164 }
3165 }
3168 void MacroAssembler::FCmp() {
3169 fucomip();
3170 fstp(0);
3171 }
3174 void MacroAssembler::CmpObjectType(Register heap_object,
3175 InstanceType type,
3176 Register map) {
3177 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3178 CmpInstanceType(map, type);
3179 }
3182 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3183 cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
3184 Immediate(static_cast<int8_t>(type)));
3185 }
3188 void MacroAssembler::CheckFastElements(Register map,
3189 Label* fail,
3190 Label::Distance distance) {
3191 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3192 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3193 STATIC_ASSERT(FAST_ELEMENTS == 2);
3194 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3195 cmpb(FieldOperand(map, Map::kBitField2Offset),
3196 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3197 j(above, fail, distance);
3198 }
3201 void MacroAssembler::CheckFastObjectElements(Register map,
3202 Label* fail,
3203 Label::Distance distance) {
3204 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3205 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3206 STATIC_ASSERT(FAST_ELEMENTS == 2);
3207 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3208 cmpb(FieldOperand(map, Map::kBitField2Offset),
3209 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3210 j(below_equal, fail, distance);
3211 cmpb(FieldOperand(map, Map::kBitField2Offset),
3212 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3213 j(above, fail, distance);
3214 }
3217 void MacroAssembler::CheckFastSmiElements(Register map,
3218 Label* fail,
3219 Label::Distance distance) {
3220 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3221 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3222 cmpb(FieldOperand(map, Map::kBitField2Offset),
3223 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3224 j(above, fail, distance);
3225 }
3228 void MacroAssembler::StoreNumberToDoubleElements(
3229 Register maybe_number,
3230 Register elements,
3231 Register index,
3232 XMMRegister xmm_scratch,
3233 Label* fail,
3234 int elements_offset) {
3235 Label smi_value, done;
3237 JumpIfSmi(maybe_number, &smi_value, Label::kNear);
3239 CheckMap(maybe_number,
3240 isolate()->factory()->heap_number_map(),
3241 fail,
3242 DONT_DO_SMI_CHECK);
3244 // Double value, turn potential sNaN into qNaN.
3245 Move(xmm_scratch, 1.0);
3246 mulsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
3247 jmp(&done, Label::kNear);
3249 bind(&smi_value);
3250 // Value is a smi. Convert it to a double and store.
3251 // Preserve original value.
3252 SmiToInteger32(kScratchRegister, maybe_number);
3253 Cvtlsi2sd(xmm_scratch, kScratchRegister);
3255 movsd(FieldOperand(elements, index, times_8,
3256 FixedDoubleArray::kHeaderSize - elements_offset),
3257 xmm_scratch);
3258 bind(&done);
3259 }
3261 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3262 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3263 }
3266 void MacroAssembler::CheckMap(Register obj,
3267 Handle<Map> map,
3268 Label* fail,
3269 SmiCheckType smi_check_type) {
3270 if (smi_check_type == DO_SMI_CHECK) {
3271 JumpIfSmi(obj, fail);
3272 }
3274 CompareMap(obj, map);
3275 j(not_equal, fail);
3276 }
3279 void MacroAssembler::ClampUint8(Register reg) {
3280 Label done;
3281 testl(reg, Immediate(0xFFFFFF00));
3282 j(zero, &done, Label::kNear);
3283 setcc(negative, reg); // 1 if negative, 0 if positive.
3284 decb(reg); // 0 if negative, 255 if positive.
3285 bind(&done);
3286 }
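3287 // Values already in [0, 255] skip ahead; everything else collapses to 0
3288 // (negative) or 255 (too large) without further branching.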
3289 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3290 XMMRegister temp_xmm_reg,
3291 Register result_reg) {
3292 Label done;
3293 Label conv_failure;
3294 xorps(temp_xmm_reg, temp_xmm_reg);
3295 cvtsd2si(result_reg, input_reg);
3296 testl(result_reg, Immediate(0xFFFFFF00));
3297 j(zero, &done, Label::kNear);
3298 cmpl(result_reg, Immediate(1));
3299 j(overflow, &conv_failure, Label::kNear);
3300 movl(result_reg, Immediate(0));
3301 setcc(sign, result_reg);
3302 subl(result_reg, Immediate(1));
3303 andl(result_reg, Immediate(255));
3304 jmp(&done, Label::kNear);
3305 bind(&conv_failure);
3306 Set(result_reg, 0);
3307 ucomisd(input_reg, temp_xmm_reg);
3308 j(below, &done, Label::kNear);
3309 Set(result_reg, 255);
3310 bind(&done);
3311 }
3314 void MacroAssembler::LoadUint32(XMMRegister dst,
3315 Register src) {
3316 if (FLAG_debug_code) {
3317 cmpq(src, Immediate(0xffffffff));
3318 Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3320 cvtqsi2sd(dst, src);
3321 }
3324 void MacroAssembler::SlowTruncateToI(Register result_reg,
3325 Register input_reg,
3326 int offset) {
3327 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
3328 call(stub.GetCode(), RelocInfo::CODE_TARGET);
3329 }
3332 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3333 Register input_reg) {
3334 Label done;
3335 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3336 cvttsd2siq(result_reg, xmm0);
3337 cmpq(result_reg, Immediate(1));
3338 j(no_overflow, &done, Label::kNear);
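3339 // cvttsd2siq yields 0x8000000000000000 on failure; cmpq with 1 overflows for
3340 // exactly that value, so no_overflow means the conversion succeeded.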
3341 if (input_reg.is(result_reg)) {
3342 subp(rsp, Immediate(kDoubleSize));
3343 movsd(MemOperand(rsp, 0), xmm0);
3344 SlowTruncateToI(result_reg, rsp, 0);
3345 addp(rsp, Immediate(kDoubleSize));
3346 } else {
3347 SlowTruncateToI(result_reg, input_reg);
3348 }
3350 bind(&done);
3351 // Keep our invariant that the upper 32 bits are zero.
3352 movl(result_reg, result_reg);
3353 }
3356 void MacroAssembler::TruncateDoubleToI(Register result_reg,
3357 XMMRegister input_reg) {
3358 Label done;
3359 cvttsd2siq(result_reg, input_reg);
3360 cmpq(result_reg, Immediate(1));
3361 j(no_overflow, &done, Label::kNear);
3363 subp(rsp, Immediate(kDoubleSize));
3364 movsd(MemOperand(rsp, 0), input_reg);
3365 SlowTruncateToI(result_reg, rsp, 0);
3366 addp(rsp, Immediate(kDoubleSize));
3368 bind(&done);
3369 // Keep our invariant that the upper 32 bits are zero.
3370 movl(result_reg, result_reg);
3371 }
3374 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
3375 XMMRegister scratch,
3376 MinusZeroMode minus_zero_mode,
3377 Label* lost_precision, Label* is_nan,
3378 Label* minus_zero, Label::Distance dst) {
3379 cvttsd2si(result_reg, input_reg);
3380 Cvtlsi2sd(xmm0, result_reg);
3381 ucomisd(xmm0, input_reg);
3382 j(not_equal, lost_precision, dst);
3383 j(parity_even, is_nan, dst); // NaN.
3384 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3385 Label done;
3386 // The integer converted back is equal to the original. We
3387 // only have to test if we got -0 as an input.
3388 testl(result_reg, result_reg);
3389 j(not_zero, &done, Label::kNear);
3390 movmskpd(result_reg, input_reg);
3391 // Bit 0 contains the sign of the double in input_reg.
3392 // If input was positive, we are ok and return 0, otherwise
3393 // jump to minus_zero.
3394 andl(result_reg, Immediate(1));
3395 j(not_zero, minus_zero, dst);
3396 bind(&done);
3397 }
3398 }
3401 void MacroAssembler::LoadInstanceDescriptors(Register map,
3402 Register descriptors) {
3403 movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3404 }
3407 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3408 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3409 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3410 }
3413 void MacroAssembler::EnumLength(Register dst, Register map) {
3414 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3415 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3416 andl(dst, Immediate(Map::EnumLengthBits::kMask));
3417 Integer32ToSmi(dst, dst);
3418 }
3421 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3422 int accessor_index,
3423 AccessorComponent accessor) {
3424 movp(dst, FieldOperand(holder, HeapObject::kMapOffset));
3425 LoadInstanceDescriptors(dst, dst);
3426 movp(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3427 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
3428 : AccessorPair::kSetterOffset;
3429 movp(dst, FieldOperand(dst, offset));
3430 }
3433 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3434 Register scratch2, Handle<WeakCell> cell,
3435 Handle<Code> success,
3436 SmiCheckType smi_check_type) {
3437 Label fail;
3438 if (smi_check_type == DO_SMI_CHECK) {
3439 JumpIfSmi(obj, &fail);
3440 }
3441 movq(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
3442 CmpWeakValue(scratch1, cell, scratch2);
3443 j(equal, success, RelocInfo::CODE_TARGET);
3444 bind(&fail);
3445 }
3448 void MacroAssembler::AssertNumber(Register object) {
3449 if (emit_debug_code()) {
3450 Label ok;
3451 Condition is_smi = CheckSmi(object);
3452 j(is_smi, &ok, Label::kNear);
3453 Cmp(FieldOperand(object, HeapObject::kMapOffset),
3454 isolate()->factory()->heap_number_map());
3455 Check(equal, kOperandIsNotANumber);
3456 bind(&ok);
3457 }
3458 }
3461 void MacroAssembler::AssertNotSmi(Register object) {
3462 if (emit_debug_code()) {
3463 Condition is_smi = CheckSmi(object);
3464 Check(NegateCondition(is_smi), kOperandIsASmi);
3465 }
3466 }
3469 void MacroAssembler::AssertSmi(Register object) {
3470 if (emit_debug_code()) {
3471 Condition is_smi = CheckSmi(object);
3472 Check(is_smi, kOperandIsNotASmi);
3473 }
3474 }
3477 void MacroAssembler::AssertSmi(const Operand& object) {
3478 if (emit_debug_code()) {
3479 Condition is_smi = CheckSmi(object);
3480 Check(is_smi, kOperandIsNotASmi);
3481 }
3482 }
3485 void MacroAssembler::AssertZeroExtended(Register int32_register) {
3486 if (emit_debug_code()) {
3487 DCHECK(!int32_register.is(kScratchRegister));
3488 movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3489 cmpq(kScratchRegister, int32_register);
3490 Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3491 }
3492 }
3495 void MacroAssembler::AssertString(Register object) {
3496 if (emit_debug_code()) {
3497 testb(object, Immediate(kSmiTagMask));
3498 Check(not_equal, kOperandIsASmiAndNotAString);
3499 Push(object);
3500 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3501 CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3502 Pop(object);
3503 Check(below, kOperandIsNotAString);
3504 }
3505 }
3508 void MacroAssembler::AssertName(Register object) {
3509 if (emit_debug_code()) {
3510 testb(object, Immediate(kSmiTagMask));
3511 Check(not_equal, kOperandIsASmiAndNotAName);
3512 Push(object);
3513 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3514 CmpInstanceType(object, LAST_NAME_TYPE);
3515 Pop(object);
3516 Check(below_equal, kOperandIsNotAName);
3517 }
3518 }
3521 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3522 if (emit_debug_code()) {
3523 Label done_checking;
3524 AssertNotSmi(object);
3525 Cmp(object, isolate()->factory()->undefined_value());
3526 j(equal, &done_checking);
3527 Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3528 Assert(equal, kExpectedUndefinedOrCell);
3529 bind(&done_checking);
3530 }
3531 }
3534 void MacroAssembler::AssertRootValue(Register src,
3535 Heap::RootListIndex root_value_index,
3536 BailoutReason reason) {
3537 if (emit_debug_code()) {
3538 DCHECK(!src.is(kScratchRegister));
3539 LoadRoot(kScratchRegister, root_value_index);
3540 cmpp(src, kScratchRegister);
3541 Check(equal, reason);
3542 }
3543 }
3547 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3548 Register map,
3549 Register instance_type) {
3550 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3551 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3552 STATIC_ASSERT(kNotStringTag != 0);
3553 testb(instance_type, Immediate(kIsNotStringMask));
3554 return zero;
3555 }
3558 Condition MacroAssembler::IsObjectNameType(Register heap_object,
3559 Register map,
3560 Register instance_type) {
3561 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3562 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3563 cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
3564 return below_equal;
3565 }
3568 void MacroAssembler::TryGetFunctionPrototype(Register function,
3569 Register result,
3570 Label* miss,
3571 bool miss_on_bound_function) {
3572 Label non_instance;
3573 if (miss_on_bound_function) {
3574 // Check that the receiver isn't a smi.
3575 testl(function, Immediate(kSmiTagMask));
3576 j(zero, miss);
3578 // Check that the function really is a function.
3579 CmpObjectType(function, JS_FUNCTION_TYPE, result);
3580 j(not_equal, miss);
3582 movp(kScratchRegister,
3583 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3584 // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
3585 // field).
3586 TestBitSharedFunctionInfoSpecialField(kScratchRegister,
3587 SharedFunctionInfo::kCompilerHintsOffset,
3588 SharedFunctionInfo::kBoundFunction);
3589 j(not_zero, miss);
3591 // Make sure that the function has an instance prototype.
3592 testb(FieldOperand(result, Map::kBitFieldOffset),
3593 Immediate(1 << Map::kHasNonInstancePrototype));
3594 j(not_zero, &non_instance, Label::kNear);
3597 // Get the prototype or initial map from the function.
3598 movp(result,
3599 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3601 // If the prototype or initial map is the hole, don't return it and
3602 // simply miss the cache instead. This will allow us to allocate a
3603 // prototype object on-demand in the runtime system.
3604 CompareRoot(result, Heap::kTheHoleValueRootIndex);
3605 j(equal, miss);
3607 // If the function does not have an initial map, we're done.
3608 Label done;
3609 CmpObjectType(result, MAP_TYPE, kScratchRegister);
3610 j(not_equal, &done, Label::kNear);
3612 // Get the prototype from the initial map.
3613 movp(result, FieldOperand(result, Map::kPrototypeOffset));
3615 if (miss_on_bound_function) {
3616 jmp(&done, Label::kNear);
3618 // Non-instance prototype: Fetch prototype from constructor field
3619 // in initial map.
3620 bind(&non_instance);
3621 movp(result, FieldOperand(result, Map::kConstructorOffset));
3622 }
3624 // All done.
3625 bind(&done);
3626 }
3629 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3630 if (FLAG_native_code_counters && counter->Enabled()) {
3631 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3632 movl(counter_operand, Immediate(value));
3633 }
3634 }
3637 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3638 DCHECK(value > 0);
3639 if (FLAG_native_code_counters && counter->Enabled()) {
3640 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3641 if (value == 1) {
3642 incl(counter_operand);
3643 } else {
3644 addl(counter_operand, Immediate(value));
3645 }
3646 }
3647 }
3650 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3651 DCHECK(value > 0);
3652 if (FLAG_native_code_counters && counter->Enabled()) {
3653 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3654 if (value == 1) {
3655 decl(counter_operand);
3656 } else {
3657 subl(counter_operand, Immediate(value));
3658 }
3659 }
3660 }
3663 void MacroAssembler::DebugBreak() {
3664 Set(rax, 0); // No arguments.
3665 LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3666 CEntryStub ces(isolate(), 1);
3667 DCHECK(AllowThisStubCall(&ces));
3668 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3669 }
3672 void MacroAssembler::InvokeCode(Register code,
3673 const ParameterCount& expected,
3674 const ParameterCount& actual,
3675 InvokeFlag flag,
3676 const CallWrapper& call_wrapper) {
3677 // You can't call a function without a valid frame.
3678 DCHECK(flag == JUMP_FUNCTION || has_frame());
3680 Label done;
3681 bool definitely_mismatches = false;
3682 InvokePrologue(expected,
3683 actual,
3684 Handle<Code>::null(),
3685 code,
3686 &done,
3687 &definitely_mismatches,
3688 flag,
3689 Label::kNear,
3690 call_wrapper);
3691 if (!definitely_mismatches) {
3692 if (flag == CALL_FUNCTION) {
3693 call_wrapper.BeforeCall(CallSize(code));
3694 call(code);
3695 call_wrapper.AfterCall();
3696 } else {
3697 DCHECK(flag == JUMP_FUNCTION);
3698 jmp(code);
3699 }
3700 bind(&done);
3701 }
3702 }
3705 void MacroAssembler::InvokeFunction(Register function,
3706 const ParameterCount& actual,
3707 InvokeFlag flag,
3708 const CallWrapper& call_wrapper) {
3709 // You can't call a function without a valid frame.
3710 DCHECK(flag == JUMP_FUNCTION || has_frame());
3712 DCHECK(function.is(rdi));
3713 movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3714 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3715 LoadSharedFunctionInfoSpecialField(rbx, rdx,
3716 SharedFunctionInfo::kFormalParameterCountOffset);
3717 // Advances rdx to the end of the Code object header, to the start of
3718 // the executable code.
3719 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3721 ParameterCount expected(rbx);
3722 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3723 }
3726 void MacroAssembler::InvokeFunction(Register function,
3727 const ParameterCount& expected,
3728 const ParameterCount& actual,
3729 InvokeFlag flag,
3730 const CallWrapper& call_wrapper) {
3731 // You can't call a function without a valid frame.
3732 DCHECK(flag == JUMP_FUNCTION || has_frame());
3734 DCHECK(function.is(rdi));
3735 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3736 // Advances rdx to the end of the Code object header, to the start of
3737 // the executable code.
3738 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3740 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3741 }
3744 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3745 const ParameterCount& expected,
3746 const ParameterCount& actual,
3747 InvokeFlag flag,
3748 const CallWrapper& call_wrapper) {
3749 Move(rdi, function);
3750 InvokeFunction(rdi, expected, actual, flag, call_wrapper);
3751 }
3754 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3755 const ParameterCount& actual,
3756 Handle<Code> code_constant,
3757 Register code_register,
3758 Label* done,
3759 bool* definitely_mismatches,
3760 InvokeFlag flag,
3761 Label::Distance near_jump,
3762 const CallWrapper& call_wrapper) {
3763 bool definitely_matches = false;
3764 *definitely_mismatches = false;
3765 Label invoke;
3766 if (expected.is_immediate()) {
3767 DCHECK(actual.is_immediate());
3768 if (expected.immediate() == actual.immediate()) {
3769 definitely_matches = true;
3770 } else {
3771 Set(rax, actual.immediate());
3772 if (expected.immediate() ==
3773 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3774 // Don't worry about adapting arguments for built-ins that
3775 // don't want that done. Skip adaptation code by making it look
3776 // like we have a match between expected and actual number of
3777 // arguments.
3778 definitely_matches = true;
3779 } else {
3780 *definitely_mismatches = true;
3781 Set(rbx, expected.immediate());
3782 }
3783 }
3784 } else {
3785 if (actual.is_immediate()) {
3786 // Expected is in register, actual is immediate. This is the
3787 // case when we invoke function values without going through the
3788 // IC mechanism.
3789 cmpp(expected.reg(), Immediate(actual.immediate()));
3790 j(equal, &invoke, Label::kNear);
3791 DCHECK(expected.reg().is(rbx));
3792 Set(rax, actual.immediate());
3793 } else if (!expected.reg().is(actual.reg())) {
3794 // Both expected and actual are in (different) registers. This
3795 // is the case when we invoke functions using call and apply.
3796 cmpp(expected.reg(), actual.reg());
3797 j(equal, &invoke, Label::kNear);
3798 DCHECK(actual.reg().is(rax));
3799 DCHECK(expected.reg().is(rbx));
3800 }
3801 }
3803 if (!definitely_matches) {
3804 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
3805 if (!code_constant.is_null()) {
3806 Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
3807 addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3808 } else if (!code_register.is(rdx)) {
3809 movp(rdx, code_register);
3810 }
3812 if (flag == CALL_FUNCTION) {
3813 call_wrapper.BeforeCall(CallSize(adaptor));
3814 Call(adaptor, RelocInfo::CODE_TARGET);
3815 call_wrapper.AfterCall();
3816 if (!*definitely_mismatches) {
3817 jmp(done, near_jump);
3818 }
3819 } else {
3820 Jump(adaptor, RelocInfo::CODE_TARGET);
3821 }
3822 bind(&invoke);
3823 }
3824 }
3827 void MacroAssembler::StubPrologue() {
3828 pushq(rbp); // Caller's frame pointer.
3829 movp(rbp, rsp);
3830 Push(rsi); // Callee's context.
3831 Push(Smi::FromInt(StackFrame::STUB));
3832 }
3835 void MacroAssembler::Prologue(bool code_pre_aging) {
3836 PredictableCodeSizeScope predictable_code_size_scope(this,
3837 kNoCodeAgeSequenceLength);
3838 if (code_pre_aging) {
3839 // Pre-age the code.
3840 Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
3841 RelocInfo::CODE_AGE_SEQUENCE);
3842 Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
3843 } else {
3844 pushq(rbp); // Caller's frame pointer.
3845 movp(rbp, rsp);
3846 Push(rsi); // Callee's context.
3847 Push(rdi); // Callee's JS function.
3848 }
3849 }
3852 void MacroAssembler::EnterFrame(StackFrame::Type type,
3853 bool load_constant_pool_pointer_reg) {
3854 // Out-of-line constant pool not implemented on x64.
3855 UNREACHABLE();
3856 }
3859 void MacroAssembler::EnterFrame(StackFrame::Type type) {
3860 pushq(rbp);
3861 movp(rbp, rsp);
3862 Push(rsi); // Context.
3863 Push(Smi::FromInt(type));
3864 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3865 Push(kScratchRegister);
3866 if (emit_debug_code()) {
3867 Move(kScratchRegister,
3868 isolate()->factory()->undefined_value(),
3869 RelocInfo::EMBEDDED_OBJECT);
3870 cmpp(Operand(rsp, 0), kScratchRegister);
3871 Check(not_equal, kCodeObjectNotProperlyPatched);
3872 }
3873 }
3876 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
3877 if (emit_debug_code()) {
3878 Move(kScratchRegister, Smi::FromInt(type));
3879 cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
3880 Check(equal, kStackFrameTypesMustMatch);
3881 }
3882 movp(rsp, rbp);
3883 popq(rbp);
3884 }
3887 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
3888 // Set up the frame structure on the stack.
3889 // All constants are relative to the frame pointer of the exit frame.
3890 DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
3891 kFPOnStackSize + kPCOnStackSize);
3892 DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
3893 DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
3894 pushq(rbp);
3895 movp(rbp, rsp);
3897 // Reserve room for entry stack pointer and push the code object.
3898 DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
3899 Push(Immediate(0)); // Saved entry sp, patched before call.
3900 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3901 Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
3903 // Save the frame pointer and the context in top.
3904 if (save_rax) {
3905 movp(r14, rax); // Back up rax in a callee-saved register.
3906 }
3908 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
3909 Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
3910 Store(ExternalReference(Isolate::kCFunctionAddress, isolate()), rbx);
3911 }
3914 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
3915 bool save_doubles) {
3916 #ifdef _WIN64
3917 const int kShadowSpace = 4;
3918 arg_stack_space += kShadowSpace;
3919 #endif
3920 // Optionally save all XMM registers.
3921 if (save_doubles) {
3922 int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
3923 arg_stack_space * kRegisterSize;
3924 subp(rsp, Immediate(space));
3925 int offset = -2 * kPointerSize;
3926 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
3927 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3928 movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
3929 }
3930 } else if (arg_stack_space > 0) {
3931 subp(rsp, Immediate(arg_stack_space * kRegisterSize));
3932 }
3934 // Get the required frame alignment for the OS.
3935 const int kFrameAlignment = base::OS::ActivationFrameAlignment();
3936 if (kFrameAlignment > 0) {
3937 DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
3938 DCHECK(is_int8(kFrameAlignment));
3939 andp(rsp, Immediate(-kFrameAlignment));
3940 }
3942 // Patch the saved entry sp.
3943 movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
3944 }
3947 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
3948 EnterExitFramePrologue(true);
3950 // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
3951 // so it must be retained across the C-call.
3952 int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
3953 leap(r15, Operand(rbp, r14, times_pointer_size, offset));
3955 EnterExitFrameEpilogue(arg_stack_space, save_doubles);
3956 }
3959 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
3960 EnterExitFramePrologue(false);
3961 EnterExitFrameEpilogue(arg_stack_space, false);
3962 }
3965 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
3966 // Registers:
3967 // r15 : argv
3968 if (save_doubles) {
3969 int offset = -2 * kPointerSize;
3970 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
3971 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3972 movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
3973 }
3974 }
3975 // Get the return address from the stack and restore the frame pointer.
3976 movp(rcx, Operand(rbp, kFPOnStackSize));
3977 movp(rbp, Operand(rbp, 0 * kPointerSize));
3979 // Drop everything up to and including the arguments and the receiver
3980 // from the caller stack.
3981 leap(rsp, Operand(r15, 1 * kPointerSize));
3983 PushReturnAddressFrom(rcx);
3985 LeaveExitFrameEpilogue(true);
3986 }
3989 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
3990 movp(rsp, rbp);
3991 popq(rbp);
3993 LeaveExitFrameEpilogue(restore_context);
3994 }
3997 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
3998 // Restore current context from top and clear it in debug mode.
3999 ExternalReference context_address(Isolate::kContextAddress, isolate());
4000 Operand context_operand = ExternalOperand(context_address);
4001 if (restore_context) {
4002 movp(rsi, context_operand);
4003 }
4004 #ifdef DEBUG
4005 movp(context_operand, Immediate(0));
4006 #endif
4008 // Clear the top frame.
4009 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
4010 isolate());
4011 Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
4012 movp(c_entry_fp_operand, Immediate(0));
4013 }
4016 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
4017 Register scratch,
4018 Label* miss) {
4019 Label same_contexts;
4021 DCHECK(!holder_reg.is(scratch));
4022 DCHECK(!scratch.is(kScratchRegister));
4023 // Load current lexical context from the stack frame.
4024 movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
4026 // When generating debug code, make sure the lexical context is set.
4027 if (emit_debug_code()) {
4028 cmpp(scratch, Immediate(0));
4029 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
4030 }
4031 // Load the native context of the current context.
4032 int offset =
4033 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
4034 movp(scratch, FieldOperand(scratch, offset));
4035 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4037 // Check the context is a native context.
4038 if (emit_debug_code()) {
4039 Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
4040 isolate()->factory()->native_context_map());
4041 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4044 // Check if both contexts are the same.
4045 cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4046 j(equal, &same_contexts);
4048 // Compare security tokens.
4049 // Check that the security token in the calling global object is
4050 // compatible with the security token in the receiving global
4051 // object.
4053 // Check the context is a native context.
4054 if (emit_debug_code()) {
4055 // Preserve original value of holder_reg.
4056 Push(holder_reg);
4057 movp(holder_reg,
4058 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4059 CompareRoot(holder_reg, Heap::kNullValueRootIndex);
4060 Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
4062 // Read the first word and compare to native_context_map().
4063 movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
4064 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
4065 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4067 Pop(holder_reg);
4068 }
4069 movp(kScratchRegister,
4070 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4071 int token_offset =
4072 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
4073 movp(scratch, FieldOperand(scratch, token_offset));
4074 cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
4075 j(not_equal, miss);
4077 bind(&same_contexts);
4078 }
4081 // Compute the hash code from the untagged key. This must be kept in sync with
4082 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
4083 // code-stub-hydrogen.cc
4084 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
4085 // First of all we assign the hash seed to scratch.
4086 LoadRoot(scratch, Heap::kHashSeedRootIndex);
4087 SmiToInteger32(scratch, scratch);
4089 // Xor original key with a seed.
4090 xorl(r0, scratch);
4092 // Compute the hash code from the untagged key. This must be kept in sync
4093 // with ComputeIntegerHash in utils.h.
4094 //
4095 // hash = ~hash + (hash << 15);
4096 movl(scratch, r0);
4097 notl(r0);
4098 shll(scratch, Immediate(15));
4099 addl(r0, scratch);
4100 // hash = hash ^ (hash >> 12);
4101 movl(scratch, r0);
4102 shrl(scratch, Immediate(12));
4103 xorl(r0, scratch);
4104 // hash = hash + (hash << 2);
4105 leal(r0, Operand(r0, r0, times_4, 0));
4106 // hash = hash ^ (hash >> 4);
4107 movl(scratch, r0);
4108 shrl(scratch, Immediate(4));
4109 xorl(r0, scratch);
4110 // hash = hash * 2057;
4111 imull(r0, r0, Immediate(2057));
4112 // hash = hash ^ (hash >> 16);
4113 movl(scratch, r0);
4114 shrl(scratch, Immediate(16));
4115 xorl(r0, scratch);
4116 }
4120 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
4121 Register elements,
4122 Register key,
4123 Register r0,
4124 Register r1,
4125 Register r2,
4126 Register result) {
4128 // Register use:
4129 // elements - holds the slow-case elements of the receiver on entry.
4130 // Unchanged unless 'result' is the same register.
4132 // key - holds the smi key on entry.
4133 // Unchanged unless 'result' is the same register.
4135 // Scratch registers:
4137 // r0 - holds the untagged key on entry and holds the hash once computed.
4139 // r1 - used to hold the capacity mask of the dictionary
4141 // r2 - used for the index into the dictionary.
4143 // result - holds the result on exit if the load succeeded.
4144 // Allowed to be the same as 'key' or 'result'.
4145 // Unchanged on bailout so 'key' or 'result' can be used
4146 // in further computation.
4148 Label done;
4150 GetNumberHash(r0, r1);
4152 // Compute capacity mask.
4153 SmiToInteger32(r1, FieldOperand(elements,
4154 SeededNumberDictionary::kCapacityOffset));
4155 decl(r1);
4157 // Generate an unrolled loop that performs a few probes before giving up.
4158 for (int i = 0; i < kNumberDictionaryProbes; i++) {
4159 // Use r2 for index calculations and keep the hash intact in r0.
4160 movp(r2, r0);
4161 // Compute the masked index: (hash + i + i * i) & mask.
4162 if (i > 0) {
4163 addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4164 }
4165 andp(r2, r1);
4167 // Scale the index by multiplying by the entry size.
4168 DCHECK(SeededNumberDictionary::kEntrySize == 3);
4169 leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
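4170 // Each entry is three pointers (key, value and details), so entry index * 3
// gives the element offset.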
4171 // Check if the key matches.
4172 cmpp(key, FieldOperand(elements,
4173 r2,
4174 times_pointer_size,
4175 SeededNumberDictionary::kElementsStartOffset));
4176 if (i != (kNumberDictionaryProbes - 1)) {
4177 j(equal, &done);
4178 } else {
4179 j(not_equal, miss);
4180 }
4181 }
4183 bind(&done);
4184 // Check that the value is a field property.
4185 const int kDetailsOffset =
4186 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4188 Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4189 Smi::FromInt(PropertyDetails::TypeField::kMask));
4190 j(not_zero, miss);
4192 // Get the value at the masked, scaled index.
4193 const int kValueOffset =
4194 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4195 movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
4196 }
4199 void MacroAssembler::LoadAllocationTopHelper(Register result,
4200 Register scratch,
4201 AllocationFlags flags) {
4202 ExternalReference allocation_top =
4203 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4205 // Just return if allocation top is already known.
4206 if ((flags & RESULT_CONTAINS_TOP) != 0) {
4207 // No use of scratch if allocation top is provided.
4208 DCHECK(!scratch.is_valid());
4209 #ifdef DEBUG
4210 // Assert that result actually contains top on entry.
4211 Operand top_operand = ExternalOperand(allocation_top);
4212 cmpp(result, top_operand);
4213 Check(equal, kUnexpectedAllocationTop);
4214 #endif
4215 return;
4216 }
4218 // Move address of new object to result. Use scratch register if available,
4219 // and keep address in scratch until call to UpdateAllocationTopHelper.
4220 if (scratch.is_valid()) {
4221 LoadAddress(scratch, allocation_top);
4222 movp(result, Operand(scratch, 0));
4223 } else {
4224 Load(result, allocation_top);
4225 }
4226 }
4229 void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
4230 Register scratch,
4231 Label* gc_required,
4232 AllocationFlags flags) {
4233 if (kPointerSize == kDoubleSize) {
4234 if (FLAG_debug_code) {
4235 testl(result, Immediate(kDoubleAlignmentMask));
4236 Check(zero, kAllocationIsNotDoubleAligned);
4237 }
4238 } else {
4239 // Align the next allocation. Storing the filler map without checking top
4240 // is safe in new-space because the limit of the heap is aligned there.
4241 DCHECK(kPointerSize * 2 == kDoubleSize);
4242 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
4243 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4244 // Make sure scratch is not clobbered by this function as it might be
4245 // used in UpdateAllocationTopHelper later.
4246 DCHECK(!scratch.is(kScratchRegister));
4247 Label aligned;
4248 testl(result, Immediate(kDoubleAlignmentMask));
4249 j(zero, &aligned, Label::kNear);
4250 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
4251 ExternalReference allocation_limit =
4252 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4253 cmpp(result, ExternalOperand(allocation_limit));
4254 j(above_equal, gc_required);
4255 }
4256 LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
4257 movp(Operand(result, 0), kScratchRegister);
4258 addp(result, Immediate(kDoubleSize / 2));
4259 bind(&aligned);
4260 }
4261 }
4264 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
4265 Register scratch,
4266 AllocationFlags flags) {
4267 if (emit_debug_code()) {
4268 testp(result_end, Immediate(kObjectAlignmentMask));
4269 Check(zero, kUnalignedAllocationInNewSpace);
4272 ExternalReference allocation_top =
4273 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4275 // Update new top.
4276 if (scratch.is_valid()) {
4277 // Scratch already contains address of allocation top.
4278 movp(Operand(scratch, 0), result_end);
4280 Store(allocation_top, result_end);
4281 }
4282 }
4285 void MacroAssembler::Allocate(int object_size,
4286 Register result,
4287 Register result_end,
4288 Register scratch,
4289 Label* gc_required,
4290 AllocationFlags flags) {
4291 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
4292 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4293 if (!FLAG_inline_new) {
4294 if (emit_debug_code()) {
4295 // Trash the registers to simulate an allocation failure.
4296 movl(result, Immediate(0x7091));
4297 if (result_end.is_valid()) {
4298 movl(result_end, Immediate(0x7191));
4299 }
4300 if (scratch.is_valid()) {
4301 movl(scratch, Immediate(0x7291));
4302 }
4303 }
4304 jmp(gc_required);
4305 return;
4306 }
4307 DCHECK(!result.is(result_end));
4309 // Load address of new object into result.
4310 LoadAllocationTopHelper(result, scratch, flags);
4312 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4313 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4316 // Calculate new top and bail out if new space is exhausted.
4317 ExternalReference allocation_limit =
4318 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4320 Register top_reg = result_end.is_valid() ? result_end : result;
4322 if (!top_reg.is(result)) {
4323 movp(top_reg, result);
4324 }
4325 addp(top_reg, Immediate(object_size));
4326 j(carry, gc_required);
4327 Operand limit_operand = ExternalOperand(allocation_limit);
4328 cmpp(top_reg, limit_operand);
4329 j(above, gc_required);
4331 // Update allocation top.
4332 UpdateAllocationTopHelper(top_reg, scratch, flags);
4334 bool tag_result = (flags & TAG_OBJECT) != 0;
4335 if (top_reg.is(result)) {
4336 if (tag_result) {
4337 subp(result, Immediate(object_size - kHeapObjectTag));
4338 } else {
4339 subp(result, Immediate(object_size));
4340 }
4341 } else if (tag_result) {
4342 // Tag the result if requested.
4343 DCHECK(kHeapObjectTag == 1);
4344 incp(result);
4345 }
4346 }
4349 void MacroAssembler::Allocate(int header_size,
4350 ScaleFactor element_size,
4351 Register element_count,
4352 Register result,
4353 Register result_end,
4354 Register scratch,
4355 Label* gc_required,
4356 AllocationFlags flags) {
4357 DCHECK((flags & SIZE_IN_WORDS) == 0);
4358 leap(result_end, Operand(element_count, element_size, header_size));
4359 Allocate(result_end, result, result_end, scratch, gc_required, flags);
4360 }
4363 void MacroAssembler::Allocate(Register object_size,
4364 Register result,
4365 Register result_end,
4366 Register scratch,
4367 Label* gc_required,
4368 AllocationFlags flags) {
4369 DCHECK((flags & SIZE_IN_WORDS) == 0);
4370 if (!FLAG_inline_new) {
4371 if (emit_debug_code()) {
4372 // Trash the registers to simulate an allocation failure.
4373 movl(result, Immediate(0x7091));
4374 movl(result_end, Immediate(0x7191));
4375 if (scratch.is_valid()) {
4376 movl(scratch, Immediate(0x7291));
4377 }
4378 // object_size is left unchanged by this function.
4379 jmp(gc_required);
4380 return;
4381 }
4383 DCHECK(!result.is(result_end));
4385 // Load address of new object into result.
4386 LoadAllocationTopHelper(result, scratch, flags);
4388 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4389 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4392 // Calculate new top and bail out if new space is exhausted.
4393 ExternalReference allocation_limit =
4394 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4395 if (!object_size.is(result_end)) {
4396 movp(result_end, object_size);
4397 }
4398 addp(result_end, result);
4399 j(carry, gc_required);
4400 Operand limit_operand = ExternalOperand(allocation_limit);
4401 cmpp(result_end, limit_operand);
4402 j(above, gc_required);
4404 // Update allocation top.
4405 UpdateAllocationTopHelper(result_end, scratch, flags);
4407 // Tag the result if requested.
4408 if ((flags & TAG_OBJECT) != 0) {
4409 addp(result, Immediate(kHeapObjectTag));
4410 }
4411 }
4414 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
4415 ExternalReference new_space_allocation_top =
4416 ExternalReference::new_space_allocation_top_address(isolate());
4418 // Make sure the object has no tag before resetting top.
4419 andp(object, Immediate(~kHeapObjectTagMask));
4420 Operand top_operand = ExternalOperand(new_space_allocation_top);
4421 #ifdef DEBUG
4422 cmpp(object, top_operand);
4423 Check(below, kUndoAllocationOfNonAllocatedMemory);
4424 #endif
4425 movp(top_operand, object);
4426 }
4429 void MacroAssembler::AllocateHeapNumber(Register result,
4430 Register scratch,
4431 Label* gc_required,
4432 MutableMode mode) {
4433 // Allocate heap number in new space.
4434 Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4436 Heap::RootListIndex map_index = mode == MUTABLE
4437 ? Heap::kMutableHeapNumberMapRootIndex
4438 : Heap::kHeapNumberMapRootIndex;
4441 LoadRoot(kScratchRegister, map_index);
4442 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4443 }
4446 void MacroAssembler::AllocateTwoByteString(Register result,
4447 Register length,
4448 Register scratch1,
4449 Register scratch2,
4450 Register scratch3,
4451 Label* gc_required) {
4452 // Calculate the number of bytes needed for the characters in the string while
4453 // observing object alignment.
4454 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
4455 kObjectAlignmentMask;
4456 DCHECK(kShortSize == 2);
4457 // scratch1 = length * 2 + kObjectAlignmentMask.
4458 leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
4459 kHeaderAlignment));
4460 andp(scratch1, Immediate(~kObjectAlignmentMask));
4461 if (kHeaderAlignment > 0) {
4462 subp(scratch1, Immediate(kHeaderAlignment));
4465 // Allocate two byte string in new space.
4466 Allocate(SeqTwoByteString::kHeaderSize,
4475 // Set the map, length and hash field.
4476 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
4477 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4478 Integer32ToSmi(scratch1, length);
4479 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4480 movp(FieldOperand(result, String::kHashFieldOffset),
4481 Immediate(String::kEmptyHashField));
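// Worked example of the size computation above (illustrative; assumes
// 8-byte object alignment, i.e. kObjectAlignmentMask == 7, and a
// hypothetical header size of 20, so kHeaderAlignment == 4): for
// length == 3, leap gives 2*3 + 7 + 4 == 17, masking gives 16, and
// subtracting the header alignment gives 12; 20 + 12 == 32 is the
// smallest multiple of 8 that holds the header plus 6 bytes of characters.
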
void MacroAssembler::AllocateOneByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
                               kObjectAlignmentMask;
  movl(scratch1, length);
  DCHECK(kCharSize == 1);
  addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
  andp(scratch1, Immediate(~kObjectAlignmentMask));
  if (kHeaderAlignment > 0) {
    subp(scratch1, Immediate(kHeaderAlignment));
  }

  // Allocate one-byte string in new space.
  Allocate(SeqOneByteString::kHeaderSize,
           times_1,
           scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
  Integer32ToSmi(scratch1, length);
  movp(FieldOperand(result, String::kLengthOffset), scratch1);
  movp(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
}

void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate cons string object in new space.
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}

void MacroAssembler::AllocateOneByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  Allocate(ConsString::kSize,
           result,
           scratch1,
           scratch2,
           gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}

void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate sliced string object in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}

void MacroAssembler::AllocateOneByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate sliced string object in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}

// Copy memory, byte-by-byte, from source to destination.  Not optimized for
// long or aligned copies.  The contents of scratch and length are destroyed.
// Destination is incremented by length; source, length and scratch are
// clobbered.
// A simpler loop is faster on small copies, but slower on large ones.
// The cld() instruction must have been emitted, to set the direction flag,
// before calling this function.
void MacroAssembler::CopyBytes(Register destination,
                               Register source,
                               Register length,
                               int min_length,
                               Register scratch) {
  DCHECK(min_length >= 0);
  if (emit_debug_code()) {
    cmpl(length, Immediate(min_length));
    Assert(greater_equal, kInvalidMinLength);
  }
  Label short_loop, len8, len16, len24, done, short_string;

  const int kLongStringLimit = 4 * kPointerSize;
  if (min_length <= kLongStringLimit) {
    cmpl(length, Immediate(kPointerSize));
    j(below, &short_string, Label::kNear);
  }

  DCHECK(source.is(rsi));
  DCHECK(destination.is(rdi));
  DCHECK(length.is(rcx));

  if (min_length <= kLongStringLimit) {
    cmpl(length, Immediate(2 * kPointerSize));
    j(below_equal, &len8, Label::kNear);
    cmpl(length, Immediate(3 * kPointerSize));
    j(below_equal, &len16, Label::kNear);
    cmpl(length, Immediate(4 * kPointerSize));
    j(below_equal, &len24, Label::kNear);
  }

  // Because source is 8-byte aligned in our uses of this function,
  // we keep source aligned for the rep movs operation by copying the odd bytes
  // at the end of the ranges.
  movp(scratch, length);
  shrl(length, Immediate(kPointerSizeLog2));
  repmovsp();
  // Move remaining bytes of length.
  andl(scratch, Immediate(kPointerSize - 1));
  movp(length, Operand(source, scratch, times_1, -kPointerSize));
  movp(Operand(destination, scratch, times_1, -kPointerSize), length);
  addp(destination, scratch);

  if (min_length <= kLongStringLimit) {
    jmp(&done, Label::kNear);
    bind(&len24);
    movp(scratch, Operand(source, 2 * kPointerSize));
    movp(Operand(destination, 2 * kPointerSize), scratch);
    bind(&len16);
    movp(scratch, Operand(source, kPointerSize));
    movp(Operand(destination, kPointerSize), scratch);
    bind(&len8);
    movp(scratch, Operand(source, 0));
    movp(Operand(destination, 0), scratch);
    // Move remaining bytes of length.
    movp(scratch, Operand(source, length, times_1, -kPointerSize));
    movp(Operand(destination, length, times_1, -kPointerSize), scratch);
    addp(destination, length);
    jmp(&done, Label::kNear);

    bind(&short_string);
    if (min_length == 0) {
      testl(length, length);
      j(zero, &done, Label::kNear);
    }

    bind(&short_loop);
    movb(scratch, Operand(source, 0));
    movb(Operand(destination, 0), scratch);
    incp(source);
    incp(destination);
    decl(length);
    j(not_zero, &short_loop);
  }

  bind(&done);
}

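// Worked example of the dispatch above (illustrative, kPointerSize == 8):
// for length == 26 the code falls through to the rep-movs path:
// 26 >> 3 == 3 qwords are copied by rep movs, advancing source and
// destination by 24; the remaining 26 & 7 == 2 bytes are covered by one
// final 8-byte move that ends exactly at destination + 26, deliberately
// overlapping bytes that were already copied.
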
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  jmp(&entry);
  bind(&loop);
  movp(Operand(start_offset, 0), filler);
  addp(start_offset, Immediate(kPointerSize));
  bind(&entry);
  cmpp(start_offset, end_offset);
  j(less, &loop);
}

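// Equivalent C-style loop (illustrative only):
//   for (; start_offset < end_offset; start_offset += kPointerSize)
//     *reinterpret_cast<Object**>(start_offset) = filler;
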
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context.  Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in rsi).
    movp(dst, rsi);
  }

  // We should not have found a with context by walking the context
  // chain (i.e., the static scope chain and runtime context chain do
  // not agree).  A variable occurring in such a scope should have
  // slot type LOOKUP and not CONTEXT.
  if (emit_debug_code()) {
    CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
                Heap::kWithContextMapRootIndex);
    Check(not_equal, kVariableResolvedToWithContext);
  }
}

void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  movp(scratch,
       Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  movp(scratch, Operand(scratch,
                        Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));

  int offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  cmpp(map_in_out, FieldOperand(scratch, offset));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  movp(map_in_out, FieldOperand(scratch, offset));
}

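// Illustrative note (not part of the original code): the JS_ARRAY_MAPS slot
// holds a FixedArray of cached maps indexed by ElementsKind, so the field
// offset of entry k is FixedArrayBase::kHeaderSize + k * kPointerSize;
// e.g., with 8-byte pointers, kind 2 lives at header + 16.
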
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif

void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  movp(function,
       Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  movp(function, Operand(function, Context::SlotOffset(index)));
}

void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map.  The global functions all have initial maps.
  movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}

int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
  // On Windows 64 stack slots are reserved by the caller for all arguments
  // including the ones passed in registers, and space is always allocated for
  // the four register arguments even if the function takes fewer than four
  // arguments.
  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
  // and the caller does not reserve stack slots for them.
  DCHECK(num_arguments >= 0);
#ifdef _WIN64
  const int kMinimumStackSlots = kRegisterPassedArguments;
  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
  return num_arguments;
#else
  if (num_arguments < kRegisterPassedArguments) return 0;
  return num_arguments - kRegisterPassedArguments;
#endif
}

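// Worked examples (illustrative): with num_arguments == 2, Windows 64
// reserves the full four-slot shadow space (returns 4) while the AMD64 ABI
// needs no stack slots (returns 0); with num_arguments == 7, Windows 64
// returns 7 and the AMD64 ABI returns 7 - 6 == 1.
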
void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  Label is_object;
  JumpIfNotSmi(string, &is_object);
  Abort(kNonObject);
  bind(&is_object);

  Push(value);
  movp(value, FieldOperand(string, HeapObject::kMapOffset));
  movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));

  andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
  cmpp(value, Immediate(encoding_mask));
  Pop(value);
  Check(equal, kUnexpectedStringType);

  // The index is assumed to be untagged coming in; tag it to compare with the
  // string length without using a temp register.  It is restored at the end
  // of this function.
  Integer32ToSmi(index, index);
  SmiCompare(index, FieldOperand(string, String::kLengthOffset));
  Check(less, kIndexIsTooLarge);

  SmiCompare(index, Smi::FromInt(0));
  Check(greater_equal, kIndexIsNegative);

  // Restore the index.
  SmiToInteger32(index, index);
}

void MacroAssembler::PrepareCallCFunction(int num_arguments) {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  DCHECK(frame_alignment != 0);
  DCHECK(num_arguments >= 0);

  // Make stack end at alignment and allocate space for arguments and old rsp.
  movp(kScratchRegister, rsp);
  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
  andp(rsp, Immediate(-frame_alignment));
  movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}

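// Illustrative walk-through (assuming 16-byte frame alignment, 8-byte
// register slots, and no stack-passed arguments, so
// argument_slots_on_stack == 0): one extra slot is reserved (subp rsp, 8),
// rsp is rounded down to a 16-byte boundary, and the original rsp is saved
// in the slot just above the argument area -- exactly where CallCFunction
// reloads it from afterwards.
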
void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  LoadAddress(rax, function);
  CallCFunction(rax, num_arguments);
}

void MacroAssembler::CallCFunction(Register function, int num_arguments) {
  DCHECK(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  DCHECK(base::OS::ActivationFrameAlignment() != 0);
  DCHECK(num_arguments >= 0);
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}

#ifdef DEBUG
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif

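// Illustrative example (not part of the original code): passing the same
// register twice, e.g. AreAliased(rax, rbx, rax, no_reg, no_reg, no_reg,
// no_reg, no_reg), yields three valid registers but only two distinct bits
// in the RegList, so the function returns true.
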
CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(NULL, address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CpuFeatures::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  if (scratch.is(object)) {
    andp(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    movp(scratch, Immediate(~Page::kPageAlignmentMask));
    andp(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
          Immediate(static_cast<uint8_t>(mask)));
  } else {
    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}

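// Illustrative note (not part of the original code): pages are aligned on a
// power-of-two boundary, so clearing the low address bits maps any interior
// pointer back to its MemoryChunk header; e.g., an object at
// page_base + 0x1234 yields page_base, where the flags word is tested.
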
void MacroAssembler::JumpIfBlack(Register object,
                                 Register bitmap_scratch,
                                 Register mask_scratch,
                                 Label* on_black,
                                 Label::Distance on_black_distance) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(object, bitmap_scratch, mask_scratch);

  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  // The mask_scratch register contains a 1 at the position of the first bit
  // and a 0 at all other positions, including the position of the second bit.
  movp(rcx, mask_scratch);
  // Make rcx into a mask that covers both marking bits using the operation
  // rcx = mask | (mask << 1).
  leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
  // Note that we are using a 4-byte aligned 8-byte load.
  andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  cmpp(mask_scratch, rcx);
  j(equal, on_black, on_black_distance);
}

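// Worked bit pattern (illustrative): if mask_scratch == 0b0100 (the first
// mark bit), leap computes rcx = 0b0100 + 2 * 0b0100 == 0b1100, covering
// both mark bits.  After the and, rcx equals mask_scratch exactly when the
// first bit is 1 and the second is 0 -- the "10" black pattern.
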
// Detect some, but not all, common pointer-free objects.  This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(
    Register value,
    Register scratch,
    Label* not_data_object,
    Label::Distance not_data_object_distance) {
  Label is_data_object;
  movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  j(equal, &is_data_object, Label::kNear);
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
        Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, not_data_object, not_data_object_distance);
  bind(&is_data_object);
}

void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
  movp(bitmap_reg, addr_reg);
  // Sign extended 32 bit immediate.
  andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  movp(rcx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shrl(rcx, Immediate(shift));
  andp(rcx,
       Immediate((Page::kPageAlignmentMask >> shift) &
                 ~(Bitmap::kBytesPerCell - 1)));

  addp(bitmap_reg, rcx);
  movp(rcx, addr_reg);
  shrl(rcx, Immediate(kPointerSizeLog2));
  andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
  movl(mask_reg, Immediate(1));
  shlp_cl(mask_reg);
}

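// Illustrative arithmetic (assuming 8-byte pointers, kPointerSizeLog2 == 3,
// and 32-bit bitmap cells, kBitsPerCellLog2 == 5, kBytesPerCellLog2 == 2):
// shift == 5 + 3 - 2 == 6, so each bitmap byte covers 8 words == 64 bytes of
// page, (offset >> 6) & ~3 is the byte offset of the containing 4-byte cell,
// and (addr >> 3) & 31 selects the bit within that cell.
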
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
  j(not_zero, &done, Label::kNear);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    Push(mask_scratch);
    // shl.  May overflow making the check conservative.
    addp(mask_scratch, mask_scratch);
    testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    Pop(mask_scratch);
  }

  // Value is white.  We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = rcx;  // Holds map while checking type.
  Register length = rcx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap-number.
  movp(map, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(not_equal, &not_heap_number, Label::kNear);
  movp(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = rcx;
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  testb(instance_type, Immediate(kExternalStringTag));
  j(zero, &not_external, Label::kNear);
  movp(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either Latin1 or UC16.
  DCHECK(kOneByteStringTag == 0x04);
  andp(length, Immediate(kStringEncodingMask));
  xorp(length, Immediate(kStringEncodingMask));
  addp(length, Immediate(0x04));
  // Value now either 4 (if Latin1) or 8 (if UC16), i.e. char-size shifted by 2.
  imulp(length, FieldOperand(value, String::kLengthOffset));
  shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
  addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  andp(length, Immediate(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white.  Mark it black.  Since we know
  // that the object is white we can make it black by flipping one bit.
  orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);

  bind(&done);
}

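// Worked example of the sequential-string size computation above
// (illustrative): for a one-byte string, instance_type & kStringEncodingMask
// == 0x04, the xor clears it to 0, and the add yields 4 == char size << 2;
// for a two-byte string the same steps yield 8.  Multiplying by the smi
// length and shifting right by 2 plus the smi shift leaves
// length * char_size, to which the header size and alignment padding are
// then added.
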
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Label next, start;
  Register empty_fixed_array_value = r8;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  movp(rcx, rax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);

  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(0));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register rcx contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  cmpp(empty_fixed_array_value,
       FieldOperand(rcx, JSObject::kElementsOffset));
  j(equal, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
  cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
  j(not_equal, call_runtime);

  bind(&no_elements);
  movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
  cmpp(rcx, null_value);
  j(not_equal, &next);
}

void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  leap(scratch_reg, Operand(receiver_reg,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  Move(kScratchRegister, new_space_start);
  cmpp(scratch_reg, kScratchRegister);
  j(less, no_memento_found);
  cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
  j(greater, no_memento_found);
  CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
              Heap::kAllocationMementoMapRootIndex);
}

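// Illustrative layout note (not part of the original code): a memento, if
// present, sits immediately after the JSArray, so scratch_reg is pointed
// just past where it would end.  The two comparisons confirm that address
// lies inside new space before the map at
// scratch_reg - AllocationMemento::kSize is compared against the
// allocation-memento map.
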
void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
  DCHECK(!scratch1.is(scratch0));
  Register current = scratch0;
  Label loop_again;

  movp(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  movp(current, FieldOperand(current, HeapObject::kMapOffset));
  movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  movp(current, FieldOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  j(not_equal, &loop_again);
}

void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
  DCHECK(!dividend.is(rax));
  DCHECK(!dividend.is(rdx));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  movl(rax, Immediate(mag.multiplier));
  imull(dividend);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) addl(rdx, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
  if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
  movl(rax, dividend);
  shrl(rax, Immediate(31));
  addl(rdx, rax);
}

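// Worked example (illustrative): for divisor == 3 the magic multiplier is
// 0x55555556 with shift 0.  For dividend == 7, imull leaves
// high32(7 * 0x55555556) == 2 in rdx, i.e. 7 / 3.  For dividend == -7 the
// signed high word is -3; shrl(rax, 31) extracts the sign bit 1, and the
// final addl corrects rdx to -2, the truncated quotient.
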
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64