// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false),
      root_array_available_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}

static const int64_t kInvalidRootRegisterDelta = -1;

int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
  if (predictable_code_size() &&
      (other.address() < reinterpret_cast<Address>(isolate()) ||
       other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
    return kInvalidRootRegisterDelta;
  }
  Address roots_register_value = kRootRegisterBias +
      reinterpret_cast<Address>(isolate()->heap()->roots_array_start());

  int64_t delta = kInvalidRootRegisterDelta;  // Bogus initialization.
  if (kPointerSize == kInt64Size) {
    delta = other.address() - roots_register_value;
  } else {
    // For x32, zero extend the address to 64-bit and calculate the delta.
    uint64_t o = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(other.address()));
    uint64_t r = static_cast<uint32_t>(
        reinterpret_cast<intptr_t>(roots_register_value));
    delta = o - r;
  }
  return delta;
}

Operand MacroAssembler::ExternalOperand(ExternalReference target,
                                        Register scratch) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(target);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      return Operand(kRootRegister, static_cast<int32_t>(delta));
    }
  }
  Move(scratch, target);
  return Operand(scratch, 0);
}

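// A worked example for ExternalOperand above, with illustrative numbers only
// (not real heap layouts): if the roots array starts at 0x1000 and the bias is
// 128, kRootRegister holds 0x1080, so an external reference at 0x1400 becomes
// Operand(kRootRegister, 0x380) -- a single addressing mode instead of a
// 10-byte move of a full 64-bit address into a scratch register.
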
void MacroAssembler::Load(Register destination, ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  if (destination.is(rax)) {
    load_rax(source);
  } else {
    Move(kScratchRegister, source);
    movp(destination, Operand(kScratchRegister, 0));
  }
}

void MacroAssembler::Store(ExternalReference destination, Register source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(destination);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
      return;
    }
  }
  // Safe code.
  if (source.is(rax)) {
    store_rax(destination);
  } else {
    Move(kScratchRegister, destination);
    movp(Operand(kScratchRegister, 0), source);
  }
}

void MacroAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  Move(destination, source);
}

int MacroAssembler::LoadAddressSize(ExternalReference source) {
  if (root_array_available_ && !serializer_enabled()) {
    // This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
    // instruction below.
    int64_t delta = RootRegisterDelta(source);
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
      // Operand is leap(scratch, Operand(kRootRegister, delta));
      // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
      int size = 4;
      if (!is_int8(static_cast<int32_t>(delta))) {
        size += 3;  // Need full four-byte displacement in lea.
      }
      return size;
    }
  }
  // Size of movp(destination, src);
  return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
}

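// Illustrative sizes for the calculation above: a delta of 0x40 fits in a
// signed byte, so the leap encodes as REX.W 8D ModRM Disp8 = 4 bytes, while a
// delta of 0x1000 needs the full 32-bit displacement, giving 7. When the root
// array cannot be used, the size is that of moving a 64-bit immediate into
// the scratch register.
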
void MacroAssembler::PushAddress(ExternalReference source) {
  int64_t address = reinterpret_cast<int64_t>(source.address());
  if (is_int32(address) && !serializer_enabled()) {
    if (emit_debug_code()) {
      Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
    }
    Push(Immediate(static_cast<int32_t>(address)));
    return;
  }
  LoadAddress(kScratchRegister, source);
  Push(kScratchRegister);
}

void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  movp(destination, Operand(kRootRegister,
                            (index << kPointerSizeLog2) - kRootRegisterBias));
}

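// E.g., with 8-byte pointers and an assumed bias of 128, root index 2 above
// loads from Operand(kRootRegister, 2 * 8 - 128); biasing the register lets
// more of the roots array be reached with one-byte displacements.
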
void MacroAssembler::LoadRootIndexed(Register destination,
                                     Register variable_offset,
                                     int fixed_offset) {
  DCHECK(root_array_available_);
  movp(destination,
       Operand(kRootRegister,
               variable_offset, times_pointer_size,
               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
}

void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  DCHECK(root_array_available_);
  movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
       source);
}

void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}

void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  cmpp(with, Operand(kRootRegister,
                     (index << kPointerSizeLog2) - kRootRegisterBias));
}

void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  DCHECK(root_array_available_);
  DCHECK(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  cmpp(with, kScratchRegister);
}

void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register addr,
                                         Register scratch,
                                         SaveFPRegsMode save_fp,
                                         RememberedSetFinalAction and_then) {
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // Load store buffer top.
  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Store pointer to buffer.
  movp(Operand(scratch, 0), addr);
  // Increment buffer top.
  addp(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Call stub on end of buffer.
  Label done;
  // Check for end of buffer.
  testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    ret(0);
    bind(&buffer_overflowed);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  }
  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ret(0);
  } else {
    DCHECK(and_then == kFallThroughAtEnd);
    bind(&done);
  }
}

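// The fast path emitted above behaves like this C-style sketch (pseudocode,
// not the stub's actual implementation):
//   *store_buffer_top++ = addr;
//   if (store_buffer_top & kStoreBufferOverflowBit) StoreBufferOverflowStub();
// Recording the slot keeps old-to-new pointers findable without scanning all
// of old space on a scavenge.
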
void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch,
                                Label::Distance distance) {
  if (serializer_enabled()) {
    // Can't do arithmetic on external references if it might get serialized.
    // The mask isn't really an address. We load it as an external reference in
    // case the size of the new space is different between the snapshot maker
    // and the running system.
    if (scratch.is(object)) {
      Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
      andp(scratch, kScratchRegister);
    } else {
      Move(scratch, ExternalReference::new_space_mask(isolate()));
      andp(scratch, object);
    }
    Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
    cmpp(scratch, kScratchRegister);
    j(cc, branch, distance);
  } else {
    DCHECK(kPointerSize == kInt64Size
        ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
        : kPointerSize == kInt32Size);
    intptr_t new_space_start =
        reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
    Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
         Assembler::RelocInfoNone());
    if (scratch.is(object)) {
      addp(scratch, kScratchRegister);
    } else {
      leap(scratch, Operand(object, kScratchRegister, times_1, 0));
    }
    andp(scratch,
         Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
    j(cc, branch, distance);
  }
}

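// Both paths above test the same predicate: the masked difference
// (object - new_space_start) & mask is zero exactly when the object lies in
// the new-space reservation. Illustrative numbers only: with start =
// 0x20000000 and mask = ~0x0FFFFFFF, an object at 0x20001000 yields
// 0x1000 & mask == 0, so passing cc == equal means "jump if in new space".
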
void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  leap(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    Label ok;
    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, Assembler::RelocInfoNone());
    Move(dst, kZapValue, Assembler::RelocInfoNone());
  }
}

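// A typical call site for the barrier above, as a sketch (register choices
// are illustrative only):
//   movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rax);
//   RecordWriteField(rbx, JSObject::kPropertiesOffset, rax, rcx,
//                    kDontSaveFPRegs);
// Here rcx serves as the address scratch; under --debug-code both value and
// scratch come out zapped, per the code above.
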
void MacroAssembler::RecordWriteArray(
    Register object,
    Register value,
    Register index,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Array access: calculate the destination address. Index is not a smi.
  Register dst = index;
  leap(dst, Operand(object, index, times_pointer_size,
                    FixedArray::kHeaderSize - kHeapObjectTag));

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(value, kZapValue, Assembler::RelocInfoNone());
    Move(index, kZapValue, Assembler::RelocInfoNone());
  }
}

void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       SaveFPRegsMode fp_mode) {
  DCHECK(!object.is(kScratchRegister));
  DCHECK(!object.is(map));
  DCHECK(!object.is(dst));
  DCHECK(!map.is(dst));
  AssertNotSmi(object);

  if (emit_debug_code()) {
    Label ok;
    if (map.is(kScratchRegister)) pushq(map);
    CompareMap(map, isolate()->factory()->meta_map());
    if (map.is(kScratchRegister)) popq(map);
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    if (map.is(kScratchRegister)) pushq(map);
    cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
    if (map.is(kScratchRegister)) popq(map);
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // Compute the address.
  leap(dst, FieldOperand(object, HeapObject::kMapOffset));

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set. This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(dst, kZapValue, Assembler::RelocInfoNone());
    Move(map, kZapValue, Assembler::RelocInfoNone());
  }
}

void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  DCHECK(!object.is(address));
  DCHECK(!value.is(address));
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    cmpp(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  zero,
                  &done,
                  Label::kNear);
  }

  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Move(address, kZapValue, Assembler::RelocInfoNone());
    Move(value, kZapValue, Assembler::RelocInfoNone());
  }
}

void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}

void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Label ok;
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedDoubleArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedCOWArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
  }
}

void MacroAssembler::Check(Condition cc, BailoutReason reason) {
  Label L;
  j(cc, &L, Label::kNear);
  Abort(reason);
  // Control will not return here.
  bind(&L);
}

void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    Label alignment_as_expected;
    testp(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}

void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  testl(result, result);
  j(not_zero, &ok, Label::kNear);
  testl(op, op);
  j(sign, then_label);
  bind(&ok);
}

void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    int3();
    return;
  }
#endif

  Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
       Assembler::RelocInfoNone());
  Push(kScratchRegister);

  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 1);
  } else {
    CallRuntime(Runtime::kAbort, 1);
  }
  // Control will not return here.
  int3();
}

void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}

void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}

void MacroAssembler::StubReturn(int argc) {
  DCHECK(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}

bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();
}

void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  if (!hash.is(index)) {
    movl(index, hash);
  }
  DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
}

void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  LoadAddress(rbx, ExternalReference(f, isolate()));
  CEntryStub ces(isolate(), f->result_size, save_doubles);
  CallStub(&ces);
}

void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Set(rax, num_arguments);
  LoadAddress(rbx, ext);

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}

void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // ----------- S t a t e -------------
  //  -- rsp[0]                 : return address
  //  -- rsp[8]                 : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  // -----------------------------------

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  JumpToExternalReference(ext, result_size);
}

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}

void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             int result_size) {
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  CEntryStub ces(isolate(), result_size);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}

void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinEntry(rdx, id);
  InvokeCode(rdx, expected, expected, flag, call_wrapper);
}

void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  movp(target, FieldOperand(target,
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}

void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  DCHECK(!target.is(rdi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(rdi, id);
  movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}

#define REG(Name) { kRegister_ ## Name ## _Code }

static const Register saved_regs[] = {
  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
  REG(r9), REG(r10), REG(r11)
};

#undef REG

static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);

void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                     Register exclusion1,
                                     Register exclusion2,
                                     Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      pushq(reg);
    }
  }
  // r12 to r15 are callee save on all platforms.
  if (fp_mode == kSaveFPRegs) {
    subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(rsp, i * kDoubleSize), reg);
    }
  }
}

void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
                                    Register exclusion1,
                                    Register exclusion2,
                                    Register exclusion3) {
  if (fp_mode == kSaveFPRegs) {
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(rsp, i * kDoubleSize));
    }
    addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
  }
  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      popq(reg);
    }
  }
}

void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
  // Clear dst first to break the false dependency on its previous value.
  xorps(dst, dst);
  cvtlsi2sd(dst, src);
}

void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
  xorps(dst, dst);
  cvtlsi2sd(dst, src);
}

void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    movsxbq(dst, src);
  } else if (r.IsUInteger8()) {
    movzxbl(dst, src);
  } else if (r.IsInteger16()) {
    movsxwq(dst, src);
  } else if (r.IsUInteger16()) {
    movzxwl(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);
  } else {
    movp(dst, src);
  }
}

void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    movb(dst, src);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    movw(dst, src);
  } else if (r.IsInteger32()) {
    movl(dst, src);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    movp(dst, src);
  }
}

void MacroAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(dst, x);
  }
}

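// Why the cases above suffice: a 32-bit movl zero-extends into the upper half
// of the register, so it covers any unsigned 32-bit value; movq with imm32
// sign-extends, covering negative 32-bit values; and xorl is the shortest
// encoding for zero.
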
void MacroAssembler::Set(const Operand& dst, intptr_t x) {
  if (kPointerSize == kInt64Size) {
    if (is_int32(x)) {
      movp(dst, Immediate(static_cast<int32_t>(x)));
    } else {
      Set(kScratchRegister, x);
      movp(dst, kScratchRegister);
    }
  } else {
    movp(dst, Immediate(static_cast<int32_t>(x)));
  }
}

// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.

bool MacroAssembler::IsUnsafeInt(const int32_t x) {
  static const int kMaxBits = 17;
  return !is_intn(x, kMaxBits);
}

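// Immediates wider than kMaxBits are treated as unsafe: a large constant
// could embed attacker-chosen bytes in the instruction stream (JIT spraying),
// so SafeMove/SafePush below split such values and recombine them with an xor
// against the per-isolate jit cookie.
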
void MacroAssembler::SafeMove(Register dst, Smi* src) {
  DCHECK(!dst.is(kScratchRegister));
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(dst, kScratchRegister);
    } else {
      DCHECK(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      movp(dst, Immediate(value ^ jit_cookie()));
      xorp(dst, Immediate(jit_cookie()));
    }
  } else {
    Move(dst, src);
  }
}

void MacroAssembler::SafePush(Smi* src) {
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
    if (SmiValuesAre32Bits()) {
      // JIT cookie can be converted to Smi.
      Push(Smi::FromInt(src->value() ^ jit_cookie()));
      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
      xorp(Operand(rsp, 0), kScratchRegister);
    } else {
      DCHECK(SmiValuesAre31Bits());
      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
      Push(Immediate(value ^ jit_cookie()));
      xorp(Operand(rsp, 0), Immediate(jit_cookie()));
    }
  } else {
    Push(src);
  }
}

Register MacroAssembler::GetSmiConstant(Smi* source) {
  int value = source->value();
  if (value == 0) {
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
  }
  LoadSmiConstant(kScratchRegister, source);
  return kScratchRegister;
}

void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
  Move(dst, source, Assembler::RelocInfoNone());
}

void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  shlp(dst, Immediate(kSmiShift));
}

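// Worked example for the tagging above: with 32-bit smis (kSmiShift == 32)
// the integer 5 becomes 0x0000000500000000; with 31-bit smis (kSmiShift == 1)
// it becomes 0xa. In both schemes the low tag bit stays zero, which is what
// CheckSmi tests below.
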
void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    testb(dst, Immediate(0x01));
    Label ok;
    j(zero, &ok, Label::kNear);
    Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
    bind(&ok);
  }

  if (SmiValuesAre32Bits()) {
    DCHECK(kSmiShift % kBitsPerByte == 0);
    movl(Operand(dst, kSmiShift / kBitsPerByte), src);
  } else {
    DCHECK(SmiValuesAre31Bits());
    Integer32ToSmi(kScratchRegister, src);
    movp(dst, kScratchRegister);
  }
}

void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                Register src,
                                                int constant) {
  if (dst.is(src)) {
    addl(dst, Immediate(constant));
  } else {
    leal(dst, Operand(src, constant));
  }
  shlp(dst, Immediate(kSmiShift));
}

void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movp(dst, src);
  }

  if (SmiValuesAre32Bits()) {
    shrp(dst, Immediate(kSmiShift));
  } else {
    DCHECK(SmiValuesAre31Bits());
    sarl(dst, Immediate(kSmiShift));
  }
}

void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movl(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movl(dst, src);
    sarl(dst, Immediate(kSmiShift));
  }
}

void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movp(dst, src);
  }
  sarp(dst, Immediate(kSmiShift));
  if (kPointerSize == kInt32Size) {
    // Sign extend to 64-bit.
    movsxlq(dst, dst);
  }
}

void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movp(dst, src);
    SmiToInteger64(dst, dst);
  }
}

void MacroAssembler::SmiTest(Register src) {
  AssertSmi(src);
  testp(src, src);
}

void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
  AssertSmi(smi1);
  AssertSmi(smi2);
  cmpp(smi1, smi2);
}

void MacroAssembler::SmiCompare(Register dst, Smi* src) {
  AssertSmi(dst);
  Cmp(dst, src);
}

void MacroAssembler::Cmp(Register dst, Smi* src) {
  DCHECK(!dst.is(kScratchRegister));
  if (src->value() == 0) {
    testp(dst, dst);
  } else {
    Register constant_reg = GetSmiConstant(src);
    cmpp(dst, constant_reg);
  }
}

void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}

void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
  AssertSmi(dst);
  AssertSmi(src);
  cmpp(dst, src);
}

void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
  AssertSmi(dst);
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
  } else {
    DCHECK(SmiValuesAre31Bits());
    cmpl(dst, Immediate(src));
  }
}

void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  DCHECK(!dst.AddressUsesRegister(smi_reg));
  cmpp(dst, smi_reg);
}

void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
  if (SmiValuesAre32Bits()) {
    cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
  } else {
    DCHECK(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, dst);
    cmpl(kScratchRegister, src);
  }
}

void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                           Register src,
                                                           int power) {
  DCHECK(power >= 0);
  DCHECK(power < 64);
  if (power == 0) {
    SmiToInteger64(dst, src);
    return;
  }
  if (!dst.is(src)) {
    movp(dst, src);
  }
  if (power < kSmiShift) {
    sarp(dst, Immediate(kSmiShift - power));
  } else if (power > kSmiShift) {
    shlp(dst, Immediate(power - kSmiShift));
  }
}

void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                         Register src,
                                                         int power) {
  DCHECK((0 <= power) && (power < 32));
  if (dst.is(src)) {
    shrp(dst, Immediate(power + kSmiShift));
  } else {
    UNIMPLEMENTED();  // Not used.
  }
}

void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 Label* on_not_smis,
                                 Label::Distance near_jump) {
  if (dst.is(src1) || dst.is(src2)) {
    DCHECK(!src1.is(kScratchRegister));
    DCHECK(!src2.is(kScratchRegister));
    movp(kScratchRegister, src1);
    orp(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
    movp(dst, kScratchRegister);
  } else {
    movp(dst, src1);
    orp(dst, src2);
    JumpIfNotSmi(dst, on_not_smis, near_jump);
  }
}

Condition MacroAssembler::CheckSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}

Condition MacroAssembler::CheckSmi(const Operand& src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}

Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  // Test that both bits of the mask 0x8000000000000001 are zero.
  movp(kScratchRegister, src);
  rolp(kScratchRegister, Immediate(1));
  testb(kScratchRegister, Immediate(3));
  return zero;
}

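// The rotate above folds the sign bit down next to the tag bit: after rolp by
// one, bit 0 is the old sign bit and bit 1 the old tag bit, so a single testb
// against 3 rejects both negative smis and non-smis. E.g. the tagged value
// 0x8000000000000000 (a negative smi) rotates to 1 and fails the test.
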
Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
  if (SmiValuesAre32Bits()) {
    leal(kScratchRegister, Operand(first, second, times_1, 0));
    testb(kScratchRegister, Immediate(0x03));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movl(kScratchRegister, first);
    orl(kScratchRegister, second);
    testb(kScratchRegister, Immediate(kSmiTagMask));
  }
  return zero;
}

Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
                                                  Register second) {
  if (first.is(second)) {
    return CheckNonNegativeSmi(first);
  }
  movp(kScratchRegister, first);
  orp(kScratchRegister, second);
  rolp(kScratchRegister, Immediate(1));
  testl(kScratchRegister, Immediate(3));
  return zero;
}

Condition MacroAssembler::CheckEitherSmi(Register first,
                                         Register second,
                                         Register scratch) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  if (scratch.is(second)) {
    andl(scratch, first);
  } else {
    if (!scratch.is(first)) {
      movl(scratch, first);
    }
    andl(scratch, second);
  }
  testb(scratch, Immediate(kSmiTagMask));
  return zero;
}

Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // A 32-bit integer value can always be converted to a smi.
    return always;
  } else {
    DCHECK(SmiValuesAre31Bits());
    cmpl(src, Immediate(0xc0000000));
    return positive;
  }
}

Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
  if (SmiValuesAre32Bits()) {
    // An unsigned 32-bit integer value is valid as long as the high bit
    // is not set.
    testl(src, src);
    return positive;
  } else {
    DCHECK(SmiValuesAre31Bits());
    testl(src, Immediate(0xc0000000));
    return zero;
  }
}

void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
  if (dst.is(src)) {
    andl(dst, Immediate(kSmiTagMask));
  } else {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  }
}

void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
  if (!(src.AddressUsesRegister(dst))) {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  } else {
    movl(dst, src);
    andl(dst, Immediate(kSmiTagMask));
  }
}

void MacroAssembler::JumpIfValidSmiValue(Register src,
                                         Label* on_valid,
                                         Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(is_valid, on_valid, near_jump);
}

void MacroAssembler::JumpIfNotValidSmiValue(Register src,
                                            Label* on_invalid,
                                            Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}

void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
                                             Label* on_valid,
                                             Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(is_valid, on_valid, near_jump);
}

void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
                                                Label* on_invalid,
                                                Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}

void MacroAssembler::JumpIfSmi(Register src,
                               Label* on_smi,
                               Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(smi, on_smi, near_jump);
}

void MacroAssembler::JumpIfNotSmi(Register src,
                                  Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}

void MacroAssembler::JumpUnlessNonNegativeSmi(
    Register src, Label* on_not_smi_or_negative,
    Label::Distance near_jump) {
  Condition non_negative_smi = CheckNonNegativeSmi(src);
  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
}

void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             Smi* constant,
                                             Label* on_equals,
                                             Label::Distance near_jump) {
  SmiCompare(src, constant);
  j(equal, on_equals, near_jump);
}

void MacroAssembler::JumpIfNotBothSmi(Register src1,
                                      Register src2,
                                      Label* on_not_both_smi,
                                      Label::Distance near_jump) {
  Condition both_smi = CheckBothSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}

void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
                                                  Register src2,
                                                  Label* on_not_both_smi,
                                                  Label::Distance near_jump) {
  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}

void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    addp(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    addp(dst, src);
  }
}

void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
  if (constant->value() != 0) {
    if (SmiValuesAre32Bits()) {
      addl(Operand(dst, kSmiShift / kBitsPerByte),
           Immediate(constant->value()));
    } else {
      DCHECK(SmiValuesAre31Bits());
      addp(dst, Immediate(constant));
    }
  }
}

void MacroAssembler::SmiAddConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    SmiOperationExecutionMode mode,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    addp(dst, kScratchRegister);
    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
      j(no_overflow, bailout_label, near_jump);
      DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
      subp(dst, kScratchRegister);
    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
        Label done;
        j(no_overflow, &done, Label::kNear);
        subp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        bind(&done);
      } else {
        // Bailout on overflow without preserving src.
        j(overflow, bailout_label, near_jump);
      }
    } else {
      CHECK(mode.IsEmpty());
    }
  } else {
    DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
    DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
    LoadSmiConstant(dst, constant);
    addp(dst, src);
    j(overflow, bailout_label, near_jump);
  }
}

void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    subp(dst, constant_reg);
  } else {
    if (constant->value() == Smi::kMinValue) {
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result, it only
      // differs on the overflow bit, which we don't check here.
      addp(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
      addp(dst, src);
    }
  }
}

void MacroAssembler::SmiSubConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    SmiOperationExecutionMode mode,
                                    Label* bailout_label,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    LoadSmiConstant(kScratchRegister, constant);
    subp(dst, kScratchRegister);
    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
      j(no_overflow, bailout_label, near_jump);
      DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
      addp(dst, kScratchRegister);
    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
        Label done;
        j(no_overflow, &done, Label::kNear);
        addp(dst, kScratchRegister);
        jmp(bailout_label, near_jump);
        bind(&done);
      } else {
        // Bailout on overflow without preserving src.
        j(overflow, bailout_label, near_jump);
      }
    } else {
      CHECK(mode.IsEmpty());
    }
  } else {
    DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
    DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
    if (constant->value() == Smi::kMinValue) {
      DCHECK(!dst.is(kScratchRegister));
      movp(dst, src);
      LoadSmiConstant(kScratchRegister, constant);
      subp(dst, kScratchRegister);
      j(overflow, bailout_label, near_jump);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
      addp(dst, src);
      j(overflow, bailout_label, near_jump);
    }
  }
}

void MacroAssembler::SmiNeg(Register dst,
                            Register src,
                            Label* on_smi_result,
                            Label::Distance near_jump) {
  if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    movp(kScratchRegister, src);
    negp(dst);  // Low 32 bits are retained as zero by negation.
    // Test if result is zero or Smi::kMinValue.
    cmpp(dst, kScratchRegister);
    j(not_equal, on_smi_result, near_jump);
    movp(src, kScratchRegister);
  } else {
    movp(dst, src);
    negp(dst);
    cmpp(dst, src);
    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
    j(not_equal, on_smi_result, near_jump);
  }
}

template<class T>
static void SmiAddHelper(MacroAssembler* masm,
                         Register dst,
                         Register src1,
                         T src2,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
  if (dst.is(src1)) {
    Label done;
    masm->addp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    // Restore src1.
    masm->subp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->bind(&done);
  } else {
    masm->movp(dst, src1);
    masm->addp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);
  }
}

void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!dst.is(src2));
  SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}

void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!src2.AddressUsesRegister(dst));
  SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}

void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible.
  if (!dst.is(src1)) {
    if (emit_debug_code()) {
      movp(kScratchRegister, src1);
      addp(kScratchRegister, src2);
      Check(no_overflow, kSmiAdditionOverflow);
    }
    leap(dst, Operand(src1, src2, times_1, 0));
  } else {
    addp(dst, src2);
    Assert(no_overflow, kSmiAdditionOverflow);
  }
}

template<class T>
static void SmiSubHelper(MacroAssembler* masm,
                         Register dst,
                         Register src1,
                         T src2,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
  if (dst.is(src1)) {
    Label done;
    masm->subp(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    // Restore src1.
    masm->addp(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->bind(&done);
  } else {
    masm->movp(dst, src1);
    masm->subp(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);
  }
}

void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!dst.is(src2));
  SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}

void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK_NOT_NULL(on_not_smi_result);
  DCHECK(!src2.AddressUsesRegister(dst));
  SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}

template<typename T>
static void SmiSubNoOverflowHelper(MacroAssembler* masm,
                                   Register dst,
                                   Register src1,
                                   T src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  if (!dst.is(src1)) {
    masm->movp(dst, src1);
  }
  masm->subp(dst, src2);
  masm->Assert(no_overflow, kSmiSubtractionOverflow);
}

void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
  DCHECK(!dst.is(src2));
  SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
}

void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2) {
  SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
}

void MacroAssembler::SmiMul(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!dst.is(src2));
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));

  if (dst.is(src1)) {
    Label failure, zero_correct_result;
    movp(kScratchRegister, src1);  // Create backup for later testing.
    SmiToInteger64(dst, src1);
    imulp(dst, src2);
    j(overflow, &failure, Label::kNear);

    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testp(dst, dst);
    j(not_zero, &correct_result, Label::kNear);

    movp(dst, kScratchRegister);
    xorp(dst, src2);
    // Result was positive zero.
    j(positive, &zero_correct_result, Label::kNear);

    bind(&failure);  // Reused failure exit, restores src1.
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);

    bind(&zero_correct_result);
    Set(dst, 0);

    bind(&correct_result);
  } else {
    SmiToInteger64(dst, src1);
    imulp(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testp(dst, dst);
    j(not_zero, &correct_result, Label::kNear);
    // One of src1 and src2 is zero, so check whether the other is
    // negative.
    movp(kScratchRegister, src1);
    xorp(kScratchRegister, src2);
    j(negative, on_not_smi_result, near_jump);
    bind(&correct_result);
  }
}

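// The zero-product bailout above exists because, in JS, 0 * -4 is -0, which a
// smi cannot represent; the slow path must allocate a heap number. Xoring the
// operands exposes the sign of the non-zero operand in the sign flag.
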
void MacroAssembler::SmiDiv(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src2.is(rax));
  DCHECK(!src2.is(rdx));
  DCHECK(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  testp(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movp(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  // We need to rule out dividing Smi::kMinValue by -1, since that would
  // overflow in idiv and raise an exception.
  // We combine this with negative zero test (negative zero only happens
  // when dividing zero by a negative number).

  // We overshoot a little and go to slow case if we divide min-value
  // by any negative value, not just -1.
  Label safe_div;
  testl(rax, Immediate(~Smi::kMinValue));
  j(not_zero, &safe_div, Label::kNear);
  testp(src2, src2);
  if (src1.is(rax)) {
    j(positive, &safe_div, Label::kNear);
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
  } else {
    j(negative, on_not_smi_result, near_jump);
  }
  bind(&safe_div);

  SmiToInteger32(src2, src2);
  // Sign extend src1 into edx:eax.
  cdq();
  idivl(src2);
  Integer32ToSmi(src2, src2);
  // Check that the remainder is zero.
  testl(rdx, rdx);
  if (src1.is(rax)) {
    Label smi_result;
    j(zero, &smi_result, Label::kNear);
    movp(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
    bind(&smi_result);
  } else {
    j(not_zero, on_not_smi_result, near_jump);
  }
  if (!dst.is(src1) && src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  Integer32ToSmi(dst, rax);
}

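// Recap of the smi-specific hazards handled above: Smi::kMinValue / -1
// overflows idivl (the true quotient does not fit), and 0 / negative must
// produce -0.0, which only a heap number can represent. A nonzero remainder
// also bails out, since then the JS result is not an integer.
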
void MacroAssembler::SmiMod(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!src2.is(rax));
  DCHECK(!src2.is(rdx));
  DCHECK(!src1.is(rdx));
  DCHECK(!src1.is(src2));

  testp(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movp(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  SmiToInteger32(src2, src2);

  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
  Label safe_div;
  cmpl(rax, Immediate(Smi::kMinValue));
  j(not_equal, &safe_div, Label::kNear);
  cmpl(src2, Immediate(-1));
  j(not_equal, &safe_div, Label::kNear);
  // Retag inputs and go slow case.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  jmp(on_not_smi_result, near_jump);
  bind(&safe_div);

  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  // Restore smi tags on inputs.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movp(src1, kScratchRegister);
  }
  // Check for a negative zero result. If the result is zero, and the
  // dividend is negative, go slow to return a floating point negative zero.
  Label smi_result;
  testl(rdx, rdx);
  j(not_zero, &smi_result, Label::kNear);
  testp(src1, src1);
  j(negative, on_not_smi_result, near_jump);
  bind(&smi_result);
  Integer32ToSmi(dst, rdx);
}

void MacroAssembler::SmiNot(Register dst, Register src) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src.is(kScratchRegister));
  if (SmiValuesAre32Bits()) {
    // Set tag and padding bits before negating, so that they are zero
    // afterwards.
    movl(kScratchRegister, Immediate(~0));
  } else {
    DCHECK(SmiValuesAre31Bits());
    movl(kScratchRegister, Immediate(1));
  }
  if (dst.is(src)) {
    xorp(dst, kScratchRegister);
  } else {
    leap(dst, Operand(src, kScratchRegister, times_1, 0));
  }
  notp(dst);
}

void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
  DCHECK(!dst.is(src2));
  if (!dst.is(src1)) {
    movp(dst, src1);
  }
  andp(dst, src2);
}

void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    Set(dst, 0);
  } else if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    andp(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    andp(dst, src);
  }
}

void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    DCHECK(!src1.is(src2));
    movp(dst, src1);
  }
  orp(dst, src2);
}

void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    orp(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    orp(dst, src);
  }
}

void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    DCHECK(!src1.is(src2));
    movp(dst, src1);
  }
  xorp(dst, src2);
}

void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    DCHECK(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    xorp(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    xorp(dst, src);
  }
}

void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                     Register src,
                                                     int shift_value) {
  DCHECK(is_uint5(shift_value));
  if (shift_value > 0) {
    if (dst.is(src)) {
      sarp(dst, Immediate(shift_value + kSmiShift));
      shlp(dst, Immediate(kSmiShift));
    } else {
      UNIMPLEMENTED();  // Not used.
    }
  }
}

void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                          Register src,
                                          int shift_value,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  if (SmiValuesAre32Bits()) {
    if (!dst.is(src)) {
      movp(dst, src);
    }
    if (shift_value > 0) {
      // Shift amount is specified by the lower 5 bits, not six as in the shl
      // opcode.
      shlq(dst, Immediate(shift_value & 0x1f));
    }
  } else {
    DCHECK(SmiValuesAre31Bits());
    if (dst.is(src)) {
      UNIMPLEMENTED();  // Not used.
    } else {
      SmiToInteger32(dst, src);
      shll(dst, Immediate(shift_value));
      JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
      Integer32ToSmi(dst, dst);
    }
  }
}

void MacroAssembler::SmiShiftLogicalRightConstant(
    Register dst, Register src, int shift_value,
    Label* on_not_smi_result, Label::Distance near_jump) {
  // A logical right shift interprets its result as an *unsigned* number.
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    if (shift_value == 0) {
      testp(src, src);
      j(negative, on_not_smi_result, near_jump);
    }
    if (SmiValuesAre32Bits()) {
      movp(dst, src);
      shrp(dst, Immediate(shift_value + kSmiShift));
      shlp(dst, Immediate(kSmiShift));
    } else {
      DCHECK(SmiValuesAre31Bits());
      SmiToInteger32(dst, src);
      shrp(dst, Immediate(shift_value));
      JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
      Integer32ToSmi(dst, dst);
    }
  }
}

void MacroAssembler::SmiShiftLeft(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smi_result,
                                  Label::Distance near_jump) {
  if (SmiValuesAre32Bits()) {
    DCHECK(!dst.is(rcx));
    if (!dst.is(src1)) {
      movp(dst, src1);
    }
    // Untag shift amount.
    SmiToInteger32(rcx, src2);
    // Shift amount is specified by the lower 5 bits, not six as in the shl
    // opcode.
    andp(rcx, Immediate(0x1f));
    shlq_cl(dst);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DCHECK(!dst.is(kScratchRegister));
    DCHECK(!src1.is(kScratchRegister));
    DCHECK(!src2.is(kScratchRegister));
    DCHECK(!dst.is(src2));
    DCHECK(!dst.is(rcx));

    if (src1.is(rcx) || src2.is(rcx)) {
      movq(kScratchRegister, rcx);
    }
    if (dst.is(src1)) {
      UNIMPLEMENTED();  // Not used.
    } else {
      Label valid_result;
      SmiToInteger32(dst, src1);
      SmiToInteger32(rcx, src2);
      shll_cl(dst);
      JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
      // As src1 or src2 cannot be dst, we do not need to restore them for
      // calculating the value.
      if (src1.is(rcx) || src2.is(rcx)) {
        if (src1.is(rcx)) {
          movq(src1, kScratchRegister);
        } else {
          movq(src2, kScratchRegister);
        }
      }
      jmp(on_not_smi_result, near_jump);
      bind(&valid_result);
      Integer32ToSmi(dst, dst);
    }
  }
}

void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Register src1,
                                          Register src2,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(src2));
  DCHECK(!dst.is(rcx));
  if (src1.is(rcx) || src2.is(rcx)) {
    movq(kScratchRegister, rcx);
  }
  if (dst.is(src1)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    Label valid_result;
    SmiToInteger32(dst, src1);
    SmiToInteger32(rcx, src2);
    shrl_cl(dst);
    JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
    // As src1 or src2 cannot be dst, we do not need to restore them for
    // calculating the value.
    if (src1.is(rcx) || src2.is(rcx)) {
      if (src1.is(rcx)) {
        movq(src1, kScratchRegister);
      } else {
        movq(src2, kScratchRegister);
      }
    }
    jmp(on_not_smi_result, near_jump);
    bind(&valid_result);
    Integer32ToSmi(dst, dst);
  }
}

void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                             Register src1,
                                             Register src2) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(rcx));

  SmiToInteger32(rcx, src2);
  if (!dst.is(src1)) {
    movp(dst, src1);
  }
  SmiToInteger32(dst, dst);
  sarl_cl(dst);
  Integer32ToSmi(dst, dst);
}

void MacroAssembler::SelectNonSmi(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smis,
                                  Label::Distance near_jump) {
  DCHECK(!dst.is(kScratchRegister));
  DCHECK(!src1.is(kScratchRegister));
  DCHECK(!src2.is(kScratchRegister));
  DCHECK(!dst.is(src1));
  DCHECK(!dst.is(src2));
  // Both operands must not be smis.
#ifdef DEBUG
  Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
  Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
#endif
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
  movl(kScratchRegister, Immediate(kSmiTagMask));
  andp(kScratchRegister, src1);
  testl(kScratchRegister, src2);
  // If non-zero then both are smis.
  j(not_zero, on_not_smis, near_jump);

  // Exactly one operand is a smi.
  DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
  subp(kScratchRegister, Immediate(1));
  // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
  movp(dst, src1);
  xorp(dst, src2);
  andp(dst, kScratchRegister);
  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
  xorp(dst, src1);
  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}

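// The sequence above is the branch-free select idiom (x ^ y) ^ x == y: with
// the scratch mask all ones (src1 is the smi), dst = (src1 ^ src2) ^ src1 =
// src2; with it all zeros, dst = 0 ^ src1 = src1. Either way dst ends up
// holding the non-smi operand.
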
SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                    Register src,
                                    int shift) {
  if (SmiValuesAre32Bits()) {
    DCHECK(is_uint6(shift));
    // There is a possible optimization if shift is in the range 60-63, but that
    // will (and must) never happen.
    if (!dst.is(src)) {
      movp(dst, src);
    }
    if (shift < kSmiShift) {
      sarp(dst, Immediate(kSmiShift - shift));
    } else {
      shlp(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    // We have to sign extend the index register to 64-bit as the SMI might
    // be negative.
    movsxlq(dst, dst);
    if (shift == times_1) {
      sarq(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    }
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
  }
}

SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                            Register src,
                                            int shift) {
  if (SmiValuesAre32Bits()) {
    // Register src holds a positive smi.
    DCHECK(is_uint6(shift));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    negp(dst);
    if (shift < kSmiShift) {
      sarp(dst, Immediate(kSmiShift - shift));
    } else {
      shlp(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    DCHECK(SmiValuesAre31Bits());
    DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
    if (!dst.is(src)) {
      movp(dst, src);
    }
    negq(dst);
    if (shift == times_1) {
      sarq(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    }
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
  }
}

void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    DCHECK_EQ(0, kSmiShift % kBitsPerByte);
    addl(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    DCHECK(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, src);
    addl(dst, kScratchRegister);
  }
}

void MacroAssembler::Push(Smi* source) {
  intptr_t smi = reinterpret_cast<intptr_t>(source);
  if (is_int32(smi)) {
    Push(Immediate(static_cast<int32_t>(smi)));
  } else {
    Register constant = GetSmiConstant(source);
    Push(constant);
  }
}

void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
  DCHECK(!src.is(scratch));
  movp(scratch, src);
  // High bits.
  shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
  shlp(src, Immediate(kSmiShift));
  Push(src);
  // Low bits.
  shlp(scratch, Immediate(kSmiShift));
  Push(scratch);
}

void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
  DCHECK(!dst.is(scratch));
  Pop(scratch);
  // Low bits.
  shrp(scratch, Immediate(kSmiShift));
  Pop(dst);
  // High bits.
  shrp(dst, Immediate(kSmiShift));
  shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
  orp(dst, scratch);
}

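// Together, the pair above round-trips an arbitrary 64-bit value through the
// stack as two smis (with the default 32-bit smi layout, one smi per half).
// Because each stack slot then carries a valid smi tag, a GC scanning the
// stack never mistakes the raw bits for a heap pointer.
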
void MacroAssembler::Test(const Operand& src, Smi* source) {
  if (SmiValuesAre32Bits()) {
    testl(Operand(src, kIntSize), Immediate(source->value()));
  } else {
    DCHECK(SmiValuesAre31Bits());
    testl(src, Immediate(source));
  }
}

// ----------------------------------------------------------------------------

void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch1;
  Register scratch = scratch2;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  SmiToInteger32(
      mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
  shrl(mask, Immediate(1));
  subp(mask, Immediate(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;
  JumpIfSmi(object, &is_smi);
  CheckMap(object,
           isolate()->factory()->heap_number_map(),
           not_found,
           DONT_DO_SMI_CHECK);

  STATIC_ASSERT(8 == kDoubleSize);
  movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
  xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
  andp(scratch, mask);
  // Each entry in string cache consists of two pointer sized fields,
  // but times_twice_pointer_size (multiplication by 16) scale factor
  // is not supported by addrmode on x64 platform.
  // So we have to premultiply entry index before lookup.
  shlp(scratch, Immediate(kPointerSizeLog2 + 1));

  Register index = scratch;
  Register probe = mask;
  movp(probe,
       FieldOperand(number_string_cache,
                    index,
                    times_1,
                    FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
  ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
  j(parity_even, not_found);  // Bail out if NaN is involved.
  j(not_equal, not_found);  // The cache did not contain this value.
  jmp(&load_result_from_cache);

  bind(&is_smi);
  SmiToInteger32(scratch, object);
  andp(scratch, mask);
  // Each entry in string cache consists of two pointer sized fields,
  // but times_twice_pointer_size (multiplication by 16) scale factor
  // is not supported by addrmode on x64 platform.
  // So we have to premultiply entry index before lookup.
  shlp(scratch, Immediate(kPointerSizeLog2 + 1));

  // Check if the entry is the smi we are looking for.
  cmpp(object,
       FieldOperand(number_string_cache,
                    index,
                    times_1,
                    FixedArray::kHeaderSize));
  j(not_equal, not_found);

  // Get the result from the cache.
  bind(&load_result_from_cache);
  movp(result,
       FieldOperand(number_string_cache,
                    index,
                    times_1,
                    FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
}

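// Hashing recap for the lookup above: a smi key hashes to its own value and a
// heap number to the xor of its two 32-bit halves; the entry index is
// (hash & mask), premultiplied by 2 * kPointerSize (the shift by
// kPointerSizeLog2 + 1) because each entry is a (number, string) pair and x64
// addressing has no times_16 scale factor.
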
void MacroAssembler::JumpIfNotString(Register object,
                                     Register object_map,
                                     Label* not_string,
                                     Label::Distance near_jump) {
  Condition is_smi = CheckSmi(object);
  j(is_smi, not_string, near_jump);
  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
  j(above_equal, not_string, near_jump);
}

2351 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
2352 Register first_object, Register second_object, Register scratch1,
2353 Register scratch2, Label* on_fail, Label::Distance near_jump) {
2354 // Check that both objects are not smis.
2355 Condition either_smi = CheckEitherSmi(first_object, second_object);
2356 j(either_smi, on_fail, near_jump);
2358 // Load instance type for both strings.
2359 movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2360 movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2361 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2362 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2364 // Check that both are flat one-byte strings.
2365 DCHECK(kNotStringTag != 0);
2366 const int kFlatOneByteStringMask =
2367 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2368 const int kFlatOneByteStringTag =
2369 kStringTag | kOneByteStringTag | kSeqStringTag;
2371 andl(scratch1, Immediate(kFlatOneByteStringMask));
2372 andl(scratch2, Immediate(kFlatOneByteStringMask));
2373 // Interleave the bits to check both scratch1 and scratch2 in one test.
2374 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2375 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2377 Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2378 j(not_equal, on_fail, near_jump);
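// A sketch of the interleaving trick above: after masking, each instance
// type fits entirely inside kFlatOneByteStringMask, and the DCHECK
// guarantees the mask does not overlap itself when shifted left by 3. The
// leap computes scratch1 + scratch2 * 8, so scratch2's masked bits land in
// bit positions the mask leaves empty above scratch1's, and a single cmpl
// against kFlatOneByteStringTag + (kFlatOneByteStringTag << 3) tests both
// strings at once.

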
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
    Register instance_type, Register scratch, Label* failure,
    Label::Distance near_jump) {
  if (!scratch.is(instance_type)) {
    movl(scratch, instance_type);
  }

  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;

  andl(scratch, Immediate(kFlatOneByteStringMask));
  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
  j(not_equal, failure, near_jump);
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first_object_instance_type, Register second_object_instance_type,
    Register scratch1, Register scratch2, Label* on_fail,
    Label::Distance near_jump) {
  // Load instance type for both strings.
  movp(scratch1, first_object_instance_type);
  movp(scratch2, second_object_instance_type);

  // Check that both are flat one-byte strings.
  DCHECK(kNotStringTag != 0);
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;

  andl(scratch1, Immediate(kFlatOneByteStringMask));
  andl(scratch2, Immediate(kFlatOneByteStringMask));
  // Interleave the bits to check both scratch1 and scratch2 in one test.
  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
  leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmpl(scratch1,
       Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
  j(not_equal, on_fail, near_jump);
}


template<class T>
static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
                                      T operand_or_register,
                                      Label* not_unique_name,
                                      Label::Distance distance) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  masm->testb(operand_or_register,
              Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  masm->j(zero, &succeed, Label::kNear);
  masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
  masm->j(not_equal, not_unique_name, distance);

  masm->bind(&succeed);
}


void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
                                                     Label* not_unique_name,
                                                     Label::Distance distance) {
  JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
}


void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
                                                     Label* not_unique_name,
                                                     Label::Distance distance) {
  JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
}
void MacroAssembler::Move(Register dst, Register src) {
  if (!dst.is(src)) {
    movp(dst, src);
  }
}


void MacroAssembler::Move(Register dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Move(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(dst, source);
  }
}


void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Move(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    movp(dst, kScratchRegister);
  }
}


void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
  if (src == 0) {
    xorps(dst, dst);
  } else {
    unsigned pop = base::bits::CountPopulation32(src);
    DCHECK_NE(0u, pop);
    if (pop == 32) {
      pcmpeqd(dst, dst);
    } else {
      movl(kScratchRegister, Immediate(src));
      movq(dst, kScratchRegister);
    }
  }
}


void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
  if (src == 0) {
    xorps(dst, dst);
  } else {
    unsigned nlz = base::bits::CountLeadingZeros64(src);
    unsigned ntz = base::bits::CountTrailingZeros64(src);
    unsigned pop = base::bits::CountPopulation64(src);
    DCHECK_NE(0u, pop);
    if (pop == 64) {
      pcmpeqd(dst, dst);
    } else if (pop + ntz == 64) {
      pcmpeqd(dst, dst);
      psllq(dst, ntz);
    } else if (pop + nlz == 64) {
      pcmpeqd(dst, dst);
      psrlq(dst, nlz);
    } else {
      uint32_t lower = static_cast<uint32_t>(src);
      uint32_t upper = static_cast<uint32_t>(src >> 32);
      if (upper == 0) {
        Move(dst, lower);
      } else {
        movq(kScratchRegister, src);
        movq(dst, kScratchRegister);
      }
    }
  }
}
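// Note on the fast paths above: a constant whose population count plus
// trailing (or leading) zero count is 64 is one contiguous run of ones, so
// it can be synthesized as all-ones (pcmpeqd against itself) followed by a
// single shift, avoiding a 64-bit immediate load through kScratchRegister.
// For example (illustrative), 0xFFFFFFFF00000000 has pop == 32 and
// ntz == 32, so pop + ntz == 64 and psllq by 32 produces it.

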
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    cmpp(dst, kScratchRegister);
  }
}


void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    cmpp(dst, kScratchRegister);
  }
}


void MacroAssembler::Push(Handle<Object> source) {
  AllowDeferredHandleDereference smi_check;
  if (source->IsSmi()) {
    Push(Smi::cast(*source));
  } else {
    MoveHeapObject(kScratchRegister, source);
    Push(kScratchRegister);
  }
}


void MacroAssembler::MoveHeapObject(Register result,
                                    Handle<Object> object) {
  AllowDeferredHandleDereference using_raw_address;
  DCHECK(object->IsHeapObject());
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    Move(result, cell, RelocInfo::CELL);
    movp(result, Operand(result, 0));
  } else {
    Move(result, object, RelocInfo::EMBEDDED_OBJECT);
  }
}


void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
  if (dst.is(rax)) {
    AllowDeferredHandleDereference embedding_raw_address;
    load_rax(cell.location(), RelocInfo::CELL);
  } else {
    Move(dst, cell, RelocInfo::CELL);
    movp(dst, Operand(dst, 0));
  }
}


void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
                                  Register scratch) {
  Move(scratch, cell, RelocInfo::EMBEDDED_OBJECT);
  cmpp(value, FieldOperand(scratch, WeakCell::kValueOffset));
}


void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
  Move(value, cell, RelocInfo::EMBEDDED_OBJECT);
  movp(value, FieldOperand(value, WeakCell::kValueOffset));
}


void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
                                   Label* miss) {
  GetWeakValue(value, cell);
  JumpIfSmi(value, miss);
}


void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements > 0) {
    addp(rsp, Immediate(stack_elements * kPointerSize));
  }
}


void MacroAssembler::DropUnderReturnAddress(int stack_elements,
                                            Register scratch) {
  DCHECK(stack_elements > 0);
  if (kPointerSize == kInt64Size && stack_elements == 1) {
    popq(MemOperand(rsp, 0));
    return;
  }

  PopReturnAddressTo(scratch);
  Drop(stack_elements);
  PushReturnAddressFrom(scratch);
}


void MacroAssembler::Push(Register src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    // x32 uses 64-bit push for rbp in the prologue.
    DCHECK(src.code() != rbp.code());
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), src);
  }
}
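// A short sketch of the x32 path above (taken when kPointerSize is four
// bytes): long mode has no 32-bit push encoding, so the macro instead makes
// room with leal(rsp, Operand(rsp, -4)) -- which, unlike subp, leaves the
// flags untouched -- and then stores the 32-bit value at the new top of the
// stack. Pop below mirrors this with a load followed by leal(rsp, +4).

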
void MacroAssembler::Push(const Operand& src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    movp(kScratchRegister, src);
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), kScratchRegister);
  }
}


void MacroAssembler::PushQuad(const Operand& src) {
  if (kPointerSize == kInt64Size) {
    pushq(src);
  } else {
    movp(kScratchRegister, src);
    pushq(kScratchRegister);
  }
}


void MacroAssembler::Push(Immediate value) {
  if (kPointerSize == kInt64Size) {
    pushq(value);
  } else {
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), value);
  }
}


void MacroAssembler::PushImm32(int32_t imm32) {
  if (kPointerSize == kInt64Size) {
    pushq_imm32(imm32);
  } else {
    leal(rsp, Operand(rsp, -4));
    movp(Operand(rsp, 0), Immediate(imm32));
  }
}


void MacroAssembler::Pop(Register dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    // x32 uses 64-bit pop for rbp in the epilogue.
    DCHECK(dst.code() != rbp.code());
    movp(dst, Operand(rsp, 0));
    leal(rsp, Operand(rsp, 4));
  }
}


void MacroAssembler::Pop(const Operand& dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    Register scratch = dst.AddressUsesRegister(kScratchRegister)
        ? kRootRegister : kScratchRegister;
    movp(scratch, Operand(rsp, 0));
    movp(dst, scratch);
    leal(rsp, Operand(rsp, 4));
    if (scratch.is(kRootRegister)) {
      // Restore kRootRegister.
      InitializeRootRegister();
    }
  }
}


void MacroAssembler::PopQuad(const Operand& dst) {
  if (kPointerSize == kInt64Size) {
    popq(dst);
  } else {
    popq(kScratchRegister);
    movp(dst, kScratchRegister);
  }
}


void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
                                                        Register base,
                                                        int offset) {
  DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
         offset <= SharedFunctionInfo::kSize &&
         (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
  if (kPointerSize == kInt64Size) {
    movsxlq(dst, FieldOperand(base, offset));
  } else {
    movp(dst, FieldOperand(base, offset));
    SmiToInteger32(dst, dst);
  }
}


void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
                                                           int offset,
                                                           int bits) {
  DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
         offset <= SharedFunctionInfo::kSize &&
         (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
  if (kPointerSize == kInt32Size) {
    // On x32, this field is represented by SMI.
    bits += kSmiShift;
  }
  int byte_offset = bits / kBitsPerByte;
  int bit_in_byte = bits & (kBitsPerByte - 1);
  testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
}
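// Example from later in this file: the bound-function check in
// TryGetFunctionPrototype tests a single compiler-hints bit with
//   TestBitSharedFunctionInfoSpecialField(kScratchRegister,
//                                         SharedFunctionInfo::kCompilerHintsOffset,
//                                         SharedFunctionInfo::kBoundFunction);
// followed by j(not_zero, miss); only the one addressed byte is read.

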
void MacroAssembler::Jump(ExternalReference ext) {
  LoadAddress(kScratchRegister, ext);
  jmp(kScratchRegister);
}


void MacroAssembler::Jump(const Operand& op) {
  if (kPointerSize == kInt64Size) {
    jmp(op);
  } else {
    movp(kScratchRegister, op);
    jmp(kScratchRegister);
  }
}


void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
  Move(kScratchRegister, destination, rmode);
  jmp(kScratchRegister);
}


void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
  // TODO(X64): Inline this
  jmp(code_object, rmode);
}


int MacroAssembler::CallSize(ExternalReference ext) {
  // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
  return LoadAddressSize(ext) +
         Assembler::kCallScratchRegisterInstructionLength;
}


void MacroAssembler::Call(ExternalReference ext) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(ext);
#endif
  LoadAddress(kScratchRegister, ext);
  call(kScratchRegister);
#ifdef DEBUG
  CHECK_EQ(end_position, pc_offset());
#endif
}


void MacroAssembler::Call(const Operand& op) {
  if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
    call(op);
  } else {
    movp(kScratchRegister, op);
    call(kScratchRegister);
  }
}


void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(destination);
#endif
  Move(kScratchRegister, destination, rmode);
  call(kScratchRegister);
#ifdef DEBUG
  CHECK_EQ(pc_offset(), end_position);
#endif
}


void MacroAssembler::Call(Handle<Code> code_object,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(code_object);
#endif
  DCHECK(RelocInfo::IsCodeTarget(rmode) ||
      rmode == RelocInfo::CODE_AGE_SEQUENCE);
  call(code_object, rmode, ast_id);
#ifdef DEBUG
  CHECK_EQ(end_position, pc_offset());
#endif
}
void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
  if (imm8 == 0) {
    movd(dst, src);
    return;
  }
  DCHECK_EQ(1, imm8);
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pextrd(dst, src, imm8);
    return;
  }
  movq(dst, src);
  shrq(dst, Immediate(32));
}


void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
  movd(xmm0, src);
  if (imm8 == 1) {
    punpckldq(dst, xmm0);
  } else {
    DCHECK_EQ(0, imm8);
    psrlq(dst, 32);
    punpckldq(xmm0, dst);
    movaps(dst, xmm0);
  }
}


void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
  DCHECK(imm8 == 0 || imm8 == 1);
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
  movd(xmm0, src);
  if (imm8 == 1) {
    punpckldq(dst, xmm0);
  } else {
    DCHECK_EQ(0, imm8);
    psrlq(dst, 32);
    punpckldq(xmm0, dst);
    movaps(dst, xmm0);
  }
}


void MacroAssembler::Lzcntl(Register dst, Register src) {
  // TODO(intel): Add support for LZCNT (BMI1/ABM).
  Label not_zero_src;
  bsrl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 63);  // 63^31 == 32
  bind(&not_zero_src);
  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
}
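// Worked example for the emulation above: if src == 0x1000, bsrl writes 12
// into dst and 31 ^ 12 == 19, the number of leading zeros of a 32-bit value
// whose highest set bit is bit 12. If src == 0, bsrl leaves dst undefined,
// so dst is forced to 63 and 63 ^ 31 == 32, matching lzcnt's result for a
// zero input.

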
void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
  // TODO(intel): Add support for LZCNT (BMI1/ABM).
  Label not_zero_src;
  bsrl(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 63);  // 63^31 == 32
  bind(&not_zero_src);
  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
}


void MacroAssembler::Pushad() {
  Push(rax);
  Push(rcx);
  Push(rdx);
  Push(rbx);
  // Not pushing rsp or rbp.
  Push(rsi);
  Push(rdi);
  Push(r8);
  Push(r9);
  // r10 is kScratchRegister.
  Push(r11);
  Push(r12);
  // r13 is kRootRegister.
  Push(r14);
  Push(r15);
  STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
  // Use lea for symmetry with Popad.
  int sp_delta =
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
  leap(rsp, Operand(rsp, -sp_delta));
}


void MacroAssembler::Popad() {
  // Popad must not change the flags, so use lea instead of addq.
  int sp_delta =
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
  leap(rsp, Operand(rsp, sp_delta));
  Pop(r15);
  Pop(r14);
  Pop(r12);
  Pop(r11);
  Pop(r9);
  Pop(r8);
  Pop(rdi);
  Pop(rsi);
  Pop(rbx);
  Pop(rdx);
  Pop(rcx);
  Pop(rax);
}


void MacroAssembler::Dropad() {
  addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}


// Order general registers are pushed by Pushad:
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
const int
MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
    0,
    1,
    2,
    3,
    -1,
    -1,
    4,
    5,
    6,
    7,
    -1,
    8,
    9,
    -1,
    10,
    11
};


void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
                                                  const Immediate& imm) {
  movp(SafepointRegisterSlot(dst), imm);
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
  movp(SafepointRegisterSlot(dst), src);
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  movp(dst, SafepointRegisterSlot(src));
}


Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}
void MacroAssembler::PushStackHandler() {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  // Link the current handler as the next handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  Push(ExternalOperand(handler_address));

  // Set this new handler as the current one.
  movp(ExternalOperand(handler_address), rsp);
}


void MacroAssembler::PopStackHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  Pop(ExternalOperand(handler_address));
  addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}


void MacroAssembler::Ret() {
  ret(0);
}


void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
  if (is_uint16(bytes_dropped)) {
    ret(bytes_dropped);
  } else {
    PopReturnAddressTo(scratch);
    addp(rsp, Immediate(bytes_dropped));
    PushReturnAddressFrom(scratch);
    ret(0);
  }
}


void MacroAssembler::FCmp() {
  fucomip();
  fstp(0);
}


void MacroAssembler::CmpObjectType(Register heap_object,
                                   InstanceType type,
                                   Register map) {
  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  CmpInstanceType(map, type);
}


void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
       Immediate(static_cast<int8_t>(type)));
}


void MacroAssembler::CheckFastElements(Register map,
                                       Label* fail,
                                       Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
  j(above, fail, distance);
}


void MacroAssembler::CheckFastObjectElements(Register map,
                                             Label* fail,
                                             Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
  j(below_equal, fail, distance);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
  j(above, fail, distance);
}


void MacroAssembler::CheckFastSmiElements(Register map,
                                          Label* fail,
                                          Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
  j(above, fail, distance);
}


void MacroAssembler::StoreNumberToDoubleElements(
    Register maybe_number,
    Register elements,
    Register index,
    XMMRegister xmm_scratch,
    Label* fail,
    int elements_offset) {
  Label smi_value, done;

  JumpIfSmi(maybe_number, &smi_value, Label::kNear);

  CheckMap(maybe_number,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);

  // Double value, turn potential sNaN into qNaN.
  Move(xmm_scratch, 1.0);
  mulsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
  jmp(&done, Label::kNear);

  bind(&smi_value);
  // Value is a smi. Convert to a double and store.
  // Preserve original value.
  SmiToInteger32(kScratchRegister, maybe_number);
  Cvtlsi2sd(xmm_scratch, kScratchRegister);
  bind(&done);
  movsd(FieldOperand(elements, index, times_8,
                     FixedDoubleArray::kHeaderSize - elements_offset),
        xmm_scratch);
}


void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}


void MacroAssembler::CheckMap(Register obj,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }

  CompareMap(obj, map);
  j(not_equal, fail);
}


void MacroAssembler::ClampUint8(Register reg) {
  Label done;
  testl(reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  setcc(negative, reg);  // 1 if negative, 0 if positive.
  decb(reg);  // 0 if negative, 255 if positive.
  bind(&done);
}
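// Worked example: for reg == 300 (0x12C) the testl result is nonzero and
// positive, so setcc writes 0 and decb wraps the low byte to 0xFF == 255;
// for reg == -5 the masked value is negative, so setcc writes 1 and decb
// yields 0. Values already in [0, 255] jump straight to done unchanged.

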
void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
                                        XMMRegister temp_xmm_reg,
                                        Register result_reg) {
  Label done;
  Label conv_failure;
  xorps(temp_xmm_reg, temp_xmm_reg);
  cvtsd2si(result_reg, input_reg);
  testl(result_reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  cmpl(result_reg, Immediate(1));
  j(overflow, &conv_failure, Label::kNear);
  movl(result_reg, Immediate(0));
  setcc(sign, result_reg);
  subl(result_reg, Immediate(1));
  andl(result_reg, Immediate(255));
  jmp(&done, Label::kNear);
  bind(&conv_failure);
  Set(result_reg, 0);
  ucomisd(input_reg, temp_xmm_reg);
  j(below, &done, Label::kNear);
  Set(result_reg, 255);
  bind(&done);
}


void MacroAssembler::LoadUint32(XMMRegister dst,
                                Register src) {
  if (FLAG_debug_code) {
    cmpq(src, Immediate(0xffffffff));
    Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
  }
  cvtqsi2sd(dst, src);
}


void MacroAssembler::SlowTruncateToI(Register result_reg,
                                     Register input_reg,
                                     int offset) {
  DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
  call(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
                                           Register input_reg) {
  Label done;
  movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
  cvttsd2siq(result_reg, xmm0);
  cmpq(result_reg, Immediate(1));
  j(no_overflow, &done, Label::kNear);

  // Slow case.
  if (input_reg.is(result_reg)) {
    subp(rsp, Immediate(kDoubleSize));
    movsd(MemOperand(rsp, 0), xmm0);
    SlowTruncateToI(result_reg, rsp, 0);
    addp(rsp, Immediate(kDoubleSize));
  } else {
    SlowTruncateToI(result_reg, input_reg);
  }

  bind(&done);
  // Keep our invariant that the upper 32 bits are zero.
  movl(result_reg, result_reg);
}


void MacroAssembler::TruncateDoubleToI(Register result_reg,
                                       XMMRegister input_reg) {
  Label done;
  cvttsd2siq(result_reg, input_reg);
  cmpq(result_reg, Immediate(1));
  j(no_overflow, &done, Label::kNear);

  subp(rsp, Immediate(kDoubleSize));
  movsd(MemOperand(rsp, 0), input_reg);
  SlowTruncateToI(result_reg, rsp, 0);
  addp(rsp, Immediate(kDoubleSize));

  bind(&done);
  // Keep our invariant that the upper 32 bits are zero.
  movl(result_reg, result_reg);
}


void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                               XMMRegister scratch,
                               MinusZeroMode minus_zero_mode,
                               Label* lost_precision, Label* is_nan,
                               Label* minus_zero, Label::Distance dst) {
  cvttsd2si(result_reg, input_reg);
  Cvtlsi2sd(xmm0, result_reg);
  ucomisd(xmm0, input_reg);
  j(not_equal, lost_precision, dst);
  j(parity_even, is_nan, dst);  // NaN.
  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
    Label done;
    // The integer converted back is equal to the original. We
    // only have to test if we got -0 as an input.
    testl(result_reg, result_reg);
    j(not_zero, &done, Label::kNear);
    movmskpd(result_reg, input_reg);
    // Bit 0 contains the sign of the double in input_reg.
    // If input was positive, we are ok and return 0, otherwise
    // jump to minus_zero.
    andl(result_reg, Immediate(1));
    j(not_zero, minus_zero, dst);
    bind(&done);
  }
}
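// Illustrative cases for the checks above: input 1.5 truncates to 1, and
// converting 1 back gives 1.0 != 1.5, so control goes to lost_precision;
// NaN compares unordered (parity flag set) and goes to is_nan; -0.0
// truncates to 0 with the sign bit still visible in bit 0 of movmskpd's
// result, triggering minus_zero when FAIL_ON_MINUS_ZERO is requested.

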
void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  movl(dst, FieldOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  movl(dst, FieldOperand(map, Map::kBitField3Offset));
  andl(dst, Immediate(Map::EnumLengthBits::kMask));
  Integer32ToSmi(dst, dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  movp(dst, FieldOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  movp(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  movp(dst, FieldOperand(dst, offset));
}


void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
                                     Register scratch2, Handle<WeakCell> cell,
                                     Handle<Code> success,
                                     SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  movq(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
  CmpWeakValue(scratch1, cell, scratch2);
  j(equal, success, RelocInfo::CODE_TARGET);
  bind(&fail);
}
void MacroAssembler::AssertNumber(Register object) {
  if (emit_debug_code()) {
    Label ok;
    Condition is_smi = CheckSmi(object);
    j(is_smi, &ok, Label::kNear);
    Cmp(FieldOperand(object, HeapObject::kMapOffset),
        isolate()->factory()->heap_number_map());
    Check(equal, kOperandIsNotANumber);
    bind(&ok);
  }
}


void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
    Check(NegateCondition(is_smi), kOperandIsASmi);
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
    Check(is_smi, kOperandIsNotASmi);
  }
}


void MacroAssembler::AssertSmi(const Operand& object) {
  if (emit_debug_code()) {
    Condition is_smi = CheckSmi(object);
    Check(is_smi, kOperandIsNotASmi);
  }
}


void MacroAssembler::AssertZeroExtended(Register int32_register) {
  if (emit_debug_code()) {
    DCHECK(!int32_register.is(kScratchRegister));
    movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
    cmpq(kScratchRegister, int32_register);
    Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAString);
    Push(object);
    movp(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, FIRST_NONSTRING_TYPE);
    Pop(object);
    Check(below, kOperandIsNotAString);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    testb(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAName);
    Push(object);
    movp(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, LAST_NAME_TYPE);
    Pop(object);
    Check(below_equal, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    Cmp(object, isolate()->factory()->undefined_value());
    j(equal, &done_checking);
    Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
    Assert(equal, kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}


void MacroAssembler::AssertRootValue(Register src,
                                     Heap::RootListIndex root_value_index,
                                     BailoutReason reason) {
  if (emit_debug_code()) {
    DCHECK(!src.is(kScratchRegister));
    LoadRoot(kScratchRegister, root_value_index);
    cmpp(src, kScratchRegister);
    Check(equal, reason);
  }
}
Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                             Register map,
                                             Register instance_type) {
  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  testb(instance_type, Immediate(kIsNotStringMask));
  return zero;
}


Condition MacroAssembler::IsObjectNameType(Register heap_object,
                                           Register map,
                                           Register instance_type) {
  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
  return below_equal;
}


void MacroAssembler::GetMapConstructor(Register result, Register map,
                                       Register temp) {
  Label done, loop;
  movp(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
  bind(&loop);
  JumpIfSmi(result, &done);
  CmpObjectType(result, MAP_TYPE, temp);
  j(not_equal, &done);
  movp(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
  jmp(&loop);
  bind(&done);
}


void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Label* miss,
                                             bool miss_on_bound_function) {
  Label non_instance;
  if (miss_on_bound_function) {
    // Check that the receiver isn't a smi.
    testl(function, Immediate(kSmiTagMask));
    j(zero, miss);

    // Check that the function really is a function.
    CmpObjectType(function, JS_FUNCTION_TYPE, result);
    j(not_equal, miss);

    movp(kScratchRegister,
         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
    // field).
    TestBitSharedFunctionInfoSpecialField(kScratchRegister,
                                          SharedFunctionInfo::kCompilerHintsOffset,
                                          SharedFunctionInfo::kBoundFunction);
    j(not_zero, miss);

    // Make sure that the function has an instance prototype.
    testb(FieldOperand(result, Map::kBitFieldOffset),
          Immediate(1 << Map::kHasNonInstancePrototype));
    j(not_zero, &non_instance, Label::kNear);
  }

  // Get the prototype or initial map from the function.
  movp(result,
       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  CompareRoot(result, Heap::kTheHoleValueRootIndex);
  j(equal, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CmpObjectType(result, MAP_TYPE, kScratchRegister);
  j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  movp(result, FieldOperand(result, Map::kPrototypeOffset));

  if (miss_on_bound_function) {
    jmp(&done, Label::kNear);

    // Non-instance prototype: Fetch prototype from constructor field
    // in initial map.
    bind(&non_instance);
    GetMapConstructor(result, result, kScratchRegister);
  }

  // All done.
  bind(&done);
}
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
    movl(counter_operand, Immediate(value));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
    if (value == 1) {
      incl(counter_operand);
    } else {
      addl(counter_operand, Immediate(value));
    }
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
    if (value == 1) {
      decl(counter_operand);
    } else {
      subl(counter_operand, Immediate(value));
    }
  }
}


void MacroAssembler::DebugBreak() {
  Set(rax, 0);  // No arguments.
  LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
  CEntryStub ces(isolate(), 1);
  DCHECK(AllowThisStubCall(&ces));
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}


void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected,
                 actual,
                 Handle<Code>::null(),
                 code,
                 &done,
                 &definitely_mismatches,
                 flag,
                 Label::kNear,
                 call_wrapper);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      call(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      jmp(code);
    }
    bind(&done);
  }
}


void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  DCHECK(function.is(rdi));
  movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
  LoadSharedFunctionInfoSpecialField(rbx, rdx,
      SharedFunctionInfo::kFormalParameterCountOffset);
  // Advances rdx to the end of the Code object header, to the start of
  // the executable code.
  movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));

  ParameterCount expected(rbx);
  InvokeCode(rdx, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  DCHECK(function.is(rdi));
  movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
  // Advances rdx to the end of the Code object header, to the start of
  // the executable code.
  movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));

  InvokeCode(rdx, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  Move(rdi, function);
  InvokeFunction(rdi, expected, actual, flag, call_wrapper);
}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_register,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    Label::Distance near_jump,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label invoke;
  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      Set(rax, actual.immediate());
      if (expected.immediate() ==
              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        // Don't worry about adapting arguments for built-ins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        Set(rbx, expected.immediate());
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in register, actual is immediate. This is the
      // case when we invoke function values without going through the
      // arguments adaptor frame.
      cmpp(expected.reg(), Immediate(actual.immediate()));
      j(equal, &invoke, Label::kNear);
      DCHECK(expected.reg().is(rbx));
      Set(rax, actual.immediate());
    } else if (!expected.reg().is(actual.reg())) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmpp(expected.reg(), actual.reg());
      j(equal, &invoke, Label::kNear);
      DCHECK(actual.reg().is(rax));
      DCHECK(expected.reg().is(rbx));
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (!code_constant.is_null()) {
      Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
      addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
    } else if (!code_register.is(rdx)) {
      movp(rdx, code_register);
    }

    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor, RelocInfo::CODE_TARGET);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        jmp(done, near_jump);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}
void MacroAssembler::StubPrologue() {
  pushq(rbp);  // Caller's frame pointer.
  movp(rbp, rsp);
  Push(rsi);  // Callee's context.
  Push(Smi::FromInt(StackFrame::STUB));
}


void MacroAssembler::Prologue(bool code_pre_aging) {
  PredictableCodeSizeScope predictible_code_size_scope(this,
      kNoCodeAgeSequenceLength);
  if (code_pre_aging) {
    // Pre-age the code.
    Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
         RelocInfo::CODE_AGE_SEQUENCE);
    Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
  } else {
    pushq(rbp);  // Caller's frame pointer.
    movp(rbp, rsp);
    Push(rsi);  // Callee's context.
    Push(rdi);  // Callee's JS function.
  }
}


void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // Out-of-line constant pool not implemented on x64.
  UNREACHABLE();
}


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  pushq(rbp);
  movp(rbp, rsp);
  Push(rsi);  // Context.
  Push(Smi::FromInt(type));
  Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
  Push(kScratchRegister);
  if (emit_debug_code()) {
    Move(kScratchRegister,
         isolate()->factory()->undefined_value(),
         RelocInfo::EMBEDDED_OBJECT);
    cmpp(Operand(rsp, 0), kScratchRegister);
    Check(not_equal, kCodeObjectNotProperlyPatched);
  }
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    Move(kScratchRegister, Smi::FromInt(type));
    cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
    Check(equal, kStackFrameTypesMustMatch);
  }
  movp(rsp, rbp);
  popq(rbp);
}


void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
  // Set up the frame structure on the stack.
  // All constants are relative to the frame pointer of the exit frame.
  DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
         kFPOnStackSize + kPCOnStackSize);
  DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
  DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
  pushq(rbp);
  movp(rbp, rsp);

  // Reserve room for entry stack pointer and push the code object.
  DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
  Push(Immediate(0));  // Saved entry sp, patched before call.
  Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
  Push(kScratchRegister);  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer and the context in top.
  if (save_rax) {
    movp(r14, rax);  // Backup rax in callee-save register.
  }

  Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
  Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
  Store(ExternalReference(Isolate::kCFunctionAddress, isolate()), rbx);
}


void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
                                            bool save_doubles) {
#ifdef _WIN64
  const int kShadowSpace = 4;
  arg_stack_space += kShadowSpace;
#endif
  // Optionally save all XMM registers.
  if (save_doubles) {
    int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
                arg_stack_space * kRegisterSize;
    subp(rsp, Immediate(space));
    int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
      XMMRegister reg = XMMRegister::FromAllocationIndex(i);
      movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
    }
  } else if (arg_stack_space > 0) {
    subp(rsp, Immediate(arg_stack_space * kRegisterSize));
  }

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
    DCHECK(is_int8(kFrameAlignment));
    andp(rsp, Immediate(-kFrameAlignment));
  }

  // Patch the saved entry sp.
  movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
  EnterExitFramePrologue(true);

  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
  // so it must be retained across the C-call.
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
  leap(r15, Operand(rbp, r14, times_pointer_size, offset));

  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}


void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
  EnterExitFramePrologue(false);
  EnterExitFrameEpilogue(arg_stack_space, false);
}


void MacroAssembler::LeaveExitFrame(bool save_doubles) {
  // Registers:
  // r15 : argv
  if (save_doubles) {
    int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
      XMMRegister reg = XMMRegister::FromAllocationIndex(i);
      movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
    }
  }
  // Get the return address from the stack and restore the frame pointer.
  movp(rcx, Operand(rbp, kFPOnStackSize));
  movp(rbp, Operand(rbp, 0 * kPointerSize));

  // Drop everything up to and including the arguments and the receiver
  // from the caller stack.
  leap(rsp, Operand(r15, 1 * kPointerSize));

  PushReturnAddressFrom(rcx);

  LeaveExitFrameEpilogue(true);
}


void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
  movp(rsp, rbp);
  popq(rbp);

  LeaveExitFrameEpilogue(restore_context);
}


void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  Operand context_operand = ExternalOperand(context_address);
  if (restore_context) {
    movp(rsi, context_operand);
  }
#ifdef DEBUG
  movp(context_operand, Immediate(0));
#endif

  // Clear the top frame.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                       isolate());
  Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
  movp(c_entry_fp_operand, Immediate(0));
}


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  DCHECK(!holder_reg.is(scratch));
  DCHECK(!scratch.is(kScratchRegister));
  // Load current lexical context from the stack frame.
  movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));

  // When generating debug code, make sure the lexical context is set.
  if (emit_debug_code()) {
    cmpp(scratch, Immediate(0));
    Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
  }
  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  movp(scratch, FieldOperand(scratch, offset));
  movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
        isolate()->factory()->native_context_map());
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
  }

  // Check if both contexts are the same.
  cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  j(equal, &same_contexts);

  // Compare security tokens.
  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Preserve original value of holder_reg.
    Push(holder_reg);
    movp(holder_reg,
         FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
    Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);

    // Read the first word and compare to native_context_map().
    movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
    CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
    Pop(holder_reg);
  }

  movp(kScratchRegister,
       FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  int token_offset =
      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
  movp(scratch, FieldOperand(scratch, token_offset));
  cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
  j(not_equal, miss);

  bind(&same_contexts);
}
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stubs-hydrogen.cc
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiToInteger32(scratch, scratch);

  // Xor original key with a seed.
  xorl(r0, scratch);

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  movl(scratch, r0);
  notl(r0);
  shll(scratch, Immediate(15));
  addl(r0, scratch);
  // hash = hash ^ (hash >> 12);
  movl(scratch, r0);
  shrl(scratch, Immediate(12));
  xorl(r0, scratch);
  // hash = hash + (hash << 2);
  leal(r0, Operand(r0, r0, times_4, 0));
  // hash = hash ^ (hash >> 4);
  movl(scratch, r0);
  shrl(scratch, Immediate(4));
  xorl(r0, scratch);
  // hash = hash * 2057;
  imull(r0, r0, Immediate(2057));
  // hash = hash ^ (hash >> 16);
  movl(scratch, r0);
  shrl(scratch, Immediate(16));
  xorl(r0, scratch);
}
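// The sequence above is the assembly form of the following C-style steps
// (cf. ComputeIntegerHash):
//   hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;
//   hash = hash ^ (hash >> 16);
// with r0 holding hash throughout and scratch holding the shifted copies.

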
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register r0,
                                              Register r1,
                                              Register r2,
                                              Register result) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // Scratch registers:
  //
  // r0 - holds the untagged key on entry and holds the hash once computed.
  //
  // r1 - used to hold the capacity mask of the dictionary.
  //
  // r2 - used for the index into the dictionary.
  //
  // result - holds the result on exit if the load succeeded.
  //          Allowed to be the same as 'key' or 'elements'.
  //          Unchanged on bailout so 'key' or 'result' can be used
  //          in further computation.

  Label done;

  GetNumberHash(r0, r1);

  // Compute capacity mask.
  SmiToInteger32(r1, FieldOperand(elements,
                                  SeededNumberDictionary::kCapacityOffset));
  decl(r1);

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Use r2 for index calculations and keep the hash intact in r0.
    movp(r2, r0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
    }
    andp(r2, r1);

    // Scale the index by multiplying by the entry size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    leap(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3

    // Check if the key matches.
    cmpp(key, FieldOperand(elements,
                           r2,
                           times_pointer_size,
                           SeededNumberDictionary::kElementsStartOffset));
    if (i != (kNumberDictionaryProbes - 1)) {
      j(equal, &done);
    } else {
      j(not_equal, miss);
    }
  }

  bind(&done);
  // Check that the value is a field property.
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
       Smi::FromInt(PropertyDetails::TypeField::kMask));
  j(not_zero, miss);

  // Get the value at the masked, scaled index.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
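// Probe i above inspects slot (hash + i + i * i) & mask, matching the
// quadratic probe sequence used when the dictionary was written, so a
// present key is normally found within the first few probes; anything not
// found within kNumberDictionaryProbes probes is handled through the miss
// label rather than by looping further.

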
void MacroAssembler::LoadAllocationTopHelper(Register result,
                                             Register scratch,
                                             AllocationFlags flags) {
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Just return if allocation top is already known.
  if ((flags & RESULT_CONTAINS_TOP) != 0) {
    // No use of scratch if allocation top is provided.
    DCHECK(!scratch.is_valid());
#ifdef DEBUG
    // Assert that result actually contains top on entry.
    Operand top_operand = ExternalOperand(allocation_top);
    cmpp(result, top_operand);
    Check(equal, kUnexpectedAllocationTop);
#endif
    return;
  }

  // Move address of new object to result. Use scratch register if available,
  // and keep address in scratch until call to UpdateAllocationTopHelper.
  if (scratch.is_valid()) {
    LoadAddress(scratch, allocation_top);
    movp(result, Operand(scratch, 0));
  } else {
    Load(result, allocation_top);
  }
}


void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
                                                 Register scratch,
                                                 Label* gc_required,
                                                 AllocationFlags flags) {
  if (kPointerSize == kDoubleSize) {
    if (FLAG_debug_code) {
      testl(result, Immediate(kDoubleAlignmentMask));
      Check(zero, kAllocationIsNotDoubleAligned);
    }
  } else {
    // Align the next allocation. Storing the filler map without checking top
    // is safe in new-space because the limit of the heap is aligned there.
    DCHECK(kPointerSize * 2 == kDoubleSize);
    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    // Make sure scratch is not clobbered by this function as it might be
    // used in UpdateAllocationTopHelper later.
    DCHECK(!scratch.is(kScratchRegister));
    Label aligned;
    testl(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      ExternalReference allocation_limit =
          AllocationUtils::GetAllocationLimitReference(isolate(), flags);
      cmpp(result, ExternalOperand(allocation_limit));
      j(above_equal, gc_required);
    }
    LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
    movp(Operand(result, 0), kScratchRegister);
    addp(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }
}


void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               Register scratch,
                                               AllocationFlags flags) {
  if (emit_debug_code()) {
    testp(result_end, Immediate(kObjectAlignmentMask));
    Check(zero, kUnalignedAllocationInNewSpace);
  }

  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Update new top.
  if (scratch.is_valid()) {
    // Scratch already contains address of allocation top.
    movp(Operand(scratch, 0), result_end);
  } else {
    Store(allocation_top, result_end);
  }
}
void MacroAssembler::Allocate(int object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      if (result_end.is_valid()) {
        movl(result_end, Immediate(0x7191));
      }
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
  }

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  Register top_reg = result_end.is_valid() ? result_end : result;

  if (!top_reg.is(result)) {
    movp(top_reg, result);
  }
  addp(top_reg, Immediate(object_size));
  j(carry, gc_required);
  Operand limit_operand = ExternalOperand(allocation_limit);
  cmpp(top_reg, limit_operand);
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(top_reg, scratch, flags);

  bool tag_result = (flags & TAG_OBJECT) != 0;
  if (top_reg.is(result)) {
    if (tag_result) {
      subp(result, Immediate(object_size - kHeapObjectTag));
    } else {
      subp(result, Immediate(object_size));
    }
  } else if (tag_result) {
    // Tag the result if requested.
    DCHECK(kHeapObjectTag == 1);
    incp(result);
  }
}
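// Typical use of this fixed-size overload (see AllocateHeapNumber below):
//   Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required,
//            TAG_OBJECT);
// bumps the new-space allocation top by HeapNumber::kSize and leaves the
// tagged address of the new object in result, branching to gc_required
// when the space is exhausted.

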
void MacroAssembler::Allocate(int header_size,
                              ScaleFactor element_size,
                              Register element_count,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & SIZE_IN_WORDS) == 0);
  leap(result_end, Operand(element_count, element_size, header_size));
  Allocate(result_end, result, result_end, scratch, gc_required, flags);
}


void MacroAssembler::Allocate(Register object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK((flags & SIZE_IN_WORDS) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      movl(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
      // object_size is left unchanged by this function.
    }
    jmp(gc_required);
    return;
  }
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
  }

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  if (!object_size.is(result_end)) {
    movp(result_end, object_size);
  }
  addp(result_end, result);
  j(carry, gc_required);
  Operand limit_operand = ExternalOperand(allocation_limit);
  cmpp(result_end, limit_operand);
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch, flags);

  // Tag the result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    addp(result, Immediate(kHeapObjectTag));
  }
}
void MacroAssembler::UndoAllocationInNewSpace(Register object) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  andp(object, Immediate(~kHeapObjectTagMask));
  Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
  cmpp(object, top_operand);
  Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
  movp(top_operand, object);
}


void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch,
                                        Label* gc_required,
                                        MutableMode mode) {
  // Allocate heap number in new space.
  Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);

  Heap::RootListIndex map_index = mode == MUTABLE
      ? Heap::kMutableHeapNumberMapRootIndex
      : Heap::kHeapNumberMapRootIndex;

  // Set the map.
  LoadRoot(kScratchRegister, map_index);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
                               kObjectAlignmentMask;
  DCHECK(kShortSize == 2);
  // scratch1 = length * 2 + kObjectAlignmentMask.
  leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
                kHeaderAlignment));
  andp(scratch1, Immediate(~kObjectAlignmentMask));
  if (kHeaderAlignment > 0) {
    subp(scratch1, Immediate(kHeaderAlignment));
  }

  // Allocate two byte string in new space.
  Allocate(SeqTwoByteString::kHeaderSize,
           times_1,
           scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
  Integer32ToSmi(scratch1, length);
  movp(FieldOperand(result, String::kLengthOffset), scratch1);
  movp(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
}


void MacroAssembler::AllocateOneByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
                               kObjectAlignmentMask;
  movl(scratch1, length);
  DCHECK(kCharSize == 1);
  addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
  andp(scratch1, Immediate(~kObjectAlignmentMask));
  if (kHeaderAlignment > 0) {
    subp(scratch1, Immediate(kHeaderAlignment));
  }

  // Allocate one-byte string in new space.
  Allocate(SeqOneByteString::kHeaderSize,
           times_1,
           scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
  Integer32ToSmi(scratch1, length);
  movp(FieldOperand(result, String::kLengthOffset), scratch1);
  movp(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
}


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate cons string in new space.
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateOneByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  Allocate(ConsString::kSize,
           result,
           scratch1,
           scratch2,
           gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateOneByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
4468 // Copy memory, byte-by-byte, from source to destination. Not optimized for
4469 // long or aligned copies. The contents of scratch and length are destroyed.
4470 // Destination is incremented by length, source, length and scratch are
4472 // A simpler loop is faster on small copies, but slower on large ones.
4473 // The cld() instruction must have been emitted, to set the direction flag(),
4474 // before calling this function.
4475 void MacroAssembler::CopyBytes(Register destination,
4480 DCHECK(min_length >= 0);
4481 if (emit_debug_code()) {
4482 cmpl(length, Immediate(min_length));
4483 Assert(greater_equal, kInvalidMinLength);
4485 Label short_loop, len8, len16, len24, done, short_string;
4487 const int kLongStringLimit = 4 * kPointerSize;
4488 if (min_length <= kLongStringLimit) {
4489 cmpl(length, Immediate(kPointerSize));
4490 j(below, &short_string, Label::kNear);
  DCHECK(source.is(rsi));
  DCHECK(destination.is(rdi));
  DCHECK(length.is(rcx));

  if (min_length <= kLongStringLimit) {
    cmpl(length, Immediate(2 * kPointerSize));
    j(below_equal, &len8, Label::kNear);
    cmpl(length, Immediate(3 * kPointerSize));
    j(below_equal, &len16, Label::kNear);
    cmpl(length, Immediate(4 * kPointerSize));
    j(below_equal, &len24, Label::kNear);
  }

  // Because source is 8-byte aligned in our uses of this function,
  // we keep source aligned for the rep movs operation by copying the odd bytes
  // at the end of the ranges.
  movp(scratch, length);
  shrl(length, Immediate(kPointerSizeLog2));
  repmovsp();
  // Move remaining bytes of length.
  andl(scratch, Immediate(kPointerSize - 1));
  movp(length, Operand(source, scratch, times_1, -kPointerSize));
  movp(Operand(destination, scratch, times_1, -kPointerSize), length);
  addp(destination, scratch);
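  // Illustrative example: for length == 19 with 8-byte words, rep movs copies
  // two full words (16 bytes), scratch keeps 19 & 7 == 3, and the word-sized
  // move above copies bytes [11, 19) of the range, re-copying five
  // already-moved bytes instead of branching on the odd count.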

  if (min_length <= kLongStringLimit) {
    jmp(&done, Label::kNear);

    bind(&len24);
    movp(scratch, Operand(source, 2 * kPointerSize));
    movp(Operand(destination, 2 * kPointerSize), scratch);
    bind(&len16);
    movp(scratch, Operand(source, kPointerSize));
    movp(Operand(destination, kPointerSize), scratch);
    bind(&len8);
    movp(scratch, Operand(source, 0));
    movp(Operand(destination, 0), scratch);
    // Move remaining bytes of length.
    movp(scratch, Operand(source, length, times_1, -kPointerSize));
    movp(Operand(destination, length, times_1, -kPointerSize), scratch);
    addp(destination, length);
    jmp(&done, Label::kNear);

    bind(&short_string);
    if (min_length == 0) {
      testl(length, length);
      j(zero, &done, Label::kNear);
    }

    bind(&short_loop);
    movb(scratch, Operand(source, 0));
    movb(Operand(destination, 0), scratch);
    incp(source);
    incp(destination);
    decl(length);
    j(not_zero, &short_loop);
  }

  bind(&done);
}

void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  jmp(&entry);
  bind(&loop);
  movp(Operand(start_offset, 0), filler);
  addp(start_offset, Immediate(kPointerSize));
  bind(&entry);
  cmpp(start_offset, end_offset);
  j(less, &loop);
}

void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in rsi).
    movp(dst, rsi);
  }

  // We should not have found a with context by walking the context
  // chain (i.e., the static scope chain and runtime context chain do
  // not agree). A variable occurring in such a scope should have
  // slot type LOOKUP and not CONTEXT.
  if (emit_debug_code()) {
    CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
                Heap::kWithContextMapRootIndex);
    Check(not_equal, kVariableResolvedToWithContext);
  }
}

void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  movp(scratch,
       Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  movp(scratch, Operand(scratch,
                        Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));

  int offset = expected_kind * kPointerSize +
               FixedArrayBase::kHeaderSize;
  cmpp(map_in_out, FieldOperand(scratch, offset));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
           FixedArrayBase::kHeaderSize;
  movp(map_in_out, FieldOperand(scratch, offset));
}

#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif

void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  movp(function,
       Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  movp(function, Operand(function, Context::SlotOffset(index)));
}

void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map. The global functions all have initial maps.
  movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}

int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
  // On Windows 64 stack slots are reserved by the caller for all arguments
  // including the ones passed in registers, and space is always allocated for
  // the four register arguments even if the function takes fewer than four
  // arguments.
  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
  // and the caller does not reserve stack slots for them.
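  // Example values (illustrative): num_arguments == 6 needs 6 stack slots on
  // Windows (one per argument, minimum four) but 0 on AMD64, where all six
  // fit in registers; num_arguments == 8 needs 8 and 2 slots respectively.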
  DCHECK(num_arguments >= 0);
#ifdef _WIN64
  const int kMinimumStackSlots = kRegisterPassedArguments;
  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
  return num_arguments;
#else
  if (num_arguments < kRegisterPassedArguments) return 0;
  return num_arguments - kRegisterPassedArguments;
#endif
}

void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  Label is_object;
  JumpIfNotSmi(string, &is_object);
  Abort(kNonObject);
  bind(&is_object);

  movp(value, FieldOperand(string, HeapObject::kMapOffset));
  movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));

  andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
  cmpp(value, Immediate(encoding_mask));
  Check(equal, kUnexpectedStringType);

  // The index is assumed to be untagged coming in; tag it to compare with the
  // string length without using a temp register. It is restored at the end of
  // this function.
  Integer32ToSmi(index, index);
  SmiCompare(index, FieldOperand(string, String::kLengthOffset));
  Check(less, kIndexIsTooLarge);

  SmiCompare(index, Smi::FromInt(0));
  Check(greater_equal, kIndexIsNegative);

  // Restore the index.
  SmiToInteger32(index, index);
}

void MacroAssembler::PrepareCallCFunction(int num_arguments) {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  DCHECK(frame_alignment != 0);
  DCHECK(num_arguments >= 0);

  // Make stack end at alignment and allocate space for arguments and old rsp.
  movp(kScratchRegister, rsp);
  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
  andp(rsp, Immediate(-frame_alignment));
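  // Illustrative layout: with frame_alignment == 16 and three argument slots,
  // rsp drops by (3 + 1) * kRegisterSize == 32 bytes and is then rounded down
  // to a 16-byte boundary; the store below saves the old rsp in the slot just
  // above the argument area, where CallCFunction restores it from.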
  movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}

void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  LoadAddress(rax, function);
  CallCFunction(rax, num_arguments);
}

void MacroAssembler::CallCFunction(Register function, int num_arguments) {
  DCHECK(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  DCHECK(base::OS::ActivationFrameAlignment() != 0);
  DCHECK(num_arguments >= 0);
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}

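// Typical call sequence (sketch; some_function is a hypothetical
// ExternalReference):
//   PrepareCallCFunction(2);
//   // ...move the two arguments into the native ABI argument registers...
//   CallCFunction(ExternalReference::some_function(isolate()), 2);
// PrepareCallCFunction aligns the stack and saves rsp; CallCFunction performs
// the call and restores rsp from the saved slot.
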
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}

CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(NULL, address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to
  // patch. The size is adjusted with kGap in order for the assembler to
  // generate size bytes of instructions without failing with buffer size
  // constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CpuFeatures::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  if (scratch.is(object)) {
    andp(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    movp(scratch, Immediate(~Page::kPageAlignmentMask));
    andp(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
          Immediate(static_cast<uint8_t>(mask)));
  } else {
    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}

void MacroAssembler::JumpIfBlack(Register object,
                                 Register bitmap_scratch,
                                 Register mask_scratch,
                                 Label* on_black,
                                 Label::Distance on_black_distance) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(object, bitmap_scratch, mask_scratch);

  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  // The mask_scratch register contains a 1 at the position of the first bit
  // and a 0 at all other positions, including the position of the second bit.
  movp(rcx, mask_scratch);
  // Make rcx into a mask that covers both marking bits using the operation
  // rcx = mask | (mask << 1).
  leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
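  // E.g. if mask_scratch holds 1 << k, rcx now holds 3 << k, selecting both
  // mark bits; the object is black exactly when the masked load below equals
  // mask_scratch (first bit set, second bit clear).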
  // Note that we are using a 4-byte aligned 8-byte load.
  andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  cmpp(mask_scratch, rcx);
  j(equal, on_black, on_black_distance);
}

// Detect some, but not all, common pointer-free objects. This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(
    Register value,
    Register scratch,
    Label* not_data_object,
    Label::Distance not_data_object_distance) {
  Label is_data_object;
  movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  j(equal, &is_data_object, Label::kNear);
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
        Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, not_data_object, not_data_object_distance);
  bind(&is_data_object);
}

void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
  movp(bitmap_reg, addr_reg);
  // Sign extended 32 bit immediate.
  andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  movp(rcx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shrl(rcx, Immediate(shift));
  andp(rcx,
       Immediate((Page::kPageAlignmentMask >> shift) &
                 ~(Bitmap::kBytesPerCell - 1)));
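  // Illustrative arithmetic (x64): with 32-bit cells each covering 32 words
  // of 8 bytes (256 bytes of heap), a cell's byte offset in the bitmap is
  // (page_offset >> 8) * 4, which is exactly page_offset >> shift with the
  // low two bits masked off, as computed above.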

  addp(bitmap_reg, rcx);
  movp(rcx, addr_reg);
  shrl(rcx, Immediate(kPointerSizeLog2));
  andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
  movl(mask_reg, Immediate(1));
  shlp_cl(mask_reg);
}

void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
  j(not_zero, &done, Label::kNear);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    push(mask_scratch);
    // shl. May overflow making the check conservative.
    addp(mask_scratch, mask_scratch);
    testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    pop(mask_scratch);
  }

  // Value is white. We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = rcx;  // Holds map while checking type.
  Register length = rcx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap-number.
  movp(map, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(not_equal, &not_heap_number, Label::kNear);
  movp(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = rcx;
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  testb(instance_type, Immediate(kExternalStringTag));
  j(zero, &not_external, Label::kNear);
  movp(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either Latin1 or UC16.
  DCHECK(kOneByteStringTag == 0x04);
  andp(length, Immediate(kStringEncodingMask));
  xorp(length, Immediate(kStringEncodingMask));
  addp(length, Immediate(0x04));
  // Value now either 4 (if Latin1) or 8 (if UC16), i.e. char-size shifted by 2.
  imulp(length, FieldOperand(value, String::kLengthOffset));
  shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
  addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  andp(length, Immediate(~kObjectAlignmentMask));
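  // Illustrative check: a one-byte string leaves 4 in length, a two-byte
  // string leaves 8 (char size shifted by 2). Multiplying by the smi-tagged
  // string length and shifting right by 2 + kSmiTagSize + kSmiShiftSize
  // undoes both the factor of 4 and the smi tag, leaving char_size * length,
  // which is then rounded up to object alignment together with the header.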

  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);

  bind(&done);
}

void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Label next, start;
  Register empty_fixed_array_value = r8;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  movp(rcx, rax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);

  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(0));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register rcx contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  cmpp(empty_fixed_array_value,
       FieldOperand(rcx, JSObject::kElementsOffset));
  j(equal, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
  cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
  j(not_equal, call_runtime);

  bind(&no_elements);
  movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
  cmpp(rcx, null_value);
  j(not_equal, &next);
}

void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  leap(scratch_reg, Operand(receiver_reg,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
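  // scratch_reg now points just past where a memento directly following the
  // JSArray would end; the bounds checks below are only meaningful for
  // objects in new space.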
  Move(kScratchRegister, new_space_start);
  cmpp(scratch_reg, kScratchRegister);
  j(less, no_memento_found);
  cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
  j(greater, no_memento_found);
  CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
              Heap::kAllocationMementoMapRootIndex);
}

void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
  DCHECK(!scratch1.is(scratch0));
  Register current = scratch0;
  Label loop_again;

  movp(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  movp(current, FieldOperand(current, HeapObject::kMapOffset));
  movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  movp(current, FieldOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  j(not_equal, &loop_again);
}

void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
  DCHECK(!dividend.is(rax));
  DCHECK(!dividend.is(rdx));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  movl(rax, Immediate(mag.multiplier));
  imull(dividend);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) addl(rdx, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
  if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
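  // Illustrative example: for divisor == 3 the magic pair is multiplier
  // 0x55555556 with shift 0, so for dividend == 9 rdx (the high half of the
  // 64-bit product) is already 3. The final instructions add (dividend >> 31),
  // i.e. 1 only for negative dividends, making the division round toward zero.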
  movl(rax, dividend);
  shrl(rax, Immediate(31));
  addl(rdx, rax);
}

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64