1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/base/bits.h"
8 #include "src/base/division-by-constant.h"
9 #include "src/bootstrapper.h"
10 #include "src/codegen.h"
11 #include "src/cpu-profiler.h"
12 #include "src/debug/debug.h"
13 #include "src/heap/heap.h"
14 #include "src/x64/assembler-x64.h"
15 #include "src/x64/macro-assembler-x64.h"
20 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
21 : Assembler(arg_isolate, buffer, size),
22 generating_stub_(false),
24 root_array_available_(true) {
25 if (isolate() != NULL) {
26 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
32 static const int64_t kInvalidRootRegisterDelta = -1;
35 int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
36 if (predictable_code_size() &&
37 (other.address() < reinterpret_cast<Address>(isolate()) ||
38 other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
39 return kInvalidRootRegisterDelta;
41 Address roots_register_value = kRootRegisterBias +
42 reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
44 int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization.
45 if (kPointerSize == kInt64Size) {
46 delta = other.address() - roots_register_value;
48 // For x32, zero extend the address to 64-bit and calculate the delta.
49 uint64_t o = static_cast<uint32_t>(
50 reinterpret_cast<intptr_t>(other.address()));
51 uint64_t r = static_cast<uint32_t>(
52 reinterpret_cast<intptr_t>(roots_register_value));
59 Operand MacroAssembler::ExternalOperand(ExternalReference target,
61 if (root_array_available_ && !serializer_enabled()) {
62 int64_t delta = RootRegisterDelta(target);
63 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
64 return Operand(kRootRegister, static_cast<int32_t>(delta));
67 Move(scratch, target);
68 return Operand(scratch, 0);
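// Illustrative note (not from the original file): with the root array
// available and the serializer disabled, external references that live close
// to the isolate can be addressed relative to kRootRegister, saving the move
// of a full 64-bit address into a scratch register. A hypothetical use:
//
//   // Yields either Operand(kRootRegister, delta) or Operand(kScratchRegister, 0).
//   movp(rax, ExternalOperand(ExternalReference::isolate_address(isolate())));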
72 void MacroAssembler::Load(Register destination, ExternalReference source) {
73 if (root_array_available_ && !serializer_enabled()) {
74 int64_t delta = RootRegisterDelta(source);
75 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
76 movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
81 if (destination.is(rax)) {
84 Move(kScratchRegister, source);
85 movp(destination, Operand(kScratchRegister, 0));
90 void MacroAssembler::Store(ExternalReference destination, Register source) {
91 if (root_array_available_ && !serializer_enabled()) {
92 int64_t delta = RootRegisterDelta(destination);
93 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
94 movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
100 store_rax(destination);
102 Move(kScratchRegister, destination);
103 movp(Operand(kScratchRegister, 0), source);
108 void MacroAssembler::LoadAddress(Register destination,
109 ExternalReference source) {
110 if (root_array_available_ && !serializer_enabled()) {
111 int64_t delta = RootRegisterDelta(source);
112 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
113 leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
118 Move(destination, source);
122 int MacroAssembler::LoadAddressSize(ExternalReference source) {
123 if (root_array_available_ && !serializer_enabled()) {
124 // This calculation depends on the internals of LoadAddress.
126 // Its correctness is ensured by the asserts in the Call
126 // instruction below.
127 int64_t delta = RootRegisterDelta(source);
128 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
129 // Operand is leap(scratch, Operand(kRootRegister, delta));
130 // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
132 if (!is_int8(static_cast<int32_t>(delta))) {
133 size += 3; // Need full four-byte displacement in lea.
138 // Size of movp(destination, src);
139 return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
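// Worked example (illustrative): the root-relative form is
// leap(scratch, Operand(kRootRegister, delta)), encoded as REX.W 8D ModRM plus
// an 8- or 32-bit displacement, i.e. 4 bytes when is_int8(delta) holds and
// 7 bytes otherwise. When that form cannot be used, the size is that of
// moving a full 64-bit address into the scratch register
// (Assembler::kMoveAddressIntoScratchRegisterInstructionLength).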
143 void MacroAssembler::PushAddress(ExternalReference source) {
144 int64_t address = reinterpret_cast<int64_t>(source.address());
145 if (is_int32(address) && !serializer_enabled()) {
146 if (emit_debug_code()) {
147 Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
149 Push(Immediate(static_cast<int32_t>(address)));
152 LoadAddress(kScratchRegister, source);
153 Push(kScratchRegister);
157 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
158 DCHECK(root_array_available_);
159 movp(destination, Operand(kRootRegister,
160 (index << kPointerSizeLog2) - kRootRegisterBias));
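// Illustrative example (constants are schematic): kRootRegister holds
// roots_array_start + kRootRegisterBias, so entry `index` sits at displacement
// (index << kPointerSizeLog2) - kRootRegisterBias from it. With 8-byte
// pointers and index 5 that is 40 - kRootRegisterBias, and
//
//   LoadRoot(rax, Heap::kUndefinedValueRootIndex);
//
// compiles to a single movp with no relocation information.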
164 void MacroAssembler::LoadRootIndexed(Register destination,
165 Register variable_offset,
167 DCHECK(root_array_available_);
169 Operand(kRootRegister,
170 variable_offset, times_pointer_size,
171 (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
175 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
176 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
177 DCHECK(root_array_available_);
178 movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
183 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
184 DCHECK(root_array_available_);
185 Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
189 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
190 DCHECK(root_array_available_);
191 cmpp(with, Operand(kRootRegister,
192 (index << kPointerSizeLog2) - kRootRegisterBias));
196 void MacroAssembler::CompareRoot(const Operand& with,
197 Heap::RootListIndex index) {
198 DCHECK(root_array_available_);
199 DCHECK(!with.AddressUsesRegister(kScratchRegister));
200 LoadRoot(kScratchRegister, index);
201 cmpp(with, kScratchRegister);
205 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
208 SaveFPRegsMode save_fp,
209 RememberedSetFinalAction and_then) {
210 if (emit_debug_code()) {
212 JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
216 // Load store buffer top.
217 LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
218 // Store pointer to buffer.
219 movp(Operand(scratch, 0), addr);
220 // Increment buffer top.
221 addp(scratch, Immediate(kPointerSize));
222 // Write back new top of buffer.
223 StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
224 // Call stub on end of buffer.
226 // Check for end of buffer.
227 testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
228 if (and_then == kReturnAtEnd) {
229 Label buffer_overflowed;
230 j(not_equal, &buffer_overflowed, Label::kNear);
232 bind(&buffer_overflowed);
234 DCHECK(and_then == kFallThroughAtEnd);
235 j(equal, &done, Label::kNear);
237 StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
238 CallStub(&store_buffer_overflow);
239 if (and_then == kReturnAtEnd) {
242 DCHECK(and_then == kFallThroughAtEnd);
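// The fast path above is a simple bump of the store buffer top (sketch, in
// pseudo-code; the overflow case calls StoreBufferOverflowStub):
//
//   top = roots[kStoreBufferTopRootIndex];
//   *top = addr;                        // remember the written slot
//   top += kPointerSize;
//   roots[kStoreBufferTopRootIndex] = top;
//   if (top & kStoreBufferOverflowBit) handle_overflow();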
248 void MacroAssembler::InNewSpace(Register object,
252 Label::Distance distance) {
253 if (serializer_enabled()) {
254 // Can't do arithmetic on external references if it might get serialized.
255 // The mask isn't really an address. We load it as an external reference in
256 // case the size of the new space is different between the snapshot maker
257 // and the running system.
258 if (scratch.is(object)) {
259 Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
260 andp(scratch, kScratchRegister);
262 Move(scratch, ExternalReference::new_space_mask(isolate()));
263 andp(scratch, object);
265 Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
266 cmpp(scratch, kScratchRegister);
267 j(cc, branch, distance);
269 DCHECK(kPointerSize == kInt64Size
270 ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
271 : kPointerSize == kInt32Size);
272 intptr_t new_space_start =
273 reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
274 Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
275 Assembler::RelocInfoNone());
276 if (scratch.is(object)) {
277 addp(scratch, kScratchRegister);
279 leap(scratch, Operand(object, kScratchRegister, times_1, 0));
282 Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
283 j(cc, branch, distance);
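// Sketch of the non-serializer fast path above (numbers are made up): new
// space is an aligned, power-of-two sized region, so an address lies in new
// space exactly when (addr - new_space_start) & new_space_mask == 0, where the
// mask clears the in-space offset bits (roughly ~(space_size - 1)). For
// example, with start 0x20000000 and a 16 MB space,
// (0x20345678 - 0x20000000) & ~0xFFFFFF == 0, so that address is in new space
// and the branch is taken or skipped according to `cc`.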
288 void MacroAssembler::RecordWriteField(
293 SaveFPRegsMode save_fp,
294 RememberedSetAction remembered_set_action,
296 PointersToHereCheck pointers_to_here_check_for_value) {
297 // First, check if a write barrier is even needed. The tests below
298 // catch stores of Smis.
301 // Skip barrier if writing a smi.
302 if (smi_check == INLINE_SMI_CHECK) {
303 JumpIfSmi(value, &done);
306 // Although the object register is tagged, the offset is relative to the start
307 // of the object, so the offset must be a multiple of kPointerSize.
308 DCHECK(IsAligned(offset, kPointerSize));
310 leap(dst, FieldOperand(object, offset));
311 if (emit_debug_code()) {
313 testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
314 j(zero, &ok, Label::kNear);
319 RecordWrite(object, dst, value, save_fp, remembered_set_action,
320 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
324 // Clobber clobbered input registers when running with the debug-code flag
325 // turned on to provoke errors.
326 if (emit_debug_code()) {
327 Move(value, kZapValue, Assembler::RelocInfoNone());
328 Move(dst, kZapValue, Assembler::RelocInfoNone());
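// Typical use (illustrative, register choices are hypothetical): emit the
// barrier right after storing a tagged value into a field, e.g.
//
//   movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rax);
//   RecordWriteField(rbx, JSObject::kPropertiesOffset, rax, rcx,
//                    kDontSaveFPRegs);
//
// Here rcx is only a scratch register; like rax it is zapped afterwards when
// emit_debug_code() is on, as the code above shows.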
333 void MacroAssembler::RecordWriteArray(
337 SaveFPRegsMode save_fp,
338 RememberedSetAction remembered_set_action,
340 PointersToHereCheck pointers_to_here_check_for_value) {
341 // First, check if a write barrier is even needed. The tests below
342 // catch stores of Smis.
345 // Skip barrier if writing a smi.
346 if (smi_check == INLINE_SMI_CHECK) {
347 JumpIfSmi(value, &done);
350 // Array access: calculate the destination address. Index is not a smi.
351 Register dst = index;
352 leap(dst, Operand(object, index, times_pointer_size,
353 FixedArray::kHeaderSize - kHeapObjectTag));
355 RecordWrite(object, dst, value, save_fp, remembered_set_action,
356 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
360 // Clobber clobbered input registers when running with the debug-code flag
361 // turned on to provoke errors.
362 if (emit_debug_code()) {
363 Move(value, kZapValue, Assembler::RelocInfoNone());
364 Move(index, kZapValue, Assembler::RelocInfoNone());
369 void MacroAssembler::RecordWriteForMap(Register object,
372 SaveFPRegsMode fp_mode) {
373 DCHECK(!object.is(kScratchRegister));
374 DCHECK(!object.is(map));
375 DCHECK(!object.is(dst));
376 DCHECK(!map.is(dst));
377 AssertNotSmi(object);
379 if (emit_debug_code()) {
381 if (map.is(kScratchRegister)) pushq(map);
382 CompareMap(map, isolate()->factory()->meta_map());
383 if (map.is(kScratchRegister)) popq(map);
384 j(equal, &ok, Label::kNear);
389 if (!FLAG_incremental_marking) {
393 if (emit_debug_code()) {
395 if (map.is(kScratchRegister)) pushq(map);
396 cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
397 if (map.is(kScratchRegister)) popq(map);
398 j(equal, &ok, Label::kNear);
403 // Compute the address.
404 leap(dst, FieldOperand(object, HeapObject::kMapOffset));
406 // First, check if a write barrier is even needed. The tests below
407 // catch stores of smis and stores into the young generation.
410 // A single check of the map's page's interesting flag suffices, since it is
411 // only set during incremental collection, and then it's also guaranteed that
412 // the from object's page's interesting flag is also set. This optimization
413 // relies on the fact that maps can never be in new space.
415 map, // Used as scratch.
416 MemoryChunk::kPointersToHereAreInterestingMask,
421 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
427 // Count number of write barriers in generated code.
428 isolate()->counters()->write_barriers_static()->Increment();
429 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
431 // Clobber clobbered registers when running with the debug-code flag
432 // turned on to provoke errors.
433 if (emit_debug_code()) {
434 Move(dst, kZapValue, Assembler::RelocInfoNone());
435 Move(map, kZapValue, Assembler::RelocInfoNone());
440 void MacroAssembler::RecordWrite(
444 SaveFPRegsMode fp_mode,
445 RememberedSetAction remembered_set_action,
447 PointersToHereCheck pointers_to_here_check_for_value) {
448 DCHECK(!object.is(value));
449 DCHECK(!object.is(address));
450 DCHECK(!value.is(address));
451 AssertNotSmi(object);
453 if (remembered_set_action == OMIT_REMEMBERED_SET &&
454 !FLAG_incremental_marking) {
458 if (emit_debug_code()) {
460 cmpp(value, Operand(address, 0));
461 j(equal, &ok, Label::kNear);
466 // First, check if a write barrier is even needed. The tests below
467 // catch stores of smis and stores into the young generation.
470 if (smi_check == INLINE_SMI_CHECK) {
471 // Skip barrier if writing a smi.
472 JumpIfSmi(value, &done);
475 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
477 value, // Used as scratch.
478 MemoryChunk::kPointersToHereAreInterestingMask,
484 CheckPageFlag(object,
485 value, // Used as scratch.
486 MemoryChunk::kPointersFromHereAreInterestingMask,
491 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
497 // Count number of write barriers in generated code.
498 isolate()->counters()->write_barriers_static()->Increment();
499 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
501 // Clobber clobbered registers when running with the debug-code flag
502 // turned on to provoke errors.
503 if (emit_debug_code()) {
504 Move(address, kZapValue, Assembler::RelocInfoNone());
505 Move(value, kZapValue, Assembler::RelocInfoNone());
510 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
511 if (emit_debug_code()) Check(cc, reason);
515 void MacroAssembler::AssertFastElements(Register elements) {
516 if (emit_debug_code()) {
518 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
519 Heap::kFixedArrayMapRootIndex);
520 j(equal, &ok, Label::kNear);
521 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
522 Heap::kFixedDoubleArrayMapRootIndex);
523 j(equal, &ok, Label::kNear);
524 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
525 Heap::kFixedCOWArrayMapRootIndex);
526 j(equal, &ok, Label::kNear);
527 Abort(kJSObjectWithFastElementsMapHasSlowElements);
533 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
535 j(cc, &L, Label::kNear);
537 // Control will not return here.
542 void MacroAssembler::CheckStackAlignment() {
543 int frame_alignment = base::OS::ActivationFrameAlignment();
544 int frame_alignment_mask = frame_alignment - 1;
545 if (frame_alignment > kPointerSize) {
546 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
547 Label alignment_as_expected;
548 testp(rsp, Immediate(frame_alignment_mask));
549 j(zero, &alignment_as_expected, Label::kNear);
550 // Abort if stack is not aligned.
552 bind(&alignment_as_expected);
557 void MacroAssembler::NegativeZeroTest(Register result,
561 testl(result, result);
562 j(not_zero, &ok, Label::kNear);
569 void MacroAssembler::Abort(BailoutReason reason) {
571 const char* msg = GetBailoutReason(reason);
573 RecordComment("Abort message: ");
577 if (FLAG_trap_on_abort) {
583 Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
584 Assembler::RelocInfoNone());
585 Push(kScratchRegister);
588 // We don't actually want to generate a pile of code for this, so just
589 // claim there is a stack frame, without generating one.
590 FrameScope scope(this, StackFrame::NONE);
591 CallRuntime(Runtime::kAbort, 1);
593 CallRuntime(Runtime::kAbort, 1);
595 // Control will not return here.
600 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
601 DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
602 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
606 void MacroAssembler::TailCallStub(CodeStub* stub) {
607 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
611 void MacroAssembler::StubReturn(int argc) {
612 DCHECK(argc >= 1 && generating_stub());
613 ret((argc - 1) * kPointerSize);
617 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
618 return has_frame_ || !stub->SometimesSetsUpAFrame();
622 void MacroAssembler::IndexFromHash(Register hash, Register index) {
623 // The assert checks that the constants for the maximum number of digits
624 // for an array index cached in the hash field and the number of bits
625 // reserved for it do not conflict.
626 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
627 (1 << String::kArrayIndexValueBits));
628 if (!hash.is(index)) {
631 DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
635 void MacroAssembler::CallRuntime(const Runtime::Function* f,
637 SaveFPRegsMode save_doubles) {
638 // If the expected number of arguments of the runtime function is
639 // constant, we check that the actual number of arguments matches the expectation.
641 CHECK(f->nargs < 0 || f->nargs == num_arguments);
643 // TODO(1236192): Most runtime routines don't need the number of
644 // arguments passed in because it is constant. At some point we
645 // should remove this need and make the runtime routine entry code smarter.
647 Set(rax, num_arguments);
648 LoadAddress(rbx, ExternalReference(f, isolate()));
649 CEntryStub ces(isolate(), f->result_size, save_doubles);
654 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
656 Set(rax, num_arguments);
657 LoadAddress(rbx, ext);
659 CEntryStub stub(isolate(), 1);
664 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
667 // ----------- S t a t e -------------
668 // -- rsp[0] : return address
669 // -- rsp[8] : argument num_arguments - 1
671 // -- rsp[8 * num_arguments] : argument 0 (receiver)
672 // -----------------------------------
674 // TODO(1236192): Most runtime routines don't need the number of
675 // arguments passed in because it is constant. At some point we
676 // should remove this need and make the runtime routine entry code smarter.
678 Set(rax, num_arguments);
679 JumpToExternalReference(ext, result_size);
683 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
686 TailCallExternalReference(ExternalReference(fid, isolate()),
692 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
694 // Set the entry point and jump to the C entry runtime stub.
695 LoadAddress(rbx, ext);
696 CEntryStub ces(isolate(), result_size);
697 jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
701 void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
702 const CallWrapper& call_wrapper) {
703 // You can't call a builtin without a valid frame.
704 DCHECK(flag == JUMP_FUNCTION || has_frame());
706 // Rely on the assertion to check that the number of provided
707 // arguments matches the expected number of arguments. Fake a
708 // parameter count to avoid emitting code to do the check.
709 ParameterCount expected(0);
710 GetBuiltinEntry(rdx, native_context_index);
711 InvokeCode(rdx, expected, expected, flag, call_wrapper);
715 void MacroAssembler::GetBuiltinFunction(Register target,
716 int native_context_index) {
717 // Load the builtins object into target register.
718 movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
719 movp(target, FieldOperand(target, GlobalObject::kNativeContextOffset));
720 movp(target, ContextOperand(target, native_context_index));
724 void MacroAssembler::GetBuiltinEntry(Register target,
725 int native_context_index) {
726 DCHECK(!target.is(rdi));
727 // Load the JavaScript builtin function from the builtins object.
728 GetBuiltinFunction(rdi, native_context_index);
729 movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
733 #define REG(Name) { kRegister_ ## Name ## _Code }
735 static const Register saved_regs[] = {
736 REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
737 REG(r9), REG(r10), REG(r11)
742 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
745 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
748 Register exclusion3) {
749 // We don't allow a GC during a store buffer overflow so there is no need to
750 // store the registers in any particular way, but we do have to store and restore them.
752 for (int i = 0; i < kNumberOfSavedRegs; i++) {
753 Register reg = saved_regs[i];
754 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
758 // R12 to r15 are callee save on all platforms.
759 if (fp_mode == kSaveFPRegs) {
760 subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
761 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
762 XMMRegister reg = XMMRegister::from_code(i);
763 movsd(Operand(rsp, i * kDoubleSize), reg);
769 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
772 Register exclusion3) {
773 if (fp_mode == kSaveFPRegs) {
774 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
775 XMMRegister reg = XMMRegister::from_code(i);
776 movsd(reg, Operand(rsp, i * kDoubleSize));
778 addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
780 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
781 Register reg = saved_regs[i];
782 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
789 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
795 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
801 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
802 DCHECK(!r.IsDouble());
803 if (r.IsInteger8()) {
805 } else if (r.IsUInteger8()) {
807 } else if (r.IsInteger16()) {
809 } else if (r.IsUInteger16()) {
811 } else if (r.IsInteger32()) {
819 void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
820 DCHECK(!r.IsDouble());
821 if (r.IsInteger8() || r.IsUInteger8()) {
823 } else if (r.IsInteger16() || r.IsUInteger16()) {
825 } else if (r.IsInteger32()) {
828 if (r.IsHeapObject()) {
830 } else if (r.IsSmi()) {
838 void MacroAssembler::Set(Register dst, int64_t x) {
841 } else if (is_uint32(x)) {
842 movl(dst, Immediate(static_cast<uint32_t>(x)));
843 } else if (is_int32(x)) {
844 movq(dst, Immediate(static_cast<int32_t>(x)));
851 void MacroAssembler::Set(const Operand& dst, intptr_t x) {
852 if (kPointerSize == kInt64Size) {
854 movp(dst, Immediate(static_cast<int32_t>(x)));
856 Set(kScratchRegister, x);
857 movp(dst, kScratchRegister);
860 movp(dst, Immediate(static_cast<int32_t>(x)));
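// Illustrative summary of the Set() overloads above: the register form picks
// the shortest encoding for the value -- xorl for 0, movl for values that fit
// in an unsigned 32 bits (implicitly zero-extended), movq with a sign-extended
// 32-bit immediate for small negative values, and a full movq imm64 otherwise.
// E.g. Set(rax, 0) emits xorl(rax, rax), while Set(rax, 0x123456789) needs the
// 10-byte movq form.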
865 // ----------------------------------------------------------------------------
866 // Smi tagging, untagging and tag detection.
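// Background for the helpers below (informal summary): a smi stores an integer
// directly in a tagged word. With SmiValuesAre32Bits() (the usual x64
// configuration) the 32-bit value lives in the upper half of the 64-bit word
// and the lower 32 bits, including the tag bit (kSmiTag == 0), are zero, so
// kSmiShift is 32; with SmiValuesAre31Bits() the value is simply shifted left
// by one. Worked example for 32-bit smi values:
//
//    5 -> 0x0000000500000000   (Integer32ToSmi: shl by kSmiShift)
//   -1 -> 0xFFFFFFFF00000000   (SmiToInteger32: shr/sar by kSmiShift)
//
// Heap object pointers have the low bit set (kHeapObjectTag == 1), which is
// what the testb(..., Immediate(kSmiTagMask)) checks below rely on.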
868 bool MacroAssembler::IsUnsafeInt(const int32_t x) {
869 static const int kMaxBits = 17;
870 return !is_intn(x, kMaxBits);
874 void MacroAssembler::SafeMove(Register dst, Smi* src) {
875 DCHECK(!dst.is(kScratchRegister));
876 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
877 if (SmiValuesAre32Bits()) {
878 // JIT cookie can be converted to Smi.
879 Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
880 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
881 xorp(dst, kScratchRegister);
883 DCHECK(SmiValuesAre31Bits());
884 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
885 movp(dst, Immediate(value ^ jit_cookie()));
886 xorp(dst, Immediate(jit_cookie()));
894 void MacroAssembler::SafePush(Smi* src) {
895 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
896 if (SmiValuesAre32Bits()) {
897 // JIT cookie can be converted to Smi.
898 Push(Smi::FromInt(src->value() ^ jit_cookie()));
899 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
900 xorp(Operand(rsp, 0), kScratchRegister);
902 DCHECK(SmiValuesAre31Bits());
903 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
904 Push(Immediate(value ^ jit_cookie()));
905 xorp(Operand(rsp, 0), Immediate(jit_cookie()));
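// Illustrative sketch of the jit-cookie scheme used by SafeMove/SafePush:
// instead of embedding an attacker-influenced constant verbatim in the code
// stream, the value is emitted xor'ed with a per-process cookie and re-xor'ed
// at run time (pseudo-code, hypothetical cookie value):
//
//   cookie = jit_cookie();        // e.g. 0x5A5A5A5A
//   emit(value ^ cookie);         // what appears in the instruction stream
//   xor(dst, cookie);             // restores the intended value at run time
//
// IsUnsafeInt() limits this to constants wider than 17 bits, where enough
// attacker-controlled bits could otherwise end up in executable memory.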
913 Register MacroAssembler::GetSmiConstant(Smi* source) {
914 int value = source->value();
916 xorl(kScratchRegister, kScratchRegister);
917 return kScratchRegister;
919 LoadSmiConstant(kScratchRegister, source);
920 return kScratchRegister;
924 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
925 // Special-casing 0 here to use xorl seems to make things slower, so we don't do it.
927 Move(dst, source, Assembler::RelocInfoNone());
931 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
932 STATIC_ASSERT(kSmiTag == 0);
936 shlp(dst, Immediate(kSmiShift));
940 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
941 if (emit_debug_code()) {
942 testb(dst, Immediate(0x01));
944 j(zero, &ok, Label::kNear);
945 Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
949 if (SmiValuesAre32Bits()) {
950 DCHECK(kSmiShift % kBitsPerByte == 0);
951 movl(Operand(dst, kSmiShift / kBitsPerByte), src);
953 DCHECK(SmiValuesAre31Bits());
954 Integer32ToSmi(kScratchRegister, src);
955 movp(dst, kScratchRegister);
960 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
964 addl(dst, Immediate(constant));
966 leal(dst, Operand(src, constant));
968 shlp(dst, Immediate(kSmiShift));
972 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
973 STATIC_ASSERT(kSmiTag == 0);
978 if (SmiValuesAre32Bits()) {
979 shrp(dst, Immediate(kSmiShift));
981 DCHECK(SmiValuesAre31Bits());
982 sarl(dst, Immediate(kSmiShift));
987 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
988 if (SmiValuesAre32Bits()) {
989 movl(dst, Operand(src, kSmiShift / kBitsPerByte));
991 DCHECK(SmiValuesAre31Bits());
993 sarl(dst, Immediate(kSmiShift));
998 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
999 STATIC_ASSERT(kSmiTag == 0);
1003 sarp(dst, Immediate(kSmiShift));
1004 if (kPointerSize == kInt32Size) {
1005 // Sign extend to 64-bit.
1011 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
1012 if (SmiValuesAre32Bits()) {
1013 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
1015 DCHECK(SmiValuesAre31Bits());
1017 SmiToInteger64(dst, dst);
1022 void MacroAssembler::SmiTest(Register src) {
1028 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1035 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
1041 void MacroAssembler::Cmp(Register dst, Smi* src) {
1042 DCHECK(!dst.is(kScratchRegister));
1043 if (src->value() == 0) {
1046 Register constant_reg = GetSmiConstant(src);
1047 cmpp(dst, constant_reg);
1052 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
1059 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
1066 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
1068 if (SmiValuesAre32Bits()) {
1069 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1071 DCHECK(SmiValuesAre31Bits());
1072 cmpl(dst, Immediate(src));
1077 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
1078 // The Operand cannot use the smi register.
1079 Register smi_reg = GetSmiConstant(src);
1080 DCHECK(!dst.AddressUsesRegister(smi_reg));
1085 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
1086 if (SmiValuesAre32Bits()) {
1087 cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
1089 DCHECK(SmiValuesAre31Bits());
1090 SmiToInteger32(kScratchRegister, dst);
1091 cmpl(kScratchRegister, src);
1096 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
1102 SmiToInteger64(dst, src);
1108 if (power < kSmiShift) {
1109 sarp(dst, Immediate(kSmiShift - power));
1110 } else if (power > kSmiShift) {
1111 shlp(dst, Immediate(power - kSmiShift));
1116 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
1119 DCHECK((0 <= power) && (power < 32));
1121 shrp(dst, Immediate(power + kSmiShift));
1123 UNIMPLEMENTED(); // Not used.
1128 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
1130 Label::Distance near_jump) {
1131 if (dst.is(src1) || dst.is(src2)) {
1132 DCHECK(!src1.is(kScratchRegister));
1133 DCHECK(!src2.is(kScratchRegister));
1134 movp(kScratchRegister, src1);
1135 orp(kScratchRegister, src2);
1136 JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
1137 movp(dst, kScratchRegister);
1141 JumpIfNotSmi(dst, on_not_smis, near_jump);
1146 Condition MacroAssembler::CheckSmi(Register src) {
1147 STATIC_ASSERT(kSmiTag == 0);
1148 testb(src, Immediate(kSmiTagMask));
1153 Condition MacroAssembler::CheckSmi(const Operand& src) {
1154 STATIC_ASSERT(kSmiTag == 0);
1155 testb(src, Immediate(kSmiTagMask));
1160 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
1161 STATIC_ASSERT(kSmiTag == 0);
1162 // Test that both bits of the mask 0x8000000000000001 are zero.
1163 movp(kScratchRegister, src);
1164 rolp(kScratchRegister, Immediate(1));
1165 testb(kScratchRegister, Immediate(3));
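// Why the rotate works (illustrative): a non-negative smi has both the tag bit
// (bit 0) and the sign bit (bit 63) clear. rolp(..., 1) moves the sign bit
// into bit 0 and the tag bit into bit 1, so testing against the mask 3 checks
// both conditions with a single testb. Hypothetical 64-bit patterns:
//
//   smi 5        0x0000000500000000 -> rol 1 -> low two bits 00 (non-negative smi)
//   smi -1       0xFFFFFFFF00000000 -> rol 1 -> bit 0 set       (negative)
//   heap object  ...............1   -> rol 1 -> bit 1 set       (not a smi)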
1170 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
1171 if (first.is(second)) {
1172 return CheckSmi(first);
1174 STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
1175 if (SmiValuesAre32Bits()) {
1176 leal(kScratchRegister, Operand(first, second, times_1, 0));
1177 testb(kScratchRegister, Immediate(0x03));
1179 DCHECK(SmiValuesAre31Bits());
1180 movl(kScratchRegister, first);
1181 orl(kScratchRegister, second);
1182 testb(kScratchRegister, Immediate(kSmiTagMask));
1188 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
1190 if (first.is(second)) {
1191 return CheckNonNegativeSmi(first);
1193 movp(kScratchRegister, first);
1194 orp(kScratchRegister, second);
1195 rolp(kScratchRegister, Immediate(1));
1196 testl(kScratchRegister, Immediate(3));
1201 Condition MacroAssembler::CheckEitherSmi(Register first,
1204 if (first.is(second)) {
1205 return CheckSmi(first);
1207 if (scratch.is(second)) {
1208 andl(scratch, first);
1210 if (!scratch.is(first)) {
1211 movl(scratch, first);
1213 andl(scratch, second);
1215 testb(scratch, Immediate(kSmiTagMask));
1220 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1221 if (SmiValuesAre32Bits()) {
1222 // A 32-bit integer value can always be converted to a smi.
1225 DCHECK(SmiValuesAre31Bits());
1226 cmpl(src, Immediate(0xc0000000));
1232 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1233 if (SmiValuesAre32Bits()) {
1234 // An unsigned 32-bit integer value is valid as long as the high bit
1239 DCHECK(SmiValuesAre31Bits());
1240 testl(src, Immediate(0xc0000000));
1246 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1248 andl(dst, Immediate(kSmiTagMask));
1250 movl(dst, Immediate(kSmiTagMask));
1256 void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1257 if (!(src.AddressUsesRegister(dst))) {
1258 movl(dst, Immediate(kSmiTagMask));
1262 andl(dst, Immediate(kSmiTagMask));
1267 void MacroAssembler::JumpIfValidSmiValue(Register src,
1269 Label::Distance near_jump) {
1270 Condition is_valid = CheckInteger32ValidSmiValue(src);
1271 j(is_valid, on_valid, near_jump);
1275 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1277 Label::Distance near_jump) {
1278 Condition is_valid = CheckInteger32ValidSmiValue(src);
1279 j(NegateCondition(is_valid), on_invalid, near_jump);
1283 void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
1285 Label::Distance near_jump) {
1286 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1287 j(is_valid, on_valid, near_jump);
1291 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1293 Label::Distance near_jump) {
1294 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1295 j(NegateCondition(is_valid), on_invalid, near_jump);
1299 void MacroAssembler::JumpIfSmi(Register src,
1301 Label::Distance near_jump) {
1302 Condition smi = CheckSmi(src);
1303 j(smi, on_smi, near_jump);
1307 void MacroAssembler::JumpIfNotSmi(Register src,
1309 Label::Distance near_jump) {
1310 Condition smi = CheckSmi(src);
1311 j(NegateCondition(smi), on_not_smi, near_jump);
1315 void MacroAssembler::JumpUnlessNonNegativeSmi(
1316 Register src, Label* on_not_smi_or_negative,
1317 Label::Distance near_jump) {
1318 Condition non_negative_smi = CheckNonNegativeSmi(src);
1319 j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1323 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1326 Label::Distance near_jump) {
1327 SmiCompare(src, constant);
1328 j(equal, on_equals, near_jump);
1332 void MacroAssembler::JumpIfNotBothSmi(Register src1,
1334 Label* on_not_both_smi,
1335 Label::Distance near_jump) {
1336 Condition both_smi = CheckBothSmi(src1, src2);
1337 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1341 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1343 Label* on_not_both_smi,
1344 Label::Distance near_jump) {
1345 Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1346 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1350 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1351 if (constant->value() == 0) {
1356 } else if (dst.is(src)) {
1357 DCHECK(!dst.is(kScratchRegister));
1358 Register constant_reg = GetSmiConstant(constant);
1359 addp(dst, constant_reg);
1361 LoadSmiConstant(dst, constant);
1367 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1368 if (constant->value() != 0) {
1369 if (SmiValuesAre32Bits()) {
1370 addl(Operand(dst, kSmiShift / kBitsPerByte),
1371 Immediate(constant->value()));
1373 DCHECK(SmiValuesAre31Bits());
1374 addp(dst, Immediate(constant));
1380 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant,
1381 SmiOperationConstraints constraints,
1382 Label* bailout_label,
1383 Label::Distance near_jump) {
1384 if (constant->value() == 0) {
1388 } else if (dst.is(src)) {
1389 DCHECK(!dst.is(kScratchRegister));
1390 LoadSmiConstant(kScratchRegister, constant);
1391 addp(dst, kScratchRegister);
1392 if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
1393 j(no_overflow, bailout_label, near_jump);
1394 DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1395 subp(dst, kScratchRegister);
1396 } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
1397 if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
1399 j(no_overflow, &done, Label::kNear);
1400 subp(dst, kScratchRegister);
1401 jmp(bailout_label, near_jump);
1404 // Bailout if overflow without reserving src.
1405 j(overflow, bailout_label, near_jump);
1411 DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1412 DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
1413 LoadSmiConstant(dst, constant);
1415 j(overflow, bailout_label, near_jump);
1420 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1421 if (constant->value() == 0) {
1425 } else if (dst.is(src)) {
1426 DCHECK(!dst.is(kScratchRegister));
1427 Register constant_reg = GetSmiConstant(constant);
1428 subp(dst, constant_reg);
1430 if (constant->value() == Smi::kMinValue) {
1431 LoadSmiConstant(dst, constant);
1432 // Adding and subtracting the min-value gives the same result; they only
1433 // differ in the overflow flag, which we don't check here.
1436 // Subtract by adding the negation.
1437 LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
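// Note (illustrative): -Smi::kMinValue is not representable, so the negated
// constant cannot be used in that case. Instead the constant itself is loaded
// and added; in two's complement, adding kMinValue produces the same bit
// pattern as subtracting it (only the overflow flag differs). E.g. with 8-bit
// values, 5 - (-128) and 5 + (-128) both yield the byte 0x85.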
1444 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant,
1445 SmiOperationConstraints constraints,
1446 Label* bailout_label,
1447 Label::Distance near_jump) {
1448 if (constant->value() == 0) {
1452 } else if (dst.is(src)) {
1453 DCHECK(!dst.is(kScratchRegister));
1454 LoadSmiConstant(kScratchRegister, constant);
1455 subp(dst, kScratchRegister);
1456 if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
1457 j(no_overflow, bailout_label, near_jump);
1458 DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1459 addp(dst, kScratchRegister);
1460 } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
1461 if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
1463 j(no_overflow, &done, Label::kNear);
1464 addp(dst, kScratchRegister);
1465 jmp(bailout_label, near_jump);
1468 // Bailout if overflow without reserving src.
1469 j(overflow, bailout_label, near_jump);
1475 DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1476 DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
1477 if (constant->value() == Smi::kMinValue) {
1478 DCHECK(!dst.is(kScratchRegister));
1480 LoadSmiConstant(kScratchRegister, constant);
1481 subp(dst, kScratchRegister);
1482 j(overflow, bailout_label, near_jump);
1484 // Subtract by adding the negation.
1485 LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1487 j(overflow, bailout_label, near_jump);
1493 void MacroAssembler::SmiNeg(Register dst,
1495 Label* on_smi_result,
1496 Label::Distance near_jump) {
1498 DCHECK(!dst.is(kScratchRegister));
1499 movp(kScratchRegister, src);
1500 negp(dst); // Low 32 bits are retained as zero by negation.
1501 // Test if result is zero or Smi::kMinValue.
1502 cmpp(dst, kScratchRegister);
1503 j(not_equal, on_smi_result, near_jump);
1504 movp(src, kScratchRegister);
1509 // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1510 j(not_equal, on_smi_result, near_jump);
1516 static void SmiAddHelper(MacroAssembler* masm,
1520 Label* on_not_smi_result,
1521 Label::Distance near_jump) {
1524 masm->addp(dst, src2);
1525 masm->j(no_overflow, &done, Label::kNear);
1527 masm->subp(dst, src2);
1528 masm->jmp(on_not_smi_result, near_jump);
1531 masm->movp(dst, src1);
1532 masm->addp(dst, src2);
1533 masm->j(overflow, on_not_smi_result, near_jump);
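// Pattern used by SmiAddHelper (and SmiSubHelper below), as a sketch: when dst
// aliases src1, the operation is performed destructively and, on overflow,
// undone by the inverse operation before jumping to the bailout, so the caller
// still sees the original operands:
//
//   addp(dst, src2);             // dst == src1
//   j(no_overflow, &done);
//   subp(dst, src2);             // restore src1
//   jmp(on_not_smi_result);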
1538 void MacroAssembler::SmiAdd(Register dst,
1541 Label* on_not_smi_result,
1542 Label::Distance near_jump) {
1543 DCHECK_NOT_NULL(on_not_smi_result);
1544 DCHECK(!dst.is(src2));
1545 SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1549 void MacroAssembler::SmiAdd(Register dst,
1551 const Operand& src2,
1552 Label* on_not_smi_result,
1553 Label::Distance near_jump) {
1554 DCHECK_NOT_NULL(on_not_smi_result);
1555 DCHECK(!src2.AddressUsesRegister(dst));
1556 SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1560 void MacroAssembler::SmiAdd(Register dst,
1563 // No overflow checking. Use only when it's known that
1564 // overflowing is impossible.
1565 if (!dst.is(src1)) {
1566 if (emit_debug_code()) {
1567 movp(kScratchRegister, src1);
1568 addp(kScratchRegister, src2);
1569 Check(no_overflow, kSmiAdditionOverflow);
1571 leap(dst, Operand(src1, src2, times_1, 0));
1574 Assert(no_overflow, kSmiAdditionOverflow);
1580 static void SmiSubHelper(MacroAssembler* masm,
1584 Label* on_not_smi_result,
1585 Label::Distance near_jump) {
1588 masm->subp(dst, src2);
1589 masm->j(no_overflow, &done, Label::kNear);
1591 masm->addp(dst, src2);
1592 masm->jmp(on_not_smi_result, near_jump);
1595 masm->movp(dst, src1);
1596 masm->subp(dst, src2);
1597 masm->j(overflow, on_not_smi_result, near_jump);
1602 void MacroAssembler::SmiSub(Register dst,
1605 Label* on_not_smi_result,
1606 Label::Distance near_jump) {
1607 DCHECK_NOT_NULL(on_not_smi_result);
1608 DCHECK(!dst.is(src2));
1609 SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1613 void MacroAssembler::SmiSub(Register dst,
1615 const Operand& src2,
1616 Label* on_not_smi_result,
1617 Label::Distance near_jump) {
1618 DCHECK_NOT_NULL(on_not_smi_result);
1619 DCHECK(!src2.AddressUsesRegister(dst));
1620 SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1625 static void SmiSubNoOverflowHelper(MacroAssembler* masm,
1629 // No overflow checking. Use only when it's known that
1630 // overflowing is impossible (e.g., subtracting two positive smis).
1631 if (!dst.is(src1)) {
1632 masm->movp(dst, src1);
1634 masm->subp(dst, src2);
1635 masm->Assert(no_overflow, kSmiSubtractionOverflow);
1639 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1640 DCHECK(!dst.is(src2));
1641 SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
1645 void MacroAssembler::SmiSub(Register dst,
1647 const Operand& src2) {
1648 SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
1652 void MacroAssembler::SmiMul(Register dst,
1655 Label* on_not_smi_result,
1656 Label::Distance near_jump) {
1657 DCHECK(!dst.is(src2));
1658 DCHECK(!dst.is(kScratchRegister));
1659 DCHECK(!src1.is(kScratchRegister));
1660 DCHECK(!src2.is(kScratchRegister));
1663 Label failure, zero_correct_result;
1664 movp(kScratchRegister, src1); // Create backup for later testing.
1665 SmiToInteger64(dst, src1);
1667 j(overflow, &failure, Label::kNear);
1669 // Check for negative zero result. If product is zero, and one
1670 // argument is negative, go to slow case.
1671 Label correct_result;
1673 j(not_zero, &correct_result, Label::kNear);
1675 movp(dst, kScratchRegister);
1677 // Result was positive zero.
1678 j(positive, &zero_correct_result, Label::kNear);
1680 bind(&failure); // Reused failure exit, restores src1.
1681 movp(src1, kScratchRegister);
1682 jmp(on_not_smi_result, near_jump);
1684 bind(&zero_correct_result);
1687 bind(&correct_result);
1689 SmiToInteger64(dst, src1);
1691 j(overflow, on_not_smi_result, near_jump);
1692 // Check for negative zero result. If product is zero, and one
1693 // argument is negative, go to slow case.
1694 Label correct_result;
1696 j(not_zero, &correct_result, Label::kNear);
1697 // One of src1 and src2 is zero; check whether the other is negative.
1699 movp(kScratchRegister, src1);
1700 xorp(kScratchRegister, src2);
1701 j(negative, on_not_smi_result, near_jump);
1702 bind(&correct_result);
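// Negative zero handling above (illustrative): a zero product is only a valid
// smi result if no negative operand was involved, because e.g. -3 * 0 in
// JavaScript must produce -0, which is not a smi. Hence the extra sign test on
// the operands when the multiplication result is zero.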
1707 void MacroAssembler::SmiDiv(Register dst,
1710 Label* on_not_smi_result,
1711 Label::Distance near_jump) {
1712 DCHECK(!src1.is(kScratchRegister));
1713 DCHECK(!src2.is(kScratchRegister));
1714 DCHECK(!dst.is(kScratchRegister));
1715 DCHECK(!src2.is(rax));
1716 DCHECK(!src2.is(rdx));
1717 DCHECK(!src1.is(rdx));
1719 // Check for 0 divisor (result is +/-Infinity).
1721 j(zero, on_not_smi_result, near_jump);
1724 movp(kScratchRegister, src1);
1726 SmiToInteger32(rax, src1);
1727 // We need to rule out dividing Smi::kMinValue by -1, since that would
1728 // overflow in idiv and raise an exception.
1729 // We combine this with negative zero test (negative zero only happens
1730 // when dividing zero by a negative number).
1732 // We overshoot a little and go to slow case if we divide min-value
1733 // by any negative value, not just -1.
1735 testl(rax, Immediate(~Smi::kMinValue));
1736 j(not_zero, &safe_div, Label::kNear);
1739 j(positive, &safe_div, Label::kNear);
1740 movp(src1, kScratchRegister);
1741 jmp(on_not_smi_result, near_jump);
1743 j(negative, on_not_smi_result, near_jump);
1747 SmiToInteger32(src2, src2);
1748 // Sign extend src1 into edx:eax.
1751 Integer32ToSmi(src2, src2);
1752 // Check that the remainder is zero.
1756 j(zero, &smi_result, Label::kNear);
1757 movp(src1, kScratchRegister);
1758 jmp(on_not_smi_result, near_jump);
1761 j(not_zero, on_not_smi_result, near_jump);
1763 if (!dst.is(src1) && src1.is(rax)) {
1764 movp(src1, kScratchRegister);
1766 Integer32ToSmi(dst, rax);
1770 void MacroAssembler::SmiMod(Register dst,
1773 Label* on_not_smi_result,
1774 Label::Distance near_jump) {
1775 DCHECK(!dst.is(kScratchRegister));
1776 DCHECK(!src1.is(kScratchRegister));
1777 DCHECK(!src2.is(kScratchRegister));
1778 DCHECK(!src2.is(rax));
1779 DCHECK(!src2.is(rdx));
1780 DCHECK(!src1.is(rdx));
1781 DCHECK(!src1.is(src2));
1784 j(zero, on_not_smi_result, near_jump);
1787 movp(kScratchRegister, src1);
1789 SmiToInteger32(rax, src1);
1790 SmiToInteger32(src2, src2);
1792 // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
1794 cmpl(rax, Immediate(Smi::kMinValue));
1795 j(not_equal, &safe_div, Label::kNear);
1796 cmpl(src2, Immediate(-1));
1797 j(not_equal, &safe_div, Label::kNear);
1798 // Retag inputs and go to the slow case.
1799 Integer32ToSmi(src2, src2);
1801 movp(src1, kScratchRegister);
1803 jmp(on_not_smi_result, near_jump);
1806 // Sign extend eax into edx:eax.
1809 // Restore smi tags on inputs.
1810 Integer32ToSmi(src2, src2);
1812 movp(src1, kScratchRegister);
1814 // Check for a negative zero result. If the result is zero, and the
1815 // dividend is negative, go slow to return a floating point negative zero.
1818 j(not_zero, &smi_result, Label::kNear);
1820 j(negative, on_not_smi_result, near_jump);
1822 Integer32ToSmi(dst, rdx);
1826 void MacroAssembler::SmiNot(Register dst, Register src) {
1827 DCHECK(!dst.is(kScratchRegister));
1828 DCHECK(!src.is(kScratchRegister));
1829 if (SmiValuesAre32Bits()) {
1830 // Set tag and padding bits before negating, so that they are zero afterwards.
1832 movl(kScratchRegister, Immediate(~0));
1834 DCHECK(SmiValuesAre31Bits());
1835 movl(kScratchRegister, Immediate(1));
1838 xorp(dst, kScratchRegister);
1840 leap(dst, Operand(src, kScratchRegister, times_1, 0));
1846 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
1847 DCHECK(!dst.is(src2));
1848 if (!dst.is(src1)) {
1855 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
1856 if (constant->value() == 0) {
1858 } else if (dst.is(src)) {
1859 DCHECK(!dst.is(kScratchRegister));
1860 Register constant_reg = GetSmiConstant(constant);
1861 andp(dst, constant_reg);
1863 LoadSmiConstant(dst, constant);
1869 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
1870 if (!dst.is(src1)) {
1871 DCHECK(!src1.is(src2));
1878 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
1880 DCHECK(!dst.is(kScratchRegister));
1881 Register constant_reg = GetSmiConstant(constant);
1882 orp(dst, constant_reg);
1884 LoadSmiConstant(dst, constant);
1890 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
1891 if (!dst.is(src1)) {
1892 DCHECK(!src1.is(src2));
1899 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
1901 DCHECK(!dst.is(kScratchRegister));
1902 Register constant_reg = GetSmiConstant(constant);
1903 xorp(dst, constant_reg);
1905 LoadSmiConstant(dst, constant);
1911 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
1914 DCHECK(is_uint5(shift_value));
1915 if (shift_value > 0) {
1917 sarp(dst, Immediate(shift_value + kSmiShift));
1918 shlp(dst, Immediate(kSmiShift));
1920 UNIMPLEMENTED(); // Not used.
1926 void MacroAssembler::SmiShiftLeftConstant(Register dst,
1929 Label* on_not_smi_result,
1930 Label::Distance near_jump) {
1931 if (SmiValuesAre32Bits()) {
1935 if (shift_value > 0) {
1936 // The shift amount is specified by the lower 5 bits, not six as for the shl opcode.
1937 shlq(dst, Immediate(shift_value & 0x1f));
1940 DCHECK(SmiValuesAre31Bits());
1942 UNIMPLEMENTED(); // Not used.
1944 SmiToInteger32(dst, src);
1945 shll(dst, Immediate(shift_value));
1946 JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
1947 Integer32ToSmi(dst, dst);
1953 void MacroAssembler::SmiShiftLogicalRightConstant(
1954 Register dst, Register src, int shift_value,
1955 Label* on_not_smi_result, Label::Distance near_jump) {
1956 // Logical right shift interprets its result as an *unsigned* number.
1958 UNIMPLEMENTED(); // Not used.
1960 if (shift_value == 0) {
1962 j(negative, on_not_smi_result, near_jump);
1964 if (SmiValuesAre32Bits()) {
1966 shrp(dst, Immediate(shift_value + kSmiShift));
1967 shlp(dst, Immediate(kSmiShift));
1969 DCHECK(SmiValuesAre31Bits());
1970 SmiToInteger32(dst, src);
1971 shrp(dst, Immediate(shift_value));
1972 JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
1973 Integer32ToSmi(dst, dst);
1979 void MacroAssembler::SmiShiftLeft(Register dst,
1982 Label* on_not_smi_result,
1983 Label::Distance near_jump) {
1984 if (SmiValuesAre32Bits()) {
1985 DCHECK(!dst.is(rcx));
1986 if (!dst.is(src1)) {
1989 // Untag shift amount.
1990 SmiToInteger32(rcx, src2);
1991 // The shift amount is specified by the lower 5 bits, not six as for the shl opcode.
1992 andp(rcx, Immediate(0x1f));
1995 DCHECK(SmiValuesAre31Bits());
1996 DCHECK(!dst.is(kScratchRegister));
1997 DCHECK(!src1.is(kScratchRegister));
1998 DCHECK(!src2.is(kScratchRegister));
1999 DCHECK(!dst.is(src2));
2000 DCHECK(!dst.is(rcx));
2002 if (src1.is(rcx) || src2.is(rcx)) {
2003 movq(kScratchRegister, rcx);
2006 UNIMPLEMENTED(); // Not used.
2009 SmiToInteger32(dst, src1);
2010 SmiToInteger32(rcx, src2);
2012 JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
2013 // Since neither src1 nor src2 can be dst, we do not need to restore them in the invalid-result case.
2015 if (src1.is(rcx) || src2.is(rcx)) {
2017 movq(src1, kScratchRegister);
2019 movq(src2, kScratchRegister);
2022 jmp(on_not_smi_result, near_jump);
2023 bind(&valid_result);
2024 Integer32ToSmi(dst, dst);
2030 void MacroAssembler::SmiShiftLogicalRight(Register dst,
2033 Label* on_not_smi_result,
2034 Label::Distance near_jump) {
2035 DCHECK(!dst.is(kScratchRegister));
2036 DCHECK(!src1.is(kScratchRegister));
2037 DCHECK(!src2.is(kScratchRegister));
2038 DCHECK(!dst.is(src2));
2039 DCHECK(!dst.is(rcx));
2040 if (src1.is(rcx) || src2.is(rcx)) {
2041 movq(kScratchRegister, rcx);
2044 UNIMPLEMENTED(); // Not used.
2047 SmiToInteger32(dst, src1);
2048 SmiToInteger32(rcx, src2);
2050 JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
2051 // Since neither src1 nor src2 can be dst, we do not need to restore them in the invalid-result case.
2053 if (src1.is(rcx) || src2.is(rcx)) {
2055 movq(src1, kScratchRegister);
2057 movq(src2, kScratchRegister);
2060 jmp(on_not_smi_result, near_jump);
2061 bind(&valid_result);
2062 Integer32ToSmi(dst, dst);
2067 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2070 DCHECK(!dst.is(kScratchRegister));
2071 DCHECK(!src1.is(kScratchRegister));
2072 DCHECK(!src2.is(kScratchRegister));
2073 DCHECK(!dst.is(rcx));
2075 SmiToInteger32(rcx, src2);
2076 if (!dst.is(src1)) {
2079 SmiToInteger32(dst, dst);
2081 Integer32ToSmi(dst, dst);
2085 void MacroAssembler::SelectNonSmi(Register dst,
2089 Label::Distance near_jump) {
2090 DCHECK(!dst.is(kScratchRegister));
2091 DCHECK(!src1.is(kScratchRegister));
2092 DCHECK(!src2.is(kScratchRegister));
2093 DCHECK(!dst.is(src1));
2094 DCHECK(!dst.is(src2));
2095 // The two operands must not both be smis.
2097 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2098 Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
2100 STATIC_ASSERT(kSmiTag == 0);
2101 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
2102 movl(kScratchRegister, Immediate(kSmiTagMask));
2103 andp(kScratchRegister, src1);
2104 testl(kScratchRegister, src2);
2105 // If non-zero then both are smis.
2106 j(not_zero, on_not_smis, near_jump);
2108 // Exactly one operand is a smi.
2109 DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
2110 // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
2111 subp(kScratchRegister, Immediate(1));
2112 // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
2115 andp(dst, kScratchRegister);
2116 // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2118 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
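// Informal sketch of the branch-free selection performed above (the
// intermediate moves are summarized):
//
//   scratch = (src1 & kSmiTagMask) - 1    // all 1s if src1 is a smi, else 0
//   dst     = (src1 ^ src2) & scratch     // src1 ^ src2, or 0
//   dst    ^= src1                        // src2 if src1 was the smi, else src1
//
// Either way dst ends up holding the operand that is not a smi, without a
// branch.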
2122 SmiIndex MacroAssembler::SmiToIndex(Register dst,
2125 if (SmiValuesAre32Bits()) {
2126 DCHECK(is_uint6(shift));
2127 // There is a possible optimization if shift is in the range 60-63, but that
2128 // will (and must) never happen.
2132 if (shift < kSmiShift) {
2133 sarp(dst, Immediate(kSmiShift - shift));
2135 shlp(dst, Immediate(shift - kSmiShift));
2137 return SmiIndex(dst, times_1);
2139 DCHECK(SmiValuesAre31Bits());
2140 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2144 // We have to sign extend the index register to 64-bit as the SMI might be negative.
2147 if (shift == times_1) {
2148 sarq(dst, Immediate(kSmiShift));
2149 return SmiIndex(dst, times_1);
2151 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2156 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2159 if (SmiValuesAre32Bits()) {
2160 // Register src holds a positive smi.
2161 DCHECK(is_uint6(shift));
2166 if (shift < kSmiShift) {
2167 sarp(dst, Immediate(kSmiShift - shift));
2169 shlp(dst, Immediate(shift - kSmiShift));
2171 return SmiIndex(dst, times_1);
2173 DCHECK(SmiValuesAre31Bits());
2174 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2179 if (shift == times_1) {
2180 sarq(dst, Immediate(kSmiShift));
2181 return SmiIndex(dst, times_1);
2183 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2188 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2189 if (SmiValuesAre32Bits()) {
2190 DCHECK_EQ(0, kSmiShift % kBitsPerByte);
2191 addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2193 DCHECK(SmiValuesAre31Bits());
2194 SmiToInteger32(kScratchRegister, src);
2195 addl(dst, kScratchRegister);
2200 void MacroAssembler::Push(Smi* source) {
2201 intptr_t smi = reinterpret_cast<intptr_t>(source);
2202 if (is_int32(smi)) {
2203 Push(Immediate(static_cast<int32_t>(smi)));
2205 Register constant = GetSmiConstant(source);
2211 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
2212 DCHECK(!src.is(scratch));
2215 shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2216 shlp(src, Immediate(kSmiShift));
2219 shlp(scratch, Immediate(kSmiShift));
2224 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
2225 DCHECK(!dst.is(scratch));
2228 shrp(scratch, Immediate(kSmiShift));
2230 shrp(dst, Immediate(kSmiShift));
2232 shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
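// Sketch of the two-smi encoding used by PushRegisterAsTwoSmis and
// PopRegisterAsTwoSmis (assuming 32-bit smi values, kSmiShift == 32): the
// 64-bit register is split into its high and low 32-bit halves, each parked in
// the value field of a smi, so both pushed stack slots carry a clear tag bit
// and are safe for the GC to scan. Popping shifts the halves back into place
// and recombines them into the original 64-bit value.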
2237 void MacroAssembler::Test(const Operand& src, Smi* source) {
2238 if (SmiValuesAre32Bits()) {
2239 testl(Operand(src, kIntSize), Immediate(source->value()));
2241 DCHECK(SmiValuesAre31Bits());
2242 testl(src, Immediate(source));
2247 // ----------------------------------------------------------------------------
2250 void MacroAssembler::LookupNumberStringCache(Register object,
2255 // Use of registers. Register result is used as a temporary.
2256 Register number_string_cache = result;
2257 Register mask = scratch1;
2258 Register scratch = scratch2;
2260 // Load the number string cache.
2261 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2263 // Make the hash mask from the length of the number string cache. It
2264 // contains two elements (number and string) for each cache entry.
2266 mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2267 shrl(mask, Immediate(1));
2268 subp(mask, Immediate(1)); // Make mask.
2270 // Calculate the entry in the number string cache. The hash value in the
2271 // number string cache for smis is just the smi value, and the hash for
2272 // doubles is the xor of the upper and lower words. See
2273 // Heap::GetNumberStringCache.
2275 Label load_result_from_cache;
2276 JumpIfSmi(object, &is_smi);
2278 isolate()->factory()->heap_number_map(),
2282 STATIC_ASSERT(8 == kDoubleSize);
2283 movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2284 xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2285 andp(scratch, mask);
2286 // Each entry in the string cache consists of two pointer-sized fields,
2287 // but the times_twice_pointer_size (multiplication by 16) scale factor
2288 // is not supported by the addressing modes on x64.
2289 // So we have to premultiply the entry index before the lookup.
2290 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2292 Register index = scratch;
2293 Register probe = mask;
2295 FieldOperand(number_string_cache,
2298 FixedArray::kHeaderSize));
2299 JumpIfSmi(probe, not_found);
2300 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2301 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
2302 j(parity_even, not_found); // Bail out if NaN is involved.
2303 j(not_equal, not_found); // The cache did not contain this value.
2304 jmp(&load_result_from_cache);
2307 SmiToInteger32(scratch, object);
2308 andp(scratch, mask);
2309 // Each entry in the string cache consists of two pointer-sized fields,
2310 // but the times_twice_pointer_size (multiplication by 16) scale factor
2311 // is not supported by the addressing modes on x64.
2312 // So we have to premultiply the entry index before the lookup.
2313 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2315 // Check if the entry is the smi we are looking for.
2317 FieldOperand(number_string_cache,
2320 FixedArray::kHeaderSize));
2321 j(not_equal, not_found);
2323 // Get the result from the cache.
2324 bind(&load_result_from_cache);
2326 FieldOperand(number_string_cache,
2329 FixedArray::kHeaderSize + kPointerSize));
2330 IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
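// Illustrative example of the lookup above (hypothetical numbers): for a smi
// key the hash is simply the smi value; for a heap number it is the xor of the
// upper and lower 32 bits of the double. With a cache of 64 entries the mask
// is 63, so key 0x2A probes entry 0x2A & 63 = 42; slot 2*42 holds the number
// and slot 2*42 + 1 holds the cached string.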
2334 void MacroAssembler::JumpIfNotString(Register object,
2335 Register object_map,
2337 Label::Distance near_jump) {
2338 Condition is_smi = CheckSmi(object);
2339 j(is_smi, not_string, near_jump);
2340 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2341 j(above_equal, not_string, near_jump);
2345 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
2346 Register first_object, Register second_object, Register scratch1,
2347 Register scratch2, Label* on_fail, Label::Distance near_jump) {
2348 // Check that both objects are not smis.
2349 Condition either_smi = CheckEitherSmi(first_object, second_object);
2350 j(either_smi, on_fail, near_jump);
2352 // Load instance type for both strings.
2353 movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2354 movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2355 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2356 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2358 // Check that both are flat one-byte strings.
2359 DCHECK(kNotStringTag != 0);
2360 const int kFlatOneByteStringMask =
2361 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2362 const int kFlatOneByteStringTag =
2363 kStringTag | kOneByteStringTag | kSeqStringTag;
2365 andl(scratch1, Immediate(kFlatOneByteStringMask));
2366 andl(scratch2, Immediate(kFlatOneByteStringMask));
2367 // Interleave the bits to check both scratch1 and scratch2 in one test.
2368 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2369 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2371 Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2372 j(not_equal, on_fail, near_jump);
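// Sketch of the combined check: after masking, scratch1 holds the relevant
// type bits of the first string and scratch2 those of the second. The lea
// above computes scratch1 + scratch2 * 8; because the mask and the mask
// shifted left by 3 do not overlap (see the DCHECK), the two values occupy
// disjoint bit ranges and a single compare against
// kFlatOneByteStringTag + (kFlatOneByteStringTag << 3) tests both at once.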
2376 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
2377 Register instance_type, Register scratch, Label* failure,
2378 Label::Distance near_jump) {
2379 if (!scratch.is(instance_type)) {
2380 movl(scratch, instance_type);
2383 const int kFlatOneByteStringMask =
2384 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2386 andl(scratch, Immediate(kFlatOneByteStringMask));
2387 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2388 j(not_equal, failure, near_jump);
2392 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2393 Register first_object_instance_type, Register second_object_instance_type,
2394 Register scratch1, Register scratch2, Label* on_fail,
2395 Label::Distance near_jump) {
2396 // Load instance type for both strings.
2397 movp(scratch1, first_object_instance_type);
2398 movp(scratch2, second_object_instance_type);
2400 // Check that both are flat one-byte strings.
2401 DCHECK(kNotStringTag != 0);
2402 const int kFlatOneByteStringMask =
2403 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2404 const int kFlatOneByteStringTag =
2405 kStringTag | kOneByteStringTag | kSeqStringTag;
2407 andl(scratch1, Immediate(kFlatOneByteStringMask));
2408 andl(scratch2, Immediate(kFlatOneByteStringMask));
2409 // Interleave the bits to check both scratch1 and scratch2 in one test.
2410 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2411 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2413 Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2414 j(not_equal, on_fail, near_jump);
2419 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2420 T operand_or_register,
2421 Label* not_unique_name,
2422 Label::Distance distance) {
2423 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2425 masm->testb(operand_or_register,
2426 Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2427 masm->j(zero, &succeed, Label::kNear);
2428 masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2429 masm->j(not_equal, not_unique_name, distance);
2431 masm->bind(&succeed);
2435 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
2436 Label* not_unique_name,
2437 Label::Distance distance) {
2438 JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2442 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2443 Label* not_unique_name,
2444 Label::Distance distance) {
2445 JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2449 void MacroAssembler::Move(Register dst, Register src) {
2456 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2457 AllowDeferredHandleDereference smi_check;
2458 if (source->IsSmi()) {
2459 Move(dst, Smi::cast(*source));
2461 MoveHeapObject(dst, source);
2466 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2467 AllowDeferredHandleDereference smi_check;
2468 if (source->IsSmi()) {
2469 Move(dst, Smi::cast(*source));
2471 MoveHeapObject(kScratchRegister, source);
2472 movp(dst, kScratchRegister);
2477 void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
2481 unsigned pop = base::bits::CountPopulation32(src);
2486 movl(kScratchRegister, Immediate(src));
2487 movq(dst, kScratchRegister);
2493 void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
2497 unsigned nlz = base::bits::CountLeadingZeros64(src);
2498 unsigned ntz = base::bits::CountTrailingZeros64(src);
2499 unsigned pop = base::bits::CountPopulation64(src);
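// The bit counts are used to pick a cheap way to materialize the constant:
// when pop + ntz == 64 or pop + nlz == 64 the set bits form one contiguous
// run touching an end of the word, so the value can presumably be built from
// an all-ones pattern and a single shift in the elided branches below;
// otherwise the constant is moved through kScratchRegister.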
2503 } else if (pop + ntz == 64) {
2506 } else if (pop + nlz == 64) {
2510 uint32_t lower = static_cast<uint32_t>(src);
2511 uint32_t upper = static_cast<uint32_t>(src >> 32);
2515 movq(kScratchRegister, src);
2516 movq(dst, kScratchRegister);
2523 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2524 AllowDeferredHandleDereference smi_check;
2525 if (source->IsSmi()) {
2526 Cmp(dst, Smi::cast(*source));
2528 MoveHeapObject(kScratchRegister, source);
2529 cmpp(dst, kScratchRegister);
2534 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2535 AllowDeferredHandleDereference smi_check;
2536 if (source->IsSmi()) {
2537 Cmp(dst, Smi::cast(*source));
2539 MoveHeapObject(kScratchRegister, source);
2540 cmpp(dst, kScratchRegister);
2545 void MacroAssembler::Push(Handle<Object> source) {
2546 AllowDeferredHandleDereference smi_check;
2547 if (source->IsSmi()) {
2548 Push(Smi::cast(*source));
2550 MoveHeapObject(kScratchRegister, source);
2551 Push(kScratchRegister);
2556 void MacroAssembler::MoveHeapObject(Register result,
2557 Handle<Object> object) {
2558 AllowDeferredHandleDereference using_raw_address;
2559 DCHECK(object->IsHeapObject());
2560 if (isolate()->heap()->InNewSpace(*object)) {
2561 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2562 Move(result, cell, RelocInfo::CELL);
2563 movp(result, Operand(result, 0));
2565 Move(result, object, RelocInfo::EMBEDDED_OBJECT);
2570 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
2572 AllowDeferredHandleDereference embedding_raw_address;
2573 load_rax(cell.location(), RelocInfo::CELL);
2575 Move(dst, cell, RelocInfo::CELL);
2576 movp(dst, Operand(dst, 0));
2581 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2583 Move(scratch, cell, RelocInfo::EMBEDDED_OBJECT);
2584 cmpp(value, FieldOperand(scratch, WeakCell::kValueOffset));
2588 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2589 Move(value, cell, RelocInfo::EMBEDDED_OBJECT);
2590 movp(value, FieldOperand(value, WeakCell::kValueOffset));
2594 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2596 GetWeakValue(value, cell);
2597 JumpIfSmi(value, miss);
2601 void MacroAssembler::Drop(int stack_elements) {
2602 if (stack_elements > 0) {
2603 addp(rsp, Immediate(stack_elements * kPointerSize));
2608 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
2610 DCHECK(stack_elements > 0);
2611 if (kPointerSize == kInt64Size && stack_elements == 1) {
2612 popq(MemOperand(rsp, 0));
2616 PopReturnAddressTo(scratch);
2617 Drop(stack_elements);
2618 PushReturnAddressFrom(scratch);
2622 void MacroAssembler::Push(Register src) {
2623 if (kPointerSize == kInt64Size) {
2626 // x32 uses 64-bit push for rbp in the prologue.
2627 DCHECK(src.code() != rbp.code());
2628 leal(rsp, Operand(rsp, -4));
2629 movp(Operand(rsp, 0), src);
2634 void MacroAssembler::Push(const Operand& src) {
2635 if (kPointerSize == kInt64Size) {
2638 movp(kScratchRegister, src);
2639 leal(rsp, Operand(rsp, -4));
2640 movp(Operand(rsp, 0), kScratchRegister);
2645 void MacroAssembler::PushQuad(const Operand& src) {
2646 if (kPointerSize == kInt64Size) {
2649 movp(kScratchRegister, src);
2650 pushq(kScratchRegister);
2655 void MacroAssembler::Push(Immediate value) {
2656 if (kPointerSize == kInt64Size) {
2659 leal(rsp, Operand(rsp, -4));
2660 movp(Operand(rsp, 0), value);
2665 void MacroAssembler::PushImm32(int32_t imm32) {
2666 if (kPointerSize == kInt64Size) {
2669 leal(rsp, Operand(rsp, -4));
2670 movp(Operand(rsp, 0), Immediate(imm32));
2675 void MacroAssembler::Pop(Register dst) {
2676 if (kPointerSize == kInt64Size) {
2679 // x32 uses 64-bit pop for rbp in the epilogue.
2680 DCHECK(dst.code() != rbp.code());
2681 movp(dst, Operand(rsp, 0));
2682 leal(rsp, Operand(rsp, 4));
2687 void MacroAssembler::Pop(const Operand& dst) {
2688 if (kPointerSize == kInt64Size) {
2691 Register scratch = dst.AddressUsesRegister(kScratchRegister)
2692 ? kRootRegister : kScratchRegister;
2693 movp(scratch, Operand(rsp, 0));
2695 leal(rsp, Operand(rsp, 4));
2696 if (scratch.is(kRootRegister)) {
2697 // Restore kRootRegister.
2698 InitializeRootRegister();
2704 void MacroAssembler::PopQuad(const Operand& dst) {
2705 if (kPointerSize == kInt64Size) {
2708 popq(kScratchRegister);
2709 movp(dst, kScratchRegister);
2714 void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
2717 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
2718 offset <= SharedFunctionInfo::kSize &&
2719 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
2720 if (kPointerSize == kInt64Size) {
2721 movsxlq(dst, FieldOperand(base, offset));
2723 movp(dst, FieldOperand(base, offset));
2724 SmiToInteger32(dst, dst);
2729 void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
2732 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
2733 offset <= SharedFunctionInfo::kSize &&
2734 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
2735 if (kPointerSize == kInt32Size) {
2736 // On x32, this field is represented by a Smi.
2739 int byte_offset = bits / kBitsPerByte;
2740 int bit_in_byte = bits & (kBitsPerByte - 1);
2741 testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
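// Hypothetical example (on x64, where no Smi adjustment applies): for
// bits == 10 this reads the field's second byte (byte_offset == 1) and tests
// bit 2 within it, which is equivalent to testing bit 10 of the field value.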
2745 void MacroAssembler::Jump(ExternalReference ext) {
2746 LoadAddress(kScratchRegister, ext);
2747 jmp(kScratchRegister);
2751 void MacroAssembler::Jump(const Operand& op) {
2752 if (kPointerSize == kInt64Size) {
2755 movp(kScratchRegister, op);
2756 jmp(kScratchRegister);
2761 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
2762 Move(kScratchRegister, destination, rmode);
2763 jmp(kScratchRegister);
2767 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
2768 // TODO(X64): Inline this
2769 jmp(code_object, rmode);
2773 int MacroAssembler::CallSize(ExternalReference ext) {
2774 // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
2775 return LoadAddressSize(ext) +
2776 Assembler::kCallScratchRegisterInstructionLength;
2780 void MacroAssembler::Call(ExternalReference ext) {
2782 int end_position = pc_offset() + CallSize(ext);
2784 LoadAddress(kScratchRegister, ext);
2785 call(kScratchRegister);
2787 CHECK_EQ(end_position, pc_offset());
2792 void MacroAssembler::Call(const Operand& op) {
2793 if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
2796 movp(kScratchRegister, op);
2797 call(kScratchRegister);
2802 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
2804 int end_position = pc_offset() + CallSize(destination);
2806 Move(kScratchRegister, destination, rmode);
2807 call(kScratchRegister);
2809 CHECK_EQ(pc_offset(), end_position);
2814 void MacroAssembler::Call(Handle<Code> code_object,
2815 RelocInfo::Mode rmode,
2816 TypeFeedbackId ast_id) {
2818 int end_position = pc_offset() + CallSize(code_object);
2820 DCHECK(RelocInfo::IsCodeTarget(rmode) ||
2821 rmode == RelocInfo::CODE_AGE_SEQUENCE);
2822 call(code_object, rmode, ast_id);
2824 CHECK_EQ(end_position, pc_offset());
2829 void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
2835 if (CpuFeatures::IsSupported(SSE4_1)) {
2836 CpuFeatureScope sse_scope(this, SSE4_1);
2837 pextrd(dst, src, imm8);
2841 shrq(dst, Immediate(32));
2845 void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
2846 if (CpuFeatures::IsSupported(SSE4_1)) {
2847 CpuFeatureScope sse_scope(this, SSE4_1);
2848 pinsrd(dst, src, imm8);
2853 punpckldq(dst, xmm0);
2857 punpckldq(xmm0, dst);
2863 void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
2864 DCHECK(imm8 == 0 || imm8 == 1);
2865 if (CpuFeatures::IsSupported(SSE4_1)) {
2866 CpuFeatureScope sse_scope(this, SSE4_1);
2867 pinsrd(dst, src, imm8);
2872 punpckldq(dst, xmm0);
2876 punpckldq(xmm0, dst);
2882 void MacroAssembler::Lzcntl(Register dst, Register src) {
2883 if (CpuFeatures::IsSupported(LZCNT)) {
2884 CpuFeatureScope scope(this, LZCNT);
2890 j(not_zero, &not_zero_src, Label::kNear);
2891 Set(dst, 63); // 63^31 == 32
2892 bind(&not_zero_src);
2893 xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
2897 void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
2898 if (CpuFeatures::IsSupported(LZCNT)) {
2899 CpuFeatureScope scope(this, LZCNT);
2905 j(not_zero, &not_zero_src, Label::kNear);
2906 Set(dst, 63); // 63^31 == 32
2907 bind(&not_zero_src);
2908 xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
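// Without LZCNT the fallback (elided above) presumably uses bsr, which yields
// the index of the highest set bit; xor-ing that index with 31 turns it into
// a leading-zero count (e.g. a highest set bit at index 16 gives
// 31 ^ 16 == 15 leading zeros), and the zero-input path loads 63 so the same
// xor yields 32.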
2912 void MacroAssembler::Pushad() {
2917 // Not pushing rsp or rbp.
2922 // r10 is kScratchRegister.
2925 // r13 is kRootRegister.
2928 STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
2929 // Use lea for symmetry with Popad.
2931 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2932 leap(rsp, Operand(rsp, -sp_delta));
2936 void MacroAssembler::Popad() {
2937 // Popad must not change the flags, so use lea instead of addq.
2939 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2940 leap(rsp, Operand(rsp, sp_delta));
2956 void MacroAssembler::Dropad() {
2957 addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
2961 // Order general registers are pushed by Pushad:
2962 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
2964 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
2984 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
2985 const Immediate& imm) {
2986 movp(SafepointRegisterSlot(dst), imm);
2990 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2991 movp(SafepointRegisterSlot(dst), src);
2995 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2996 movp(dst, SafepointRegisterSlot(src));
3000 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
3001 return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
3005 void MacroAssembler::PushStackHandler() {
3006 // Adjust this code if not the case.
3007 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3008 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3010 // Link the current handler as the next handler.
3011 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3012 Push(ExternalOperand(handler_address));
3014 // Set this new handler as the current one.
3015 movp(ExternalOperand(handler_address), rsp);
3019 void MacroAssembler::PopStackHandler() {
3020 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3021 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3022 Pop(ExternalOperand(handler_address));
3023 addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
3027 void MacroAssembler::Ret() {
3032 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
3033 if (is_uint16(bytes_dropped)) {
3036 PopReturnAddressTo(scratch);
3037 addp(rsp, Immediate(bytes_dropped));
3038 PushReturnAddressFrom(scratch);
3044 void MacroAssembler::FCmp() {
3050 void MacroAssembler::CmpObjectType(Register heap_object,
3053 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3054 CmpInstanceType(map, type);
3058 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3059 cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
3060 Immediate(static_cast<int8_t>(type)));
3064 void MacroAssembler::CheckFastElements(Register map,
3066 Label::Distance distance) {
3067 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3068 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3069 STATIC_ASSERT(FAST_ELEMENTS == 2);
3070 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3071 cmpb(FieldOperand(map, Map::kBitField2Offset),
3072 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3073 j(above, fail, distance);
3077 void MacroAssembler::CheckFastObjectElements(Register map,
3079 Label::Distance distance) {
3080 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3081 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3082 STATIC_ASSERT(FAST_ELEMENTS == 2);
3083 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3084 cmpb(FieldOperand(map, Map::kBitField2Offset),
3085 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3086 j(below_equal, fail, distance);
3087 cmpb(FieldOperand(map, Map::kBitField2Offset),
3088 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3089 j(above, fail, distance);
3093 void MacroAssembler::CheckFastSmiElements(Register map,
3095 Label::Distance distance) {
3096 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3097 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3098 cmpb(FieldOperand(map, Map::kBitField2Offset),
3099 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3100 j(above, fail, distance);
3104 void MacroAssembler::StoreNumberToDoubleElements(
3105 Register maybe_number,
3108 XMMRegister xmm_scratch,
3110 int elements_offset) {
3111 Label smi_value, done;
3113 JumpIfSmi(maybe_number, &smi_value, Label::kNear);
3115 CheckMap(maybe_number,
3116 isolate()->factory()->heap_number_map(),
3120 // Double value, turn potential sNaN into qNaN.
3121 Move(xmm_scratch, 1.0);
3122 mulsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
3123 jmp(&done, Label::kNear);
3126 // Value is a smi. Convert to a double and store.
3127 // Preserve original value.
3128 SmiToInteger32(kScratchRegister, maybe_number);
3129 Cvtlsi2sd(xmm_scratch, kScratchRegister);
3131 movsd(FieldOperand(elements, index, times_8,
3132 FixedDoubleArray::kHeaderSize - elements_offset),
3137 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3138 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3142 void MacroAssembler::CheckMap(Register obj,
3145 SmiCheckType smi_check_type) {
3146 if (smi_check_type == DO_SMI_CHECK) {
3147 JumpIfSmi(obj, fail);
3150 CompareMap(obj, map);
3155 void MacroAssembler::ClampUint8(Register reg) {
3157 testl(reg, Immediate(0xFFFFFF00));
3158 j(zero, &done, Label::kNear);
3159 setcc(negative, reg); // 1 if negative, 0 if positive.
3160 decb(reg); // 0 if negative, 255 if positive.
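// Worked example: for a value of 300 the bits above 0xFF are set and the
// masked value is non-negative, so setcc writes 0 and the decrement wraps to
// 255; for -5 the masked value is negative, setcc writes 1 and the decrement
// yields 0. Values already in [0, 255] take the early exit above.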
3165 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3166 XMMRegister temp_xmm_reg,
3167 Register result_reg) {
3170 xorps(temp_xmm_reg, temp_xmm_reg);
3171 cvtsd2si(result_reg, input_reg);
3172 testl(result_reg, Immediate(0xFFFFFF00));
3173 j(zero, &done, Label::kNear);
3174 cmpl(result_reg, Immediate(1));
3175 j(overflow, &conv_failure, Label::kNear);
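// cvtsd2si reports an out-of-range or NaN input by producing 0x80000000
// (INT32_MIN); that is the only value for which the compare-with-1 above sets
// the overflow flag, so conv_failure is reached exactly when the fast
// conversion failed.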
3176 movl(result_reg, Immediate(0));
3177 setcc(sign, result_reg);
3178 subl(result_reg, Immediate(1));
3179 andl(result_reg, Immediate(255));
3180 jmp(&done, Label::kNear);
3181 bind(&conv_failure);
3183 ucomisd(input_reg, temp_xmm_reg);
3184 j(below, &done, Label::kNear);
3185 Set(result_reg, 255);
3190 void MacroAssembler::LoadUint32(XMMRegister dst,
3192 if (FLAG_debug_code) {
3193 cmpq(src, Immediate(0xffffffff));
3194 Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3196 cvtqsi2sd(dst, src);
3200 void MacroAssembler::SlowTruncateToI(Register result_reg,
3203 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
3204 call(stub.GetCode(), RelocInfo::CODE_TARGET);
3208 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3209 Register input_reg) {
3211 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3212 cvttsd2siq(result_reg, xmm0);
3213 cmpq(result_reg, Immediate(1));
3214 j(no_overflow, &done, Label::kNear);
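// cvttsd2siq signals failure by producing 0x8000000000000000 (INT64_MIN), the
// only value for which subtracting 1 overflows; no_overflow therefore means
// the truncation succeeded and the slow path below is skipped.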
3217 if (input_reg.is(result_reg)) {
3218 subp(rsp, Immediate(kDoubleSize));
3219 movsd(MemOperand(rsp, 0), xmm0);
3220 SlowTruncateToI(result_reg, rsp, 0);
3221 addp(rsp, Immediate(kDoubleSize));
3223 SlowTruncateToI(result_reg, input_reg);
3227 // Keep our invariant that the upper 32 bits are zero.
3228 movl(result_reg, result_reg);
3232 void MacroAssembler::TruncateDoubleToI(Register result_reg,
3233 XMMRegister input_reg) {
3235 cvttsd2siq(result_reg, input_reg);
3236 cmpq(result_reg, Immediate(1));
3237 j(no_overflow, &done, Label::kNear);
3239 subp(rsp, Immediate(kDoubleSize));
3240 movsd(MemOperand(rsp, 0), input_reg);
3241 SlowTruncateToI(result_reg, rsp, 0);
3242 addp(rsp, Immediate(kDoubleSize));
3245 // Keep our invariant that the upper 32 bits are zero.
3246 movl(result_reg, result_reg);
3250 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
3251 XMMRegister scratch,
3252 MinusZeroMode minus_zero_mode,
3253 Label* lost_precision, Label* is_nan,
3254 Label* minus_zero, Label::Distance dst) {
3255 cvttsd2si(result_reg, input_reg);
3256 Cvtlsi2sd(xmm0, result_reg);
3257 ucomisd(xmm0, input_reg);
3258 j(not_equal, lost_precision, dst);
3259 j(parity_even, is_nan, dst); // NaN.
3260 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3262 // The integer converted back is equal to the original. We
3263 // only have to test if we got -0 as an input.
3264 testl(result_reg, result_reg);
3265 j(not_zero, &done, Label::kNear);
3266 movmskpd(result_reg, input_reg);
3267 // Bit 0 contains the sign of the double in input_reg.
3268 // If input was positive, we are ok and return 0, otherwise
3269 // jump to minus_zero.
3270 andl(result_reg, Immediate(1));
3271 j(not_zero, minus_zero, dst);
3277 void MacroAssembler::LoadInstanceDescriptors(Register map,
3278 Register descriptors) {
3279 movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3283 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3284 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3285 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3289 void MacroAssembler::EnumLength(Register dst, Register map) {
3290 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3291 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3292 andl(dst, Immediate(Map::EnumLengthBits::kMask));
3293 Integer32ToSmi(dst, dst);
3297 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3299 AccessorComponent accessor) {
3300 movp(dst, FieldOperand(holder, HeapObject::kMapOffset));
3301 LoadInstanceDescriptors(dst, dst);
3302 movp(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3303 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
3304 : AccessorPair::kSetterOffset;
3305 movp(dst, FieldOperand(dst, offset));
3309 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3310 Register scratch2, Handle<WeakCell> cell,
3311 Handle<Code> success,
3312 SmiCheckType smi_check_type) {
3314 if (smi_check_type == DO_SMI_CHECK) {
3315 JumpIfSmi(obj, &fail);
3317 movq(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
3318 CmpWeakValue(scratch1, cell, scratch2);
3319 j(equal, success, RelocInfo::CODE_TARGET);
3324 void MacroAssembler::AssertNumber(Register object) {
3325 if (emit_debug_code()) {
3327 Condition is_smi = CheckSmi(object);
3328 j(is_smi, &ok, Label::kNear);
3329 Cmp(FieldOperand(object, HeapObject::kMapOffset),
3330 isolate()->factory()->heap_number_map());
3331 Check(equal, kOperandIsNotANumber);
3337 void MacroAssembler::AssertNotSmi(Register object) {
3338 if (emit_debug_code()) {
3339 Condition is_smi = CheckSmi(object);
3340 Check(NegateCondition(is_smi), kOperandIsASmi);
3345 void MacroAssembler::AssertSmi(Register object) {
3346 if (emit_debug_code()) {
3347 Condition is_smi = CheckSmi(object);
3348 Check(is_smi, kOperandIsNotASmi);
3353 void MacroAssembler::AssertSmi(const Operand& object) {
3354 if (emit_debug_code()) {
3355 Condition is_smi = CheckSmi(object);
3356 Check(is_smi, kOperandIsNotASmi);
3361 void MacroAssembler::AssertZeroExtended(Register int32_register) {
3362 if (emit_debug_code()) {
3363 DCHECK(!int32_register.is(kScratchRegister));
3364 movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3365 cmpq(kScratchRegister, int32_register);
3366 Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3371 void MacroAssembler::AssertString(Register object) {
3372 if (emit_debug_code()) {
3373 testb(object, Immediate(kSmiTagMask));
3374 Check(not_equal, kOperandIsASmiAndNotAString);
3376 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3377 CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3379 Check(below, kOperandIsNotAString);
3384 void MacroAssembler::AssertName(Register object) {
3385 if (emit_debug_code()) {
3386 testb(object, Immediate(kSmiTagMask));
3387 Check(not_equal, kOperandIsASmiAndNotAName);
3389 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3390 CmpInstanceType(object, LAST_NAME_TYPE);
3392 Check(below_equal, kOperandIsNotAName);
3397 void MacroAssembler::AssertFunction(Register object) {
3398 if (emit_debug_code()) {
3399 testb(object, Immediate(kSmiTagMask));
3400 Check(not_equal, kOperandIsASmiAndNotAFunction);
3402 CmpObjectType(object, JS_FUNCTION_TYPE, object);
3404 Check(not_equal, kOperandIsNotAFunction);
3409 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3410 if (emit_debug_code()) {
3411 Label done_checking;
3412 AssertNotSmi(object);
3413 Cmp(object, isolate()->factory()->undefined_value());
3414 j(equal, &done_checking);
3415 Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3416 Assert(equal, kExpectedUndefinedOrCell);
3417 bind(&done_checking);
3422 void MacroAssembler::AssertRootValue(Register src,
3423 Heap::RootListIndex root_value_index,
3424 BailoutReason reason) {
3425 if (emit_debug_code()) {
3426 DCHECK(!src.is(kScratchRegister));
3427 LoadRoot(kScratchRegister, root_value_index);
3428 cmpp(src, kScratchRegister);
3429 Check(equal, reason);
3435 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3437 Register instance_type) {
3438 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3439 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3440 STATIC_ASSERT(kNotStringTag != 0);
3441 testb(instance_type, Immediate(kIsNotStringMask));
3446 Condition MacroAssembler::IsObjectNameType(Register heap_object,
3448 Register instance_type) {
3449 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3450 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3451 cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
3456 void MacroAssembler::GetMapConstructor(Register result, Register map,
3459 movp(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
3461 JumpIfSmi(result, &done, Label::kNear);
3462 CmpObjectType(result, MAP_TYPE, temp);
3463 j(not_equal, &done, Label::kNear);
3464 movp(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
3470 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
3472 // Get the prototype or initial map from the function.
3474 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3476 // If the prototype or initial map is the hole, don't return it and
3477 // simply miss the cache instead. This will allow us to allocate a
3478 // prototype object on-demand in the runtime system.
3479 CompareRoot(result, Heap::kTheHoleValueRootIndex);
3482 // If the function does not have an initial map, we're done.
3484 CmpObjectType(result, MAP_TYPE, kScratchRegister);
3485 j(not_equal, &done, Label::kNear);
3487 // Get the prototype from the initial map.
3488 movp(result, FieldOperand(result, Map::kPrototypeOffset));
3495 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3496 if (FLAG_native_code_counters && counter->Enabled()) {
3497 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3498 movl(counter_operand, Immediate(value));
3503 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3505 if (FLAG_native_code_counters && counter->Enabled()) {
3506 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3508 incl(counter_operand);
3510 addl(counter_operand, Immediate(value));
3516 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3518 if (FLAG_native_code_counters && counter->Enabled()) {
3519 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3521 decl(counter_operand);
3523 subl(counter_operand, Immediate(value));
3529 void MacroAssembler::DebugBreak() {
3530 Set(rax, 0); // No arguments.
3532 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
3533 CEntryStub ces(isolate(), 1);
3534 DCHECK(AllowThisStubCall(&ces));
3535 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
3539 void MacroAssembler::InvokeCode(Register code,
3540 const ParameterCount& expected,
3541 const ParameterCount& actual,
3543 const CallWrapper& call_wrapper) {
3544 // You can't call a function without a valid frame.
3545 DCHECK(flag == JUMP_FUNCTION || has_frame());
3548 bool definitely_mismatches = false;
3549 InvokePrologue(expected,
3551 Handle<Code>::null(),
3554 &definitely_mismatches,
3558 if (!definitely_mismatches) {
3559 if (flag == CALL_FUNCTION) {
3560 call_wrapper.BeforeCall(CallSize(code));
3562 call_wrapper.AfterCall();
3564 DCHECK(flag == JUMP_FUNCTION);
3572 void MacroAssembler::InvokeFunction(Register function,
3573 const ParameterCount& actual,
3575 const CallWrapper& call_wrapper) {
3576 // You can't call a function without a valid frame.
3577 DCHECK(flag == JUMP_FUNCTION || has_frame());
3579 DCHECK(function.is(rdi));
3580 movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3581 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3582 LoadSharedFunctionInfoSpecialField(rbx, rdx,
3583 SharedFunctionInfo::kFormalParameterCountOffset);
3584 // Advances rdx to the end of the Code object header, to the start of
3585 // the executable code.
3586 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3588 ParameterCount expected(rbx);
3589 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3593 void MacroAssembler::InvokeFunction(Register function,
3594 const ParameterCount& expected,
3595 const ParameterCount& actual,
3597 const CallWrapper& call_wrapper) {
3598 // You can't call a function without a valid frame.
3599 DCHECK(flag == JUMP_FUNCTION || has_frame());
3601 DCHECK(function.is(rdi));
3602 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3603 // Advances rdx to the end of the Code object header, to the start of
3604 // the executable code.
3605 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3607 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3611 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3612 const ParameterCount& expected,
3613 const ParameterCount& actual,
3615 const CallWrapper& call_wrapper) {
3616 Move(rdi, function);
3617 InvokeFunction(rdi, expected, actual, flag, call_wrapper);
3621 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3622 const ParameterCount& actual,
3623 Handle<Code> code_constant,
3624 Register code_register,
3626 bool* definitely_mismatches,
3628 Label::Distance near_jump,
3629 const CallWrapper& call_wrapper) {
3630 bool definitely_matches = false;
3631 *definitely_mismatches = false;
3633 if (expected.is_immediate()) {
3634 DCHECK(actual.is_immediate());
3635 if (expected.immediate() == actual.immediate()) {
3636 definitely_matches = true;
3638 Set(rax, actual.immediate());
3639 if (expected.immediate() ==
3640 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3641 // Don't worry about adapting arguments for built-ins that
3642 // don't want that done. Skip adaptation code by making it look
3643 // like we have a match between expected and actual number of arguments.
3645 definitely_matches = true;
3647 *definitely_mismatches = true;
3648 Set(rbx, expected.immediate());
3652 if (actual.is_immediate()) {
3653 // Expected is in register, actual is immediate. This is the
3654 // case when we invoke function values without going through the
3656 cmpp(expected.reg(), Immediate(actual.immediate()));
3657 j(equal, &invoke, Label::kNear);
3658 DCHECK(expected.reg().is(rbx));
3659 Set(rax, actual.immediate());
3660 } else if (!expected.reg().is(actual.reg())) {
3661 // Both expected and actual are in (different) registers. This
3662 // is the case when we invoke functions using call and apply.
3663 cmpp(expected.reg(), actual.reg());
3664 j(equal, &invoke, Label::kNear);
3665 DCHECK(actual.reg().is(rax));
3666 DCHECK(expected.reg().is(rbx));
3670 if (!definitely_matches) {
3671 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
3672 if (!code_constant.is_null()) {
3673 Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
3674 addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3675 } else if (!code_register.is(rdx)) {
3676 movp(rdx, code_register);
3679 if (flag == CALL_FUNCTION) {
3680 call_wrapper.BeforeCall(CallSize(adaptor));
3681 Call(adaptor, RelocInfo::CODE_TARGET);
3682 call_wrapper.AfterCall();
3683 if (!*definitely_mismatches) {
3684 jmp(done, near_jump);
3687 Jump(adaptor, RelocInfo::CODE_TARGET);
3694 void MacroAssembler::StubPrologue() {
3695 pushq(rbp); // Caller's frame pointer.
3697 Push(rsi); // Callee's context.
3698 Push(Smi::FromInt(StackFrame::STUB));
3702 void MacroAssembler::Prologue(bool code_pre_aging) {
3703 PredictableCodeSizeScope predictible_code_size_scope(this,
3704 kNoCodeAgeSequenceLength);
3705 if (code_pre_aging) {
3706 // Pre-age the code.
3707 Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
3708 RelocInfo::CODE_AGE_SEQUENCE);
3709 Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
3711 pushq(rbp); // Caller's frame pointer.
3713 Push(rsi); // Callee's context.
3714 Push(rdi); // Callee's JS function.
3719 void MacroAssembler::EnterFrame(StackFrame::Type type,
3720 bool load_constant_pool_pointer_reg) {
3721 // Out-of-line constant pool not implemented on x64.
3726 void MacroAssembler::EnterFrame(StackFrame::Type type) {
3729 Push(rsi); // Context.
3730 Push(Smi::FromInt(type));
3731 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3732 Push(kScratchRegister);
3733 if (emit_debug_code()) {
3734 Move(kScratchRegister,
3735 isolate()->factory()->undefined_value(),
3736 RelocInfo::EMBEDDED_OBJECT);
3737 cmpp(Operand(rsp, 0), kScratchRegister);
3738 Check(not_equal, kCodeObjectNotProperlyPatched);
3743 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
3744 if (emit_debug_code()) {
3745 Move(kScratchRegister, Smi::FromInt(type));
3746 cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
3747 Check(equal, kStackFrameTypesMustMatch);
3754 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
3755 // Set up the frame structure on the stack.
3756 // All constants are relative to the frame pointer of the exit frame.
3757 DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
3758 kFPOnStackSize + kPCOnStackSize);
3759 DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
3760 DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
3764 // Reserve room for entry stack pointer and push the code object.
3765 DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
3766 Push(Immediate(0)); // Saved entry sp, patched before call.
3767 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3768 Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
3770 // Save the frame pointer and the context in top.
3772 movp(r14, rax); // Backup rax in callee-save register.
3775 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
3776 Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
3777 Store(ExternalReference(Isolate::kCFunctionAddress, isolate()), rbx);
3781 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
3782 bool save_doubles) {
3784 const int kShadowSpace = 4;
3785 arg_stack_space += kShadowSpace;
3787 // Optionally save all XMM registers.
3789 int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
3790 arg_stack_space * kRegisterSize;
3791 subp(rsp, Immediate(space));
3792 int offset = -2 * kPointerSize;
3793 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
3794 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3795 movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
3797 } else if (arg_stack_space > 0) {
3798 subp(rsp, Immediate(arg_stack_space * kRegisterSize));
3801 // Get the required frame alignment for the OS.
3802 const int kFrameAlignment = base::OS::ActivationFrameAlignment();
3803 if (kFrameAlignment > 0) {
3804 DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
3805 DCHECK(is_int8(kFrameAlignment));
3806 andp(rsp, Immediate(-kFrameAlignment));
3809 // Patch the saved entry sp.
3810 movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
3814 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
3815 EnterExitFramePrologue(true);
3817 // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
3818 // so it must be retained across the C-call.
3819 int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
3820 leap(r15, Operand(rbp, r14, times_pointer_size, offset));
3822 EnterExitFrameEpilogue(arg_stack_space, save_doubles);
3826 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
3827 EnterExitFramePrologue(false);
3828 EnterExitFrameEpilogue(arg_stack_space, false);
3832 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
3836 int offset = -2 * kPointerSize;
3837 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
3838 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3839 movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
3842 // Get the return address from the stack and restore the frame pointer.
3843 movp(rcx, Operand(rbp, kFPOnStackSize));
3844 movp(rbp, Operand(rbp, 0 * kPointerSize));
3846 // Drop everything up to and including the arguments and the receiver
3847 // from the caller stack.
3848 leap(rsp, Operand(r15, 1 * kPointerSize));
3850 PushReturnAddressFrom(rcx);
3852 LeaveExitFrameEpilogue(true);
3856 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
3860 LeaveExitFrameEpilogue(restore_context);
3864 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
3865 // Restore current context from top and clear it in debug mode.
3866 ExternalReference context_address(Isolate::kContextAddress, isolate());
3867 Operand context_operand = ExternalOperand(context_address);
3868 if (restore_context) {
3869 movp(rsi, context_operand);
3872 movp(context_operand, Immediate(0));
3875 // Clear the top frame.
3876 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
3878 Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
3879 movp(c_entry_fp_operand, Immediate(0));
3883 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3886 Label same_contexts;
3888 DCHECK(!holder_reg.is(scratch));
3889 DCHECK(!scratch.is(kScratchRegister));
3890 // Load current lexical context from the stack frame.
3891 movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
3893 // When generating debug code, make sure the lexical context is set.
3894 if (emit_debug_code()) {
3895 cmpp(scratch, Immediate(0));
3896 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
3898 // Load the native context of the current context.
3900 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
3901 movp(scratch, FieldOperand(scratch, offset));
3902 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
3904 // Check the context is a native context.
3905 if (emit_debug_code()) {
3906 Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
3907 isolate()->factory()->native_context_map());
3908 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
3911 // Check if both contexts are the same.
3912 cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3913 j(equal, &same_contexts);
3915 // Compare security tokens.
3916 // Check that the security token in the calling global object is
3917 // compatible with the security token in the receiving global object.
3920 // Check the context is a native context.
3921 if (emit_debug_code()) {
3922 // Preserve original value of holder_reg.
3925 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3926 CompareRoot(holder_reg, Heap::kNullValueRootIndex);
3927 Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
3929 // Read the first word and compare to native_context_map().
3930 movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
3931 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
3932 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
3936 movp(kScratchRegister,
3937 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3939 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
3940 movp(scratch, FieldOperand(scratch, token_offset));
3941 cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
3944 bind(&same_contexts);
3948 // Compute the hash code from the untagged key. This must be kept in sync with
3949 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
3950 // code-stubs-hydrogen.cc
3951 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
3952 // First of all we assign the hash seed to scratch.
3953 LoadRoot(scratch, Heap::kHashSeedRootIndex);
3954 SmiToInteger32(scratch, scratch);
3956 // Xor original key with a seed.
3959 // Compute the hash code from the untagged key. This must be kept in sync
3960 // with ComputeIntegerHash in utils.h.
3962 // hash = ~hash + (hash << 15);
3965 shll(scratch, Immediate(15));
3967 // hash = hash ^ (hash >> 12);
3969 shrl(scratch, Immediate(12));
3971 // hash = hash + (hash << 2);
3972 leal(r0, Operand(r0, r0, times_4, 0));
3973 // hash = hash ^ (hash >> 4);
3975 shrl(scratch, Immediate(4));
3977 // hash = hash * 2057;
3978 imull(r0, r0, Immediate(2057));
3979 // hash = hash ^ (hash >> 16);
3981 shrl(scratch, Immediate(16));
3983 andl(r0, Immediate(0x3fffffff));
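// For reference, a sketch of the scalar computation emitted above (the
// canonical version lives in ComputeIntegerHash in utils.h):
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;
//   hash = hash ^ (hash >> 16);
//   hash = hash & 0x3fffffff;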
3988 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
3997 // elements - holds the slow-case elements of the receiver on entry.
3998 // Unchanged unless 'result' is the same register.
4000 // key - holds the smi key on entry.
4001 // Unchanged unless 'result' is the same register.
4003 // Scratch registers:
4005 // r0 - holds the untagged key on entry and holds the hash once computed.
4007 // r1 - used to hold the capacity mask of the dictionary
4009 // r2 - used for the index into the dictionary.
4011 // result - holds the result on exit if the load succeeded.
4012 // Allowed to be the same as 'key' or 'result'.
4013 // Unchanged on bailout so 'key' or 'result' can be used
4014 // in further computation.
4018 GetNumberHash(r0, r1);
4020 // Compute capacity mask.
4021 SmiToInteger32(r1, FieldOperand(elements,
4022 SeededNumberDictionary::kCapacityOffset));
4025 // Generate an unrolled loop that performs a few probes before giving up.
4026 for (int i = 0; i < kNumberDictionaryProbes; i++) {
4027 // Use r2 for index calculations and keep the hash intact in r0.
4029 // Compute the masked index: (hash + i + i * i) & mask.
4031 addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4035 // Scale the index by multiplying by the entry size.
4036 DCHECK(SeededNumberDictionary::kEntrySize == 3);
4037 leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
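// Probing sketch: the i-th probe adds a precomputed quadratic offset to the
// hash before masking (see the "(hash + i + i * i) & mask" comment above),
// and each dictionary entry occupies kEntrySize == 3 pointers, hence the
// multiply-by-3 so r2 can be used directly with a times_pointer_size operand.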
4039 // Check if the key matches.
4040 cmpp(key, FieldOperand(elements,
4043 SeededNumberDictionary::kElementsStartOffset));
4044 if (i != (kNumberDictionaryProbes - 1)) {
4052 // Check that the value is a field property.
4053 const int kDetailsOffset =
4054 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4056 Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4057 Smi::FromInt(PropertyDetails::TypeField::kMask));
4060 // Get the value at the masked, scaled index.
4061 const int kValueOffset =
4062 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4063 movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
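// Entry layout assumed here: [key, value, details], so the value sits one
// pointer and the details word two pointers past the entry's start relative
// to kElementsStartOffset, which is what kValueOffset and kDetailsOffset
// encode above.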
4067 void MacroAssembler::LoadAllocationTopHelper(Register result,
4069 AllocationFlags flags) {
4070 ExternalReference allocation_top =
4071 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4073 // Just return if allocation top is already known.
4074 if ((flags & RESULT_CONTAINS_TOP) != 0) {
4075 // No use of scratch if allocation top is provided.
4076 DCHECK(!scratch.is_valid());
4078 // Assert that result actually contains top on entry.
4079 Operand top_operand = ExternalOperand(allocation_top);
4080 cmpp(result, top_operand);
4081 Check(equal, kUnexpectedAllocationTop);
4086 // Move address of new object to result. Use scratch register if available,
4087 // and keep address in scratch until call to UpdateAllocationTopHelper.
4088 if (scratch.is_valid()) {
4089 LoadAddress(scratch, allocation_top);
4090 movp(result, Operand(scratch, 0));
4092 Load(result, allocation_top);
4097 void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
4100 AllocationFlags flags) {
4101 if (kPointerSize == kDoubleSize) {
4102 if (FLAG_debug_code) {
4103 testl(result, Immediate(kDoubleAlignmentMask));
4104 Check(zero, kAllocationIsNotDoubleAligned);
4107 // Align the next allocation. Storing the filler map without checking top
4108 // is safe in new-space because the limit of the heap is aligned there.
4109 DCHECK(kPointerSize * 2 == kDoubleSize);
4110 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4111 // Make sure scratch is not clobbered by this function as it might be
4112 // used in UpdateAllocationTopHelper later.
4113 DCHECK(!scratch.is(kScratchRegister));
4115 testl(result, Immediate(kDoubleAlignmentMask));
4116 j(zero, &aligned, Label::kNear);
4117 if ((flags & PRETENURE) != 0) {
4118 ExternalReference allocation_limit =
4119 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4120 cmpp(result, ExternalOperand(allocation_limit));
4121 j(above_equal, gc_required);
4123 LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
4124 movp(Operand(result, 0), kScratchRegister);
4125 addp(result, Immediate(kDoubleSize / 2));
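// When the current top is only pointer-aligned, the code above plants a
// one-pointer filler object at that slot and bumps result by kDoubleSize / 2
// (one pointer) so that the object allocated next starts on a double
// boundary while the heap stays iterable.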
4131 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
4133 AllocationFlags flags) {
4134 if (emit_debug_code()) {
4135 testp(result_end, Immediate(kObjectAlignmentMask));
4136 Check(zero, kUnalignedAllocationInNewSpace);
4139 ExternalReference allocation_top =
4140 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4143 if (scratch.is_valid()) {
4144 // Scratch already contains address of allocation top.
4145 movp(Operand(scratch, 0), result_end);
4147 Store(allocation_top, result_end);
4152 void MacroAssembler::Allocate(int object_size,
4154 Register result_end,
4157 AllocationFlags flags) {
4158 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
4159 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4160 if (!FLAG_inline_new) {
4161 if (emit_debug_code()) {
4162 // Trash the registers to simulate an allocation failure.
4163 movl(result, Immediate(0x7091));
4164 if (result_end.is_valid()) {
4165 movl(result_end, Immediate(0x7191));
4167 if (scratch.is_valid()) {
4168 movl(scratch, Immediate(0x7291));
4174 DCHECK(!result.is(result_end));
4176 // Load address of new object into result.
4177 LoadAllocationTopHelper(result, scratch, flags);
4179 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4180 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4183 // Calculate new top and bail out if new space is exhausted.
4184 ExternalReference allocation_limit =
4185 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4187 Register top_reg = result_end.is_valid() ? result_end : result;
4189 if (!top_reg.is(result)) {
4190 movp(top_reg, result);
4192 addp(top_reg, Immediate(object_size));
4193 j(carry, gc_required);
4194 Operand limit_operand = ExternalOperand(allocation_limit);
4195 cmpp(top_reg, limit_operand);
4196 j(above, gc_required);
4198 // Update allocation top.
4199 UpdateAllocationTopHelper(top_reg, scratch, flags);
4201 bool tag_result = (flags & TAG_OBJECT) != 0;
4202 if (top_reg.is(result)) {
4204 subp(result, Immediate(object_size - kHeapObjectTag));
4206 subp(result, Immediate(object_size));
4208 } else if (tag_result) {
4209 // Tag the result if requested.
4210 DCHECK(kHeapObjectTag == 1);
4216 void MacroAssembler::Allocate(int header_size,
4217 ScaleFactor element_size,
4218 Register element_count,
4220 Register result_end,
4223 AllocationFlags flags) {
4224 DCHECK((flags & SIZE_IN_WORDS) == 0);
4225 leap(result_end, Operand(element_count, element_size, header_size));
4226 Allocate(result_end, result, result_end, scratch, gc_required, flags);
4230 void MacroAssembler::Allocate(Register object_size,
4232 Register result_end,
4235 AllocationFlags flags) {
4236 DCHECK((flags & SIZE_IN_WORDS) == 0);
4237 if (!FLAG_inline_new) {
4238 if (emit_debug_code()) {
4239 // Trash the registers to simulate an allocation failure.
4240 movl(result, Immediate(0x7091));
4241 movl(result_end, Immediate(0x7191));
4242 if (scratch.is_valid()) {
4243 movl(scratch, Immediate(0x7291));
4245 // object_size is left unchanged by this function.
4250 DCHECK(!result.is(result_end));
4252 // Load address of new object into result.
4253 LoadAllocationTopHelper(result, scratch, flags);
4255 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4256 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4259 // Calculate new top and bail out if new space is exhausted.
4260 ExternalReference allocation_limit =
4261 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4262 if (!object_size.is(result_end)) {
4263 movp(result_end, object_size);
4265 addp(result_end, result);
4266 j(carry, gc_required);
4267 Operand limit_operand = ExternalOperand(allocation_limit);
4268 cmpp(result_end, limit_operand);
4269 j(above, gc_required);
4271 // Update allocation top.
4272 UpdateAllocationTopHelper(result_end, scratch, flags);
4274 // Tag the result if requested.
4275 if ((flags & TAG_OBJECT) != 0) {
4276 addp(result, Immediate(kHeapObjectTag));
4281 void MacroAssembler::AllocateHeapNumber(Register result,
4285 // Allocate heap number in new space.
4286 Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4288 Heap::RootListIndex map_index = mode == MUTABLE
4289 ? Heap::kMutableHeapNumberMapRootIndex
4290 : Heap::kHeapNumberMapRootIndex;
4293 LoadRoot(kScratchRegister, map_index);
4294 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4298 void MacroAssembler::AllocateTwoByteString(Register result,
4303 Label* gc_required) {
4304 // Calculate the number of bytes needed for the characters in the string while
4305 // observing object alignment.
4306 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
4307 kObjectAlignmentMask;
4308 DCHECK(kShortSize == 2);
4309 // scratch1 = length * 2 + kObjectAlignmentMask.
4310 leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
4312 andp(scratch1, Immediate(~kObjectAlignmentMask));
4313 if (kHeaderAlignment > 0) {
4314 subp(scratch1, Immediate(kHeaderAlignment));
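// Net effect (assuming the elided addend in the lea above is
// kHeaderAlignment): scratch1 ends up holding the body byte count such that
// SeqTwoByteString::kHeaderSize + scratch1, the size passed to Allocate()
// below, equals the total string size rounded up to kObjectAlignment.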
4317 // Allocate two byte string in new space.
4318 Allocate(SeqTwoByteString::kHeaderSize,
4327 // Set the map, length and hash field.
4328 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
4329 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4330 Integer32ToSmi(scratch1, length);
4331 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4332 movp(FieldOperand(result, String::kHashFieldOffset),
4333 Immediate(String::kEmptyHashField));
4337 void MacroAssembler::AllocateOneByteString(Register result, Register length,
4338 Register scratch1, Register scratch2,
4340 Label* gc_required) {
4341 // Calculate the number of bytes needed for the characters in the string while
4342 // observing object alignment.
4343 const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
4344 kObjectAlignmentMask;
4345 movl(scratch1, length);
4346 DCHECK(kCharSize == 1);
4347 addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
4348 andp(scratch1, Immediate(~kObjectAlignmentMask));
4349 if (kHeaderAlignment > 0) {
4350 subp(scratch1, Immediate(kHeaderAlignment));
4353 // Allocate one-byte string in new space.
4354 Allocate(SeqOneByteString::kHeaderSize,
4363 // Set the map, length and hash field.
4364 LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
4365 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4366 Integer32ToSmi(scratch1, length);
4367 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4368 movp(FieldOperand(result, String::kHashFieldOffset),
4369 Immediate(String::kEmptyHashField));
4373 void MacroAssembler::AllocateTwoByteConsString(Register result,
4376 Label* gc_required) {
4377 // Allocate two-byte cons string in new space.
4378 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4381 // Set the map. The other fields are left uninitialized.
4382 LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
4383 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4387 void MacroAssembler::AllocateOneByteConsString(Register result,
4390 Label* gc_required) {
4391 Allocate(ConsString::kSize,
4398 // Set the map. The other fields are left uninitialized.
4399 LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
4400 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4404 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4407 Label* gc_required) {
4408 // Allocate two-byte sliced string in new space.
4409 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4412 // Set the map. The other fields are left uninitialized.
4413 LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
4414 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4418 void MacroAssembler::AllocateOneByteSlicedString(Register result,
4421 Label* gc_required) {
4422 // Allocate one-byte sliced string in new space.
4423 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4426 // Set the map. The other fields are left uninitialized.
4427 LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
4428 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4432 // Copy memory, byte-by-byte, from source to destination. Not optimized for
4433 // long or aligned copies. The contents of scratch and length are destroyed.
4434 // Destination is incremented by length; source, length and scratch are clobbered.
4436 // A simpler loop is faster on small copies, but slower on large ones.
4437 // The cld() instruction must have been emitted, to set the direction flag,
4438 // before calling this function.
4439 void MacroAssembler::CopyBytes(Register destination,
4444 DCHECK(min_length >= 0);
4445 if (emit_debug_code()) {
4446 cmpl(length, Immediate(min_length));
4447 Assert(greater_equal, kInvalidMinLength);
4449 Label short_loop, len8, len16, len24, done, short_string;
4451 const int kLongStringLimit = 4 * kPointerSize;
4452 if (min_length <= kLongStringLimit) {
4453 cmpl(length, Immediate(kPointerSize));
4454 j(below, &short_string, Label::kNear);
4457 DCHECK(source.is(rsi));
4458 DCHECK(destination.is(rdi));
4459 DCHECK(length.is(rcx));
4461 if (min_length <= kLongStringLimit) {
4462 cmpl(length, Immediate(2 * kPointerSize));
4463 j(below_equal, &len8, Label::kNear);
4464 cmpl(length, Immediate(3 * kPointerSize));
4465 j(below_equal, &len16, Label::kNear);
4466 cmpl(length, Immediate(4 * kPointerSize));
4467 j(below_equal, &len24, Label::kNear);
4470 // Because source is 8-byte aligned in our uses of this function,
4471 // we keep source aligned for the rep movs operation by copying the odd bytes
4472 // at the end of the ranges.
4473 movp(scratch, length);
4474 shrl(length, Immediate(kPointerSizeLog2));
4476 // Move remaining bytes of length.
4477 andl(scratch, Immediate(kPointerSize - 1));
4478 movp(length, Operand(source, scratch, times_1, -kPointerSize));
4479 movp(Operand(destination, scratch, times_1, -kPointerSize), length);
4480 addp(destination, scratch);
4482 if (min_length <= kLongStringLimit) {
4483 jmp(&done, Label::kNear);
4484 bind(&len24);
4485 movp(scratch, Operand(source, 2 * kPointerSize));
4486 movp(Operand(destination, 2 * kPointerSize), scratch);
4487 bind(&len16);
4488 movp(scratch, Operand(source, kPointerSize));
4489 movp(Operand(destination, kPointerSize), scratch);
4490 bind(&len8);
4491 movp(scratch, Operand(source, 0));
4492 movp(Operand(destination, 0), scratch);
4493 // Move remaining bytes of length.
4494 movp(scratch, Operand(source, length, times_1, -kPointerSize));
4495 movp(Operand(destination, length, times_1, -kPointerSize), scratch);
4496 addp(destination, length);
4497 jmp(&done, Label::kNear);
4499 bind(&short_string);
4500 if (min_length == 0) {
4501 testl(length, length);
4502 j(zero, &done, Label::kNear);
4503 }
4505 bind(&short_loop);
4506 movb(scratch, Operand(source, 0));
4507 movb(Operand(destination, 0), scratch);
4508 incp(source);
4509 incp(destination);
4510 decl(length);
4511 j(not_zero, &short_loop);
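// A rough C-level sketch (illustrative only, not part of V8; helper name and
// the use of memcpy/<stdint.h> are assumptions) of the strategy above: copy
// whole words first, then re-copy the last, possibly overlapping, word to pick
// up the 0..7 trailing bytes, and fall back to a byte loop for short inputs.
//
//   void CopyBytesSketch(uint8_t* dst, const uint8_t* src, size_t len) {
//     if (len >= sizeof(uintptr_t)) {
//       size_t words = len / sizeof(uintptr_t);
//       memcpy(dst, src, words * sizeof(uintptr_t));    // the "rep movs" part
//       memcpy(dst + len - sizeof(uintptr_t),           // overlapping tail copy
//              src + len - sizeof(uintptr_t), sizeof(uintptr_t));
//     } else {
//       for (size_t i = 0; i < len; i++) dst[i] = src[i];  // short strings
//     }
//   }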
4518 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
4519 Register end_offset,
4524 movp(Operand(start_offset, 0), filler);
4525 addp(start_offset, Immediate(kPointerSize));
4527 cmpp(start_offset, end_offset);
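// In C terms (illustrative only), the loop above is simply:
//   for (Address p = start_offset; p < end_offset; p += kPointerSize) {
//     *reinterpret_cast<Object**>(p) = filler;
//   }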
4532 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4533 if (context_chain_length > 0) {
4534 // Move up the chain of contexts to the context containing the slot.
4535 movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4536 for (int i = 1; i < context_chain_length; i++) {
4537 movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4540 // Slot is in the current function context. Move it into the
4541 // destination register in case we store into it (the write barrier
4542 // cannot be allowed to destroy the context in rsi).
4546 // We should not have found a with context by walking the context
4547 // chain (i.e., the static scope chain and runtime context chain do
4548 // not agree). A variable occurring in such a scope should have
4549 // slot type LOOKUP and not CONTEXT.
4550 if (emit_debug_code()) {
4551 CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
4552 Heap::kWithContextMapRootIndex);
4553 Check(not_equal, kVariableResolvedToWithContext);
4558 void MacroAssembler::LoadGlobalProxy(Register dst) {
4559 movp(dst, GlobalObjectOperand());
4560 movp(dst, FieldOperand(dst, GlobalObject::kGlobalProxyOffset));
4564 void MacroAssembler::LoadTransitionedArrayMapConditional(
4565 ElementsKind expected_kind,
4566 ElementsKind transitioned_kind,
4567 Register map_in_out,
4568 Register scratch,
4569 Label* no_map_match) {
4570 // Load the global or builtins object from the current context.
4571 movp(scratch,
4572 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4573 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4575 // Check that the function's map is the same as the expected cached map.
4576 movp(scratch, Operand(scratch,
4577 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4579 int offset = expected_kind * kPointerSize +
4580 FixedArrayBase::kHeaderSize;
4581 cmpp(map_in_out, FieldOperand(scratch, offset));
4582 j(not_equal, no_map_match);
4584 // Use the transitioned cached map.
4585 offset = transitioned_kind * kPointerSize +
4586 FixedArrayBase::kHeaderSize;
4587 movp(map_in_out, FieldOperand(scratch, offset));
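// Note on the offset math above (descriptive, no new behaviour): the
// JS_ARRAY_MAPS_INDEX context slot holds a FixedArray of JSArray maps indexed
// by ElementsKind, so the map for a given kind lives at
//   FixedArrayBase::kHeaderSize + kind * kPointerSize
// within that array; the same formula is evaluated once for expected_kind and
// once for transitioned_kind.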
4591 #ifdef _WIN64
4592 static const int kRegisterPassedArguments = 4;
4593 #else
4594 static const int kRegisterPassedArguments = 6;
4595 #endif
4597 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4598 // Load the global or builtins object from the current context.
4599 movp(function,
4600 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4601 // Load the native context from the global or builtins object.
4602 movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
4603 // Load the function from the native context.
4604 movp(function, Operand(function, Context::SlotOffset(index)));
4608 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4610 // Load the initial map. The global functions all have initial maps.
4611 movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4612 if (emit_debug_code()) {
4614 CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
4617 Abort(kGlobalFunctionsMustHaveInitialMap);
4623 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
4624 // On Windows 64 stack slots are reserved by the caller for all arguments
4625 // including the ones passed in registers, and space is always allocated for
4626 // the four register arguments even if the function takes fewer than four
4627 // arguments.
4628 // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
4629 // and the caller does not reserve stack slots for them.
4630 DCHECK(num_arguments >= 0);
4631 #ifdef _WIN64
4632 const int kMinimumStackSlots = kRegisterPassedArguments;
4633 if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
4634 return num_arguments;
4635 #else
4636 if (num_arguments < kRegisterPassedArguments) return 0;
4637 return num_arguments - kRegisterPassedArguments;
4638 #endif
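// Worked examples (illustrative): with num_arguments == 3, Windows x64 still
// reserves the 4 shadow slots while the System V AMD64 ABI reserves none;
// with num_arguments == 7, Windows reserves 7 slots and System V reserves 1.
// A standalone sketch of the same rule, with an invented is_win64 flag:
//
//   int ArgumentStackSlotsSketch(int n, bool is_win64) {
//     if (is_win64) return n < 4 ? 4 : n;  // always at least the shadow space
//     return n < 6 ? 0 : n - 6;            // first six args ride in registers
//   }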
4642 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
4643 Register index,
4644 Register value,
4645 uint32_t encoding_mask) {
4647 JumpIfNotSmi(string, &is_object);
4652 movp(value, FieldOperand(string, HeapObject::kMapOffset));
4653 movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
4655 andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
4656 cmpp(value, Immediate(encoding_mask));
4658 Check(equal, kUnexpectedStringType);
4660 // The index is assumed to be untagged coming in; tag it to compare with the
4661 // string length without using a temp register. It is restored at the end of
4662 // this function.
4663 Integer32ToSmi(index, index);
4664 SmiCompare(index, FieldOperand(string, String::kLengthOffset));
4665 Check(less, kIndexIsTooLarge);
4667 SmiCompare(index, Smi::FromInt(0));
4668 Check(greater_equal, kIndexIsNegative);
4670 // Restore the index
4671 SmiToInteger32(index, index);
4675 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
4676 int frame_alignment = base::OS::ActivationFrameAlignment();
4677 DCHECK(frame_alignment != 0);
4678 DCHECK(num_arguments >= 0);
4680 // Make stack end at alignment and allocate space for arguments and old rsp.
4681 movp(kScratchRegister, rsp);
4682 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4683 int argument_slots_on_stack =
4684 ArgumentStackSlotsForCFunctionCall(num_arguments);
4685 subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
4686 andp(rsp, Immediate(-frame_alignment));
4687 movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
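// Resulting frame layout (illustrative, assuming a 16-byte activation frame
// alignment and, say, 3 argument slots): rsp is rounded down to a multiple of
// 16, slots [0..2] are free for outgoing stack arguments, and slot [3] holds
// the caller's original rsp, which CallCFunction reloads after the call.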
4691 void MacroAssembler::CallCFunction(ExternalReference function,
4692 int num_arguments) {
4693 LoadAddress(rax, function);
4694 CallCFunction(rax, num_arguments);
4698 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
4699 DCHECK(has_frame());
4700 // Check stack alignment.
4701 if (emit_debug_code()) {
4702 CheckStackAlignment();
4706 DCHECK(base::OS::ActivationFrameAlignment() != 0);
4707 DCHECK(num_arguments >= 0);
4708 int argument_slots_on_stack =
4709 ArgumentStackSlotsForCFunctionCall(num_arguments);
4710 movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
4715 bool AreAliased(Register reg1,
4716 Register reg2,
4717 Register reg3,
4718 Register reg4,
4719 Register reg5,
4720 Register reg6,
4721 Register reg7,
4722 Register reg8) {
4723 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
4724 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
4725 reg7.is_valid() + reg8.is_valid();
4728 if (reg1.is_valid()) regs |= reg1.bit();
4729 if (reg2.is_valid()) regs |= reg2.bit();
4730 if (reg3.is_valid()) regs |= reg3.bit();
4731 if (reg4.is_valid()) regs |= reg4.bit();
4732 if (reg5.is_valid()) regs |= reg5.bit();
4733 if (reg6.is_valid()) regs |= reg6.bit();
4734 if (reg7.is_valid()) regs |= reg7.bit();
4735 if (reg8.is_valid()) regs |= reg8.bit();
4736 int n_of_non_aliasing_regs = NumRegs(regs);
4738 return n_of_valid_regs != n_of_non_aliasing_regs;
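// Example (illustrative): AreAliased(rax, rbx, rax) sees three valid
// registers but only two distinct bits in the accumulated mask, so
// n_of_valid_regs (3) != n_of_non_aliasing_regs (2) and the result is true.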
4743 CodePatcher::CodePatcher(byte* address, int size)
4744 : address_(address),
4745 size_(size),
4746 masm_(NULL, address, size + Assembler::kGap) {
4747 // Create a new macro assembler pointing to the address of the code to patch.
4748 // The size is adjusted with kGap in order for the assembler to generate size
4749 // bytes of instructions without failing with buffer size constraints.
4750 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4754 CodePatcher::~CodePatcher() {
4755 // Indicate that code has changed.
4756 CpuFeatures::FlushICache(address_, size_);
4758 // Check that the code was patched as expected.
4759 DCHECK(masm_.pc_ == address_ + size_);
4760 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
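// Typical usage sketch (illustrative): patch a fixed number of bytes in place
// through the patcher's assembler and let the destructor flush the
// instruction cache and verify the byte count.
//
//   CodePatcher patcher(address, 2);
//   patcher.masm()->Nop(2);  // for example, overwrite two bytes with a nop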
4764 void MacroAssembler::CheckPageFlag(
4765 Register object,
4766 Register scratch,
4767 int mask,
4768 Condition cc,
4769 Label* condition_met,
4770 Label::Distance condition_met_distance) {
4771 DCHECK(cc == zero || cc == not_zero);
4772 if (scratch.is(object)) {
4773 andp(scratch, Immediate(~Page::kPageAlignmentMask));
4775 movp(scratch, Immediate(~Page::kPageAlignmentMask));
4776 andp(scratch, object);
4778 if (mask < (1 << kBitsPerByte)) {
4779 testb(Operand(scratch, MemoryChunk::kFlagsOffset),
4780 Immediate(static_cast<uint8_t>(mask)));
4782 testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
4784 j(cc, condition_met, condition_met_distance);
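// Conceptually (illustrative): scratch = object & ~kPageAlignmentMask points
// at the MemoryChunk header, and the branch is taken when
//   (chunk->flags & mask)
// is zero or non-zero, matching the requested condition `cc`.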
4788 void MacroAssembler::JumpIfBlack(Register object,
4789 Register bitmap_scratch,
4790 Register mask_scratch,
4791 Label* on_black,
4792 Label::Distance on_black_distance) {
4793 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
4794 GetMarkBits(object, bitmap_scratch, mask_scratch);
4796 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
4797 // The mask_scratch register contains a 1 at the position of the first bit
4798 // and a 0 at all other positions, including the position of the second bit.
4799 movp(rcx, mask_scratch);
4800 // Make rcx into a mask that covers both marking bits using the operation
4801 // rcx = mask | (mask << 1).
4802 leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
4803 // Note that we are using a 4-byte aligned 8-byte load.
4804 andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
4805 cmpp(mask_scratch, rcx);
4806 j(equal, on_black, on_black_distance);
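// Equivalent predicate (illustrative helper, not V8 code), where `cell` is
// the word of mark bits and `mask` has a single bit set at the object's
// first mark bit:
//
//   bool IsBlackSketch(uintptr_t cell, uintptr_t mask) {
//     return (cell & (mask | (mask << 1))) == mask;  // bit pattern "10"
//   }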
4810 // Detect some, but not all, common pointer-free objects. This is used by the
4811 // incremental write barrier which doesn't care about oddballs (they are always
4812 // marked black immediately so this code is not hit).
4813 void MacroAssembler::JumpIfDataObject(
4814 Register value,
4815 Register scratch,
4816 Label* not_data_object,
4817 Label::Distance not_data_object_distance) {
4818 Label is_data_object;
4819 movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
4820 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
4821 j(equal, &is_data_object, Label::kNear);
4822 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4823 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4824 // If it's a string and it's not a cons string then it's an object containing
4825 // no GC pointers.
4826 testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
4827 Immediate(kIsIndirectStringMask | kIsNotStringMask));
4828 j(not_zero, not_data_object, not_data_object_distance);
4829 bind(&is_data_object);
4833 void MacroAssembler::GetMarkBits(Register addr_reg,
4834 Register bitmap_reg,
4835 Register mask_reg) {
4836 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
4837 movp(bitmap_reg, addr_reg);
4838 // Sign extended 32 bit immediate.
4839 andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
4840 movp(rcx, addr_reg);
4841 int shift =
4842 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
4843 shrl(rcx, Immediate(shift));
4844 andp(rcx,
4845 Immediate((Page::kPageAlignmentMask >> shift) &
4846 ~(Bitmap::kBytesPerCell - 1)));
4848 addp(bitmap_reg, rcx);
4849 movp(rcx, addr_reg);
4850 shrl(rcx, Immediate(kPointerSizeLog2));
4851 andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
4852 movl(mask_reg, Immediate(1));
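// Illustrative address arithmetic for an object address A on its page:
//   page      = A & ~kPageAlignmentMask
//   bit index = (A >> kPointerSizeLog2) & (Bitmap::kBitsPerCell - 1)
//   cell      = (A & kPageAlignmentMask) >> (kPointerSizeLog2 +
//                                            Bitmap::kBitsPerCellLog2)
// so bitmap_reg ends up addressing the containing cell (relative to the
// chunk header) and mask_reg ends up holding 1 << bit index.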
4857 void MacroAssembler::EnsureNotWhite(
4858 Register value,
4859 Register bitmap_scratch,
4860 Register mask_scratch,
4861 Label* value_is_white_and_not_data,
4862 Label::Distance distance) {
4863 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
4864 GetMarkBits(value, bitmap_scratch, mask_scratch);
4866 // If the value is black or grey we don't need to do anything.
4867 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4868 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
4869 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
4870 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4874 // Since both black and grey have a 1 in the first position and white does
4875 // not have a 1 there we only need to check one bit.
4876 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4877 j(not_zero, &done, Label::kNear);
4879 if (emit_debug_code()) {
4880 // Check for impossible bit pattern.
4883 // Shift the mask left one bit (by adding it to itself); this may overflow, making the check conservative.
4884 addp(mask_scratch, mask_scratch);
4885 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4886 j(zero, &ok, Label::kNear);
4892 // Value is white. We check whether it is data that doesn't need scanning.
4893 // Currently only checks for HeapNumber and non-cons strings.
4894 Register map = rcx; // Holds map while checking type.
4895 Register length = rcx; // Holds length of object after checking type.
4896 Label not_heap_number;
4897 Label is_data_object;
4899 // Check for heap-number
4900 movp(map, FieldOperand(value, HeapObject::kMapOffset));
4901 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
4902 j(not_equal, &not_heap_number, Label::kNear);
4903 movp(length, Immediate(HeapNumber::kSize));
4904 jmp(&is_data_object, Label::kNear);
4906 bind(&not_heap_number);
4907 // Check for strings.
4908 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4909 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4910 // If it's a string and it's not a cons string then it's an object containing
4911 // no GC pointers.
4912 Register instance_type = rcx;
4913 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
4914 testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
4915 j(not_zero, value_is_white_and_not_data);
4916 // It's a non-indirect (non-cons and non-slice) string.
4917 // If it's external, the length is just ExternalString::kSize.
4918 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
4920 // External strings are the only ones with the kExternalStringTag bit
4921 // set.
4922 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
4923 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
4924 testb(instance_type, Immediate(kExternalStringTag));
4925 j(zero, &not_external, Label::kNear);
4926 movp(length, Immediate(ExternalString::kSize));
4927 jmp(&is_data_object, Label::kNear);
4929 bind(&not_external);
4930 // Sequential string, either Latin1 or UC16.
4931 DCHECK(kOneByteStringTag == 0x04);
4932 andp(length, Immediate(kStringEncodingMask));
4933 xorp(length, Immediate(kStringEncodingMask));
4934 addp(length, Immediate(0x04));
4935 // Value now either 4 (if Latin1) or 8 (if UC16), i.e. char-size shifted by 2.
4936 imulp(length, FieldOperand(value, String::kLengthOffset));
4937 shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
4938 addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
4939 andp(length, Immediate(~kObjectAlignmentMask));
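// Worked example (illustrative): for a sequential one-byte string of length
// 5, `length` becomes 4 (char size 1 shifted left by 2), multiplying by the
// Smi-tagged string length and shifting back yields 5 payload bytes, and
// adding SeqString::kHeaderSize plus rounding up to kObjectAlignment gives
// the object's allocated size in bytes.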
4941 bind(&is_data_object);
4942 // Value is a data object, and it is white. Mark it black. Since we know
4943 // that the object is white we can make it black by flipping one bit.
4944 orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4946 andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
4947 addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
4953 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
4955 Register empty_fixed_array_value = r8;
4956 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
4959 // Check if the enum length field is properly initialized, indicating that
4960 // there is an enum cache.
4961 movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
4963 EnumLength(rdx, rbx);
4964 Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
4965 j(equal, call_runtime);
4971 movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
4973 // For all objects but the receiver, check that the cache is empty.
4974 EnumLength(rdx, rbx);
4975 Cmp(rdx, Smi::FromInt(0));
4976 j(not_equal, call_runtime);
4980 // Check that there are no elements. Register rcx contains the current JS
4981 // object we've reached through the prototype chain.
4983 cmpp(empty_fixed_array_value,
4984 FieldOperand(rcx, JSObject::kElementsOffset));
4985 j(equal, &no_elements);
4987 // Second chance, the object may be using the empty slow element dictionary.
4988 LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
4989 cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
4990 j(not_equal, call_runtime);
4993 movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
4994 cmpp(rcx, null_value);
4995 j(not_equal, &next);
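// Pseudocode of the walk above (illustrative):
//   for (obj = receiver; obj != null_value; obj = obj->map->prototype) {
//     if (obj->map->EnumLength() == kInvalidEnumCacheSentinel) goto runtime;
//     if (obj != receiver && obj->map->EnumLength() != 0) goto runtime;
//     if (obj->elements != empty_fixed_array &&
//         obj->elements != empty_slow_element_dictionary) goto runtime;
//   }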
4998 void MacroAssembler::TestJSArrayForAllocationMemento(
4999 Register receiver_reg,
5000 Register scratch_reg,
5001 Label* no_memento_found) {
5002 ExternalReference new_space_start =
5003 ExternalReference::new_space_start(isolate());
5004 ExternalReference new_space_allocation_top =
5005 ExternalReference::new_space_allocation_top_address(isolate());
5007 leap(scratch_reg, Operand(receiver_reg,
5008 JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5009 Move(kScratchRegister, new_space_start);
5010 cmpp(scratch_reg, kScratchRegister);
5011 j(less, no_memento_found);
5012 cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
5013 j(greater, no_memento_found);
5014 CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
5015 Heap::kAllocationMementoMapRootIndex);
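// Layout being probed (descriptive): a freshly allocated JSArray may be
// directly followed by an AllocationMemento, so the word at
// receiver + JSArray::kSize holds the memento's map, provided that address
// still lies inside new space and below the current allocation top.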
5019 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5020 Register object,
5021 Register scratch0,
5022 Register scratch1,
5023 Label* found) {
5024 DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
5025 DCHECK(!scratch1.is(scratch0));
5026 Register current = scratch0;
5027 Label loop_again, end;
5029 movp(current, object);
5030 movp(current, FieldOperand(current, HeapObject::kMapOffset));
5031 movp(current, FieldOperand(current, Map::kPrototypeOffset));
5032 CompareRoot(current, Heap::kNullValueRootIndex);
5035 // Loop based on the map going up the prototype chain.
5037 movp(current, FieldOperand(current, HeapObject::kMapOffset));
5038 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
5039 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
5040 CmpInstanceType(current, JS_OBJECT_TYPE);
5042 movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
5043 DecodeField<Map::ElementsKindBits>(scratch1);
5044 cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
5046 movp(current, FieldOperand(current, Map::kPrototypeOffset));
5047 CompareRoot(current, Heap::kNullValueRootIndex);
5048 j(not_equal, &loop_again);
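// Pseudocode (illustrative): starting from the object's prototype, follow
//   current = current->map->prototype
// until null, jumping to the caller's label as soon as a prototype whose
// elements kind decodes to DICTIONARY_ELEMENTS (or that is not a plain
// JSObject) is seen.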
5054 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
5055 DCHECK(!dividend.is(rax));
5056 DCHECK(!dividend.is(rdx));
5057 base::MagicNumbersForDivision<uint32_t> mag =
5058 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
5059 movl(rax, Immediate(mag.multiplier));
5060 imull(dividend);
5061 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
5062 if (divisor > 0 && neg) addl(rdx, dividend);
5063 if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
5064 if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
5065 movl(rax, dividend);
5066 shrl(rax, Immediate(31));
5067 addl(rdx, rax);
5068 }
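// Equivalent scalar computation (illustrative), using the magic
// multiplier/shift pair produced by SignedDivisionByConstant:
//   int32_t q = static_cast<int32_t>(
//       (static_cast<int64_t>(static_cast<int32_t>(mag.multiplier)) *
//        dividend) >> 32);
//   if (divisor > 0 && neg) q += dividend;
//   if (divisor < 0 && !neg && mag.multiplier > 0) q -= dividend;
//   q >>= mag.shift;                             // arithmetic shift
//   q += static_cast<uint32_t>(dividend) >> 31;  // round toward zero
// The truncated quotient is left in rdx.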
5071 } // namespace internal
5072 } // namespace v8
5074 #endif // V8_TARGET_ARCH_X64