1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/base/bits.h"
8 #include "src/base/division-by-constant.h"
9 #include "src/bootstrapper.h"
10 #include "src/codegen.h"
11 #include "src/cpu-profiler.h"
12 #include "src/debug/debug.h"
13 #include "src/heap/heap.h"
14 #include "src/x64/assembler-x64.h"
15 #include "src/x64/macro-assembler-x64.h"
20 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
21 : Assembler(arg_isolate, buffer, size),
22 generating_stub_(false),
24 root_array_available_(true) {
25 if (isolate() != NULL) {
26 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
32 static const int64_t kInvalidRootRegisterDelta = -1;
35 int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
36 if (predictable_code_size() &&
37 (other.address() < reinterpret_cast<Address>(isolate()) ||
38 other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
39 return kInvalidRootRegisterDelta;
41 Address roots_register_value = kRootRegisterBias +
42 reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
44 int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization.
45 if (kPointerSize == kInt64Size) {
46 delta = other.address() - roots_register_value;
48 // For x32, zero-extend the address to 64 bits and calculate the delta.
49 uint64_t o = static_cast<uint32_t>(
50 reinterpret_cast<intptr_t>(other.address()));
51 uint64_t r = static_cast<uint32_t>(
52 reinterpret_cast<intptr_t>(roots_register_value));
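
// Illustrative note (not part of the original source): kRootRegister holds
// roots_array_start + kRootRegisterBias, so an external reference at address
// A is addressable as Operand(kRootRegister, A - (roots_start + bias)) when
// the delta fits in 32 bits. The bias (128 on x64) re-centers the register so
// that more root slots are reachable with a one-byte displacement, e.g.:
//
//   // hypothetical values for illustration only
//   //   roots_start = 0x1000, bias = 128, target = 0x1040
//   //   delta = 0x1040 - (0x1000 + 128) = -64   -> fits in an int8
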
59 Operand MacroAssembler::ExternalOperand(ExternalReference target,
61 if (root_array_available_ && !serializer_enabled()) {
62 int64_t delta = RootRegisterDelta(target);
63 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
64 return Operand(kRootRegister, static_cast<int32_t>(delta));
67 Move(scratch, target);
68 return Operand(scratch, 0);
72 void MacroAssembler::Load(Register destination, ExternalReference source) {
73 if (root_array_available_ && !serializer_enabled()) {
74 int64_t delta = RootRegisterDelta(source);
75 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
76 movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
81 if (destination.is(rax)) {
84 Move(kScratchRegister, source);
85 movp(destination, Operand(kScratchRegister, 0));
90 void MacroAssembler::Store(ExternalReference destination, Register source) {
91 if (root_array_available_ && !serializer_enabled()) {
92 int64_t delta = RootRegisterDelta(destination);
93 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
94 movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
100 store_rax(destination);
102 Move(kScratchRegister, destination);
103 movp(Operand(kScratchRegister, 0), source);
108 void MacroAssembler::LoadAddress(Register destination,
109 ExternalReference source) {
110 if (root_array_available_ && !serializer_enabled()) {
111 int64_t delta = RootRegisterDelta(source);
112 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
113 leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
118 Move(destination, source);
122 int MacroAssembler::LoadAddressSize(ExternalReference source) {
123 if (root_array_available_ && !serializer_enabled()) {
124 // This calculation depends on the internals of LoadAddress.
125 // Its correctness is ensured by the asserts in the Call
126 // instruction below.
127 int64_t delta = RootRegisterDelta(source);
128 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
129 // Operand is leap(scratch, Operand(kRootRegister, delta));
130 // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
132 if (!is_int8(static_cast<int32_t>(delta))) {
133 size += 3; // Need full four-byte displacement in lea.
138 // Size of movp(destination, src);
139 return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
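
// Size-accounting sketch (illustrative, assuming the standard x64 encodings):
//   leap(reg, Operand(kRootRegister, disp8))   REX.W 8D /r disp8   -> 4 bytes
//   leap(reg, Operand(kRootRegister, disp32))  REX.W 8D /r disp32  -> 7 bytes
//   Move(reg, ExternalReference)               REX.W B8+r imm64    -> 10 bytes
// The 4/7-byte cases above cover the lea forms; the fallback is what
// kMoveAddressIntoScratchRegisterInstructionLength accounts for.
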
143 void MacroAssembler::PushAddress(ExternalReference source) {
144 int64_t address = reinterpret_cast<int64_t>(source.address());
145 if (is_int32(address) && !serializer_enabled()) {
146 if (emit_debug_code()) {
147 Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
149 Push(Immediate(static_cast<int32_t>(address)));
152 LoadAddress(kScratchRegister, source);
153 Push(kScratchRegister);
157 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
158 DCHECK(root_array_available_);
159 movp(destination, Operand(kRootRegister,
160 (index << kPointerSizeLog2) - kRootRegisterBias));
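
// Illustrative arithmetic (not from the original file): because kRootRegister
// is pre-biased by kRootRegisterBias, the root slot is reached by un-biasing
// in the displacement. For example, with 8-byte pointers and the x64 bias of
// 128, index 5 yields Operand(kRootRegister, 5 * 8 - 128), i.e. a disp8 of -88.
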
164 void MacroAssembler::LoadRootIndexed(Register destination,
165 Register variable_offset,
167 DCHECK(root_array_available_);
169 Operand(kRootRegister,
170 variable_offset, times_pointer_size,
171 (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
175 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
176 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
177 DCHECK(root_array_available_);
178 movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
183 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
184 DCHECK(root_array_available_);
185 Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
189 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
190 DCHECK(root_array_available_);
191 cmpp(with, Operand(kRootRegister,
192 (index << kPointerSizeLog2) - kRootRegisterBias));
196 void MacroAssembler::CompareRoot(const Operand& with,
197 Heap::RootListIndex index) {
198 DCHECK(root_array_available_);
199 DCHECK(!with.AddressUsesRegister(kScratchRegister));
200 LoadRoot(kScratchRegister, index);
201 cmpp(with, kScratchRegister);
205 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
208 SaveFPRegsMode save_fp,
209 RememberedSetFinalAction and_then) {
210 if (emit_debug_code()) {
212 JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
216 // Load store buffer top.
217 LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
218 // Store pointer to buffer.
219 movp(Operand(scratch, 0), addr);
220 // Increment buffer top.
221 addp(scratch, Immediate(kPointerSize));
222 // Write back new top of buffer.
223 StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
224 // Call stub on end of buffer.
226 // Check for end of buffer.
227 testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
228 if (and_then == kReturnAtEnd) {
229 Label buffer_overflowed;
230 j(not_equal, &buffer_overflowed, Label::kNear);
232 bind(&buffer_overflowed);
234 DCHECK(and_then == kFallThroughAtEnd);
235 j(equal, &done, Label::kNear);
237 StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
238 CallStub(&store_buffer_overflow);
239 if (and_then == kReturnAtEnd) {
242 DCHECK(and_then == kFallThroughAtEnd);
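
// Rough C-level sketch of the fast path above (illustrative only):
//
//   Address* top = roots[kStoreBufferTopRootIndex];
//   *top++ = addr;                                   // record the slot
//   roots[kStoreBufferTopRootIndex] = top;
//   if (reinterpret_cast<uintptr_t>(top) & StoreBuffer::kStoreBufferOverflowBit) {
//     CallStub(&store_buffer_overflow);              // buffer is full
//   }
//
// i.e. the overflow bit is folded into the buffer's (aligned) top address, so
// a single testp decides whether the slow-path stub must run.
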
248 void MacroAssembler::InNewSpace(Register object,
252 Label::Distance distance) {
253 if (serializer_enabled()) {
254 // Can't do arithmetic on external references if it might get serialized.
255 // The mask isn't really an address. We load it as an external reference in
256 // case the size of the new space is different between the snapshot maker
257 // and the running system.
258 if (scratch.is(object)) {
259 Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
260 andp(scratch, kScratchRegister);
262 Move(scratch, ExternalReference::new_space_mask(isolate()));
263 andp(scratch, object);
265 Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
266 cmpp(scratch, kScratchRegister);
267 j(cc, branch, distance);
269 DCHECK(kPointerSize == kInt64Size
270 ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
271 : kPointerSize == kInt32Size);
272 intptr_t new_space_start =
273 reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
274 Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
275 Assembler::RelocInfoNone());
276 if (scratch.is(object)) {
277 addp(scratch, kScratchRegister);
279 leap(scratch, Operand(object, kScratchRegister, times_1, 0));
282 Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
283 j(cc, branch, distance);
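
// Illustrative note (not part of the original source): the non-serializer path
// relies on the new space being a single, size-aligned region, so
//
//   ((addr - new_space_start) & new_space_mask) == 0
//
// holds exactly when addr lies in new space. Callers pass cc = equal or
// not_equal to branch on membership or non-membership respectively.
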
288 void MacroAssembler::RecordWriteField(
293 SaveFPRegsMode save_fp,
294 RememberedSetAction remembered_set_action,
296 PointersToHereCheck pointers_to_here_check_for_value) {
297 // First, check if a write barrier is even needed. The tests below
298 // catch stores of Smis.
301 // Skip barrier if writing a smi.
302 if (smi_check == INLINE_SMI_CHECK) {
303 JumpIfSmi(value, &done);
306 // Although the object register is tagged, the offset is relative to the start
307 // of the object, so the offset must be a multiple of kPointerSize.
308 DCHECK(IsAligned(offset, kPointerSize));
310 leap(dst, FieldOperand(object, offset));
311 if (emit_debug_code()) {
313 testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
314 j(zero, &ok, Label::kNear);
319 RecordWrite(object, dst, value, save_fp, remembered_set_action,
320 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
324 // Clobber clobbered input registers when running with the debug-code flag
325 // turned on to provoke errors.
326 if (emit_debug_code()) {
327 Move(value, kZapValue, Assembler::RelocInfoNone());
328 Move(dst, kZapValue, Assembler::RelocInfoNone());
333 void MacroAssembler::RecordWriteArray(
337 SaveFPRegsMode save_fp,
338 RememberedSetAction remembered_set_action,
340 PointersToHereCheck pointers_to_here_check_for_value) {
341 // First, check if a write barrier is even needed. The tests below
342 // catch stores of Smis.
345 // Skip barrier if writing a smi.
346 if (smi_check == INLINE_SMI_CHECK) {
347 JumpIfSmi(value, &done);
350 // Array access: calculate the destination address. Index is not a smi.
351 Register dst = index;
352 leap(dst, Operand(object, index, times_pointer_size,
353 FixedArray::kHeaderSize - kHeapObjectTag));
355 RecordWrite(object, dst, value, save_fp, remembered_set_action,
356 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
360 // Clobber clobbered input registers when running with the debug-code flag
361 // turned on to provoke errors.
362 if (emit_debug_code()) {
363 Move(value, kZapValue, Assembler::RelocInfoNone());
364 Move(index, kZapValue, Assembler::RelocInfoNone());
369 void MacroAssembler::RecordWriteForMap(Register object,
372 SaveFPRegsMode fp_mode) {
373 DCHECK(!object.is(kScratchRegister));
374 DCHECK(!object.is(map));
375 DCHECK(!object.is(dst));
376 DCHECK(!map.is(dst));
377 AssertNotSmi(object);
379 if (emit_debug_code()) {
381 if (map.is(kScratchRegister)) pushq(map);
382 CompareMap(map, isolate()->factory()->meta_map());
383 if (map.is(kScratchRegister)) popq(map);
384 j(equal, &ok, Label::kNear);
389 if (!FLAG_incremental_marking) {
393 if (emit_debug_code()) {
395 if (map.is(kScratchRegister)) pushq(map);
396 cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
397 if (map.is(kScratchRegister)) popq(map);
398 j(equal, &ok, Label::kNear);
403 // Compute the address.
404 leap(dst, FieldOperand(object, HeapObject::kMapOffset));
406 // First, check if a write barrier is even needed. The tests below
407 // catch stores of smis and stores into the young generation.
410 // A single check of the map's pages interesting flag suffices, since it is
411 // only set during incremental collection, and then it's also guaranteed that
412 // the from object's page's interesting flag is also set. This optimization
413 // relies on the fact that maps can never be in new space.
415 map, // Used as scratch.
416 MemoryChunk::kPointersToHereAreInterestingMask,
421 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
427 // Count number of write barriers in generated code.
428 isolate()->counters()->write_barriers_static()->Increment();
429 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
431 // Clobber clobbered registers when running with the debug-code flag
432 // turned on to provoke errors.
433 if (emit_debug_code()) {
434 Move(dst, kZapValue, Assembler::RelocInfoNone());
435 Move(map, kZapValue, Assembler::RelocInfoNone());
440 void MacroAssembler::RecordWrite(
444 SaveFPRegsMode fp_mode,
445 RememberedSetAction remembered_set_action,
447 PointersToHereCheck pointers_to_here_check_for_value) {
448 DCHECK(!object.is(value));
449 DCHECK(!object.is(address));
450 DCHECK(!value.is(address));
451 AssertNotSmi(object);
453 if (remembered_set_action == OMIT_REMEMBERED_SET &&
454 !FLAG_incremental_marking) {
458 if (emit_debug_code()) {
460 cmpp(value, Operand(address, 0));
461 j(equal, &ok, Label::kNear);
466 // First, check if a write barrier is even needed. The tests below
467 // catch stores of smis and stores into the young generation.
470 if (smi_check == INLINE_SMI_CHECK) {
471 // Skip barrier if writing a smi.
472 JumpIfSmi(value, &done);
475 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
477 value, // Used as scratch.
478 MemoryChunk::kPointersToHereAreInterestingMask,
484 CheckPageFlag(object,
485 value, // Used as scratch.
486 MemoryChunk::kPointersFromHereAreInterestingMask,
491 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
497 // Count number of write barriers in generated code.
498 isolate()->counters()->write_barriers_static()->Increment();
499 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
501 // Clobber clobbered registers when running with the debug-code flag
502 // turned on to provoke errors.
503 if (emit_debug_code()) {
504 Move(address, kZapValue, Assembler::RelocInfoNone());
505 Move(value, kZapValue, Assembler::RelocInfoNone());
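
// Barrier structure in pseudo-code (illustrative sketch of the checks above):
//
//   if (remembered_set_action == OMIT_REMEMBERED_SET && !incremental_marking)
//     return;                                   // nothing to record at all
//   if (value is a Smi) return;                 // optional INLINE_SMI_CHECK
//   if (!(PageOf(value)->flags & kPointersToHereAreInterestingMask)) return;
//   if (!(PageOf(object)->flags & kPointersFromHereAreInterestingMask)) return;
//   RecordWriteStub(object, value, address, ...);   // slow path
//
// The two CheckPageFlag tests are what let most stores skip the stub call.
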
510 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
511 if (emit_debug_code()) Check(cc, reason);
515 void MacroAssembler::AssertFastElements(Register elements) {
516 if (emit_debug_code()) {
518 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
519 Heap::kFixedArrayMapRootIndex);
520 j(equal, &ok, Label::kNear);
521 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
522 Heap::kFixedDoubleArrayMapRootIndex);
523 j(equal, &ok, Label::kNear);
524 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
525 Heap::kFixedCOWArrayMapRootIndex);
526 j(equal, &ok, Label::kNear);
527 Abort(kJSObjectWithFastElementsMapHasSlowElements);
533 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
535 j(cc, &L, Label::kNear);
537 // Control will not return here.
542 void MacroAssembler::CheckStackAlignment() {
543 int frame_alignment = base::OS::ActivationFrameAlignment();
544 int frame_alignment_mask = frame_alignment - 1;
545 if (frame_alignment > kPointerSize) {
546 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
547 Label alignment_as_expected;
548 testp(rsp, Immediate(frame_alignment_mask));
549 j(zero, &alignment_as_expected, Label::kNear);
550 // Abort if stack is not aligned.
552 bind(&alignment_as_expected);
557 void MacroAssembler::NegativeZeroTest(Register result,
561 testl(result, result);
562 j(not_zero, &ok, Label::kNear);
569 void MacroAssembler::Abort(BailoutReason reason) {
571 const char* msg = GetBailoutReason(reason);
573 RecordComment("Abort message: ");
577 if (FLAG_trap_on_abort) {
583 Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
584 Assembler::RelocInfoNone());
585 Push(kScratchRegister);
588 // We don't actually want to generate a pile of code for this, so just
589 // claim there is a stack frame, without generating one.
590 FrameScope scope(this, StackFrame::NONE);
591 CallRuntime(Runtime::kAbort, 1);
593 CallRuntime(Runtime::kAbort, 1);
595 // Control will not return here.
600 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
601 DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
602 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
606 void MacroAssembler::TailCallStub(CodeStub* stub) {
607 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
611 void MacroAssembler::StubReturn(int argc) {
612 DCHECK(argc >= 1 && generating_stub());
613 ret((argc - 1) * kPointerSize);
617 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
618 return has_frame_ || !stub->SometimesSetsUpAFrame();
622 void MacroAssembler::IndexFromHash(Register hash, Register index) {
623 // The assert checks that the constants for the maximum number of digits
624 // for an array index cached in the hash field and the number of bits
625 // reserved for it do not conflict.
626 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
627 (1 << String::kArrayIndexValueBits));
628 if (!hash.is(index)) {
631 DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
635 void MacroAssembler::CallRuntime(const Runtime::Function* f,
637 SaveFPRegsMode save_doubles) {
638 // If the expected number of arguments of the runtime function is
639 // constant, we check that the actual number of arguments matches it.
641 CHECK(f->nargs < 0 || f->nargs == num_arguments);
643 // TODO(1236192): Most runtime routines don't need the number of
644 // arguments passed in because it is constant. At some point we
645 // should remove this need and make the runtime routine entry code smarter.
647 Set(rax, num_arguments);
648 LoadAddress(rbx, ExternalReference(f, isolate()));
649 CEntryStub ces(isolate(), f->result_size, save_doubles);
654 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
656 Set(rax, num_arguments);
657 LoadAddress(rbx, ext);
659 CEntryStub stub(isolate(), 1);
664 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
667 // ----------- S t a t e -------------
668 // -- rsp[0] : return address
669 // -- rsp[8] : argument num_arguments - 1
671 // -- rsp[8 * num_arguments] : argument 0 (receiver)
672 // -----------------------------------
674 // TODO(1236192): Most runtime routines don't need the number of
675 // arguments passed in because it is constant. At some point we
676 // should remove this need and make the runtime routine entry code smarter.
678 Set(rax, num_arguments);
679 JumpToExternalReference(ext, result_size);
683 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
686 TailCallExternalReference(ExternalReference(fid, isolate()),
692 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
694 // Set the entry point and jump to the C entry runtime stub.
695 LoadAddress(rbx, ext);
696 CEntryStub ces(isolate(), result_size);
697 jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
701 void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
702 const CallWrapper& call_wrapper) {
703 // You can't call a builtin without a valid frame.
704 DCHECK(flag == JUMP_FUNCTION || has_frame());
706 // Rely on the assertion to check that the number of provided
707 // arguments matches the expected number of arguments. Fake a
708 // parameter count to avoid emitting code to do the check.
709 ParameterCount expected(0);
710 GetBuiltinEntry(rdx, native_context_index);
711 InvokeCode(rdx, expected, expected, flag, call_wrapper);
715 void MacroAssembler::GetBuiltinFunction(Register target,
716 int native_context_index) {
717 // Load the builtins object into target register.
718 movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
719 movp(target, FieldOperand(target, GlobalObject::kNativeContextOffset));
720 movp(target, ContextOperand(target, native_context_index));
724 void MacroAssembler::GetBuiltinEntry(Register target,
725 int native_context_index) {
726 DCHECK(!target.is(rdi));
727 // Load the JavaScript builtin function from the builtins object.
728 GetBuiltinFunction(rdi, native_context_index);
729 movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
733 #define REG(Name) { kRegister_ ## Name ## _Code }
735 static const Register saved_regs[] = {
736 REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
737 REG(r9), REG(r10), REG(r11)
742 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
745 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
748 Register exclusion3) {
749 // We don't allow a GC during a store buffer overflow so there is no need to
750 // store the registers in any particular way, but we do have to store and restore them.
752 for (int i = 0; i < kNumberOfSavedRegs; i++) {
753 Register reg = saved_regs[i];
754 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
758 // r12 to r15 are callee-saved on all platforms.
759 if (fp_mode == kSaveFPRegs) {
760 subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
761 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
762 XMMRegister reg = XMMRegister::from_code(i);
763 movsd(Operand(rsp, i * kDoubleSize), reg);
769 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
772 Register exclusion3) {
773 if (fp_mode == kSaveFPRegs) {
774 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
775 XMMRegister reg = XMMRegister::from_code(i);
776 movsd(reg, Operand(rsp, i * kDoubleSize));
778 addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
780 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
781 Register reg = saved_regs[i];
782 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
789 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
795 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
801 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
802 DCHECK(!r.IsDouble());
803 if (r.IsInteger8()) {
805 } else if (r.IsUInteger8()) {
807 } else if (r.IsInteger16()) {
809 } else if (r.IsUInteger16()) {
811 } else if (r.IsInteger32()) {
819 void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
820 DCHECK(!r.IsDouble());
821 if (r.IsInteger8() || r.IsUInteger8()) {
823 } else if (r.IsInteger16() || r.IsUInteger16()) {
825 } else if (r.IsInteger32()) {
828 if (r.IsHeapObject()) {
830 } else if (r.IsSmi()) {
838 void MacroAssembler::Set(Register dst, int64_t x) {
841 } else if (is_uint32(x)) {
842 movl(dst, Immediate(static_cast<uint32_t>(x)));
843 } else if (is_int32(x)) {
844 movq(dst, Immediate(static_cast<int32_t>(x)));
851 void MacroAssembler::Set(const Operand& dst, intptr_t x) {
852 if (kPointerSize == kInt64Size) {
854 movp(dst, Immediate(static_cast<int32_t>(x)));
856 Set(kScratchRegister, x);
857 movp(dst, kScratchRegister);
860 movp(dst, Immediate(static_cast<int32_t>(x)));
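
// Encoding-size rationale (illustrative, standard x64 encodings assumed):
//   Set(rax, 0)           -> xorl eax, eax     (2 bytes, no immediate at all)
//   Set(rax, 0xFFFFFFFF)  -> movl eax, imm32   (zero-extends to 64 bits)
//   Set(rax, -1)          -> movq rax, imm32   (sign-extended 32-bit immediate)
//   otherwise             -> movq rax, imm64   (full 10-byte form)
// so the branches above simply pick the shortest encoding that reproduces x.
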
865 // ----------------------------------------------------------------------------
866 // Smi tagging, untagging and tag detection.
868 bool MacroAssembler::IsUnsafeInt(const int32_t x) {
869 static const int kMaxBits = 17;
870 return !is_intn(x, kMaxBits);
874 void MacroAssembler::SafeMove(Register dst, Smi* src) {
875 DCHECK(!dst.is(kScratchRegister));
876 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
877 if (SmiValuesAre32Bits()) {
878 // JIT cookie can be converted to Smi.
879 Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
880 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
881 xorp(dst, kScratchRegister);
883 DCHECK(SmiValuesAre31Bits());
884 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
885 movp(dst, Immediate(value ^ jit_cookie()));
886 xorp(dst, Immediate(jit_cookie()));
894 void MacroAssembler::SafePush(Smi* src) {
895 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
896 if (SmiValuesAre32Bits()) {
897 // JIT cookie can be converted to Smi.
898 Push(Smi::FromInt(src->value() ^ jit_cookie()));
899 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
900 xorp(Operand(rsp, 0), kScratchRegister);
902 DCHECK(SmiValuesAre31Bits());
903 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
904 Push(Immediate(value ^ jit_cookie()));
905 xorp(Operand(rsp, 0), Immediate(jit_cookie()));
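
// Illustrative note (not part of the original source): SafeMove and SafePush
// guard against JIT spraying. Instead of embedding an attacker-influenced
// 32-bit payload directly in the instruction stream, the generated code embeds
// (value ^ jit_cookie) and re-applies the cookie at run time:
//
//   movp dst, imm(value ^ cookie)
//   xorp dst, imm(cookie)     // dst == value, but neither immediate is the
//                             // raw payload
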
913 Register MacroAssembler::GetSmiConstant(Smi* source) {
914 int value = source->value();
916 xorl(kScratchRegister, kScratchRegister);
917 return kScratchRegister;
919 LoadSmiConstant(kScratchRegister, source);
920 return kScratchRegister;
924 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
925 // Special-casing 0 here to use xorl seems to make things slower, so we don't do it.
927 Move(dst, source, Assembler::RelocInfoNone());
931 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
932 STATIC_ASSERT(kSmiTag == 0);
936 shlp(dst, Immediate(kSmiShift));
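
// Representation reminder (illustrative): with 32-bit smi values on x64 the
// payload lives in the upper half of the word and the low 32 bits (including
// the tag) are zero, so tagging is a single shift:
//
//   value 7  ->  smi 0x0000000700000000   (shlp by kSmiShift == 32)
//
// With 31-bit smi values (x32), kSmiShift is 1 and the layout matches ia32.
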
940 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
941 if (emit_debug_code()) {
942 testb(dst, Immediate(0x01));
944 j(zero, &ok, Label::kNear);
945 Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
949 if (SmiValuesAre32Bits()) {
950 DCHECK(kSmiShift % kBitsPerByte == 0);
951 movl(Operand(dst, kSmiShift / kBitsPerByte), src);
953 DCHECK(SmiValuesAre31Bits());
954 Integer32ToSmi(kScratchRegister, src);
955 movp(dst, kScratchRegister);
960 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
964 addl(dst, Immediate(constant));
966 leal(dst, Operand(src, constant));
968 shlp(dst, Immediate(kSmiShift));
972 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
973 STATIC_ASSERT(kSmiTag == 0);
978 if (SmiValuesAre32Bits()) {
979 shrp(dst, Immediate(kSmiShift));
981 DCHECK(SmiValuesAre31Bits());
982 sarl(dst, Immediate(kSmiShift));
987 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
988 if (SmiValuesAre32Bits()) {
989 movl(dst, Operand(src, kSmiShift / kBitsPerByte));
991 DCHECK(SmiValuesAre31Bits());
993 sarl(dst, Immediate(kSmiShift));
998 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
999 STATIC_ASSERT(kSmiTag == 0);
1003 sarp(dst, Immediate(kSmiShift));
1004 if (kPointerSize == kInt32Size) {
1005 // Sign extend to 64-bit.
1011 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
1012 if (SmiValuesAre32Bits()) {
1013 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
1015 DCHECK(SmiValuesAre31Bits());
1017 SmiToInteger64(dst, dst);
1022 void MacroAssembler::SmiTest(Register src) {
1028 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1035 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
1041 void MacroAssembler::Cmp(Register dst, Smi* src) {
1042 DCHECK(!dst.is(kScratchRegister));
1043 if (src->value() == 0) {
1046 Register constant_reg = GetSmiConstant(src);
1047 cmpp(dst, constant_reg);
1052 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
1059 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
1066 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
1068 if (SmiValuesAre32Bits()) {
1069 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1071 DCHECK(SmiValuesAre31Bits());
1072 cmpl(dst, Immediate(src));
1077 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
1078 // The Operand cannot use the smi register.
1079 Register smi_reg = GetSmiConstant(src);
1080 DCHECK(!dst.AddressUsesRegister(smi_reg));
1085 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
1086 if (SmiValuesAre32Bits()) {
1087 cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
1089 DCHECK(SmiValuesAre31Bits());
1090 SmiToInteger32(kScratchRegister, dst);
1091 cmpl(kScratchRegister, src);
1096 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
1102 SmiToInteger64(dst, src);
1108 if (power < kSmiShift) {
1109 sarp(dst, Immediate(kSmiShift - power));
1110 } else if (power > kSmiShift) {
1111 shlp(dst, Immediate(power - kSmiShift));
1116 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
1119 DCHECK((0 <= power) && (power < 32));
1121 shrp(dst, Immediate(power + kSmiShift));
1123 UNIMPLEMENTED(); // Not used.
1128 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
1130 Label::Distance near_jump) {
1131 if (dst.is(src1) || dst.is(src2)) {
1132 DCHECK(!src1.is(kScratchRegister));
1133 DCHECK(!src2.is(kScratchRegister));
1134 movp(kScratchRegister, src1);
1135 orp(kScratchRegister, src2);
1136 JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
1137 movp(dst, kScratchRegister);
1141 JumpIfNotSmi(dst, on_not_smis, near_jump);
1146 Condition MacroAssembler::CheckSmi(Register src) {
1147 STATIC_ASSERT(kSmiTag == 0);
1148 testb(src, Immediate(kSmiTagMask));
1153 Condition MacroAssembler::CheckSmi(const Operand& src) {
1154 STATIC_ASSERT(kSmiTag == 0);
1155 testb(src, Immediate(kSmiTagMask));
1160 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
1161 STATIC_ASSERT(kSmiTag == 0);
1162 // Test that both bits of the mask 0x8000000000000001 are zero.
1163 movp(kScratchRegister, src);
1164 rolp(kScratchRegister, Immediate(1));
1165 testb(kScratchRegister, Immediate(3));
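
// Why the rotate works (illustrative): a non-negative smi has both the sign
// bit (bit 63) and the tag bit (bit 0) clear. rolp by 1 moves the sign bit
// into bit 0 and the tag bit into bit 1, so a single testb against 0x3 is
// zero exactly for non-negative smis.
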
1170 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
1171 if (first.is(second)) {
1172 return CheckSmi(first);
1174 STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
1175 if (SmiValuesAre32Bits()) {
1176 leal(kScratchRegister, Operand(first, second, times_1, 0));
1177 testb(kScratchRegister, Immediate(0x03));
1179 DCHECK(SmiValuesAre31Bits());
1180 movl(kScratchRegister, first);
1181 orl(kScratchRegister, second);
1182 testb(kScratchRegister, Immediate(kSmiTagMask));
1188 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
1190 if (first.is(second)) {
1191 return CheckNonNegativeSmi(first);
1193 movp(kScratchRegister, first);
1194 orp(kScratchRegister, second);
1195 rolp(kScratchRegister, Immediate(1));
1196 testl(kScratchRegister, Immediate(3));
1201 Condition MacroAssembler::CheckEitherSmi(Register first,
1204 if (first.is(second)) {
1205 return CheckSmi(first);
1207 if (scratch.is(second)) {
1208 andl(scratch, first);
1210 if (!scratch.is(first)) {
1211 movl(scratch, first);
1213 andl(scratch, second);
1215 testb(scratch, Immediate(kSmiTagMask));
1220 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1221 if (SmiValuesAre32Bits()) {
1222 // A 32-bit integer value can always be converted to a smi.
1225 DCHECK(SmiValuesAre31Bits());
1226 cmpl(src, Immediate(0xc0000000));
1232 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1233 if (SmiValuesAre32Bits()) {
1234 // An unsigned 32-bit integer value is valid as long as the high bit is not set.
1239 DCHECK(SmiValuesAre31Bits());
1240 testl(src, Immediate(0xc0000000));
1246 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1248 andl(dst, Immediate(kSmiTagMask));
1250 movl(dst, Immediate(kSmiTagMask));
1256 void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1257 if (!(src.AddressUsesRegister(dst))) {
1258 movl(dst, Immediate(kSmiTagMask));
1262 andl(dst, Immediate(kSmiTagMask));
1267 void MacroAssembler::JumpIfValidSmiValue(Register src,
1269 Label::Distance near_jump) {
1270 Condition is_valid = CheckInteger32ValidSmiValue(src);
1271 j(is_valid, on_valid, near_jump);
1275 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1277 Label::Distance near_jump) {
1278 Condition is_valid = CheckInteger32ValidSmiValue(src);
1279 j(NegateCondition(is_valid), on_invalid, near_jump);
1283 void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
1285 Label::Distance near_jump) {
1286 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1287 j(is_valid, on_valid, near_jump);
1291 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1293 Label::Distance near_jump) {
1294 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1295 j(NegateCondition(is_valid), on_invalid, near_jump);
1299 void MacroAssembler::JumpIfSmi(Register src,
1301 Label::Distance near_jump) {
1302 Condition smi = CheckSmi(src);
1303 j(smi, on_smi, near_jump);
1307 void MacroAssembler::JumpIfNotSmi(Register src,
1309 Label::Distance near_jump) {
1310 Condition smi = CheckSmi(src);
1311 j(NegateCondition(smi), on_not_smi, near_jump);
1315 void MacroAssembler::JumpUnlessNonNegativeSmi(
1316 Register src, Label* on_not_smi_or_negative,
1317 Label::Distance near_jump) {
1318 Condition non_negative_smi = CheckNonNegativeSmi(src);
1319 j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1323 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1326 Label::Distance near_jump) {
1327 SmiCompare(src, constant);
1328 j(equal, on_equals, near_jump);
1332 void MacroAssembler::JumpIfNotBothSmi(Register src1,
1334 Label* on_not_both_smi,
1335 Label::Distance near_jump) {
1336 Condition both_smi = CheckBothSmi(src1, src2);
1337 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1341 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1343 Label* on_not_both_smi,
1344 Label::Distance near_jump) {
1345 Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1346 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1350 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1351 if (constant->value() == 0) {
1356 } else if (dst.is(src)) {
1357 DCHECK(!dst.is(kScratchRegister));
1358 Register constant_reg = GetSmiConstant(constant);
1359 addp(dst, constant_reg);
1361 LoadSmiConstant(dst, constant);
1367 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1368 if (constant->value() != 0) {
1369 if (SmiValuesAre32Bits()) {
1370 addl(Operand(dst, kSmiShift / kBitsPerByte),
1371 Immediate(constant->value()));
1373 DCHECK(SmiValuesAre31Bits());
1374 addp(dst, Immediate(constant));
1380 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant,
1381 SmiOperationConstraints constraints,
1382 Label* bailout_label,
1383 Label::Distance near_jump) {
1384 if (constant->value() == 0) {
1388 } else if (dst.is(src)) {
1389 DCHECK(!dst.is(kScratchRegister));
1390 LoadSmiConstant(kScratchRegister, constant);
1391 addp(dst, kScratchRegister);
1392 if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
1393 j(no_overflow, bailout_label, near_jump);
1394 DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1395 subp(dst, kScratchRegister);
1396 } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
1397 if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
1399 j(no_overflow, &done, Label::kNear);
1400 subp(dst, kScratchRegister);
1401 jmp(bailout_label, near_jump);
1404 // Bail out on overflow; no need to preserve src.
1405 j(overflow, bailout_label, near_jump);
1411 DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1412 DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
1413 LoadSmiConstant(dst, constant);
1415 j(overflow, bailout_label, near_jump);
1420 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1421 if (constant->value() == 0) {
1425 } else if (dst.is(src)) {
1426 DCHECK(!dst.is(kScratchRegister));
1427 Register constant_reg = GetSmiConstant(constant);
1428 subp(dst, constant_reg);
1430 if (constant->value() == Smi::kMinValue) {
1431 LoadSmiConstant(dst, constant);
1432 // Adding and subtracting the min-value gives the same result; it only
1433 // differs on the overflow bit, which we don't check here.
1436 // Subtract by adding the negation.
1437 LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
1444 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant,
1445 SmiOperationConstraints constraints,
1446 Label* bailout_label,
1447 Label::Distance near_jump) {
1448 if (constant->value() == 0) {
1452 } else if (dst.is(src)) {
1453 DCHECK(!dst.is(kScratchRegister));
1454 LoadSmiConstant(kScratchRegister, constant);
1455 subp(dst, kScratchRegister);
1456 if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
1457 j(no_overflow, bailout_label, near_jump);
1458 DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1459 addp(dst, kScratchRegister);
1460 } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
1461 if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
1463 j(no_overflow, &done, Label::kNear);
1464 addp(dst, kScratchRegister);
1465 jmp(bailout_label, near_jump);
1468 // Bail out on overflow; no need to preserve src.
1469 j(overflow, bailout_label, near_jump);
1475 DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1476 DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
1477 if (constant->value() == Smi::kMinValue) {
1478 DCHECK(!dst.is(kScratchRegister));
1480 LoadSmiConstant(kScratchRegister, constant);
1481 subp(dst, kScratchRegister);
1482 j(overflow, bailout_label, near_jump);
1484 // Subtract by adding the negation.
1485 LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1487 j(overflow, bailout_label, near_jump);
1493 void MacroAssembler::SmiNeg(Register dst,
1495 Label* on_smi_result,
1496 Label::Distance near_jump) {
1498 DCHECK(!dst.is(kScratchRegister));
1499 movp(kScratchRegister, src);
1500 negp(dst); // Low 32 bits are retained as zero by negation.
1501 // Test if result is zero or Smi::kMinValue.
1502 cmpp(dst, kScratchRegister);
1503 j(not_equal, on_smi_result, near_jump);
1504 movp(src, kScratchRegister);
1509 // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1510 j(not_equal, on_smi_result, near_jump);
1516 static void SmiAddHelper(MacroAssembler* masm,
1520 Label* on_not_smi_result,
1521 Label::Distance near_jump) {
1524 masm->addp(dst, src2);
1525 masm->j(no_overflow, &done, Label::kNear);
1527 masm->subp(dst, src2);
1528 masm->jmp(on_not_smi_result, near_jump);
1531 masm->movp(dst, src1);
1532 masm->addp(dst, src2);
1533 masm->j(overflow, on_not_smi_result, near_jump);
1538 void MacroAssembler::SmiAdd(Register dst,
1541 Label* on_not_smi_result,
1542 Label::Distance near_jump) {
1543 DCHECK_NOT_NULL(on_not_smi_result);
1544 DCHECK(!dst.is(src2));
1545 SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1549 void MacroAssembler::SmiAdd(Register dst,
1551 const Operand& src2,
1552 Label* on_not_smi_result,
1553 Label::Distance near_jump) {
1554 DCHECK_NOT_NULL(on_not_smi_result);
1555 DCHECK(!src2.AddressUsesRegister(dst));
1556 SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1560 void MacroAssembler::SmiAdd(Register dst,
1563 // No overflow checking. Use only when it's known that
1564 // overflowing is impossible.
1565 if (!dst.is(src1)) {
1566 if (emit_debug_code()) {
1567 movp(kScratchRegister, src1);
1568 addp(kScratchRegister, src2);
1569 Check(no_overflow, kSmiAdditionOverflow);
1571 leap(dst, Operand(src1, src2, times_1, 0));
1574 Assert(no_overflow, kSmiAdditionOverflow);
1580 static void SmiSubHelper(MacroAssembler* masm,
1584 Label* on_not_smi_result,
1585 Label::Distance near_jump) {
1588 masm->subp(dst, src2);
1589 masm->j(no_overflow, &done, Label::kNear);
1591 masm->addp(dst, src2);
1592 masm->jmp(on_not_smi_result, near_jump);
1595 masm->movp(dst, src1);
1596 masm->subp(dst, src2);
1597 masm->j(overflow, on_not_smi_result, near_jump);
1602 void MacroAssembler::SmiSub(Register dst,
1605 Label* on_not_smi_result,
1606 Label::Distance near_jump) {
1607 DCHECK_NOT_NULL(on_not_smi_result);
1608 DCHECK(!dst.is(src2));
1609 SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1613 void MacroAssembler::SmiSub(Register dst,
1615 const Operand& src2,
1616 Label* on_not_smi_result,
1617 Label::Distance near_jump) {
1618 DCHECK_NOT_NULL(on_not_smi_result);
1619 DCHECK(!src2.AddressUsesRegister(dst));
1620 SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1625 static void SmiSubNoOverflowHelper(MacroAssembler* masm,
1629 // No overflow checking. Use only when it's known that
1630 // overflowing is impossible (e.g., subtracting two positive smis).
1631 if (!dst.is(src1)) {
1632 masm->movp(dst, src1);
1634 masm->subp(dst, src2);
1635 masm->Assert(no_overflow, kSmiSubtractionOverflow);
1639 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1640 DCHECK(!dst.is(src2));
1641 SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
1645 void MacroAssembler::SmiSub(Register dst,
1647 const Operand& src2) {
1648 SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
1652 void MacroAssembler::SmiMul(Register dst,
1655 Label* on_not_smi_result,
1656 Label::Distance near_jump) {
1657 DCHECK(!dst.is(src2));
1658 DCHECK(!dst.is(kScratchRegister));
1659 DCHECK(!src1.is(kScratchRegister));
1660 DCHECK(!src2.is(kScratchRegister));
1663 Label failure, zero_correct_result;
1664 movp(kScratchRegister, src1); // Create backup for later testing.
1665 SmiToInteger64(dst, src1);
1667 j(overflow, &failure, Label::kNear);
1669 // Check for negative zero result. If product is zero, and one
1670 // argument is negative, go to slow case.
1671 Label correct_result;
1673 j(not_zero, &correct_result, Label::kNear);
1675 movp(dst, kScratchRegister);
1677 // Result was positive zero.
1678 j(positive, &zero_correct_result, Label::kNear);
1680 bind(&failure); // Reused failure exit, restores src1.
1681 movp(src1, kScratchRegister);
1682 jmp(on_not_smi_result, near_jump);
1684 bind(&zero_correct_result);
1687 bind(&correct_result);
1689 SmiToInteger64(dst, src1);
1691 j(overflow, on_not_smi_result, near_jump);
1692 // Check for negative zero result. If product is zero, and one
1693 // argument is negative, go to slow case.
1694 Label correct_result;
1696 j(not_zero, &correct_result, Label::kNear);
1697 // One of src1 and src2 is zero; check whether the other is negative.
1699 movp(kScratchRegister, src1);
1700 xorp(kScratchRegister, src2);
1701 j(negative, on_not_smi_result, near_jump);
1702 bind(&correct_result);
1707 void MacroAssembler::SmiDiv(Register dst,
1710 Label* on_not_smi_result,
1711 Label::Distance near_jump) {
1712 DCHECK(!src1.is(kScratchRegister));
1713 DCHECK(!src2.is(kScratchRegister));
1714 DCHECK(!dst.is(kScratchRegister));
1715 DCHECK(!src2.is(rax));
1716 DCHECK(!src2.is(rdx));
1717 DCHECK(!src1.is(rdx));
1719 // Check for 0 divisor (result is +/-Infinity).
1721 j(zero, on_not_smi_result, near_jump);
1724 movp(kScratchRegister, src1);
1726 SmiToInteger32(rax, src1);
1727 // We need to rule out dividing Smi::kMinValue by -1, since that would
1728 // overflow in idiv and raise an exception.
1729 // We combine this with negative zero test (negative zero only happens
1730 // when dividing zero by a negative number).
1732 // We overshoot a little and go to slow case if we divide min-value
1733 // by any negative value, not just -1.
1735 testl(rax, Immediate(~Smi::kMinValue));
1736 j(not_zero, &safe_div, Label::kNear);
1739 j(positive, &safe_div, Label::kNear);
1740 movp(src1, kScratchRegister);
1741 jmp(on_not_smi_result, near_jump);
1743 j(negative, on_not_smi_result, near_jump);
1747 SmiToInteger32(src2, src2);
1748 // Sign extend src1 into edx:eax.
1751 Integer32ToSmi(src2, src2);
1752 // Check that the remainder is zero.
1756 j(zero, &smi_result, Label::kNear);
1757 movp(src1, kScratchRegister);
1758 jmp(on_not_smi_result, near_jump);
1761 j(not_zero, on_not_smi_result, near_jump);
1763 if (!dst.is(src1) && src1.is(rax)) {
1764 movp(src1, kScratchRegister);
1766 Integer32ToSmi(dst, rax);
1770 void MacroAssembler::SmiMod(Register dst,
1773 Label* on_not_smi_result,
1774 Label::Distance near_jump) {
1775 DCHECK(!dst.is(kScratchRegister));
1776 DCHECK(!src1.is(kScratchRegister));
1777 DCHECK(!src2.is(kScratchRegister));
1778 DCHECK(!src2.is(rax));
1779 DCHECK(!src2.is(rdx));
1780 DCHECK(!src1.is(rdx));
1781 DCHECK(!src1.is(src2));
1784 j(zero, on_not_smi_result, near_jump);
1787 movp(kScratchRegister, src1);
1789 SmiToInteger32(rax, src1);
1790 SmiToInteger32(src2, src2);
1792 // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
1794 cmpl(rax, Immediate(Smi::kMinValue));
1795 j(not_equal, &safe_div, Label::kNear);
1796 cmpl(src2, Immediate(-1));
1797 j(not_equal, &safe_div, Label::kNear);
1798 // Retag inputs and go to the slow case.
1799 Integer32ToSmi(src2, src2);
1801 movp(src1, kScratchRegister);
1803 jmp(on_not_smi_result, near_jump);
1806 // Sign extend eax into edx:eax.
1809 // Restore smi tags on inputs.
1810 Integer32ToSmi(src2, src2);
1812 movp(src1, kScratchRegister);
1814 // Check for a negative zero result. If the result is zero, and the
1815 // dividend is negative, go slow to return a floating point negative zero.
1818 j(not_zero, &smi_result, Label::kNear);
1820 j(negative, on_not_smi_result, near_jump);
1822 Integer32ToSmi(dst, rdx);
1826 void MacroAssembler::SmiNot(Register dst, Register src) {
1827 DCHECK(!dst.is(kScratchRegister));
1828 DCHECK(!src.is(kScratchRegister));
1829 if (SmiValuesAre32Bits()) {
1830 // Set tag and padding bits before negating, so that they are zero afterwards.
1832 movl(kScratchRegister, Immediate(~0));
1834 DCHECK(SmiValuesAre31Bits());
1835 movl(kScratchRegister, Immediate(1));
1838 xorp(dst, kScratchRegister);
1840 leap(dst, Operand(src, kScratchRegister, times_1, 0));
1846 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
1847 DCHECK(!dst.is(src2));
1848 if (!dst.is(src1)) {
1855 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
1856 if (constant->value() == 0) {
1858 } else if (dst.is(src)) {
1859 DCHECK(!dst.is(kScratchRegister));
1860 Register constant_reg = GetSmiConstant(constant);
1861 andp(dst, constant_reg);
1863 LoadSmiConstant(dst, constant);
1869 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
1870 if (!dst.is(src1)) {
1871 DCHECK(!src1.is(src2));
1878 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
1880 DCHECK(!dst.is(kScratchRegister));
1881 Register constant_reg = GetSmiConstant(constant);
1882 orp(dst, constant_reg);
1884 LoadSmiConstant(dst, constant);
1890 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
1891 if (!dst.is(src1)) {
1892 DCHECK(!src1.is(src2));
1899 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
1901 DCHECK(!dst.is(kScratchRegister));
1902 Register constant_reg = GetSmiConstant(constant);
1903 xorp(dst, constant_reg);
1905 LoadSmiConstant(dst, constant);
1911 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
1914 DCHECK(is_uint5(shift_value));
1915 if (shift_value > 0) {
1917 sarp(dst, Immediate(shift_value + kSmiShift));
1918 shlp(dst, Immediate(kSmiShift));
1920 UNIMPLEMENTED(); // Not used.
1926 void MacroAssembler::SmiShiftLeftConstant(Register dst,
1929 Label* on_not_smi_result,
1930 Label::Distance near_jump) {
1931 if (SmiValuesAre32Bits()) {
1935 if (shift_value > 0) {
1936 // Shift amount is masked to the lower 5 bits, not six as the 64-bit shl would use.
1937 shlq(dst, Immediate(shift_value & 0x1f));
1940 DCHECK(SmiValuesAre31Bits());
1942 UNIMPLEMENTED(); // Not used.
1944 SmiToInteger32(dst, src);
1945 shll(dst, Immediate(shift_value));
1946 JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
1947 Integer32ToSmi(dst, dst);
1953 void MacroAssembler::SmiShiftLogicalRightConstant(
1954 Register dst, Register src, int shift_value,
1955 Label* on_not_smi_result, Label::Distance near_jump) {
1956 // Logical right shift interprets its result as an *unsigned* number.
1958 UNIMPLEMENTED(); // Not used.
1960 if (shift_value == 0) {
1962 j(negative, on_not_smi_result, near_jump);
1964 if (SmiValuesAre32Bits()) {
1966 shrp(dst, Immediate(shift_value + kSmiShift));
1967 shlp(dst, Immediate(kSmiShift));
1969 DCHECK(SmiValuesAre31Bits());
1970 SmiToInteger32(dst, src);
1971 shrp(dst, Immediate(shift_value));
1972 JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
1973 Integer32ToSmi(dst, dst);
1979 void MacroAssembler::SmiShiftLeft(Register dst,
1982 Label* on_not_smi_result,
1983 Label::Distance near_jump) {
1984 if (SmiValuesAre32Bits()) {
1985 DCHECK(!dst.is(rcx));
1986 if (!dst.is(src1)) {
1989 // Untag shift amount.
1990 SmiToInteger32(rcx, src2);
1991 // Shift amount is masked to the lower 5 bits, not six as the 64-bit shl would use.
1992 andp(rcx, Immediate(0x1f));
1995 DCHECK(SmiValuesAre31Bits());
1996 DCHECK(!dst.is(kScratchRegister));
1997 DCHECK(!src1.is(kScratchRegister));
1998 DCHECK(!src2.is(kScratchRegister));
1999 DCHECK(!dst.is(src2));
2000 DCHECK(!dst.is(rcx));
2002 if (src1.is(rcx) || src2.is(rcx)) {
2003 movq(kScratchRegister, rcx);
2006 UNIMPLEMENTED(); // Not used.
2009 SmiToInteger32(dst, src1);
2010 SmiToInteger32(rcx, src2);
2012 JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
2013 // Since neither src1 nor src2 can be dst, we do not need to restore them here.
2015 if (src1.is(rcx) || src2.is(rcx)) {
2017 movq(src1, kScratchRegister);
2019 movq(src2, kScratchRegister);
2022 jmp(on_not_smi_result, near_jump);
2023 bind(&valid_result);
2024 Integer32ToSmi(dst, dst);
2030 void MacroAssembler::SmiShiftLogicalRight(Register dst,
2033 Label* on_not_smi_result,
2034 Label::Distance near_jump) {
2035 DCHECK(!dst.is(kScratchRegister));
2036 DCHECK(!src1.is(kScratchRegister));
2037 DCHECK(!src2.is(kScratchRegister));
2038 DCHECK(!dst.is(src2));
2039 DCHECK(!dst.is(rcx));
2040 if (src1.is(rcx) || src2.is(rcx)) {
2041 movq(kScratchRegister, rcx);
2044 UNIMPLEMENTED(); // Not used.
2047 SmiToInteger32(dst, src1);
2048 SmiToInteger32(rcx, src2);
2050 JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
2051 // Since neither src1 nor src2 can be dst, we do not need to restore them here.
2053 if (src1.is(rcx) || src2.is(rcx)) {
2055 movq(src1, kScratchRegister);
2057 movq(src2, kScratchRegister);
2060 jmp(on_not_smi_result, near_jump);
2061 bind(&valid_result);
2062 Integer32ToSmi(dst, dst);
2067 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2070 DCHECK(!dst.is(kScratchRegister));
2071 DCHECK(!src1.is(kScratchRegister));
2072 DCHECK(!src2.is(kScratchRegister));
2073 DCHECK(!dst.is(rcx));
2075 SmiToInteger32(rcx, src2);
2076 if (!dst.is(src1)) {
2079 SmiToInteger32(dst, dst);
2081 Integer32ToSmi(dst, dst);
2085 void MacroAssembler::SelectNonSmi(Register dst,
2089 Label::Distance near_jump) {
2090 DCHECK(!dst.is(kScratchRegister));
2091 DCHECK(!src1.is(kScratchRegister));
2092 DCHECK(!src2.is(kScratchRegister));
2093 DCHECK(!dst.is(src1));
2094 DCHECK(!dst.is(src2));
2095 // The operands must not both be smis.
2097 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2098 Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
2100 STATIC_ASSERT(kSmiTag == 0);
2101 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
2102 movl(kScratchRegister, Immediate(kSmiTagMask));
2103 andp(kScratchRegister, src1);
2104 testl(kScratchRegister, src2);
2105 // If non-zero, both operands are heap objects, i.e. neither is a smi.
2106 j(not_zero, on_not_smis, near_jump);
2108 // Exactly one operand is a smi.
2109 DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
2110 // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
2111 subp(kScratchRegister, Immediate(1));
2112 // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
2115 andp(dst, kScratchRegister);
2116 // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2118 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
2122 SmiIndex MacroAssembler::SmiToIndex(Register dst,
2125 if (SmiValuesAre32Bits()) {
2126 DCHECK(is_uint6(shift));
2127 // There is a possible optimization if shift is in the range 60-63, but that
2128 // will (and must) never happen.
2132 if (shift < kSmiShift) {
2133 sarp(dst, Immediate(kSmiShift - shift));
2135 shlp(dst, Immediate(shift - kSmiShift));
2137 return SmiIndex(dst, times_1);
2139 DCHECK(SmiValuesAre31Bits());
2140 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2144 // We have to sign-extend the index register to 64 bits as the SMI might be negative.
2147 if (shift == times_1) {
2148 sarq(dst, Immediate(kSmiShift));
2149 return SmiIndex(dst, times_1);
2151 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
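
// Illustrative example (not from the original file): with 32-bit smi values,
// untagging and scaling are fused into one shift. Indexing a FixedArray by a
// smi index with shift == kPointerSizeLog2 (3) becomes roughly
//
//   sarp(index, Immediate(32 - 3));   // index = untagged_value * 8
//   movp(result, FieldOperand(array, index, times_1, FixedArray::kHeaderSize));
//
// avoiding a separate SmiToInteger64 followed by scaled addressing.
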
2156 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2159 if (SmiValuesAre32Bits()) {
2160 // Register src holds a positive smi.
2161 DCHECK(is_uint6(shift));
2166 if (shift < kSmiShift) {
2167 sarp(dst, Immediate(kSmiShift - shift));
2169 shlp(dst, Immediate(shift - kSmiShift));
2171 return SmiIndex(dst, times_1);
2173 DCHECK(SmiValuesAre31Bits());
2174 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2179 if (shift == times_1) {
2180 sarq(dst, Immediate(kSmiShift));
2181 return SmiIndex(dst, times_1);
2183 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2188 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2189 if (SmiValuesAre32Bits()) {
2190 DCHECK_EQ(0, kSmiShift % kBitsPerByte);
2191 addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2193 DCHECK(SmiValuesAre31Bits());
2194 SmiToInteger32(kScratchRegister, src);
2195 addl(dst, kScratchRegister);
2200 void MacroAssembler::Push(Smi* source) {
2201 intptr_t smi = reinterpret_cast<intptr_t>(source);
2202 if (is_int32(smi)) {
2203 Push(Immediate(static_cast<int32_t>(smi)));
2205 Register constant = GetSmiConstant(source);
2211 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
2212 DCHECK(!src.is(scratch));
2215 shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2216 shlp(src, Immediate(kSmiShift));
2219 shlp(scratch, Immediate(kSmiShift));
2224 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
2225 DCHECK(!dst.is(scratch));
2228 shrp(scratch, Immediate(kSmiShift));
2230 shrp(dst, Immediate(kSmiShift));
2232 shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2237 void MacroAssembler::Test(const Operand& src, Smi* source) {
2238 if (SmiValuesAre32Bits()) {
2239 testl(Operand(src, kIntSize), Immediate(source->value()));
2241 DCHECK(SmiValuesAre31Bits());
2242 testl(src, Immediate(source));
2247 // ----------------------------------------------------------------------------
2250 void MacroAssembler::LookupNumberStringCache(Register object,
2255 // Register usage: result is used as a temporary.
2256 Register number_string_cache = result;
2257 Register mask = scratch1;
2258 Register scratch = scratch2;
2260 // Load the number string cache.
2261 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2263 // Make the hash mask from the length of the number string cache. It
2264 // contains two elements (number and string) for each cache entry.
2266 mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2267 shrl(mask, Immediate(1));
2268 subp(mask, Immediate(1)); // Make mask.
2270 // Calculate the entry in the number string cache. The hash value in the
2271 // number string cache for smis is just the smi value, and the hash for
2272 // doubles is the xor of the upper and lower words. See
2273 // Heap::GetNumberStringCache.
2275 Label load_result_from_cache;
2276 JumpIfSmi(object, &is_smi);
2278 isolate()->factory()->heap_number_map(),
2282 STATIC_ASSERT(8 == kDoubleSize);
2283 movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2284 xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2285 andp(scratch, mask);
2286 // Each entry in the string cache consists of two pointer-sized fields,
2287 // but the times_twice_pointer_size (multiply by 16) scale factor is not
2288 // supported by x64 addressing modes.
2289 // So we have to premultiply the entry index before the lookup.
2290 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2292 Register index = scratch;
2293 Register probe = mask;
2295 FieldOperand(number_string_cache,
2298 FixedArray::kHeaderSize));
2299 JumpIfSmi(probe, not_found);
2300 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2301 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
2302 j(parity_even, not_found); // Bail out if NaN is involved.
2303 j(not_equal, not_found); // The cache did not contain this value.
2304 jmp(&load_result_from_cache);
2307 SmiToInteger32(scratch, object);
2308 andp(scratch, mask);
2309 // Each entry in the string cache consists of two pointer-sized fields,
2310 // but the times_twice_pointer_size (multiply by 16) scale factor is not
2311 // supported by x64 addressing modes.
2312 // So we have to premultiply the entry index before the lookup.
2313 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2315 // Check if the entry is the smi we are looking for.
2317 FieldOperand(number_string_cache,
2320 FixedArray::kHeaderSize));
2321 j(not_equal, not_found);
2323 // Get the result from the cache.
2324 bind(&load_result_from_cache);
2326 FieldOperand(number_string_cache,
2329 FixedArray::kHeaderSize + kPointerSize));
2330 IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
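
// Cache layout sketch (illustrative): the number string cache is a FixedArray
// of [number, string] pairs, so
//
//   mask  = (cache->length() / 2) - 1;           // entries are pairs
//   entry = hash & mask;                         // smi value, or xor of halves
//   key   = cache[entry * 2];  value = cache[entry * 2 + 1];
//
// The premultiplication by 2 * kPointerSize above exists because a times_16
// scale factor cannot be encoded in an x64 addressing mode.
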
2334 void MacroAssembler::JumpIfNotString(Register object,
2335 Register object_map,
2337 Label::Distance near_jump) {
2338 Condition is_smi = CheckSmi(object);
2339 j(is_smi, not_string, near_jump);
2340 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2341 j(above_equal, not_string, near_jump);
2345 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
2346 Register first_object, Register second_object, Register scratch1,
2347 Register scratch2, Label* on_fail, Label::Distance near_jump) {
2348 // Check that both objects are not smis.
2349 Condition either_smi = CheckEitherSmi(first_object, second_object);
2350 j(either_smi, on_fail, near_jump);
2352 // Load instance type for both strings.
2353 movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2354 movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2355 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2356 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2358 // Check that both are flat one-byte strings.
2359 DCHECK(kNotStringTag != 0);
2360 const int kFlatOneByteStringMask =
2361 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2362 const int kFlatOneByteStringTag =
2363 kStringTag | kOneByteStringTag | kSeqStringTag;
2365 andl(scratch1, Immediate(kFlatOneByteStringMask));
2366 andl(scratch2, Immediate(kFlatOneByteStringMask));
2367 // Interleave the bits to check both scratch1 and scratch2 in one test.
2368 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2369 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2371 Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2372 j(not_equal, on_fail, near_jump);
2376 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
2377 Register instance_type, Register scratch, Label* failure,
2378 Label::Distance near_jump) {
2379 if (!scratch.is(instance_type)) {
2380 movl(scratch, instance_type);
2383 const int kFlatOneByteStringMask =
2384 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2386 andl(scratch, Immediate(kFlatOneByteStringMask));
2387 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2388 j(not_equal, failure, near_jump);
2392 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2393 Register first_object_instance_type, Register second_object_instance_type,
2394 Register scratch1, Register scratch2, Label* on_fail,
2395 Label::Distance near_jump) {
2396 // Load instance type for both strings.
2397 movp(scratch1, first_object_instance_type);
2398 movp(scratch2, second_object_instance_type);
2400 // Check that both are flat one-byte strings.
2401 DCHECK(kNotStringTag != 0);
2402 const int kFlatOneByteStringMask =
2403 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2404 const int kFlatOneByteStringTag =
2405 kStringTag | kOneByteStringTag | kSeqStringTag;
2407 andl(scratch1, Immediate(kFlatOneByteStringMask));
2408 andl(scratch2, Immediate(kFlatOneByteStringMask));
2409 // Interleave the bits to check both scratch1 and scratch2 in one test.
2410 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2411 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2413 Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2414 j(not_equal, on_fail, near_jump);
2419 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2420 T operand_or_register,
2421 Label* not_unique_name,
2422 Label::Distance distance) {
2423 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
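// A unique name is either an internalized string (both the "not a string"
// and "not internalized" bits clear) or a symbol, which is checked below.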
2425 masm->testb(operand_or_register,
2426 Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2427 masm->j(zero, &succeed, Label::kNear);
2428 masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2429 masm->j(not_equal, not_unique_name, distance);
2431 masm->bind(&succeed);
2435 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
2436 Label* not_unique_name,
2437 Label::Distance distance) {
2438 JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2442 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2443 Label* not_unique_name,
2444 Label::Distance distance) {
2445 JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2449 void MacroAssembler::Move(Register dst, Register src) {
2456 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2457 AllowDeferredHandleDereference smi_check;
2458 if (source->IsSmi()) {
2459 Move(dst, Smi::cast(*source));
2461 MoveHeapObject(dst, source);
2466 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2467 AllowDeferredHandleDereference smi_check;
2468 if (source->IsSmi()) {
2469 Move(dst, Smi::cast(*source));
2471 MoveHeapObject(kScratchRegister, source);
2472 movp(dst, kScratchRegister);
2477 void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
2481 unsigned pop = base::bits::CountPopulation32(src);
2486 movl(kScratchRegister, Immediate(src));
2487 movq(dst, kScratchRegister);
2493 void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
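// nlz/ntz/pop classify the bit pattern so that cheap special cases (zero,
// or a solid run of ones against either end of the word, built with
// pcmpeqd plus a single shift) can avoid a round trip through a GPR.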
2497 unsigned nlz = base::bits::CountLeadingZeros64(src);
2498 unsigned ntz = base::bits::CountTrailingZeros64(src);
2499 unsigned pop = base::bits::CountPopulation64(src);
2503 } else if (pop + ntz == 64) {
2506 } else if (pop + nlz == 64) {
2510 uint32_t lower = static_cast<uint32_t>(src);
2511 uint32_t upper = static_cast<uint32_t>(src >> 32);
2515 movq(kScratchRegister, src);
2516 movq(dst, kScratchRegister);
2523 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2524 AllowDeferredHandleDereference smi_check;
2525 if (source->IsSmi()) {
2526 Cmp(dst, Smi::cast(*source));
2528 MoveHeapObject(kScratchRegister, source);
2529 cmpp(dst, kScratchRegister);
2534 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2535 AllowDeferredHandleDereference smi_check;
2536 if (source->IsSmi()) {
2537 Cmp(dst, Smi::cast(*source));
2539 MoveHeapObject(kScratchRegister, source);
2540 cmpp(dst, kScratchRegister);
2545 void MacroAssembler::Push(Handle<Object> source) {
2546 AllowDeferredHandleDereference smi_check;
2547 if (source->IsSmi()) {
2548 Push(Smi::cast(*source));
2550 MoveHeapObject(kScratchRegister, source);
2551 Push(kScratchRegister);
2556 void MacroAssembler::MoveHeapObject(Register result,
2557 Handle<Object> object) {
2558 AllowDeferredHandleDereference using_raw_address;
2559 DCHECK(object->IsHeapObject());
2560 if (isolate()->heap()->InNewSpace(*object)) {
2561 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2562 Move(result, cell, RelocInfo::CELL);
2563 movp(result, Operand(result, 0));
2565 Move(result, object, RelocInfo::EMBEDDED_OBJECT);
2570 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
2572 AllowDeferredHandleDereference embedding_raw_address;
2573 load_rax(cell.location(), RelocInfo::CELL);
2575 Move(dst, cell, RelocInfo::CELL);
2576 movp(dst, Operand(dst, 0));
2581 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2583 Move(scratch, cell, RelocInfo::EMBEDDED_OBJECT);
2584 cmpp(value, FieldOperand(scratch, WeakCell::kValueOffset));
2588 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2589 Move(value, cell, RelocInfo::EMBEDDED_OBJECT);
2590 movp(value, FieldOperand(value, WeakCell::kValueOffset));
2594 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2596 GetWeakValue(value, cell);
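// A cleared WeakCell holds the smi zero, so a smi here means the referent
// has been collected and we must take the miss path.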
2597 JumpIfSmi(value, miss);
2601 void MacroAssembler::Drop(int stack_elements) {
2602 if (stack_elements > 0) {
2603 addp(rsp, Immediate(stack_elements * kPointerSize));
2608 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
2610 DCHECK(stack_elements > 0);
2611 if (kPointerSize == kInt64Size && stack_elements == 1) {
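// With exactly one element to drop, popq into [rsp] moves the return
// address up one slot in a single instruction: the destination address is
// computed after rsp has been incremented.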
2612 popq(MemOperand(rsp, 0));
2616 PopReturnAddressTo(scratch);
2617 Drop(stack_elements);
2618 PushReturnAddressFrom(scratch);
2622 void MacroAssembler::Push(Register src) {
2623 if (kPointerSize == kInt64Size) {
2626 // x32 uses 64-bit push for rbp in the prologue.
2627 DCHECK(src.code() != rbp.code());
2628 leal(rsp, Operand(rsp, -4));
2629 movp(Operand(rsp, 0), src);
2634 void MacroAssembler::Push(const Operand& src) {
2635 if (kPointerSize == kInt64Size) {
2638 movp(kScratchRegister, src);
2639 leal(rsp, Operand(rsp, -4));
2640 movp(Operand(rsp, 0), kScratchRegister);
2645 void MacroAssembler::PushQuad(const Operand& src) {
2646 if (kPointerSize == kInt64Size) {
2649 movp(kScratchRegister, src);
2650 pushq(kScratchRegister);
2655 void MacroAssembler::Push(Immediate value) {
2656 if (kPointerSize == kInt64Size) {
2659 leal(rsp, Operand(rsp, -4));
2660 movp(Operand(rsp, 0), value);
2665 void MacroAssembler::PushImm32(int32_t imm32) {
2666 if (kPointerSize == kInt64Size) {
2669 leal(rsp, Operand(rsp, -4));
2670 movp(Operand(rsp, 0), Immediate(imm32));
2675 void MacroAssembler::Pop(Register dst) {
2676 if (kPointerSize == kInt64Size) {
2679 // x32 uses 64-bit pop for rbp in the epilogue.
2680 DCHECK(dst.code() != rbp.code());
2681 movp(dst, Operand(rsp, 0));
2682 leal(rsp, Operand(rsp, 4));
2687 void MacroAssembler::Pop(const Operand& dst) {
2688 if (kPointerSize == kInt64Size) {
2691 Register scratch = dst.AddressUsesRegister(kScratchRegister)
2692 ? kRootRegister : kScratchRegister;
2693 movp(scratch, Operand(rsp, 0));
2695 leal(rsp, Operand(rsp, 4));
2696 if (scratch.is(kRootRegister)) {
2697 // Restore kRootRegister.
2698 InitializeRootRegister();
2704 void MacroAssembler::PopQuad(const Operand& dst) {
2705 if (kPointerSize == kInt64Size) {
2708 popq(kScratchRegister);
2709 movp(dst, kScratchRegister);
2714 void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
2717 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
2718 offset <= SharedFunctionInfo::kSize &&
2719 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
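// These special fields are int pairs packed into pointer-sized words; the
// DCHECK above guarantees 'offset' addresses the upper half of a pair,
// which with 32-bit smis is where the payload sits, so a sign-extending
// 32-bit load reads the value directly. On x32 the field is a plain smi.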
2720 if (kPointerSize == kInt64Size) {
2721 movsxlq(dst, FieldOperand(base, offset));
2723 movp(dst, FieldOperand(base, offset));
2724 SmiToInteger32(dst, dst);
2729 void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
2732 DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
2733 offset <= SharedFunctionInfo::kSize &&
2734 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
2735 if (kPointerSize == kInt32Size) {
2736 // On x32, this field is represented as a smi.
2739 int byte_offset = bits / kBitsPerByte;
2740 int bit_in_byte = bits & (kBitsPerByte - 1);
2741 testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
2745 void MacroAssembler::Jump(ExternalReference ext) {
2746 LoadAddress(kScratchRegister, ext);
2747 jmp(kScratchRegister);
2751 void MacroAssembler::Jump(const Operand& op) {
2752 if (kPointerSize == kInt64Size) {
2755 movp(kScratchRegister, op);
2756 jmp(kScratchRegister);
2761 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
2762 Move(kScratchRegister, destination, rmode);
2763 jmp(kScratchRegister);
2767 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
2768 // TODO(X64): Inline this
2769 jmp(code_object, rmode);
2773 int MacroAssembler::CallSize(ExternalReference ext) {
2774 // Opcode for call kScratchRegister is: Rex.B FF D2 (three bytes).
2775 return LoadAddressSize(ext) +
2776 Assembler::kCallScratchRegisterInstructionLength;
2780 void MacroAssembler::Call(ExternalReference ext) {
2782 int end_position = pc_offset() + CallSize(ext);
2784 LoadAddress(kScratchRegister, ext);
2785 call(kScratchRegister);
2787 CHECK_EQ(end_position, pc_offset());
2792 void MacroAssembler::Call(const Operand& op) {
2793 if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
2796 movp(kScratchRegister, op);
2797 call(kScratchRegister);
2802 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
2804 int end_position = pc_offset() + CallSize(destination);
2806 Move(kScratchRegister, destination, rmode);
2807 call(kScratchRegister);
2809 CHECK_EQ(pc_offset(), end_position);
2814 void MacroAssembler::Call(Handle<Code> code_object,
2815 RelocInfo::Mode rmode,
2816 TypeFeedbackId ast_id) {
2818 int end_position = pc_offset() + CallSize(code_object);
2820 DCHECK(RelocInfo::IsCodeTarget(rmode) ||
2821 rmode == RelocInfo::CODE_AGE_SEQUENCE);
2822 call(code_object, rmode, ast_id);
2824 CHECK_EQ(end_position, pc_offset());
2829 void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
2835 if (CpuFeatures::IsSupported(SSE4_1)) {
2836 CpuFeatureScope sse_scope(this, SSE4_1);
2837 pextrd(dst, src, imm8);
2841 shrq(dst, Immediate(32));
2845 void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
2846 if (CpuFeatures::IsSupported(SSE4_1)) {
2847 CpuFeatureScope sse_scope(this, SSE4_1);
2848 pinsrd(dst, src, imm8);
2853 punpckldq(dst, xmm0);
2857 punpckldq(xmm0, dst);
2863 void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
2864 DCHECK(imm8 == 0 || imm8 == 1);
2865 if (CpuFeatures::IsSupported(SSE4_1)) {
2866 CpuFeatureScope sse_scope(this, SSE4_1);
2867 pinsrd(dst, src, imm8);
2872 punpckldq(dst, xmm0);
2876 punpckldq(xmm0, dst);
2882 void MacroAssembler::Lzcntl(Register dst, Register src) {
2883 if (CpuFeatures::IsSupported(LZCNT)) {
2884 CpuFeatureScope scope(this, LZCNT);
2890 j(not_zero, &not_zero_src, Label::kNear);
2891 Set(dst, 63); // 63^31 == 32
2892 bind(&not_zero_src);
2893 xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
2897 void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
2898 if (CpuFeatures::IsSupported(LZCNT)) {
2899 CpuFeatureScope scope(this, LZCNT);
2905 j(not_zero, &not_zero_src, Label::kNear);
2906 Set(dst, 63); // 63^31 == 32
2907 bind(&not_zero_src);
2908 xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
2912 void MacroAssembler::Pushad() {
2917 // Not pushing rsp or rbp.
2922 // r10 is kScratchRegister.
2925 // r13 is kRootRegister.
2928 STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
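// Reserve slots for the registers that are not pushed so the frame always
// spans kNumSafepointRegisters words and safepoint slot indices stay fixed.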
2929 // Use lea for symmetry with Popad.
2931 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2932 leap(rsp, Operand(rsp, -sp_delta));
2936 void MacroAssembler::Popad() {
2937 // Popad must not change the flags, so use lea instead of addq.
2939 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2940 leap(rsp, Operand(rsp, sp_delta));
2956 void MacroAssembler::Dropad() {
2957 addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
2961 // Order in which general registers are pushed by Pushad:
2962 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
2964 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
2984 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
2985 const Immediate& imm) {
2986 movp(SafepointRegisterSlot(dst), imm);
2990 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2991 movp(SafepointRegisterSlot(dst), src);
2995 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2996 movp(dst, SafepointRegisterSlot(src));
3000 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
3001 return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
3005 void MacroAssembler::PushStackHandler() {
3006 // Adjust this code if not the case.
3007 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3008 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3010 // Link the current handler as the next handler.
3011 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3012 Push(ExternalOperand(handler_address));
3014 // Set this new handler as the current one.
3015 movp(ExternalOperand(handler_address), rsp);
3019 void MacroAssembler::PopStackHandler() {
3020 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3021 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3022 Pop(ExternalOperand(handler_address));
3023 addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
3027 void MacroAssembler::Ret() {
3032 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
3033 if (is_uint16(bytes_dropped)) {
3036 PopReturnAddressTo(scratch);
3037 addp(rsp, Immediate(bytes_dropped));
3038 PushReturnAddressFrom(scratch);
3044 void MacroAssembler::FCmp() {
3050 void MacroAssembler::CmpObjectType(Register heap_object,
3053 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3054 CmpInstanceType(map, type);
3058 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3059 cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
3060 Immediate(static_cast<int8_t>(type)));
3064 void MacroAssembler::CheckFastElements(Register map,
3066 Label::Distance distance) {
3067 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3068 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3069 STATIC_ASSERT(FAST_ELEMENTS == 2);
3070 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3071 cmpb(FieldOperand(map, Map::kBitField2Offset),
3072 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3073 j(above, fail, distance);
3077 void MacroAssembler::CheckFastObjectElements(Register map,
3079 Label::Distance distance) {
3080 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3081 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3082 STATIC_ASSERT(FAST_ELEMENTS == 2);
3083 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3084 cmpb(FieldOperand(map, Map::kBitField2Offset),
3085 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3086 j(below_equal, fail, distance);
3087 cmpb(FieldOperand(map, Map::kBitField2Offset),
3088 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3089 j(above, fail, distance);
3093 void MacroAssembler::CheckFastSmiElements(Register map,
3095 Label::Distance distance) {
3096 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3097 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3098 cmpb(FieldOperand(map, Map::kBitField2Offset),
3099 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3100 j(above, fail, distance);
3104 void MacroAssembler::StoreNumberToDoubleElements(
3105 Register maybe_number,
3108 XMMRegister xmm_scratch,
3110 int elements_offset) {
3111 Label smi_value, done;
3113 JumpIfSmi(maybe_number, &smi_value, Label::kNear);
3115 CheckMap(maybe_number,
3116 isolate()->factory()->heap_number_map(),
3120 // Double value, turn potential sNaN into qNaN.
3121 Move(xmm_scratch, 1.0);
3122 mulsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
3123 jmp(&done, Label::kNear);
3126 // Value is a smi. Convert it to a double and store.
3127 // Preserve original value.
3128 SmiToInteger32(kScratchRegister, maybe_number);
3129 Cvtlsi2sd(xmm_scratch, kScratchRegister);
3131 movsd(FieldOperand(elements, index, times_8,
3132 FixedDoubleArray::kHeaderSize - elements_offset),
3137 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3138 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3142 void MacroAssembler::CheckMap(Register obj,
3145 SmiCheckType smi_check_type) {
3146 if (smi_check_type == DO_SMI_CHECK) {
3147 JumpIfSmi(obj, fail);
3150 CompareMap(obj, map);
3155 void MacroAssembler::ClampUint8(Register reg) {
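// Clamp a 32-bit value to [0, 255]: negative values become 0 and values
// above 255 become 255; values already in range are left untouched.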
3157 testl(reg, Immediate(0xFFFFFF00));
3158 j(zero, &done, Label::kNear);
3159 setcc(negative, reg); // 1 if negative, 0 if positive.
3160 decb(reg); // 0 if negative, 255 if positive.
3165 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3166 XMMRegister temp_xmm_reg,
3167 Register result_reg) {
3170 xorps(temp_xmm_reg, temp_xmm_reg);
3171 cvtsd2si(result_reg, input_reg);
3172 testl(result_reg, Immediate(0xFFFFFF00));
3173 j(zero, &done, Label::kNear);
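// cvtsd2si returns INT32_MIN (0x80000000) for NaN and out-of-range inputs;
// comparing that value with 1 is the only case that sets the overflow
// flag, so 'overflow' below signals a failed conversion.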
3174 cmpl(result_reg, Immediate(1));
3175 j(overflow, &conv_failure, Label::kNear);
3176 movl(result_reg, Immediate(0));
3177 setcc(sign, result_reg);
3178 subl(result_reg, Immediate(1));
3179 andl(result_reg, Immediate(255));
3180 jmp(&done, Label::kNear);
3181 bind(&conv_failure);
3183 ucomisd(input_reg, temp_xmm_reg);
3184 j(below, &done, Label::kNear);
3185 Set(result_reg, 255);
3190 void MacroAssembler::LoadUint32(XMMRegister dst,
3192 if (FLAG_debug_code) {
3193 cmpq(src, Immediate(0xffffffff));
3194 Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3196 cvtqsi2sd(dst, src);
3200 void MacroAssembler::SlowTruncateToI(Register result_reg,
3203 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
3204 call(stub.GetCode(), RelocInfo::CODE_TARGET);
3208 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3209 Register input_reg) {
3211 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3212 cvttsd2siq(result_reg, xmm0);
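// cvttsd2siq produces 0x8000000000000000 when the double cannot be
// represented as an int64; cmpq with 1 overflows exactly in that case, so
// no_overflow means the fast conversion succeeded.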
3213 cmpq(result_reg, Immediate(1));
3214 j(no_overflow, &done, Label::kNear);
3217 if (input_reg.is(result_reg)) {
3218 subp(rsp, Immediate(kDoubleSize));
3219 movsd(MemOperand(rsp, 0), xmm0);
3220 SlowTruncateToI(result_reg, rsp, 0);
3221 addp(rsp, Immediate(kDoubleSize));
3223 SlowTruncateToI(result_reg, input_reg);
3227 // Keep our invariant that the upper 32 bits are zero.
3228 movl(result_reg, result_reg);
3232 void MacroAssembler::TruncateDoubleToI(Register result_reg,
3233 XMMRegister input_reg) {
3235 cvttsd2siq(result_reg, input_reg);
3236 cmpq(result_reg, Immediate(1));
3237 j(no_overflow, &done, Label::kNear);
3239 subp(rsp, Immediate(kDoubleSize));
3240 movsd(MemOperand(rsp, 0), input_reg);
3241 SlowTruncateToI(result_reg, rsp, 0);
3242 addp(rsp, Immediate(kDoubleSize));
3245 // Keep our invariant that the upper 32 bits are zero.
3246 movl(result_reg, result_reg);
3250 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
3251 XMMRegister scratch,
3252 MinusZeroMode minus_zero_mode,
3253 Label* lost_precision, Label* is_nan,
3254 Label* minus_zero, Label::Distance dst) {
3255 cvttsd2si(result_reg, input_reg);
3256 Cvtlsi2sd(xmm0, result_reg);
3257 ucomisd(xmm0, input_reg);
3258 j(not_equal, lost_precision, dst);
3259 j(parity_even, is_nan, dst); // NaN.
3260 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3262 // The integer converted back is equal to the original. We
3263 // only have to test if we got -0 as an input.
3264 testl(result_reg, result_reg);
3265 j(not_zero, &done, Label::kNear);
3266 movmskpd(result_reg, input_reg);
3267 // Bit 0 contains the sign of the double in input_reg.
3268 // If input was positive, we are ok and return 0, otherwise
3269 // jump to minus_zero.
3270 andl(result_reg, Immediate(1));
3271 j(not_zero, minus_zero, dst);
3277 void MacroAssembler::LoadInstanceDescriptors(Register map,
3278 Register descriptors) {
3279 movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3283 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3284 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3285 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3289 void MacroAssembler::EnumLength(Register dst, Register map) {
3290 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3291 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3292 andl(dst, Immediate(Map::EnumLengthBits::kMask));
3293 Integer32ToSmi(dst, dst);
3297 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3299 AccessorComponent accessor) {
3300 movp(dst, FieldOperand(holder, HeapObject::kMapOffset));
3301 LoadInstanceDescriptors(dst, dst);
3302 movp(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3303 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
3304 : AccessorPair::kSetterOffset;
3305 movp(dst, FieldOperand(dst, offset));
3309 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3310 Register scratch2, Handle<WeakCell> cell,
3311 Handle<Code> success,
3312 SmiCheckType smi_check_type) {
3314 if (smi_check_type == DO_SMI_CHECK) {
3315 JumpIfSmi(obj, &fail);
3317 movq(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
3318 CmpWeakValue(scratch1, cell, scratch2);
3319 j(equal, success, RelocInfo::CODE_TARGET);
3324 void MacroAssembler::AssertNumber(Register object) {
3325 if (emit_debug_code()) {
3327 Condition is_smi = CheckSmi(object);
3328 j(is_smi, &ok, Label::kNear);
3329 Cmp(FieldOperand(object, HeapObject::kMapOffset),
3330 isolate()->factory()->heap_number_map());
3331 Check(equal, kOperandIsNotANumber);
3337 void MacroAssembler::AssertNotSmi(Register object) {
3338 if (emit_debug_code()) {
3339 Condition is_smi = CheckSmi(object);
3340 Check(NegateCondition(is_smi), kOperandIsASmi);
3345 void MacroAssembler::AssertSmi(Register object) {
3346 if (emit_debug_code()) {
3347 Condition is_smi = CheckSmi(object);
3348 Check(is_smi, kOperandIsNotASmi);
3353 void MacroAssembler::AssertSmi(const Operand& object) {
3354 if (emit_debug_code()) {
3355 Condition is_smi = CheckSmi(object);
3356 Check(is_smi, kOperandIsNotASmi);
3361 void MacroAssembler::AssertZeroExtended(Register int32_register) {
3362 if (emit_debug_code()) {
3363 DCHECK(!int32_register.is(kScratchRegister));
3364 movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3365 cmpq(kScratchRegister, int32_register);
3366 Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3371 void MacroAssembler::AssertString(Register object) {
3372 if (emit_debug_code()) {
3373 testb(object, Immediate(kSmiTagMask));
3374 Check(not_equal, kOperandIsASmiAndNotAString);
3376 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3377 CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3379 Check(below, kOperandIsNotAString);
3384 void MacroAssembler::AssertName(Register object) {
3385 if (emit_debug_code()) {
3386 testb(object, Immediate(kSmiTagMask));
3387 Check(not_equal, kOperandIsASmiAndNotAName);
3389 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3390 CmpInstanceType(object, LAST_NAME_TYPE);
3392 Check(below_equal, kOperandIsNotAName);
3397 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3398 if (emit_debug_code()) {
3399 Label done_checking;
3400 AssertNotSmi(object);
3401 Cmp(object, isolate()->factory()->undefined_value());
3402 j(equal, &done_checking);
3403 Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3404 Assert(equal, kExpectedUndefinedOrCell);
3405 bind(&done_checking);
3410 void MacroAssembler::AssertRootValue(Register src,
3411 Heap::RootListIndex root_value_index,
3412 BailoutReason reason) {
3413 if (emit_debug_code()) {
3414 DCHECK(!src.is(kScratchRegister));
3415 LoadRoot(kScratchRegister, root_value_index);
3416 cmpp(src, kScratchRegister);
3417 Check(equal, reason);
3423 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3425 Register instance_type) {
3426 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3427 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3428 STATIC_ASSERT(kNotStringTag != 0);
3429 testb(instance_type, Immediate(kIsNotStringMask));
3434 Condition MacroAssembler::IsObjectNameType(Register heap_object,
3436 Register instance_type) {
3437 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3438 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3439 cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
3444 void MacroAssembler::GetMapConstructor(Register result, Register map,
3447 movp(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
3449 JumpIfSmi(result, &done, Label::kNear);
3450 CmpObjectType(result, MAP_TYPE, temp);
3451 j(not_equal, &done, Label::kNear);
3452 movp(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
3458 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
3460 // Get the prototype or initial map from the function.
3462 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3464 // If the prototype or initial map is the hole, don't return it and
3465 // simply miss the cache instead. This will allow us to allocate a
3466 // prototype object on-demand in the runtime system.
3467 CompareRoot(result, Heap::kTheHoleValueRootIndex);
3470 // If the function does not have an initial map, we're done.
3472 CmpObjectType(result, MAP_TYPE, kScratchRegister);
3473 j(not_equal, &done, Label::kNear);
3475 // Get the prototype from the initial map.
3476 movp(result, FieldOperand(result, Map::kPrototypeOffset));
3483 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3484 if (FLAG_native_code_counters && counter->Enabled()) {
3485 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3486 movl(counter_operand, Immediate(value));
3491 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3493 if (FLAG_native_code_counters && counter->Enabled()) {
3494 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3496 incl(counter_operand);
3498 addl(counter_operand, Immediate(value));
3504 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3506 if (FLAG_native_code_counters && counter->Enabled()) {
3507 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3509 decl(counter_operand);
3511 subl(counter_operand, Immediate(value));
3517 void MacroAssembler::DebugBreak() {
3518 Set(rax, 0); // No arguments.
3520 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
3521 CEntryStub ces(isolate(), 1);
3522 DCHECK(AllowThisStubCall(&ces));
3523 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
3527 void MacroAssembler::InvokeCode(Register code,
3528 const ParameterCount& expected,
3529 const ParameterCount& actual,
3531 const CallWrapper& call_wrapper) {
3532 // You can't call a function without a valid frame.
3533 DCHECK(flag == JUMP_FUNCTION || has_frame());
3536 bool definitely_mismatches = false;
3537 InvokePrologue(expected,
3539 Handle<Code>::null(),
3542 &definitely_mismatches,
3546 if (!definitely_mismatches) {
3547 if (flag == CALL_FUNCTION) {
3548 call_wrapper.BeforeCall(CallSize(code));
3550 call_wrapper.AfterCall();
3552 DCHECK(flag == JUMP_FUNCTION);
3560 void MacroAssembler::InvokeFunction(Register function,
3561 const ParameterCount& actual,
3563 const CallWrapper& call_wrapper) {
3564 // You can't call a function without a valid frame.
3565 DCHECK(flag == JUMP_FUNCTION || has_frame());
3567 DCHECK(function.is(rdi));
3568 movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3569 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3570 LoadSharedFunctionInfoSpecialField(rbx, rdx,
3571 SharedFunctionInfo::kFormalParameterCountOffset);
3572 // Advances rdx to the end of the Code object header, to the start of
3573 // the executable code.
3574 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3576 ParameterCount expected(rbx);
3577 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3581 void MacroAssembler::InvokeFunction(Register function,
3582 const ParameterCount& expected,
3583 const ParameterCount& actual,
3585 const CallWrapper& call_wrapper) {
3586 // You can't call a function without a valid frame.
3587 DCHECK(flag == JUMP_FUNCTION || has_frame());
3589 DCHECK(function.is(rdi));
3590 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3591 // Advances rdx to the end of the Code object header, to the start of
3592 // the executable code.
3593 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3595 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3599 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3600 const ParameterCount& expected,
3601 const ParameterCount& actual,
3603 const CallWrapper& call_wrapper) {
3604 Move(rdi, function);
3605 InvokeFunction(rdi, expected, actual, flag, call_wrapper);
3609 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3610 const ParameterCount& actual,
3611 Handle<Code> code_constant,
3612 Register code_register,
3614 bool* definitely_mismatches,
3616 Label::Distance near_jump,
3617 const CallWrapper& call_wrapper) {
3618 bool definitely_matches = false;
3619 *definitely_mismatches = false;
3621 if (expected.is_immediate()) {
3622 DCHECK(actual.is_immediate());
3623 if (expected.immediate() == actual.immediate()) {
3624 definitely_matches = true;
3626 Set(rax, actual.immediate());
3627 if (expected.immediate() ==
3628 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3629 // Don't worry about adapting arguments for built-ins that
3630 // don't want that done. Skip the adaptation code by making it look
3631 // like we have a match between expected and actual number of
3633 definitely_matches = true;
3635 *definitely_mismatches = true;
3636 Set(rbx, expected.immediate());
3640 if (actual.is_immediate()) {
3641 // Expected is in register, actual is immediate. This is the
3642 // case when we invoke function values without going through the
3644 cmpp(expected.reg(), Immediate(actual.immediate()));
3645 j(equal, &invoke, Label::kNear);
3646 DCHECK(expected.reg().is(rbx));
3647 Set(rax, actual.immediate());
3648 } else if (!expected.reg().is(actual.reg())) {
3649 // Both expected and actual are in (different) registers. This
3650 // is the case when we invoke functions using call and apply.
3651 cmpp(expected.reg(), actual.reg());
3652 j(equal, &invoke, Label::kNear);
3653 DCHECK(actual.reg().is(rax));
3654 DCHECK(expected.reg().is(rbx));
3658 if (!definitely_matches) {
3659 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
3660 if (!code_constant.is_null()) {
3661 Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
3662 addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3663 } else if (!code_register.is(rdx)) {
3664 movp(rdx, code_register);
3667 if (flag == CALL_FUNCTION) {
3668 call_wrapper.BeforeCall(CallSize(adaptor));
3669 Call(adaptor, RelocInfo::CODE_TARGET);
3670 call_wrapper.AfterCall();
3671 if (!*definitely_mismatches) {
3672 jmp(done, near_jump);
3675 Jump(adaptor, RelocInfo::CODE_TARGET);
3682 void MacroAssembler::StubPrologue() {
3683 pushq(rbp); // Caller's frame pointer.
3685 Push(rsi); // Callee's context.
3686 Push(Smi::FromInt(StackFrame::STUB));
3690 void MacroAssembler::Prologue(bool code_pre_aging) {
3691 PredictableCodeSizeScope predictible_code_size_scope(this,
3692 kNoCodeAgeSequenceLength);
3693 if (code_pre_aging) {
3694 // Pre-age the code.
3695 Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
3696 RelocInfo::CODE_AGE_SEQUENCE);
3697 Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
3699 pushq(rbp); // Caller's frame pointer.
3701 Push(rsi); // Callee's context.
3702 Push(rdi); // Callee's JS function.
3707 void MacroAssembler::EnterFrame(StackFrame::Type type,
3708 bool load_constant_pool_pointer_reg) {
3709 // Out-of-line constant pool not implemented on x64.
3714 void MacroAssembler::EnterFrame(StackFrame::Type type) {
3717 Push(rsi); // Context.
3718 Push(Smi::FromInt(type));
3719 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3720 Push(kScratchRegister);
3721 if (emit_debug_code()) {
3722 Move(kScratchRegister,
3723 isolate()->factory()->undefined_value(),
3724 RelocInfo::EMBEDDED_OBJECT);
3725 cmpp(Operand(rsp, 0), kScratchRegister);
3726 Check(not_equal, kCodeObjectNotProperlyPatched);
3731 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
3732 if (emit_debug_code()) {
3733 Move(kScratchRegister, Smi::FromInt(type));
3734 cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
3735 Check(equal, kStackFrameTypesMustMatch);
3742 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
3743 // Set up the frame structure on the stack.
3744 // All constants are relative to the frame pointer of the exit frame.
3745 DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
3746 kFPOnStackSize + kPCOnStackSize);
3747 DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
3748 DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
3752 // Reserve room for entry stack pointer and push the code object.
3753 DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
3754 Push(Immediate(0)); // Saved entry sp, patched before call.
3755 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3756 Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
3758 // Save the frame pointer and the context in top.
3760 movp(r14, rax); // Backup rax in callee-save register.
3763 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
3764 Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
3765 Store(ExternalReference(Isolate::kCFunctionAddress, isolate()), rbx);
3769 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
3770 bool save_doubles) {
3772 const int kShadowSpace = 4;
3773 arg_stack_space += kShadowSpace;
3775 // Optionally save all XMM registers.
3777 int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
3778 arg_stack_space * kRegisterSize;
3779 subp(rsp, Immediate(space));
3780 int offset = -2 * kPointerSize;
3781 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
3782 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3783 movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
3785 } else if (arg_stack_space > 0) {
3786 subp(rsp, Immediate(arg_stack_space * kRegisterSize));
3789 // Get the required frame alignment for the OS.
3790 const int kFrameAlignment = base::OS::ActivationFrameAlignment();
3791 if (kFrameAlignment > 0) {
3792 DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
3793 DCHECK(is_int8(kFrameAlignment));
3794 andp(rsp, Immediate(-kFrameAlignment));
3797 // Patch the saved entry sp.
3798 movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
3802 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
3803 EnterExitFramePrologue(true);
3805 // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
3806 // so it must be retained across the C-call.
3807 int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
3808 leap(r15, Operand(rbp, r14, times_pointer_size, offset));
3810 EnterExitFrameEpilogue(arg_stack_space, save_doubles);
3814 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
3815 EnterExitFramePrologue(false);
3816 EnterExitFrameEpilogue(arg_stack_space, false);
3820 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
3824 int offset = -2 * kPointerSize;
3825 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
3826 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3827 movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
3830 // Get the return address from the stack and restore the frame pointer.
3831 movp(rcx, Operand(rbp, kFPOnStackSize));
3832 movp(rbp, Operand(rbp, 0 * kPointerSize));
3834 // Drop everything up to and including the arguments and the receiver
3835 // from the caller stack.
3836 leap(rsp, Operand(r15, 1 * kPointerSize));
3838 PushReturnAddressFrom(rcx);
3840 LeaveExitFrameEpilogue(true);
3844 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
3848 LeaveExitFrameEpilogue(restore_context);
3852 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
3853 // Restore current context from top and clear it in debug mode.
3854 ExternalReference context_address(Isolate::kContextAddress, isolate());
3855 Operand context_operand = ExternalOperand(context_address);
3856 if (restore_context) {
3857 movp(rsi, context_operand);
3860 movp(context_operand, Immediate(0));
3863 // Clear the top frame.
3864 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
3866 Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
3867 movp(c_entry_fp_operand, Immediate(0));
3871 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3874 Label same_contexts;
3876 DCHECK(!holder_reg.is(scratch));
3877 DCHECK(!scratch.is(kScratchRegister));
3878 // Load current lexical context from the stack frame.
3879 movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
3881 // When generating debug code, make sure the lexical context is set.
3882 if (emit_debug_code()) {
3883 cmpp(scratch, Immediate(0));
3884 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
3886 // Load the native context of the current context.
3888 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
3889 movp(scratch, FieldOperand(scratch, offset));
3890 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
3892 // Check the context is a native context.
3893 if (emit_debug_code()) {
3894 Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
3895 isolate()->factory()->native_context_map());
3896 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
3899 // Check if both contexts are the same.
3900 cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3901 j(equal, &same_contexts);
3903 // Compare security tokens.
3904 // Check that the security token in the calling global object is
3905 // compatible with the security token in the receiving global object.
3908 // Check the context is a native context.
3909 if (emit_debug_code()) {
3910 // Preserve original value of holder_reg.
3913 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3914 CompareRoot(holder_reg, Heap::kNullValueRootIndex);
3915 Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
3917 // Read the first word and compare to native_context_map().
3918 movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
3919 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
3920 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
3924 movp(kScratchRegister,
3925 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3927 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
3928 movp(scratch, FieldOperand(scratch, token_offset));
3929 cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
3932 bind(&same_contexts);
3936 // Compute the hash code from the untagged key. This must be kept in sync with
3937 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
3938 // code-stub-hydrogen.cc
3939 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
3940 // First of all we assign the hash seed to scratch.
3941 LoadRoot(scratch, Heap::kHashSeedRootIndex);
3942 SmiToInteger32(scratch, scratch);
3944 // Xor original key with a seed.
3947 // Compute the hash code from the untagged key. This must be kept in sync
3948 // with ComputeIntegerHash in utils.h.
3950 // hash = ~hash + (hash << 15);
3953 shll(scratch, Immediate(15));
3955 // hash = hash ^ (hash >> 12);
3957 shrl(scratch, Immediate(12));
3959 // hash = hash + (hash << 2);
3960 leal(r0, Operand(r0, r0, times_4, 0));
3961 // hash = hash ^ (hash >> 4);
3963 shrl(scratch, Immediate(4));
3965 // hash = hash * 2057;
3966 imull(r0, r0, Immediate(2057));
3967 // hash = hash ^ (hash >> 16);
3969 shrl(scratch, Immediate(16));
3971 andl(r0, Immediate(0x3fffffff));
3976 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
3985 // elements - holds the slow-case elements of the receiver on entry.
3986 // Unchanged unless 'result' is the same register.
3988 // key - holds the smi key on entry.
3989 // Unchanged unless 'result' is the same register.
3991 // Scratch registers:
3993 // r0 - holds the untagged key on entry and holds the hash once computed.
3995 // r1 - used to hold the capacity mask of the dictionary
3997 // r2 - used for the index into the dictionary.
3999 // result - holds the result on exit if the load succeeded.
4000 // Allowed to be the same as 'key' or 'elements'.
4001 // Unchanged on bailout so 'key' or 'elements' can be used
4002 // in further computation.
4006 GetNumberHash(r0, r1);
4008 // Compute capacity mask.
4009 SmiToInteger32(r1, FieldOperand(elements,
4010 SeededNumberDictionary::kCapacityOffset));
4013 // Generate an unrolled loop that performs a few probes before giving up.
4014 for (int i = 0; i < kNumberDictionaryProbes; i++) {
4015 // Use r2 for index calculations and keep the hash intact in r0.
4017 // Compute the masked index: (hash + i + i * i) & mask.
4019 addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4023 // Scale the index by multiplying by the entry size.
4024 DCHECK(SeededNumberDictionary::kEntrySize == 3);
4025 leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
4027 // Check if the key matches.
4028 cmpp(key, FieldOperand(elements,
4031 SeededNumberDictionary::kElementsStartOffset));
4032 if (i != (kNumberDictionaryProbes - 1)) {
4040 // Check that the value is a field property.
4041 const int kDetailsOffset =
4042 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4044 Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4045 Smi::FromInt(PropertyDetails::TypeField::kMask));
4048 // Get the value at the masked, scaled index.
4049 const int kValueOffset =
4050 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4051 movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
4055 void MacroAssembler::LoadAllocationTopHelper(Register result,
4057 AllocationFlags flags) {
4058 ExternalReference allocation_top =
4059 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4061 // Just return if allocation top is already known.
4062 if ((flags & RESULT_CONTAINS_TOP) != 0) {
4063 // No use of scratch if allocation top is provided.
4064 DCHECK(!scratch.is_valid());
4066 // Assert that result actually contains top on entry.
4067 Operand top_operand = ExternalOperand(allocation_top);
4068 cmpp(result, top_operand);
4069 Check(equal, kUnexpectedAllocationTop);
4074 // Move address of new object to result. Use scratch register if available,
4075 // and keep address in scratch until call to UpdateAllocationTopHelper.
4076 if (scratch.is_valid()) {
4077 LoadAddress(scratch, allocation_top);
4078 movp(result, Operand(scratch, 0));
4080 Load(result, allocation_top);
4085 void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
4088 AllocationFlags flags) {
4089 if (kPointerSize == kDoubleSize) {
4090 if (FLAG_debug_code) {
4091 testl(result, Immediate(kDoubleAlignmentMask));
4092 Check(zero, kAllocationIsNotDoubleAligned);
4095 // Align the next allocation. Storing the filler map without checking top
4096 // is safe in new-space because the limit of the heap is aligned there.
4097 DCHECK(kPointerSize * 2 == kDoubleSize);
4098 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4099 // Make sure scratch is not clobbered by this function as it might be
4100 // used in UpdateAllocationTopHelper later.
4101 DCHECK(!scratch.is(kScratchRegister));
4103 testl(result, Immediate(kDoubleAlignmentMask));
4104 j(zero, &aligned, Label::kNear);
4105 if ((flags & PRETENURE) != 0) {
4106 ExternalReference allocation_limit =
4107 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4108 cmpp(result, ExternalOperand(allocation_limit));
4109 j(above_equal, gc_required);
4111 LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
4112 movp(Operand(result, 0), kScratchRegister);
4113 addp(result, Immediate(kDoubleSize / 2));
4119 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
4121 AllocationFlags flags) {
4122 if (emit_debug_code()) {
4123 testp(result_end, Immediate(kObjectAlignmentMask));
4124 Check(zero, kUnalignedAllocationInNewSpace);
4127 ExternalReference allocation_top =
4128 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4131 if (scratch.is_valid()) {
4132 // Scratch already contains address of allocation top.
4133 movp(Operand(scratch, 0), result_end);
4135 Store(allocation_top, result_end);
4140 void MacroAssembler::Allocate(int object_size,
4142 Register result_end,
4145 AllocationFlags flags) {
4146 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
4147 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4148 if (!FLAG_inline_new) {
4149 if (emit_debug_code()) {
4150 // Trash the registers to simulate an allocation failure.
4151 movl(result, Immediate(0x7091));
4152 if (result_end.is_valid()) {
4153 movl(result_end, Immediate(0x7191));
4155 if (scratch.is_valid()) {
4156 movl(scratch, Immediate(0x7291));
4162 DCHECK(!result.is(result_end));
4164 // Load address of new object into result.
4165 LoadAllocationTopHelper(result, scratch, flags);
4167 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4168 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4171 // Calculate new top and bail out if new space is exhausted.
4172 ExternalReference allocation_limit =
4173 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4175 Register top_reg = result_end.is_valid() ? result_end : result;
4177 if (!top_reg.is(result)) {
4178 movp(top_reg, result);
4180 addp(top_reg, Immediate(object_size));
4181 j(carry, gc_required);
4182 Operand limit_operand = ExternalOperand(allocation_limit);
4183 cmpp(top_reg, limit_operand);
4184 j(above, gc_required);
4186 // Update allocation top.
4187 UpdateAllocationTopHelper(top_reg, scratch, flags);
4189 bool tag_result = (flags & TAG_OBJECT) != 0;
4190 if (top_reg.is(result)) {
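// top_reg aliases result and now holds the new allocation top, so subtract
// the object size (biased by kHeapObjectTag when tagging) to recover the
// start of the newly allocated object.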
4192 subp(result, Immediate(object_size - kHeapObjectTag));
4194 subp(result, Immediate(object_size));
4196 } else if (tag_result) {
4197 // Tag the result if requested.
4198 DCHECK(kHeapObjectTag == 1);
4204 void MacroAssembler::Allocate(int header_size,
4205 ScaleFactor element_size,
4206 Register element_count,
4208 Register result_end,
4211 AllocationFlags flags) {
4212 DCHECK((flags & SIZE_IN_WORDS) == 0);
4213 leap(result_end, Operand(element_count, element_size, header_size));
4214 Allocate(result_end, result, result_end, scratch, gc_required, flags);
4218 void MacroAssembler::Allocate(Register object_size,
4220 Register result_end,
4223 AllocationFlags flags) {
4224 DCHECK((flags & SIZE_IN_WORDS) == 0);
4225 if (!FLAG_inline_new) {
4226 if (emit_debug_code()) {
4227 // Trash the registers to simulate an allocation failure.
4228 movl(result, Immediate(0x7091));
4229 movl(result_end, Immediate(0x7191));
4230 if (scratch.is_valid()) {
4231 movl(scratch, Immediate(0x7291));
4233 // object_size is left unchanged by this function.
4238 DCHECK(!result.is(result_end));
4240 // Load address of new object into result.
4241 LoadAllocationTopHelper(result, scratch, flags);
4243 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4244 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4247 // Calculate new top and bail out if new space is exhausted.
4248 ExternalReference allocation_limit =
4249 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4250 if (!object_size.is(result_end)) {
4251 movp(result_end, object_size);
4253 addp(result_end, result);
4254 j(carry, gc_required);
4255 Operand limit_operand = ExternalOperand(allocation_limit);
4256 cmpp(result_end, limit_operand);
4257 j(above, gc_required);
4259 // Update allocation top.
4260 UpdateAllocationTopHelper(result_end, scratch, flags);
4262 // Tag the result if requested.
4263 if ((flags & TAG_OBJECT) != 0) {
4264 addp(result, Immediate(kHeapObjectTag));
4269 void MacroAssembler::AllocateHeapNumber(Register result,
4273 // Allocate heap number in new space.
4274 Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4276 Heap::RootListIndex map_index = mode == MUTABLE
4277 ? Heap::kMutableHeapNumberMapRootIndex
4278 : Heap::kHeapNumberMapRootIndex;
4281 LoadRoot(kScratchRegister, map_index);
4282 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4286 void MacroAssembler::AllocateTwoByteString(Register result,
4291 Label* gc_required) {
4292 // Calculate the number of bytes needed for the characters in the string while
4293 // observing object alignment.
4294 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
4295 kObjectAlignmentMask;
4296 DCHECK(kShortSize == 2);
4297 // scratch1 = length * 2 + kObjectAlignmentMask.
4298 leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
4300 andp(scratch1, Immediate(~kObjectAlignmentMask));
4301 if (kHeaderAlignment > 0) {
4302 subp(scratch1, Immediate(kHeaderAlignment));
4305 // Allocate two byte string in new space.
4306 Allocate(SeqTwoByteString::kHeaderSize,
4315 // Set the map, length and hash field.
4316 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
4317 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4318 Integer32ToSmi(scratch1, length);
4319 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4320 movp(FieldOperand(result, String::kHashFieldOffset),
4321 Immediate(String::kEmptyHashField));
4325 void MacroAssembler::AllocateOneByteString(Register result, Register length,
4326 Register scratch1, Register scratch2,
4328 Label* gc_required) {
4329 // Calculate the number of bytes needed for the characters in the string while
4330 // observing object alignment.
4331 const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
4332 kObjectAlignmentMask;
4333 movl(scratch1, length);
4334 DCHECK(kCharSize == 1);
4335 addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
4336 andp(scratch1, Immediate(~kObjectAlignmentMask));
4337 if (kHeaderAlignment > 0) {
4338 subp(scratch1, Immediate(kHeaderAlignment));
4341 // Allocate one-byte string in new space.
4342 Allocate(SeqOneByteString::kHeaderSize,
4351 // Set the map, length and hash field.
4352 LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
4353 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4354 Integer32ToSmi(scratch1, length);
4355 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4356 movp(FieldOperand(result, String::kHashFieldOffset),
4357 Immediate(String::kEmptyHashField));
4361 void MacroAssembler::AllocateTwoByteConsString(Register result,
4364 Label* gc_required) {
4365 // Allocate the cons string object in new space.
4366 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4369 // Set the map. The other fields are left uninitialized.
4370 LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
4371 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4375 void MacroAssembler::AllocateOneByteConsString(Register result,
4378 Label* gc_required) {
4379 Allocate(ConsString::kSize,
4386 // Set the map. The other fields are left uninitialized.
4387 LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
4388 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4392 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4395 Label* gc_required) {
4396 // Allocate the sliced string object in new space.
4397 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4400 // Set the map. The other fields are left uninitialized.
4401 LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
4402 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4406 void MacroAssembler::AllocateOneByteSlicedString(Register result,
4409 Label* gc_required) {
4410 // Allocate the sliced string object in new space.
4411 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4414 // Set the map. The other fields are left uninitialized.
4415 LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
4416 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4420 // Copy memory, byte-by-byte, from source to destination. Not optimized for
4421 // long or aligned copies. The contents of scratch and length are destroyed.
4422 // Destination is incremented by length; source, length and scratch are clobbered.
4424 // A simpler loop is faster on small copies, but slower on large ones.
4425 // The cld() instruction must have been emitted, to clear the direction
4426 // flag, before calling this function.
4427 void MacroAssembler::CopyBytes(Register destination,
4432 DCHECK(min_length >= 0);
4433 if (emit_debug_code()) {
4434 cmpl(length, Immediate(min_length));
4435 Assert(greater_equal, kInvalidMinLength);
4437 Label short_loop, len8, len16, len24, done, short_string;
4439 const int kLongStringLimit = 4 * kPointerSize;
4440 if (min_length <= kLongStringLimit) {
4441 cmpl(length, Immediate(kPointerSize));
4442 j(below, &short_string, Label::kNear);
4445 DCHECK(source.is(rsi));
4446 DCHECK(destination.is(rdi));
4447 DCHECK(length.is(rcx));
4449 if (min_length <= kLongStringLimit) {
4450 cmpl(length, Immediate(2 * kPointerSize));
4451 j(below_equal, &len8, Label::kNear);
4452 cmpl(length, Immediate(3 * kPointerSize));
4453 j(below_equal, &len16, Label::kNear);
4454 cmpl(length, Immediate(4 * kPointerSize));
4455 j(below_equal, &len24, Label::kNear);
4458 // Because source is 8-byte aligned in our uses of this function,
4459 // we keep source aligned for the rep movs operation by copying the odd bytes
4460 // at the end of the ranges.
4461 movp(scratch, length);
4462 shrl(length, Immediate(kPointerSizeLog2));
4464 // Move remaining bytes of length.
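// The tail (length mod kPointerSize bytes) is copied with one overlapping
// pointer-sized move that ends exactly at the end of the range; this is
// safe because this path is only reached with at least kPointerSize bytes.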
4465 andl(scratch, Immediate(kPointerSize - 1));
4466 movp(length, Operand(source, scratch, times_1, -kPointerSize));
4467 movp(Operand(destination, scratch, times_1, -kPointerSize), length);
4468 addp(destination, scratch);
4470 if (min_length <= kLongStringLimit) {
4471 jmp(&done, Label::kNear);
4472 bind(&len24);
4473 movp(scratch, Operand(source, 2 * kPointerSize));
4474 movp(Operand(destination, 2 * kPointerSize), scratch);
4475 bind(&len16);
4476 movp(scratch, Operand(source, kPointerSize));
4477 movp(Operand(destination, kPointerSize), scratch);
4478 bind(&len8);
4479 movp(scratch, Operand(source, 0));
4480 movp(Operand(destination, 0), scratch);
4481 // Move remaining bytes of length.
4482 movp(scratch, Operand(source, length, times_1, -kPointerSize));
4483 movp(Operand(destination, length, times_1, -kPointerSize), scratch);
4484 addp(destination, length);
4485 jmp(&done, Label::kNear);
4487 bind(&short_string);
4488 if (min_length == 0) {
4489 testl(length, length);
4490 j(zero, &done, Label::kNear);
4493 bind(&short_loop);
4494 movb(scratch, Operand(source, 0));
4495 movb(Operand(destination, 0), scratch);
4496 incp(source);
4497 incp(destination);
4498 decl(length);
4499 j(not_zero, &short_loop);
4502 bind(&done);
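// Illustrative, self-contained sketch (not part of the V8 code above) of the
// copy strategy CopyBytes uses for lengths of at least one word: copy whole
// pointer-size words first, then copy one final word that ends exactly at
// destination + length. The tail word overlaps bytes that were already
// copied, which is harmless. Names are hypothetical; assumes 8-byte words.
#if 0
#include <cstdint>
#include <cstring>

void CopyBytesSketch(uint8_t* destination, const uint8_t* source,
                     size_t length) {
  const size_t kWord = sizeof(uintptr_t);  // kPointerSize
  // shrl(length, Immediate(kPointerSizeLog2)) + repmovsp(): whole words.
  size_t words = length / kWord;
  std::memcpy(destination, source, words * kWord);
  // The overlapping tail word ending at the end of the range (only valid when
  // length >= kWord), mirroring the movp through [source + scratch - kWord].
  std::memcpy(destination + length - kWord, source + length - kWord, kWord);
}
#endif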
4506 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
4507 Register end_offset,
4508 Register filler) {
4509 Label loop, entry;
4510 jmp(&entry);
4511 bind(&loop);
4512 movp(Operand(start_offset, 0), filler);
4513 addp(start_offset, Immediate(kPointerSize));
4514 bind(&entry);
4515 cmpp(start_offset, end_offset);
4516 j(below, &loop, Label::kNear);
4520 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4521 if (context_chain_length > 0) {
4522 // Move up the chain of contexts to the context containing the slot.
4523 movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4524 for (int i = 1; i < context_chain_length; i++) {
4525 movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4527 } else {
4528 // Slot is in the current function context. Move it into the
4529 // destination register in case we store into it (the write barrier
4530 // cannot be allowed to destroy the context in rsi).
4531 movp(dst, rsi);
4534 // We should not have found a with context by walking the context
4535 // chain (i.e., the static scope chain and runtime context chain do
4536 // not agree). A variable occurring in such a scope should have
4537 // slot type LOOKUP and not CONTEXT.
4538 if (emit_debug_code()) {
4539 CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
4540 Heap::kWithContextMapRootIndex);
4541 Check(not_equal, kVariableResolvedToWithContext);
4546 void MacroAssembler::LoadTransitionedArrayMapConditional(
4547 ElementsKind expected_kind,
4548 ElementsKind transitioned_kind,
4549 Register map_in_out,
4551 Label* no_map_match) {
4552 // Load the global or builtins object from the current context.
4553 movp(scratch,
4554 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4555 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4557 // Check that the function's map is the same as the expected cached map.
4558 movp(scratch, Operand(scratch,
4559 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4561 int offset = expected_kind * kPointerSize +
4562 FixedArrayBase::kHeaderSize;
4563 cmpp(map_in_out, FieldOperand(scratch, offset));
4564 j(not_equal, no_map_match);
4566 // Use the transitioned cached map.
4567 offset = transitioned_kind * kPointerSize +
4568 FixedArrayBase::kHeaderSize;
4569 movp(map_in_out, FieldOperand(scratch, offset));
4573 #ifdef _WIN64
4574 static const int kRegisterPassedArguments = 4;
4575 #else
4576 static const int kRegisterPassedArguments = 6;
4577 #endif
4579 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4580 // Load the global or builtins object from the current context.
4581 movp(function,
4582 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4583 // Load the native context from the global or builtins object.
4584 movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
4585 // Load the function from the native context.
4586 movp(function, Operand(function, Context::SlotOffset(index)));
4590 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4592 // Load the initial map. The global functions all have initial maps.
4593 movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4594 if (emit_debug_code()) {
4595 Label ok, fail;
4596 CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
4597 jmp(&ok);
4598 bind(&fail);
4599 Abort(kGlobalFunctionsMustHaveInitialMap);
4600 bind(&ok);
4605 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
4606 // On Windows 64 stack slots are reserved by the caller for all arguments
4607 // including the ones passed in registers, and space is always allocated for
4608 // the four register arguments even if the function takes fewer than four
4609 // arguments.
4610 // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
4611 // and the caller does not reserve stack slots for them.
4612 DCHECK(num_arguments >= 0);
4613 #ifdef _WIN64
4614 const int kMinimumStackSlots = kRegisterPassedArguments;
4615 if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
4616 return num_arguments;
4617 #else
4618 if (num_arguments < kRegisterPassedArguments) return 0;
4619 return num_arguments - kRegisterPassedArguments;
4620 #endif
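// Illustrative sketch (not part of the V8 code above): the same slot
// computation as ArgumentStackSlotsForCFunctionCall, written as plain C++
// with a couple of worked values. The function name and the `win64` flag are
// hypothetical; the constants mirror kRegisterPassedArguments above.
#if 0
int ArgumentStackSlotsSketch(int num_arguments, bool win64) {
  if (win64) {
    // Windows x64: the caller always reserves at least four slots (the
    // "shadow space"), one per register argument, plus one per extra argument.
    const int kRegisterArgs = 4;
    return num_arguments < kRegisterArgs ? kRegisterArgs : num_arguments;
  }
  // System V AMD64: the first six arguments travel in registers and get no
  // stack slots; only the seventh and later arguments need one each.
  const int kRegisterArgs = 6;
  return num_arguments < kRegisterArgs ? 0 : num_arguments - kRegisterArgs;
}
// Worked values: 3 arguments -> 4 slots on Windows, 0 on System V;
//                8 arguments -> 8 slots on Windows, 2 on System V.
#endif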
4624 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
4627 uint32_t encoding_mask) {
4628 Label is_object;
4629 JumpIfNotSmi(string, &is_object);
4630 Abort(kNonObject);
4631 bind(&is_object);
4634 movp(value, FieldOperand(string, HeapObject::kMapOffset));
4635 movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
4637 andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
4638 cmpp(value, Immediate(encoding_mask));
4640 Check(equal, kUnexpectedStringType);
4642 // The index is assumed to be untagged coming in; tag it to compare with the
4643 // string length without using a temp register. It is restored at the end of
4644 // the function.
4645 Integer32ToSmi(index, index);
4646 SmiCompare(index, FieldOperand(string, String::kLengthOffset));
4647 Check(less, kIndexIsTooLarge);
4649 SmiCompare(index, Smi::FromInt(0));
4650 Check(greater_equal, kIndexIsNegative);
4652 // Restore the index
4653 SmiToInteger32(index, index);
4657 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
4658 int frame_alignment = base::OS::ActivationFrameAlignment();
4659 DCHECK(frame_alignment != 0);
4660 DCHECK(num_arguments >= 0);
4662 // Make stack end at alignment and allocate space for arguments and old rsp.
4663 movp(kScratchRegister, rsp);
4664 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4665 int argument_slots_on_stack =
4666 ArgumentStackSlotsForCFunctionCall(num_arguments);
4667 subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
4668 andp(rsp, Immediate(-frame_alignment));
4669 movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
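// Illustrative sketch (not part of the V8 code above) of the stack adjustment
// performed by PrepareCallCFunction, assuming kRegisterSize == 8 and a 16-byte
// activation frame alignment. The helper name is hypothetical.
#if 0
#include <cstdint>

uintptr_t PreparedStackPointerSketch(uintptr_t old_sp,
                                     int argument_slots_on_stack) {
  const uintptr_t kRegisterSize = 8;
  const uintptr_t kFrameAlignment = 16;
  // Reserve the argument slots plus one extra slot for the saved rsp ...
  uintptr_t sp = old_sp - (argument_slots_on_stack + 1) * kRegisterSize;
  // ... then round down to the alignment, like andp(rsp, Immediate(-16)).
  sp &= ~(kFrameAlignment - 1);
  // The original rsp is stored at sp + argument_slots_on_stack * 8, which is
  // exactly where CallCFunction reloads it from after the call returns.
  return sp;
}
#endif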
4673 void MacroAssembler::CallCFunction(ExternalReference function,
4674 int num_arguments) {
4675 LoadAddress(rax, function);
4676 CallCFunction(rax, num_arguments);
4680 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
4681 DCHECK(has_frame());
4682 // Check stack alignment.
4683 if (emit_debug_code()) {
4684 CheckStackAlignment();
4687 call(function);
4688 DCHECK(base::OS::ActivationFrameAlignment() != 0);
4689 DCHECK(num_arguments >= 0);
4690 int argument_slots_on_stack =
4691 ArgumentStackSlotsForCFunctionCall(num_arguments);
4692 movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
4697 bool AreAliased(Register reg1,
4705 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
4706 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
4707 reg7.is_valid() + reg8.is_valid();
4709 RegList regs = 0;
4710 if (reg1.is_valid()) regs |= reg1.bit();
4711 if (reg2.is_valid()) regs |= reg2.bit();
4712 if (reg3.is_valid()) regs |= reg3.bit();
4713 if (reg4.is_valid()) regs |= reg4.bit();
4714 if (reg5.is_valid()) regs |= reg5.bit();
4715 if (reg6.is_valid()) regs |= reg6.bit();
4716 if (reg7.is_valid()) regs |= reg7.bit();
4717 if (reg8.is_valid()) regs |= reg8.bit();
4718 int n_of_non_aliasing_regs = NumRegs(regs);
4720 return n_of_valid_regs != n_of_non_aliasing_regs;
4725 CodePatcher::CodePatcher(byte* address, int size)
4726 : address_(address),
4727 size_(size),
4728 masm_(NULL, address, size + Assembler::kGap) {
4729 // Create a new macro assembler pointing to the address of the code to patch.
4730 // The size is adjusted with kGap in order for the assembler to generate size
4731 // bytes of instructions without failing with buffer size constraints.
4732 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4736 CodePatcher::~CodePatcher() {
4737 // Indicate that code has changed.
4738 CpuFeatures::FlushICache(address_, size_);
4740 // Check that the code was patched as expected.
4741 DCHECK(masm_.pc_ == address_ + size_);
4742 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4746 void MacroAssembler::CheckPageFlag(
4751 Label* condition_met,
4752 Label::Distance condition_met_distance) {
4753 DCHECK(cc == zero || cc == not_zero);
4754 if (scratch.is(object)) {
4755 andp(scratch, Immediate(~Page::kPageAlignmentMask));
4756 } else {
4757 movp(scratch, Immediate(~Page::kPageAlignmentMask));
4758 andp(scratch, object);
4760 if (mask < (1 << kBitsPerByte)) {
4761 testb(Operand(scratch, MemoryChunk::kFlagsOffset),
4762 Immediate(static_cast<uint8_t>(mask)));
4763 } else {
4764 testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
4766 j(cc, condition_met, condition_met_distance);
4770 void MacroAssembler::JumpIfBlack(Register object,
4771 Register bitmap_scratch,
4772 Register mask_scratch,
4774 Label::Distance on_black_distance) {
4775 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
4776 GetMarkBits(object, bitmap_scratch, mask_scratch);
4778 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
4779 // The mask_scratch register contains a 1 at the position of the first bit
4780 // and a 0 at all other positions, including the position of the second bit.
4781 movp(rcx, mask_scratch);
4782 // Make rcx into a mask that covers both marking bits using the operation
4783 // rcx = mask | (mask << 1).
4784 leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
4785 // Note that we are using a 4-byte aligned 8-byte load.
4786 andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
4787 cmpp(mask_scratch, rcx);
4788 j(equal, on_black, on_black_distance);
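// Illustrative sketch (not part of the V8 code above): the mark-bit test that
// JumpIfBlack performs, on plain integers. `mask` has exactly one bit set, at
// the position of the object's first mark bit; the helper name is
// hypothetical.
#if 0
#include <cstdint>

bool IsBlackSketch(uintptr_t bitmap_cell, uintptr_t mask) {
  // leap(rcx, Operand(mask, mask, times_2, 0)) computes mask + 2 * mask, which
  // for a single-bit mask equals mask | (mask << 1): both mark bits.
  uintptr_t both_mark_bits = mask + mask * 2;
  // Black is the bit pattern "10": first bit set, second bit clear, so after
  // masking out all other bits the cell must compare equal to `mask` itself.
  return (bitmap_cell & both_mark_bits) == mask;
}
#endif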
4792 // Detect some, but not all, common pointer-free objects. This is used by the
4793 // incremental write barrier which doesn't care about oddballs (they are always
4794 // marked black immediately so this code is not hit).
4795 void MacroAssembler::JumpIfDataObject(
4798 Label* not_data_object,
4799 Label::Distance not_data_object_distance) {
4800 Label is_data_object;
4801 movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
4802 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
4803 j(equal, &is_data_object, Label::kNear);
4804 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4805 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4806 // If it's a string and it's not a cons string then it's an object containing
4807 // no GC pointers.
4808 testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
4809 Immediate(kIsIndirectStringMask | kIsNotStringMask));
4810 j(not_zero, not_data_object, not_data_object_distance);
4811 bind(&is_data_object);
4815 void MacroAssembler::GetMarkBits(Register addr_reg,
4816 Register bitmap_reg,
4817 Register mask_reg) {
4818 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
4819 movp(bitmap_reg, addr_reg);
4820 // Sign extended 32 bit immediate.
4821 andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
4822 movp(rcx, addr_reg);
4823 int shift =
4824 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
4825 shrl(rcx, Immediate(shift));
4826 andp(rcx,
4827 Immediate((Page::kPageAlignmentMask >> shift) &
4828 ~(Bitmap::kBytesPerCell - 1)));
4830 addp(bitmap_reg, rcx);
4831 movp(rcx, addr_reg);
4832 shrl(rcx, Immediate(kPointerSizeLog2));
4833 andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
4834 movl(mask_reg, Immediate(1));
4835 shlp_cl(mask_reg);
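// Illustrative sketch (not part of the V8 code above) of the address
// arithmetic in GetMarkBits, assuming 8-byte heap words and 32-bit bitmap
// cells: one mark bit per word, so one cell covers 32 words (256 bytes) of a
// page. The struct and helper names are hypothetical; page_mask stands for
// Page::kPageAlignmentMask.
#if 0
#include <cstdint>

struct MarkBitLocationSketch {
  uintptr_t cell_address;  // address of the 32-bit bitmap cell
  uint32_t mask;           // single bit selecting the first mark bit
};

MarkBitLocationSketch GetMarkBitsSketch(uintptr_t addr, uintptr_t page_mask) {
  uintptr_t page_start = addr & ~page_mask;       // andp(bitmap_reg, ...)
  uintptr_t offset_in_page = addr & page_mask;
  uintptr_t word_index = offset_in_page >> 3;     // one mark bit per 8-byte word
  uintptr_t cell_offset = (word_index >> 5) * 4;  // 32 bits per 4-byte cell
  uint32_t bit_index = static_cast<uint32_t>(word_index & 31);
  // Callers add MemoryChunk::kHeaderSize when dereferencing the cell, as in
  // testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch).
  return {page_start + cell_offset, uint32_t{1} << bit_index};
}
#endif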
4839 void MacroAssembler::EnsureNotWhite(
4841 Register bitmap_scratch,
4842 Register mask_scratch,
4843 Label* value_is_white_and_not_data,
4844 Label::Distance distance) {
4845 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
4846 GetMarkBits(value, bitmap_scratch, mask_scratch);
4848 // If the value is black or grey we don't need to do anything.
4849 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4850 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
4851 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
4852 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4854 Label done;
4856 // Since both black and grey have a 1 in the first position and white does
4857 // not have a 1 there we only need to check one bit.
4858 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4859 j(not_zero, &done, Label::kNear);
4861 if (emit_debug_code()) {
4862 // Check for impossible bit pattern.
4863 Label ok;
4864 Push(mask_scratch);
4865 // The addp below acts as a shift left by one; it may overflow, making the check conservative.
4866 addp(mask_scratch, mask_scratch);
4867 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4868 j(zero, &ok, Label::kNear);
4869 int3();
4870 bind(&ok);
4871 Pop(mask_scratch);
4874 // Value is white. We check whether it is data that doesn't need scanning.
4875 // Currently only checks for HeapNumber and non-cons strings.
4876 Register map = rcx; // Holds map while checking type.
4877 Register length = rcx; // Holds length of object after checking type.
4878 Label not_heap_number;
4879 Label is_data_object;
4881 // Check for heap-number
4882 movp(map, FieldOperand(value, HeapObject::kMapOffset));
4883 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
4884 j(not_equal, &not_heap_number, Label::kNear);
4885 movp(length, Immediate(HeapNumber::kSize));
4886 jmp(&is_data_object, Label::kNear);
4888 bind(&not_heap_number);
4889 // Check for strings.
4890 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4891 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4892 // If it's a string and it's not a cons string then it's an object containing
4893 // no GC pointers.
4894 Register instance_type = rcx;
4895 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
4896 testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
4897 j(not_zero, value_is_white_and_not_data);
4898 // It's a non-indirect (non-cons and non-slice) string.
4899 // If it's external, the length is just ExternalString::kSize.
4900 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
4901 Label not_external;
4902 // External strings are the only ones with the kExternalStringTag bit
4903 // set.
4904 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
4905 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
4906 testb(instance_type, Immediate(kExternalStringTag));
4907 j(zero, &not_external, Label::kNear);
4908 movp(length, Immediate(ExternalString::kSize));
4909 jmp(&is_data_object, Label::kNear);
4911 bind(&not_external);
4912 // Sequential string, either Latin1 or UC16.
4913 DCHECK(kOneByteStringTag == 0x04);
4914 andp(length, Immediate(kStringEncodingMask));
4915 xorp(length, Immediate(kStringEncodingMask));
4916 addp(length, Immediate(0x04));
4917 // Value now either 4 (if Latin1) or 8 (if UC16), i.e. char-size shifted by 2.
4918 imulp(length, FieldOperand(value, String::kLengthOffset));
4919 shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
4920 addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
4921 andp(length, Immediate(~kObjectAlignmentMask));
4923 bind(&is_data_object);
4924 // Value is a data object, and it is white. Mark it black. Since we know
4925 // that the object is white we can make it black by flipping one bit.
4926 orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4928 andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
4929 addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
4931 bind(&done);
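// Illustrative sketch (not part of the V8 code above) of the sequential string
// size computed a few lines up: the instance type is turned into 4 (Latin1) or
// 8 (UC16), i.e. the character size shifted left by 2, multiplied by the
// smi-tagged length, and then the factor of 4 and the smi tag are shifted away
// again. Written out as plain arithmetic, assuming 8-byte object alignment;
// the helper name is hypothetical.
#if 0
#include <cstdint>

uintptr_t SeqStringSizeSketch(uintptr_t header_size, uintptr_t char_size,
                              uintptr_t char_count) {
  const uintptr_t kObjectAlignmentMask = 8 - 1;
  uintptr_t size = header_size + char_size * char_count;
  // addp/andp with kObjectAlignmentMask: round up to the allocation alignment.
  return (size + kObjectAlignmentMask) & ~kObjectAlignmentMask;
}
// E.g. a Latin1 string of 10 characters: header_size + 10 bytes, rounded up to
// the next multiple of 8.
#endif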
4935 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
4936 Label next, start;
4937 Register empty_fixed_array_value = r8;
4938 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
4939 movp(rcx, rax);
4941 // Check if the enum length field is properly initialized, indicating that
4942 // there is an enum cache.
4943 movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
4945 EnumLength(rdx, rbx);
4946 Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
4947 j(equal, call_runtime);
4949 jmp(&start);
4951 bind(&next);
4953 movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
4955 // For all objects but the receiver, check that the cache is empty.
4956 EnumLength(rdx, rbx);
4957 Cmp(rdx, Smi::FromInt(0));
4958 j(not_equal, call_runtime);
4960 bind(&start);
4962 // Check that there are no elements. Register rcx contains the current JS
4963 // object we've reached through the prototype chain.
4964 Label no_elements;
4965 cmpp(empty_fixed_array_value,
4966 FieldOperand(rcx, JSObject::kElementsOffset));
4967 j(equal, &no_elements);
4969 // Second chance, the object may be using the empty slow element dictionary.
4970 LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
4971 cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
4972 j(not_equal, call_runtime);
4974 bind(&no_elements);
4975 movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
4976 cmpp(rcx, null_value);
4977 j(not_equal, &next);
4980 void MacroAssembler::TestJSArrayForAllocationMemento(
4981 Register receiver_reg,
4982 Register scratch_reg,
4983 Label* no_memento_found) {
4984 ExternalReference new_space_start =
4985 ExternalReference::new_space_start(isolate());
4986 ExternalReference new_space_allocation_top =
4987 ExternalReference::new_space_allocation_top_address(isolate());
4989 leap(scratch_reg, Operand(receiver_reg,
4990 JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
4991 Move(kScratchRegister, new_space_start);
4992 cmpp(scratch_reg, kScratchRegister);
4993 j(less, no_memento_found);
4994 cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
4995 j(greater, no_memento_found);
4996 CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
4997 Heap::kAllocationMementoMapRootIndex);
5001 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5002 Register object,
5003 Register scratch0,
5004 Register scratch1,
5005 Label* found) {
5006 DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
5007 DCHECK(!scratch1.is(scratch0));
5008 Register current = scratch0;
5009 Label loop_again, end;
5011 movp(current, object);
5012 movp(current, FieldOperand(current, HeapObject::kMapOffset));
5013 movp(current, FieldOperand(current, Map::kPrototypeOffset));
5014 CompareRoot(current, Heap::kNullValueRootIndex);
5015 j(equal, &end);
5017 // Loop based on the map going up the prototype chain.
5018 bind(&loop_again);
5019 movp(current, FieldOperand(current, HeapObject::kMapOffset));
5020 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
5021 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
5022 CmpInstanceType(current, JS_OBJECT_TYPE);
5023 j(below, found);
5024 movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
5025 DecodeField<Map::ElementsKindBits>(scratch1);
5026 cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
5027 j(equal, found);
5028 movp(current, FieldOperand(current, Map::kPrototypeOffset));
5029 CompareRoot(current, Heap::kNullValueRootIndex);
5030 j(not_equal, &loop_again);
5032 bind(&end);
5036 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
5037 DCHECK(!dividend.is(rax));
5038 DCHECK(!dividend.is(rdx));
5039 base::MagicNumbersForDivision<uint32_t> mag =
5040 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
5041 movl(rax, Immediate(mag.multiplier));
5042 imull(dividend);
5043 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
5044 if (divisor > 0 && neg) addl(rdx, dividend);
5045 if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
5046 if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
5047 movl(rax, dividend);
5048 shrl(rax, Immediate(31));
5049 addl(rdx, rax);
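// Illustrative sketch (not part of the V8 code above): the value TruncatingDiv
// leaves in rdx, written as plain C++. `multiplier` and `shift` stand for the
// MagicNumbersForDivision result used above; the helper name is hypothetical.
// The final addition of the dividend's sign bit makes the quotient truncate
// toward zero, matching integer division semantics.
#if 0
#include <cstdint>

int32_t TruncatingDivSketch(int32_t dividend, int32_t divisor,
                            uint32_t multiplier, int shift) {
  // imull(dividend) with eax == multiplier: the high 32 bits of the signed
  // 32x32 -> 64-bit product end up in edx.
  int64_t product =
      static_cast<int64_t>(dividend) * static_cast<int32_t>(multiplier);
  int32_t high = static_cast<int32_t>(product >> 32);
  bool multiplier_is_negative = (multiplier & (uint32_t{1} << 31)) != 0;
  if (divisor > 0 && multiplier_is_negative) high += dividend;
  if (divisor < 0 && !multiplier_is_negative && multiplier > 0) high -= dividend;
  if (shift > 0) high >>= shift;                  // sarl(rdx, shift)
  high += static_cast<uint32_t>(dividend) >> 31;  // add dividend's sign bit
  return high;  // == dividend / divisor, truncated toward zero
}
#endif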
5053 } // namespace internal
5054 } // namespace v8
5056 #endif // V8_TARGET_ARCH_X64