// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "bootstrapper.h"
#include "codegen.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h"
#include "serialize.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      has_frame_(false),
      root_array_available_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}

static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
  Address roots_register_value = kRootRegisterBias +
      reinterpret_cast<Address>(isolate->heap()->roots_array_start());
  intptr_t delta = other.address() - roots_register_value;
  return delta;
}
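
// This delta lets the macro assembler address an isolate-external value as
// Operand(kRootRegister, delta) whenever the delta fits in an int32 (see
// ExternalOperand, Load, Store and LoadAddress below), avoiding the 10-byte
// movq of a full 64-bit address into a scratch register.
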
Operand MacroAssembler::ExternalOperand(ExternalReference target,
                                        Register scratch) {
  if (root_array_available_ && !Serializer::enabled()) {
    intptr_t delta = RootRegisterDelta(target, isolate());
    if (is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      return Operand(kRootRegister, static_cast<int32_t>(delta));
    }
  }
  movq(scratch, target);
  return Operand(scratch, 0);
}

void MacroAssembler::Load(Register destination, ExternalReference source) {
  if (root_array_available_ && !Serializer::enabled()) {
    intptr_t delta = RootRegisterDelta(source, isolate());
    if (is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  if (destination.is(rax)) {
    load_rax(source);
  } else {
    movq(kScratchRegister, source);
    movq(destination, Operand(kScratchRegister, 0));
  }
}

void MacroAssembler::Store(ExternalReference destination, Register source) {
  if (root_array_available_ && !Serializer::enabled()) {
    intptr_t delta = RootRegisterDelta(destination, isolate());
    if (is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
      return;
    }
  }
  // Safe code.
  if (source.is(rax)) {
    store_rax(destination);
  } else {
    movq(kScratchRegister, destination);
    movq(Operand(kScratchRegister, 0), source);
  }
}

void MacroAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  if (root_array_available_ && !Serializer::enabled()) {
    intptr_t delta = RootRegisterDelta(source, isolate());
    if (is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
      return;
    }
  }
  // Safe code.
  movq(destination, source);
}

int MacroAssembler::LoadAddressSize(ExternalReference source) {
  if (root_array_available_ && !Serializer::enabled()) {
    // This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
    // instruction below.
    intptr_t delta = RootRegisterDelta(source, isolate());
    if (is_int32(delta)) {
      Serializer::TooLateToEnableNow();
      // Operand is lea(scratch, Operand(kRootRegister, delta));
      // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
      int size = 4;
      if (!is_int8(static_cast<int32_t>(delta))) {
        size += 3;  // Need full four-byte displacement in lea.
      }
      return size;
    }
  }
  // Size of movq(destination, src);
  return 10;
}

void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  movq(destination, Operand(kRootRegister,
                            (index << kPointerSizeLog2) - kRootRegisterBias));
}
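
// Example: with the x64 value kRootRegisterBias == 128 and kPointerSize == 8,
// the root at index 2 is read as movq(dst, Operand(kRootRegister, 2 * 8 - 128)),
// a single load with an 8-bit displacement and no separate address computation.
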
void MacroAssembler::LoadRootIndexed(Register destination,
                                     Register variable_offset,
                                     int fixed_offset) {
  ASSERT(root_array_available_);
  movq(destination,
       Operand(kRootRegister,
               variable_offset, times_pointer_size,
               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
}

void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
       source);
}


void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}

void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  cmpq(with, Operand(kRootRegister,
                     (index << kPointerSizeLog2) - kRootRegisterBias));
}


void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  ASSERT(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  cmpq(with, kScratchRegister);
}

void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register addr,
                                         Register scratch,
                                         SaveFPRegsMode save_fp,
                                         RememberedSetFinalAction and_then) {
  if (FLAG_debug_code) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // Load store buffer top.
  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Store pointer to buffer.
  movq(Operand(scratch, 0), addr);
  // Increment buffer top.
  addq(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
  // Call stub on end of buffer.
  Label done;
  // Check for end of buffer.
  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    ret(0);
    bind(&buffer_overflowed);
  } else {
    ASSERT(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  }
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ret(0);
  } else {
    ASSERT(and_then == kFallThroughAtEnd);
    bind(&done);
  }
}

void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch,
                                Label::Distance distance) {
  if (Serializer::enabled()) {
    // Can't do arithmetic on external references if it might get serialized.
    // The mask isn't really an address. We load it as an external reference in
    // case the size of the new space is different between the snapshot maker
    // and the running system.
    if (scratch.is(object)) {
      movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
      and_(scratch, kScratchRegister);
    } else {
      movq(scratch, ExternalReference::new_space_mask(isolate()));
      and_(scratch, object);
    }
    movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
    cmpq(scratch, kScratchRegister);
    j(cc, branch, distance);
  } else {
    ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
    intptr_t new_space_start =
        reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
    if (scratch.is(object)) {
      addq(scratch, kScratchRegister);
    } else {
      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
    }
    and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
    j(cc, branch, distance);
  }
}

void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are rsi.
  ASSERT(!value.is(rsi) && !dst.is(rsi));

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  lea(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    Label ok;
    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(
      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
  }
}

void MacroAssembler::RecordWriteArray(Register object,
                                      Register value,
                                      Register index,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Array access: calculate the destination address. Index is not a smi.
  Register dst = index;
  lea(dst, Operand(object, index, times_pointer_size,
                   FixedArray::kHeaderSize - kHeapObjectTag));

  RecordWrite(
      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
  }
}

void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register value,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are rsi.
  ASSERT(!value.is(rsi) && !address.is(rsi));

  ASSERT(!object.is(value));
  ASSERT(!object.is(address));
  ASSERT(!value.is(address));
  if (emit_debug_code()) {
    AbortIfSmi(object);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (FLAG_debug_code) {
    Label ok;
    cmpq(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
  CallStub(&stub);

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
  }
}

void MacroAssembler::Assert(Condition cc, const char* msg) {
  if (emit_debug_code()) Check(cc, msg);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Label ok;
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedDoubleArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedCOWArrayMapRootIndex);
    j(equal, &ok, Label::kNear);
    Abort("JSObject with fast elements map has slow elements");
    bind(&ok);
  }
}

void MacroAssembler::Check(Condition cc, const char* msg) {
  Label L;
  j(cc, &L, Label::kNear);
  Abort(msg);
  // Control will not return here.
  bind(&L);
}


void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    ASSERT(IsPowerOf2(frame_alignment));
    Label alignment_as_expected;
    testq(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected, Label::kNear);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}

void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  testl(result, result);
  j(not_zero, &ok, Label::kNear);
  testl(op, op);
  j(sign, then_label);
  bind(&ok);
}

void MacroAssembler::Abort(const char* msg) {
  // We want to pass the msg string like a smi to avoid GC
  // problems, however msg is not guaranteed to be aligned
  // properly. Instead, we pass an aligned pointer that is
  // a proper v8 smi, but also pass the alignment difference
  // from the real pointer as a smi.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif
  push(rax);
  movq(kScratchRegister, p0, RelocInfo::NONE);
  push(kScratchRegister);
  movq(kScratchRegister,
       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
       RelocInfo::NONE);
  push(kScratchRegister);

  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 2);
  } else {
    CallRuntime(Runtime::kAbort, 2);
  }
  // Control will not return here.
  int3();
}
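
// Worked example of the tagging trick above, assuming kSmiTagMask == 1 and
// kSmiTag == 0: for a msg string at address 0x100075, p0 == 0x100074 carries
// a valid smi tag (low bit clear) and p1 - p0 == 1, so the runtime can
// recover the original pointer from the two smi-safe values it is passed.
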
void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::StubReturn(int argc) {
  ASSERT(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
}

void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    addq(rsp, Immediate(num_arguments * kPointerSize));
  }
  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}

void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  // We want the smi-tagged index in key. Even if we subsequently go to
  // the slow case, converting the key to a smi is always valid.
  // key: string key
  // hash: key's hash field, including its array index value.
  and_(hash, Immediate(String::kArrayIndexValueMask));
  shr(hash, Immediate(String::kHashShift));
  // Here we actually clobber the key register, which will be used if calling
  // into the runtime later. However, since the new key is the numeric value
  // of a string key, there is no difference in using either key.
  Integer32ToSmi(index, hash);
}

void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(id), num_arguments);
}


void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
  const Runtime::Function* function = Runtime::FunctionForId(id);
  Set(rax, function->nargs);
  LoadAddress(rbx, ExternalReference(function, isolate()));
  CEntryStub ces(1, kSaveFPRegs);
  CallStub(&ces);
}

void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  LoadAddress(rbx, ExternalReference(f, isolate()));
  CEntryStub ces(f->result_size);
  CallStub(&ces);
}

void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Set(rax, num_arguments);
  LoadAddress(rbx, ext);

  CEntryStub stub(1);
  CallStub(&stub);
}

void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // ----------- S t a t e -------------
  //  -- rsp[0] : return address
  //  -- rsp[8] : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  // -----------------------------------

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  JumpToExternalReference(ext, result_size);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}

static int Offset(ExternalReference ref0, ExternalReference ref1) {
  int64_t offset = (ref0.address() - ref1.address());
  // Check that the offset fits into an int.
  ASSERT(static_cast<int>(offset) == offset);
  return static_cast<int>(offset);
}

void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
#ifdef _WIN64
  // We need to prepare a slot for the result handle on the stack and put
  // a pointer to it into the first argument register.
  EnterApiExitFrame(arg_stack_space + 1);

  // rcx must be used to pass the pointer to the return value slot.
  lea(rcx, StackSpaceOperand(arg_stack_space));
#else
  EnterApiExitFrame(arg_stack_space);
#endif
}

void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
                                              int stack_space) {
  Label empty_result;
  Label prologue;
  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;

  Factory* factory = isolate()->factory();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address();
  const int kNextOffset = 0;
  const int kLimitOffset = Offset(
      ExternalReference::handle_scope_limit_address(),
      next_address);
  const int kLevelOffset = Offset(
      ExternalReference::handle_scope_level_address(),
      next_address);
  ExternalReference scheduled_exception_address =
      ExternalReference::scheduled_exception_address(isolate());

  // Allocate HandleScope in callee-save registers.
  Register prev_next_address_reg = r14;
  Register prev_limit_reg = rbx;
  Register base_reg = r15;
  movq(base_reg, next_address);
  movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
  movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
  addl(Operand(base_reg, kLevelOffset), Immediate(1));
  // Call the api function!
  movq(rax, reinterpret_cast<int64_t>(function_address),
       RelocInfo::RUNTIME_ENTRY);
  call(rax);

#ifdef _WIN64
  // rax keeps a pointer to v8::Handle, unpack it.
  movq(rax, Operand(rax, 0));
#endif
  // Check if the result handle holds 0.
  testq(rax, rax);
  j(zero, &empty_result);
  // It was non-zero. Dereference to get the result value.
  movq(rax, Operand(rax, 0));
  bind(&prologue);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  subl(Operand(base_reg, kLevelOffset), Immediate(1));
  movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
  cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
  j(not_equal, &delete_allocated_handles);
  bind(&leave_exit_frame);

  // Check if the function scheduled an exception.
  movq(rsi, scheduled_exception_address);
  Cmp(Operand(rsi, 0), factory->the_hole_value());
  j(not_equal, &promote_scheduled_exception);

  LeaveApiExitFrame();
  ret(stack_space * kPointerSize);

  bind(&promote_scheduled_exception);
  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);

  bind(&empty_result);
  // It was zero; the result is undefined.
  Move(rax, factory->undefined_value());
  jmp(&prologue);

  // HandleScope limit has changed. Delete allocated extensions.
  bind(&delete_allocated_handles);
  movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
  movq(prev_limit_reg, rax);
#ifdef _WIN64
  LoadAddress(rcx, ExternalReference::isolate_address());
#else
  LoadAddress(rdi, ExternalReference::isolate_address());
#endif
  LoadAddress(rax,
              ExternalReference::delete_handle_scope_extensions(isolate()));
  call(rax);
  movq(rax, prev_limit_reg);
  jmp(&leave_exit_frame);
}

void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             int result_size) {
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  CEntryStub ces(result_size);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}

void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinEntry(rdx, id);
  InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
}

void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into the target register.
  movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  movq(target, FieldOperand(target,
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(rdi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(rdi, id);
  movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}

static const Register saved_regs[] =
    { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);


void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                     Register exclusion1,
                                     Register exclusion2,
                                     Register exclusion3) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      push(reg);
    }
  }
  // r12 to r15 are callee-saved on all platforms.
  if (fp_mode == kSaveFPRegs) {
    CpuFeatures::Scope scope(SSE2);
    subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(rsp, i * kDoubleSize), reg);
    }
  }
}

void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
                                    Register exclusion1,
                                    Register exclusion2,
                                    Register exclusion3) {
  if (fp_mode == kSaveFPRegs) {
    CpuFeatures::Scope scope(SSE2);
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(rsp, i * kDoubleSize));
    }
    addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
  }
  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    Register reg = saved_regs[i];
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
      pop(reg);
    }
  }
}

void MacroAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(dst, x, RelocInfo::NONE);
  }
}


void MacroAssembler::Set(const Operand& dst, int64_t x) {
  if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    Set(kScratchRegister, x);
    movq(dst, kScratchRegister);
  }
}

// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.

Register MacroAssembler::GetSmiConstant(Smi* source) {
  int value = source->value();
  if (value == 0) {
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
  }
  if (value == 1) {
    return kSmiConstantRegister;
  }
  LoadSmiConstant(kScratchRegister, source);
  return kScratchRegister;
}

void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
  if (emit_debug_code()) {
    movq(dst,
         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
         RelocInfo::NONE);
    cmpq(dst, kSmiConstantRegister);
    if (allow_stub_calls()) {
      Assert(equal, "Uninitialized kSmiConstantRegister");
    } else {
      Label ok;
      j(equal, &ok, Label::kNear);
      int3();
      bind(&ok);
    }
  }
  int value = source->value();
  if (value == 0) {
    xorl(dst, dst);
    return;
  }
  bool negative = value < 0;
  unsigned int uvalue = negative ? -value : value;

  switch (uvalue) {
    case 9:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
      break;
    case 8:
      xorl(dst, dst);
      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
      break;
    case 4:
      xorl(dst, dst);
      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
      break;
    case 5:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
      break;
    case 3:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
      break;
    case 2:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
      break;
    case 1:
      movq(dst, kSmiConstantRegister);
      break;
    case 0:
      UNREACHABLE();
      return;
    default:
      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
      return;
  }
  if (negative) {
    neg(dst);
  }
}
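
// The lea forms above synthesize small smi constants from
// kSmiConstantRegister, which holds Smi::FromInt(kSmiConstantRegisterValue),
// i.e. Smi::FromInt(1): for example, base + index * 8 with both operands
// kSmiConstantRegister yields 9 * Smi::FromInt(1) == Smi::FromInt(9) in one
// short instruction instead of a 10-byte movq of a 64-bit immediate.
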
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  shl(dst, Immediate(kSmiShift));
}


void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    testb(dst, Immediate(0x01));
    Label ok;
    j(zero, &ok, Label::kNear);
    if (allow_stub_calls()) {
      Abort("Integer32ToSmiField writing to non-smi location");
    } else {
      int3();
    }
    bind(&ok);
  }
  ASSERT(kSmiShift % kBitsPerByte == 0);
  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
}


void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                Register src,
                                                int constant) {
  if (dst.is(src)) {
    addl(dst, Immediate(constant));
  } else {
    leal(dst, Operand(src, constant));
  }
  shl(dst, Immediate(kSmiShift));
}
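
// On x64 a smi stores its 32-bit payload in the upper half of the word
// (kSmiShift == 32), so Integer32ToSmiField can write the payload with a
// single movl at byte offset kSmiShift / kBitsPerByte == 4; the debug check
// above ensures the slot already holds a smi, i.e. its low tag word is zero.
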
void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movq(dst, src);
  }
  shr(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
}


void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  if (!dst.is(src)) {
    movq(dst, src);
  }
  sar(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
}


void MacroAssembler::SmiTest(Register src) {
  testq(src, src);
}

void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
  if (emit_debug_code()) {
    AbortIfNotSmi(smi1);
    AbortIfNotSmi(smi2);
  }
  cmpq(smi1, smi2);
}


void MacroAssembler::SmiCompare(Register dst, Smi* src) {
  if (emit_debug_code()) {
    AbortIfNotSmi(dst);
  }
  Cmp(dst, src);
}


void MacroAssembler::Cmp(Register dst, Smi* src) {
  ASSERT(!dst.is(kScratchRegister));
  if (src->value() == 0) {
    testq(dst, dst);
  } else {
    Register constant_reg = GetSmiConstant(src);
    cmpq(dst, constant_reg);
  }
}


void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
  if (emit_debug_code()) {
    AbortIfNotSmi(dst);
    AbortIfNotSmi(src);
  }
  cmpq(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    AbortIfNotSmi(dst);
    AbortIfNotSmi(src);
  }
  cmpq(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
  if (emit_debug_code()) {
    AbortIfNotSmi(dst);
  }
  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}


void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  ASSERT(!dst.AddressUsesRegister(smi_reg));
  cmpq(dst, smi_reg);
}


void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
}

void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                           Register src,
                                                           int power) {
  ASSERT(power >= 0);
  ASSERT(power < 64);
  if (power == 0) {
    SmiToInteger64(dst, src);
    return;
  }
  if (!dst.is(src)) {
    movq(dst, src);
  }
  if (power < kSmiShift) {
    sar(dst, Immediate(kSmiShift - power));
  } else if (power > kSmiShift) {
    shl(dst, Immediate(power - kSmiShift));
  }
}


void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                         Register src,
                                                         int power) {
  ASSERT((0 <= power) && (power < 32));
  if (dst.is(src)) {
    shr(dst, Immediate(power + kSmiShift));
  } else {
    UNIMPLEMENTED();  // Not used.
  }
}

void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 Label* on_not_smis,
                                 Label::Distance near_jump) {
  if (dst.is(src1) || dst.is(src2)) {
    ASSERT(!src1.is(kScratchRegister));
    ASSERT(!src2.is(kScratchRegister));
    movq(kScratchRegister, src1);
    or_(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
    movq(dst, kScratchRegister);
  } else {
    movq(dst, src1);
    or_(dst, src2);
    JumpIfNotSmi(dst, on_not_smis, near_jump);
  }
}

Condition MacroAssembler::CheckSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckSmi(const Operand& src) {
  STATIC_ASSERT(kSmiTag == 0);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
  STATIC_ASSERT(kSmiTag == 0);
  // Test that both bits of the mask 0x8000000000000001 are zero.
  movq(kScratchRegister, src);
  rol(kScratchRegister, Immediate(1));
  testb(kScratchRegister, Immediate(3));
  return zero;
}
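
// The rol above moves the sign bit (bit 63) into bit 0 and the smi tag bit
// into bit 1, so the mask 0x8000000000000001 collapses to 3 after rotating;
// one testb then checks "is a smi" and "is non-negative" simultaneously.
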
Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
  leal(kScratchRegister, Operand(first, second, times_1, 0));
  testb(kScratchRegister, Immediate(0x03));
  return zero;
}


Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
                                                  Register second) {
  if (first.is(second)) {
    return CheckNonNegativeSmi(first);
  }
  movq(kScratchRegister, first);
  or_(kScratchRegister, second);
  rol(kScratchRegister, Immediate(1));
  testl(kScratchRegister, Immediate(3));
  return zero;
}
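
// In CheckBothSmi above, adding the tagged values with leal cannot carry out
// of the low two bits: smis end in binary 00 and heap objects in 01, so the
// sum ends in 00 only when both operands are smis, letting one testb check
// both registers at once.
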
Condition MacroAssembler::CheckEitherSmi(Register first,
                                         Register second,
                                         Register scratch) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  if (scratch.is(second)) {
    andl(scratch, first);
  } else {
    if (!scratch.is(first)) {
      movl(scratch, first);
    }
    andl(scratch, second);
  }
  testb(scratch, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckIsMinSmi(Register src) {
  ASSERT(!src.is(kScratchRegister));
  // If we overflow by subtracting one, it's the minimal smi value.
  cmpq(src, kSmiConstantRegister);
  return overflow;
}

Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  // A 32-bit integer value can always be converted to a smi.
  return always;
}


Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
  // An unsigned 32-bit integer value is valid as long as the high bit
  // is not set.
  testl(src, src);
  return positive;
}


void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
  if (dst.is(src)) {
    andl(dst, Immediate(kSmiTagMask));
  } else {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  }
}


void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
  if (!(src.AddressUsesRegister(dst))) {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  } else {
    movl(dst, src);
    andl(dst, Immediate(kSmiTagMask));
  }
}

void MacroAssembler::JumpIfNotValidSmiValue(Register src,
                                            Label* on_invalid,
                                            Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}


void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
                                                Label* on_invalid,
                                                Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}


void MacroAssembler::JumpIfSmi(Register src,
                               Label* on_smi,
                               Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(smi, on_smi, near_jump);
}


void MacroAssembler::JumpIfNotSmi(Register src,
                                  Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}


void MacroAssembler::JumpUnlessNonNegativeSmi(
    Register src, Label* on_not_smi_or_negative,
    Label::Distance near_jump) {
  Condition non_negative_smi = CheckNonNegativeSmi(src);
  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
}


void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             Smi* constant,
                                             Label* on_equals,
                                             Label::Distance near_jump) {
  SmiCompare(src, constant);
  j(equal, on_equals, near_jump);
}


void MacroAssembler::JumpIfNotBothSmi(Register src1,
                                      Register src2,
                                      Label* on_not_both_smi,
                                      Label::Distance near_jump) {
  Condition both_smi = CheckBothSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}


void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
                                                  Register src2,
                                                  Label* on_not_both_smi,
                                                  Label::Distance near_jump) {
  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}

void MacroAssembler::SmiTryAddConstant(Register dst,
                                       Register src,
                                       Smi* constant,
                                       Label* on_not_smi_result,
                                       Label::Distance near_jump) {
  // Does not assume that src is a smi.
  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src.is(kScratchRegister));

  JumpIfNotSmi(src, on_not_smi_result, near_jump);
  Register tmp = (dst.is(src) ? kScratchRegister : dst);
  LoadSmiConstant(tmp, constant);
  addq(tmp, src);
  j(overflow, on_not_smi_result, near_jump);
  if (dst.is(src)) {
    movq(dst, tmp);
  }
}

void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
    return;
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    switch (constant->value()) {
      case 1:
        addq(dst, kSmiConstantRegister);
        return;
      case 2:
        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        return;
      case 4:
        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        return;
      case 8:
        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        return;
      default:
        Register constant_reg = GetSmiConstant(constant);
        addq(dst, constant_reg);
        return;
    }
  } else {
    switch (constant->value()) {
      case 1:
        lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
        return;
      case 2:
        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        return;
      case 4:
        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        return;
      case 8:
        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        return;
      default:
        LoadSmiConstant(dst, constant);
        addq(dst, src);
        return;
    }
  }
}


void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
  if (constant->value() != 0) {
    addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
  }
}

void MacroAssembler::SmiAddConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    Label* on_not_smi_result,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));

    LoadSmiConstant(kScratchRegister, constant);
    addq(kScratchRegister, src);
    j(overflow, on_not_smi_result, near_jump);
    movq(dst, kScratchRegister);
  } else {
    LoadSmiConstant(dst, constant);
    addq(dst, src);
    j(overflow, on_not_smi_result, near_jump);
  }
}

void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    subq(dst, constant_reg);
  } else {
    if (constant->value() == Smi::kMinValue) {
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result, it only
      // differs on the overflow bit, which we don't check here.
      addq(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
      addq(dst, src);
    }
  }
}

void MacroAssembler::SmiSubConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    Label* on_not_smi_result,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    if (constant->value() == Smi::kMinValue) {
      // Subtracting min-value from any non-negative value will overflow.
      // We test the non-negativeness before doing the subtraction.
      testq(src, src);
      j(not_sign, on_not_smi_result, near_jump);
      LoadSmiConstant(kScratchRegister, constant);
      subq(dst, kScratchRegister);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
      addq(kScratchRegister, dst);
      j(overflow, on_not_smi_result, near_jump);
      movq(dst, kScratchRegister);
    }
  } else {
    if (constant->value() == Smi::kMinValue) {
      // Subtracting min-value from any non-negative value will overflow.
      // We test the non-negativeness before doing the subtraction.
      testq(src, src);
      j(not_sign, on_not_smi_result, near_jump);
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result, it only
      // differs on the overflow bit, which we don't check here.
      addq(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
      addq(dst, src);
      j(overflow, on_not_smi_result, near_jump);
    }
  }
}

void MacroAssembler::SmiNeg(Register dst,
                            Register src,
                            Label* on_smi_result,
                            Label::Distance near_jump) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    movq(kScratchRegister, src);
    neg(dst);  // Low 32 bits are retained as zero by negation.
    // Test if result is zero or Smi::kMinValue.
    cmpq(dst, kScratchRegister);
    j(not_equal, on_smi_result, near_jump);
    movq(src, kScratchRegister);
  } else {
    movq(dst, src);
    neg(dst);
    cmpq(dst, src);
    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
    j(not_equal, on_smi_result, near_jump);
  }
}

void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  if (dst.is(src1)) {
    movq(kScratchRegister, src1);
    addq(kScratchRegister, src2);
    j(overflow, on_not_smi_result, near_jump);
    movq(dst, kScratchRegister);
  } else {
    movq(dst, src1);
    addq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  if (dst.is(src1)) {
    movq(kScratchRegister, src1);
    addq(kScratchRegister, src2);
    j(overflow, on_not_smi_result, near_jump);
    movq(dst, kScratchRegister);
  } else {
    ASSERT(!src2.AddressUsesRegister(dst));
    movq(dst, src1);
    addq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible.
  if (!dst.is(src1)) {
    if (emit_debug_code()) {
      movq(kScratchRegister, src1);
      addq(kScratchRegister, src2);
      Check(no_overflow, "Smi addition overflow");
    }
    lea(dst, Operand(src1, src2, times_1, 0));
  } else {
    addq(dst, src2);
    Assert(no_overflow, "Smi addition overflow");
  }
}

void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  if (dst.is(src1)) {
    cmpq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
    subq(dst, src2);
  } else {
    movq(dst, src1);
    subq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  ASSERT(!dst.is(src2));
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  subq(dst, src2);
  Assert(no_overflow, "Smi subtraction overflow");
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  if (dst.is(src1)) {
    movq(kScratchRegister, src2);
    cmpq(src1, kScratchRegister);
    j(overflow, on_not_smi_result, near_jump);
    subq(src1, kScratchRegister);
  } else {
    movq(dst, src1);
    subq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  subq(dst, src2);
  Assert(no_overflow, "Smi subtraction overflow");
}

void MacroAssembler::SmiMul(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!dst.is(src2));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));

  if (dst.is(src1)) {
    Label failure, zero_correct_result;
    movq(kScratchRegister, src1);  // Create backup for later testing.
    SmiToInteger64(dst, src1);
    imul(dst, src2);
    j(overflow, &failure, Label::kNear);

    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testq(dst, dst);
    j(not_zero, &correct_result, Label::kNear);

    movq(dst, kScratchRegister);
    xor_(dst, src2);
    // Result was positive zero.
    j(positive, &zero_correct_result, Label::kNear);

    bind(&failure);  // Reused failure exit, restores src1.
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);

    bind(&zero_correct_result);
    Set(dst, 0);

    bind(&correct_result);
  } else {
    SmiToInteger64(dst, src1);
    imul(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testq(dst, dst);
    j(not_zero, &correct_result, Label::kNear);
    // One of src1 and src2 is zero, so check whether the other is
    // negative.
    movq(kScratchRegister, src1);
    xor_(kScratchRegister, src2);
    j(negative, on_not_smi_result, near_jump);
    bind(&correct_result);
  }
}

void MacroAssembler::SmiDiv(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  testq(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movq(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  // We need to rule out dividing Smi::kMinValue by -1, since that would
  // overflow in idiv and raise an exception.
  // We combine this with negative zero test (negative zero only happens
  // when dividing zero by a negative number).

  // We overshoot a little and go to slow case if we divide min-value
  // by any negative value, not just -1.
  Label safe_div;
  testl(rax, Immediate(0x7fffffff));
  j(not_zero, &safe_div, Label::kNear);
  testq(src2, src2);
  if (src1.is(rax)) {
    j(positive, &safe_div, Label::kNear);
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
  } else {
    j(negative, on_not_smi_result, near_jump);
  }
  bind(&safe_div);

  SmiToInteger32(src2, src2);
  // Sign extend src1 into edx:eax.
  cdq();
  idivl(src2);
  Integer32ToSmi(src2, src2);
  // Check that the remainder is zero.
  testl(rdx, rdx);
  if (src1.is(rax)) {
    Label smi_result;
    j(zero, &smi_result, Label::kNear);
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
    bind(&smi_result);
  } else {
    j(not_zero, on_not_smi_result, near_jump);
  }
  if (!dst.is(src1) && src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  Integer32ToSmi(dst, rax);
}

void MacroAssembler::SmiMod(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));
  ASSERT(!src1.is(src2));

  testq(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movq(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  SmiToInteger32(src2, src2);

  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
  Label safe_div;
  cmpl(rax, Immediate(Smi::kMinValue));
  j(not_equal, &safe_div, Label::kNear);
  cmpl(src2, Immediate(-1));
  j(not_equal, &safe_div, Label::kNear);
  // Retag inputs and go slow case.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  jmp(on_not_smi_result, near_jump);
  bind(&safe_div);

  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  // Restore smi tags on inputs.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  // Check for a negative zero result. If the result is zero, and the
  // dividend is negative, go slow to return a floating point negative zero.
  Label smi_result;
  testl(rdx, rdx);
  j(not_zero, &smi_result, Label::kNear);
  testq(src1, src1);
  j(negative, on_not_smi_result, near_jump);
  bind(&smi_result);
  Integer32ToSmi(dst, rdx);
}

void MacroAssembler::SmiNot(Register dst, Register src) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src.is(kScratchRegister));
  // Set tag and padding bits before negating, so that they are zero
  // afterwards.
  movl(kScratchRegister, Immediate(~0));
  if (dst.is(src)) {
    xor_(dst, kScratchRegister);
  } else {
    lea(dst, Operand(src, kScratchRegister, times_1, 0));
  }
  not_(dst);
}

void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
  ASSERT(!dst.is(src2));
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  and_(dst, src2);
}


void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    Set(dst, 0);
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    and_(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    and_(dst, src);
  }
}


void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    ASSERT(!src1.is(src2));
    movq(dst, src1);
  }
  or_(dst, src2);
}


void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    or_(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    or_(dst, src);
  }
}


void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    ASSERT(!src1.is(src2));
    movq(dst, src1);
  }
  xor_(dst, src2);
}


void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    xor_(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    xor_(dst, src);
  }
}

void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                     Register src,
                                                     int shift_value) {
  ASSERT(is_uint5(shift_value));
  if (shift_value > 0) {
    if (dst.is(src)) {
      sar(dst, Immediate(shift_value + kSmiShift));
      shl(dst, Immediate(kSmiShift));
    } else {
      UNIMPLEMENTED();  // Not used.
    }
  }
}


void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                          Register src,
                                          int shift_value) {
  if (!dst.is(src)) {
    movq(dst, src);
  }
  if (shift_value > 0) {
    shl(dst, Immediate(shift_value));
  }
}


void MacroAssembler::SmiShiftLogicalRightConstant(
    Register dst, Register src, int shift_value,
    Label* on_not_smi_result, Label::Distance near_jump) {
  // Logical right shift interprets its result as an *unsigned* number.
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    movq(dst, src);
    if (shift_value == 0) {
      testq(dst, dst);
      j(negative, on_not_smi_result, near_jump);
    }
    shr(dst, Immediate(shift_value + kSmiShift));
    shl(dst, Immediate(kSmiShift));
  }
}

void MacroAssembler::SmiShiftLeft(Register dst,
                                  Register src1,
                                  Register src2) {
  ASSERT(!dst.is(rcx));
  // Untag shift amount.
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  SmiToInteger32(rcx, src2);
  // Shift amount specified by lower 5 bits, not six as in the shl opcode.
  and_(rcx, Immediate(0x1f));
  shl_cl(dst);
}

void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Register src1,
                                          Register src2,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(rcx));
  // dst and src1 can be the same, because the one case that bails out
  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
  if (src1.is(rcx) || src2.is(rcx)) {
    movq(kScratchRegister, rcx);
  }
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  SmiToInteger32(rcx, src2);
  orl(rcx, Immediate(kSmiShift));
  shr_cl(dst);  // Shift is (rcx & 0x1f) + 32.
  shl(dst, Immediate(kSmiShift));
  testq(dst, dst);
  if (src1.is(rcx) || src2.is(rcx)) {
    Label positive_result;
    j(positive, &positive_result, Label::kNear);
    if (src1.is(rcx)) {
      movq(src1, kScratchRegister);
    } else {
      movq(src2, kScratchRegister);
    }
    jmp(on_not_smi_result, near_jump);
    bind(&positive_result);
  } else {
    // src2 was zero and src1 negative.
    j(negative, on_not_smi_result, near_jump);
  }
}

void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                             Register src1,
                                             Register src2) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(rcx));
  if (src1.is(rcx)) {
    movq(kScratchRegister, src1);
  } else if (src2.is(rcx)) {
    movq(kScratchRegister, src2);
  }
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  SmiToInteger32(rcx, src2);
  orl(rcx, Immediate(kSmiShift));
  sar_cl(dst);  // Shift is (rcx & 0x1f) + 32.
  shl(dst, Immediate(kSmiShift));
  if (src1.is(rcx)) {
    movq(src1, kScratchRegister);
  } else if (src2.is(rcx)) {
    movq(src2, kScratchRegister);
  }
}

void MacroAssembler::SelectNonSmi(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smis,
                                  Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(src1));
  ASSERT(!dst.is(src2));
  // Both operands must not be smis.
#ifdef DEBUG
  if (allow_stub_calls()) {  // Check contains a stub call.
    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
  }
#endif
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(0, Smi::FromInt(0));
  movl(kScratchRegister, Immediate(kSmiTagMask));
  and_(kScratchRegister, src1);
  testl(kScratchRegister, src2);
  // If non-zero then both are smis.
  j(not_zero, on_not_smis, near_jump);

  // Exactly one operand is a smi.
  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
  subq(kScratchRegister, Immediate(1));
  // If src1 is a smi, then scratch register all 1s, else it is all 0s.
  movq(dst, src1);
  xor_(dst, src2);
  and_(dst, kScratchRegister);
  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
  xor_(dst, src1);
  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}

SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                    Register src,
                                    int shift) {
  ASSERT(is_uint6(shift));
  // There is a possible optimization if shift is in the range 60-63, but that
  // will (and must) never happen.
  if (!dst.is(src)) {
    movq(dst, src);
  }
  if (shift < kSmiShift) {
    sar(dst, Immediate(kSmiShift - shift));
  } else {
    shl(dst, Immediate(shift - kSmiShift));
  }
  return SmiIndex(dst, times_1);
}


SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                            Register src,
                                            int shift) {
  // Register src holds a positive smi.
  ASSERT(is_uint6(shift));
  if (!dst.is(src)) {
    movq(dst, src);
  }
  neg(dst);
  if (shift < kSmiShift) {
    sar(dst, Immediate(kSmiShift - shift));
  } else {
    shl(dst, Immediate(shift - kSmiShift));
  }
  return SmiIndex(dst, times_1);
}
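
// Since a smi holds value << 32 (kSmiShift == 32), scaling by 2^shift needs
// only one shift: e.g. for shift == kPointerSizeLog2 (3), SmiToIndex
// arithmetic-shifts the smi right by 32 - 3 == 29, producing value * 8 ready
// for use as an index.
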
void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
  ASSERT_EQ(0, kSmiShift % kBitsPerByte);
  addl(dst, Operand(src, kSmiShift / kBitsPerByte));
}

void MacroAssembler::JumpIfNotString(Register object,
                                     Register object_map,
                                     Label* not_string,
                                     Label::Distance near_jump) {
  Condition is_smi = CheckSmi(object);
  j(is_smi, not_string, near_jump);
  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
  j(above_equal, not_string, near_jump);
}

void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
    Register first_object,
    Register second_object,
    Register scratch1,
    Register scratch2,
    Label* on_fail,
    Label::Distance near_jump) {
  // Check that both objects are not smis.
  Condition either_smi = CheckEitherSmi(first_object, second_object);
  j(either_smi, on_fail, near_jump);

  // Load instance type for both strings.
  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));

  // Check that both are flat ascii strings.
  ASSERT(kNotStringTag != 0);
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;

  andl(scratch1, Immediate(kFlatAsciiStringMask));
  andl(scratch2, Immediate(kFlatAsciiStringMask));
  // Interleave the bits to check both scratch1 and scratch2 in one test.
  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmpl(scratch1,
       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
  j(not_equal, on_fail, near_jump);
}
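
// The lea above packs the two masked instance-type words into disjoint bit
// ranges (scratch1 + scratch2 * 8); the preceding ASSERT_EQ guarantees the
// mask shifted by three bits does not overlap itself, so a single cmpl
// validates both strings against the flat ascii tag.
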
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
    Register instance_type,
    Register scratch,
    Label* failure,
    Label::Distance near_jump) {
  if (!scratch.is(instance_type)) {
    movl(scratch, instance_type);
  }

  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;

  andl(scratch, Immediate(kFlatAsciiStringMask));
  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
  j(not_equal, failure, near_jump);
}

void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first_object_instance_type,
    Register second_object_instance_type,
    Register scratch1,
    Register scratch2,
    Label* on_fail,
    Label::Distance near_jump) {
  // Load instance type for both strings.
  movq(scratch1, first_object_instance_type);
  movq(scratch2, second_object_instance_type);

  // Check that both are flat ascii strings.
  ASSERT(kNotStringTag != 0);
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;

  andl(scratch1, Immediate(kFlatAsciiStringMask));
  andl(scratch2, Immediate(kFlatAsciiStringMask));
  // Interleave the bits to check both scratch1 and scratch2 in one test.
  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmpl(scratch1,
       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
  j(not_equal, on_fail, near_jump);
}

void MacroAssembler::Move(Register dst, Register src) {
  if (!dst.is(src)) {
    movq(dst, src);
  }
}


void MacroAssembler::Move(Register dst, Handle<Object> source) {
  ASSERT(!source->IsFailure());
  if (source->IsSmi()) {
    Move(dst, Smi::cast(*source));
  } else {
    movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
  }
}


void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
  ASSERT(!source->IsFailure());
  if (source->IsSmi()) {
    Move(dst, Smi::cast(*source));
  } else {
    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
    movq(dst, kScratchRegister);
  }
}

void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    Move(kScratchRegister, source);
    cmpq(dst, kScratchRegister);
  }
}


void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    ASSERT(source->IsHeapObject());
    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
    cmpq(dst, kScratchRegister);
  }
}

void MacroAssembler::Push(Handle<Object> source) {
  if (source->IsSmi()) {
    Push(Smi::cast(*source));
  } else {
    ASSERT(source->IsHeapObject());
    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
    push(kScratchRegister);
  }
}


void MacroAssembler::Push(Smi* source) {
  intptr_t smi = reinterpret_cast<intptr_t>(source);
  if (is_int32(smi)) {
    push(Immediate(static_cast<int32_t>(smi)));
  } else {
    Register constant = GetSmiConstant(source);
    push(constant);
  }
}

void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements > 0) {
    addq(rsp, Immediate(stack_elements * kPointerSize));
  }
}


void MacroAssembler::Test(const Operand& src, Smi* source) {
  testl(Operand(src, kIntSize), Immediate(source->value()));
}


void MacroAssembler::TestBit(const Operand& src, int bits) {
  int byte_offset = bits / kBitsPerByte;
  int bit_in_byte = bits & (kBitsPerByte - 1);
  testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
}
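
// Example: for bits == 11, TestBit probes bit 11 & 7 == 3 of the byte at
// offset 11 / 8 == 1, touching a single byte rather than a whole word and
// keeping the immediate small.
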
void MacroAssembler::Jump(ExternalReference ext) {
  LoadAddress(kScratchRegister, ext);
  jmp(kScratchRegister);
}


void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
  movq(kScratchRegister, destination, rmode);
  jmp(kScratchRegister);
}


void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
  // TODO(X64): Inline this
  jmp(code_object, rmode);
}



int MacroAssembler::CallSize(ExternalReference ext) {
  // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
  const int kCallInstructionSize = 3;
  return LoadAddressSize(ext) + kCallInstructionSize;
}


void MacroAssembler::Call(ExternalReference ext) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(ext);
#endif
  LoadAddress(kScratchRegister, ext);
  call(kScratchRegister);
#ifdef DEBUG
  CHECK_EQ(end_position, pc_offset());
#endif
}


void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(destination, rmode);
#endif
  movq(kScratchRegister, destination, rmode);
  call(kScratchRegister);
#ifdef DEBUG
  CHECK_EQ(pc_offset(), end_position);
#endif
}


void MacroAssembler::Call(Handle<Code> code_object,
                          RelocInfo::Mode rmode,
                          unsigned ast_id) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(code_object);
#endif
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  call(code_object, rmode, ast_id);
#ifdef DEBUG
  CHECK_EQ(end_position, pc_offset());
#endif
}


void MacroAssembler::Pushad() {
  push(rax);
  push(rcx);
  push(rdx);
  push(rbx);
  // Not pushing rsp or rbp.
  push(rsi);
  push(rdi);
  push(r8);
  push(r9);
  // r10 is kScratchRegister.
  push(r11);
  // r12 is kSmiConstantRegister.
  // r13 is kRootRegister.
  push(r14);
  push(r15);
  STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
  // Use lea for symmetry with Popad.
  int sp_delta =
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
  lea(rsp, Operand(rsp, -sp_delta));
}


void MacroAssembler::Popad() {
  // Popad must not change the flags, so use lea instead of addq.
  int sp_delta =
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
  lea(rsp, Operand(rsp, sp_delta));
  pop(r15);
  pop(r14);
  pop(r11);
  pop(r9);
  pop(r8);
  pop(rdi);
  pop(rsi);
  pop(rbx);
  pop(rdx);
  pop(rcx);
  pop(rax);
}


void MacroAssembler::Dropad() {
  addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}


// Order in which general registers are pushed by Pushad:
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
    0, 1, 2, 3, -1, -1, 4, 5, 6, 7, -1, 8, -1, -1, 9, 10
};


void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
  movq(SafepointRegisterSlot(dst), src);
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  movq(dst, SafepointRegisterSlot(src));
}


Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);

  // The pc (return address) is already on TOS. This code pushes state,
  // frame pointer, context, and current handler.
  if (try_location == IN_JAVASCRIPT) {
    if (type == TRY_CATCH_HANDLER) {
      push(Immediate(StackHandler::TRY_CATCH));
    } else {
      push(Immediate(StackHandler::TRY_FINALLY));
    }
    push(rbp);
    push(rsi);
  } else {
    ASSERT(try_location == IN_JS_ENTRY);
    // The frame pointer does not point to a JS frame so we save NULL
    // for rbp. We expect the code throwing an exception to check rbp
    // before dereferencing it to restore the context.
    push(Immediate(StackHandler::ENTRY));
    push(Immediate(0));  // NULL frame pointer.
    Push(Smi::FromInt(0));  // No context.
  }
  // Save the current handler.
  Operand handler_operand =
      ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
  push(handler_operand);
  // Link this handler.
  movq(handler_operand, rsp);
}
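
// Resulting try-handler frame, from lowest address (rsp) upward, as pinned
// down by the StackHandlerConstants asserts above:
//   rsp + 0 * kPointerSize : next handler  (kNextOffset)
//   rsp + 1 * kPointerSize : context       (kContextOffset)
//   rsp + 2 * kPointerSize : frame pointer (kFPOffset)
//   rsp + 3 * kPointerSize : state         (kStateOffset)
//   rsp + 4 * kPointerSize : return pc     (kPCOffset)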


void MacroAssembler::PopTryHandler() {
  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
  // Unlink this handler.
  Operand handler_operand =
      ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
  pop(handler_operand);
  // Remove the remaining fields.
  addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}


void MacroAssembler::Throw(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
  // Keep thrown value in rax.
  if (!value.is(rax)) {
    movq(rax, value);
  }

  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  Operand handler_operand = ExternalOperand(handler_address);
  movq(rsp, handler_operand);
  // Get the next handler in the chain.
  pop(handler_operand);
  pop(rsi);  // Context.
  pop(rbp);  // Frame pointer.
  pop(rdx);  // State.

  // If the handler is a JS frame, restore the context to the frame.
  // (rdx == ENTRY) == (rbp == 0) == (rsi == 0), so we could test any
  // of them.
  Label skip;
  cmpq(rdx, Immediate(StackHandler::ENTRY));
  j(equal, &skip, Label::kNear);
  movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
  bind(&skip);

  ret(0);
}


void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
                                      Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
  // Keep thrown value in rax.
  if (!value.is(rax)) {
    movq(rax, value);
  }
  // Fetch top stack handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  Load(rsp, handler_address);

  // Unwind the handlers until the ENTRY handler is found.
  Label done, loop;
  bind(&loop);
  // Load the type of the current stack handler.
  const int kStateOffset = StackHandlerConstants::kStateOffset;
  cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
  j(equal, &done, Label::kNear);
  // Fetch the next handler in the list.
  const int kNextOffset = StackHandlerConstants::kNextOffset;
  movq(rsp, Operand(rsp, kNextOffset));
  jmp(&loop);
  bind(&done);

  // Set the top handler address to next handler past the current ENTRY handler.
  Operand handler_operand = ExternalOperand(handler_address);
  pop(handler_operand);

  if (type == OUT_OF_MEMORY) {
    // Set external caught exception to false.
    ExternalReference external_caught(
        Isolate::kExternalCaughtExceptionAddress, isolate());
    Set(rax, static_cast<int64_t>(false));
    Store(external_caught, rax);

    // Set pending exception and rax to out of memory exception.
    ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                        isolate());
    movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
    Store(pending_exception, rax);
  }

  // Discard the context saved in the handler and clear the context pointer.
  pop(rdx);
  Set(rsi, 0);

  pop(rbp);  // Restore frame pointer.
  pop(rdx);  // Discard state.

  ret(0);
}


void MacroAssembler::Ret() {
  ret(0);
}


void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
  if (is_uint16(bytes_dropped)) {
    ret(bytes_dropped);
  } else {
    pop(scratch);
    addq(rsp, Immediate(bytes_dropped));
    push(scratch);
    ret(0);
  }
}


void MacroAssembler::FCmp() {
  fucomip();
  fstp(0);
}


void MacroAssembler::CmpObjectType(Register heap_object,
                                   InstanceType type,
                                   Register map) {
  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  CmpInstanceType(map, type);
}


void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
       Immediate(static_cast<int8_t>(type)));
}


void MacroAssembler::CheckFastElements(Register map,
                                       Label* fail,
                                       Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
  STATIC_ASSERT(FAST_ELEMENTS == 1);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastElementValue));
  j(above, fail, distance);
}


void MacroAssembler::CheckFastObjectElements(Register map,
                                             Label* fail,
                                             Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
  STATIC_ASSERT(FAST_ELEMENTS == 1);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
  j(below_equal, fail, distance);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastElementValue));
  j(above, fail, distance);
}


void MacroAssembler::CheckFastSmiOnlyElements(Register map,
                                              Label* fail,
                                              Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
  j(above, fail, distance);
}


void MacroAssembler::StoreNumberToDoubleElements(
    Register maybe_number,
    Register elements,
    Register index,
    XMMRegister xmm_scratch,
    Label* fail) {
  Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;

  JumpIfSmi(maybe_number, &smi_value, Label::kNear);

  CheckMap(maybe_number,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);

  // Double value, canonicalize NaN.
  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
  cmpl(FieldOperand(maybe_number, offset),
       Immediate(kNaNOrInfinityLowerBoundUpper32));
  j(greater_equal, &maybe_nan, Label::kNear);

  bind(&not_nan);
  movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
  bind(&have_double_value);
  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
        xmm_scratch);
  jmp(&done);

  bind(&maybe_nan);
  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
  // it's an Infinity, and the non-NaN code path applies.
  j(greater, &is_nan, Label::kNear);
  cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
  j(zero, &not_nan);

  bind(&is_nan);
  // Convert all NaNs to the same canonical NaN value when they are stored in
  // the double array.
  Set(kScratchRegister, BitCast<uint64_t>(
      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
  movq(xmm_scratch, kScratchRegister);
  jmp(&have_double_value, Label::kNear);

  bind(&smi_value);
  // Value is a smi. Convert to a double and store.
  // Preserve original value.
  SmiToInteger32(kScratchRegister, maybe_number);
  cvtlsi2sd(xmm_scratch, kScratchRegister);
  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
        xmm_scratch);
  bind(&done);
}
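

// A C++ sketch (not part of V8, helper name hypothetical) of the NaN
// canonicalization implemented above: any NaN read from a heap number is
// replaced by one canonical NaN bit pattern before it is stored, so the hole
// NaN used by FixedDoubleArray stays unambiguous.
static inline double CanonicalizeNaNSketch(double value) {
  // value != value holds exactly when value is NaN.
  if (value != value) {
    return FixedDoubleArray::canonical_not_the_hole_nan_as_double();
  }
  return value;
}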


void MacroAssembler::CheckMap(Register obj,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
  j(not_equal, fail);
}


void MacroAssembler::ClampUint8(Register reg) {
  Label done;
  testl(reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  setcc(negative, reg);  // 1 if negative, 0 if positive.
  decb(reg);  // 0 if negative, 255 if positive.
  bind(&done);
}
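

// A C++ sketch (not part of V8, helper name hypothetical) of the
// branch-minimal clamp above: in-range values pass through untouched;
// otherwise setcc/decb produce 0 for negative inputs and 255 for inputs
// above 255.
static inline int ClampUint8Sketch(int value) {
  if ((value & ~0xFF) == 0) return value;   // testl/j(zero): already a byte.
  int is_negative = value < 0 ? 1 : 0;      // setcc(negative, reg).
  return (is_negative - 1) & 0xFF;          // decb: 1 -> 0, 0 -> 255.
}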


void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
                                        XMMRegister temp_xmm_reg,
                                        Register result_reg,
                                        Register temp_reg) {
  Label done;
  Set(result_reg, 0);
  xorps(temp_xmm_reg, temp_xmm_reg);
  ucomisd(input_reg, temp_xmm_reg);
  j(below, &done, Label::kNear);
  uint64_t one_half = BitCast<uint64_t, double>(0.5);
  Set(temp_reg, one_half);
  movq(temp_xmm_reg, temp_reg);
  addsd(temp_xmm_reg, input_reg);
  cvttsd2si(result_reg, temp_xmm_reg);
  testl(result_reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  Set(result_reg, 255);
  bind(&done);
}
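

// A C++ sketch (not part of V8, helper name hypothetical) of the
// double-to-uint8 clamp above. NaN and negative inputs fall out as 0, adding
// 0.5 before truncation rounds to nearest, and anything that does not fit in
// a byte saturates to 255. (In the assembly, cvttsd2si yields 0x80000000 for
// out-of-range doubles, which also fails the byte-range test; the cast below
// stands in for that instruction.)
static inline int ClampDoubleToUint8Sketch(double input) {
  if (!(input >= 0.0)) return 0;               // ucomisd + j(below): NaN or < 0.
  double rounded = input + 0.5;                // addsd.
  int truncated = static_cast<int>(rounded);   // cvttsd2si (truncates).
  if ((truncated & ~0xFF) != 0) return 255;    // testl against 0xFFFFFF00.
  return truncated;
}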


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  movq(descriptors, FieldOperand(map,
                                 Map::kInstanceDescriptorsOrBitField3Offset));
  Label not_smi;
  JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
  Move(descriptors, isolate()->factory()->empty_descriptor_array());
  bind(&not_smi);
}


void MacroAssembler::DispatchMap(Register obj,
                                 Handle<Map> map,
                                 Handle<Code> success,
                                 SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
  j(equal, success, RelocInfo::CODE_TARGET);

  bind(&fail);
}


void MacroAssembler::AbortIfNotNumber(Register object) {
  Label ok;
  Condition is_smi = CheckSmi(object);
  j(is_smi, &ok, Label::kNear);
  Cmp(FieldOperand(object, HeapObject::kMapOffset),
      isolate()->factory()->heap_number_map());
  Assert(equal, "Operand not a number");
  bind(&ok);
}


void MacroAssembler::AbortIfSmi(Register object) {
  Condition is_smi = CheckSmi(object);
  Assert(NegateCondition(is_smi), "Operand is a smi");
}


void MacroAssembler::AbortIfNotSmi(Register object) {
  Condition is_smi = CheckSmi(object);
  Assert(is_smi, "Operand is not a smi");
}


void MacroAssembler::AbortIfNotSmi(const Operand& object) {
  Condition is_smi = CheckSmi(object);
  Assert(is_smi, "Operand is not a smi");
}


void MacroAssembler::AbortIfNotString(Register object) {
  testb(object, Immediate(kSmiTagMask));
  Assert(not_equal, "Operand is not a string");
  push(object);
  movq(object, FieldOperand(object, HeapObject::kMapOffset));
  CmpInstanceType(object, FIRST_NONSTRING_TYPE);
  pop(object);
  Assert(below, "Operand is not a string");
}


void MacroAssembler::AbortIfNotRootValue(Register src,
                                         Heap::RootListIndex root_value_index,
                                         const char* message) {
  ASSERT(!src.is(kScratchRegister));
  LoadRoot(kScratchRegister, root_value_index);
  cmpq(src, kScratchRegister);
  Check(equal, message);
}


Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                             Register map,
                                             Register instance_type) {
  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  testb(instance_type, Immediate(kIsNotStringMask));
  return zero;
}


void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Label* miss,
                                             bool miss_on_bound_function) {
  // Check that the receiver isn't a smi.
  testl(function, Immediate(kSmiTagMask));
  j(zero, miss);

  // Check that the function really is a function.
  CmpObjectType(function, JS_FUNCTION_TYPE, result);
  j(not_equal, miss);

  if (miss_on_bound_function) {
    movq(kScratchRegister,
         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
    // field).
    TestBit(FieldOperand(kScratchRegister,
                         SharedFunctionInfo::kCompilerHintsOffset),
            SharedFunctionInfo::kBoundFunction);
    j(not_zero, miss);
  }

  // Make sure that the function has an instance prototype.
  Label non_instance;
  testb(FieldOperand(result, Map::kBitFieldOffset),
        Immediate(1 << Map::kHasNonInstancePrototype));
  j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  movq(result,
       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  CompareRoot(result, Heap::kTheHoleValueRootIndex);
  j(equal, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CmpObjectType(result, MAP_TYPE, kScratchRegister);
  j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  movq(result, FieldOperand(result, Map::kPrototypeOffset));
  jmp(&done, Label::kNear);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  bind(&non_instance);
  movq(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  bind(&done);
}


void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
    movl(counter_operand, Immediate(value));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
    if (value == 1) {
      incl(counter_operand);
    } else {
      addl(counter_operand, Immediate(value));
    }
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
    if (value == 1) {
      decl(counter_operand);
    } else {
      subl(counter_operand, Immediate(value));
    }
  }
}


#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
  Set(rax, 0);  // No arguments.
  LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
  CEntryStub ces(1);
  ASSERT(AllowThisStubCall(&ces));
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif  // ENABLE_DEBUGGER_SUPPORT


void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
  // This macro takes the dst register to make the code more readable
  // at the call sites. However, the dst register has to be rcx to
  // follow the calling convention which requires the call type to be
  // in rcx.
  ASSERT(dst.is(rcx));
  if (call_kind == CALL_AS_FUNCTION) {
    LoadSmiConstant(dst, Smi::FromInt(1));
  } else {
    LoadSmiConstant(dst, Smi::FromInt(0));
  }
}


void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper,
                                CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  Label done;
  InvokePrologue(expected,
                 actual,
                 Handle<Code>::null(),
                 code,
                 &done,
                 flag,
                 Label::kNear,
                 call_wrapper,
                 call_kind);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(code));
    SetCallKind(rcx, call_kind);
    call(code);
    call_wrapper.AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    SetCallKind(rcx, call_kind);
    jmp(code);
  }
  bind(&done);
}


void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper,
                                CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  Label done;
  Register dummy = rax;
  InvokePrologue(expected,
                 actual,
                 code,
                 dummy,
                 &done,
                 flag,
                 Label::kNear,
                 call_wrapper,
                 call_kind);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(code));
    SetCallKind(rcx, call_kind);
    Call(code, rmode);
    call_wrapper.AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    SetCallKind(rcx, call_kind);
    Jump(code, rmode);
  }
  bind(&done);
}


void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  ASSERT(function.is(rdi));
  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
  movsxlq(rbx,
          FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
  // Advances rdx to the end of the Code object header, to the start of
  // the executable code.
  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));

  ParameterCount expected(rbx);
  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Get the function and set up the context.
  Move(rdi, function);
  movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

  // We call indirectly through the code field in the function to
  // allow recompilation to take effect without changing any of the
  // call sites.
  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
  ParameterCount expected(function->shared()->formal_parameter_count());
  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
}


void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_register,
                                    Label* done,
                                    InvokeFlag flag,
                                    Label::Distance near_jump,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  bool definitely_matches = false;
  Label invoke;
  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      Set(rax, actual.immediate());
      if (expected.immediate() ==
          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        // Don't worry about adapting arguments for built-ins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        Set(rbx, expected.immediate());
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in register, actual is immediate. This is the
      // case when we invoke function values without going through the
      // arguments adaptor frame.
      cmpq(expected.reg(), Immediate(actual.immediate()));
      j(equal, &invoke, Label::kNear);
      ASSERT(expected.reg().is(rbx));
      Set(rax, actual.immediate());
    } else if (!expected.reg().is(actual.reg())) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmpq(expected.reg(), actual.reg());
      j(equal, &invoke, Label::kNear);
      ASSERT(actual.reg().is(rax));
      ASSERT(expected.reg().is(rbx));
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (!code_constant.is_null()) {
      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
    } else if (!code_register.is(rdx)) {
      movq(rdx, code_register);
    }

    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      SetCallKind(rcx, call_kind);
      Call(adaptor, RelocInfo::CODE_TARGET);
      call_wrapper.AfterCall();
      jmp(done, near_jump);
    } else {
      SetCallKind(rcx, call_kind);
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}
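
// Register protocol assumed by the adaption code above: rax holds the actual
// argument count, rbx the expected count, and rdx the entry point of the code
// to invoke. On a mismatch, control transfers to the ArgumentsAdaptorTrampoline
// builtin, which adds or removes arguments on the stack before entering the
// function.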


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  push(rbp);
  movq(rbp, rsp);
  push(rsi);  // Context.
  Push(Smi::FromInt(type));
  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
  push(kScratchRegister);
  if (emit_debug_code()) {
    movq(kScratchRegister,
         isolate()->factory()->undefined_value(),
         RelocInfo::EMBEDDED_OBJECT);
    cmpq(Operand(rsp, 0), kScratchRegister);
    Check(not_equal, "code object not properly patched");
  }
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    Move(kScratchRegister, Smi::FromInt(type));
    cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
    Check(equal, "stack frame types must match");
  }
  movq(rsp, rbp);
  pop(rbp);
}


void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
  // Set up the frame structure on the stack.
  // All constants are relative to the frame pointer of the exit frame.
  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
  ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
  push(rbp);
  movq(rbp, rsp);

  // Reserve room for entry stack pointer and push the code object.
  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
  push(Immediate(0));  // Saved entry sp, patched before call.
  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
  push(kScratchRegister);  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer and the context in top.
  if (save_rax) {
    movq(r14, rax);  // Backup rax in callee-saved register.
  }
  Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
  Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
}


void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
                                            bool save_doubles) {
#ifdef _WIN64
  const int kShadowSpace = 4;
  arg_stack_space += kShadowSpace;
#endif
  // Optionally save all XMM registers.
  if (save_doubles) {
    int space = XMMRegister::kNumRegisters * kDoubleSize +
        arg_stack_space * kPointerSize;
    subq(rsp, Immediate(space));
    int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
      XMMRegister reg = XMMRegister::FromAllocationIndex(i);
      movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
    }
  } else if (arg_stack_space > 0) {
    subq(rsp, Immediate(arg_stack_space * kPointerSize));
  }

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    ASSERT(IsPowerOf2(kFrameAlignment));
    ASSERT(is_int8(kFrameAlignment));
    and_(rsp, Immediate(-kFrameAlignment));
  }

  // Patch the saved entry sp.
  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}


void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
  EnterExitFramePrologue(true);

  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
  // so it must be retained across the C call.
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
  lea(r15, Operand(rbp, r14, times_pointer_size, offset));

  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}


void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
  EnterExitFramePrologue(false);
  EnterExitFrameEpilogue(arg_stack_space, false);
}


void MacroAssembler::LeaveExitFrame(bool save_doubles) {
  // Registers:
  // r15 : argv
  if (save_doubles) {
    int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
      XMMRegister reg = XMMRegister::FromAllocationIndex(i);
      movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
    }
  }
  // Get the return address from the stack and restore the frame pointer.
  movq(rcx, Operand(rbp, 1 * kPointerSize));
  movq(rbp, Operand(rbp, 0 * kPointerSize));

  // Drop everything up to and including the arguments and the receiver
  // from the caller stack.
  lea(rsp, Operand(r15, 1 * kPointerSize));

  // Push the return address to get ready to return.
  push(rcx);

  LeaveExitFrameEpilogue();
}


void MacroAssembler::LeaveApiExitFrame() {
  movq(rsp, rbp);
  pop(rbp);

  LeaveExitFrameEpilogue();
}


void MacroAssembler::LeaveExitFrameEpilogue() {
  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  Operand context_operand = ExternalOperand(context_address);
  movq(rsi, context_operand);
#ifdef DEBUG
  movq(context_operand, Immediate(0));
#endif

  // Clear the top frame.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                       isolate());
  Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
  movq(c_entry_fp_operand, Immediate(0));
}


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!scratch.is(kScratchRegister));
  // Load current lexical context from the stack frame.
  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));

  // When generating debug code, make sure the lexical context is set.
  if (emit_debug_code()) {
    cmpq(scratch, Immediate(0));
    Check(not_equal, "we should not have an empty lexical context");
  }
  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  movq(scratch, FieldOperand(scratch, offset));
  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check the context is a global context.
  if (emit_debug_code()) {
    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
        isolate()->factory()->global_context_map());
    Check(equal, "JSGlobalObject::global_context should be a global context.");
  }

  // Check if both contexts are the same.
  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
  j(equal, &same_contexts);

  // Compare security tokens.
  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.

  // Check the context is a global context.
  if (emit_debug_code()) {
    // Preserve original value of holder_reg.
    push(holder_reg);
    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
    Check(not_equal, "JSGlobalProxy::context() should not be null.");

    // Read the first word and compare to global_context_map().
    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
    CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
    Check(equal, "JSGlobalObject::global_context should be a global context.");
    pop(holder_reg);
  }

  movq(kScratchRegister,
       FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
  int token_offset =
      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
  movq(scratch, FieldOperand(scratch, token_offset));
  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
  j(not_equal, miss);

  bind(&same_contexts);
}


void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register r0,
                                              Register r1,
                                              Register r2,
                                              Register result) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // Scratch registers:
  //
  // r0 - holds the untagged key on entry and holds the hash once computed.
  //
  // r1 - used to hold the capacity mask of the dictionary.
  //
  // r2 - used for the index into the dictionary.
  //
  // result - holds the result on exit if the load succeeded.
  //          Allowed to be the same as 'key' or 'result'.
  //          Unchanged on bailout so 'key' or 'result' can be used
  //          in further computation.

  Label done;

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h (see also the C++ sketch after this
  // function).
  //
  // hash = ~hash + (hash << 15);
  movl(r1, r0);
  notl(r0);
  shll(r1, Immediate(15));
  addl(r0, r1);
  // hash = hash ^ (hash >> 12);
  movl(r1, r0);
  shrl(r1, Immediate(12));
  xorl(r0, r1);
  // hash = hash + (hash << 2);
  leal(r0, Operand(r0, r0, times_4, 0));
  // hash = hash ^ (hash >> 4);
  movl(r1, r0);
  shrl(r1, Immediate(4));
  xorl(r0, r1);
  // hash = hash * 2057;
  imull(r0, r0, Immediate(2057));
  // hash = hash ^ (hash >> 16);
  movl(r1, r0);
  shrl(r1, Immediate(16));
  xorl(r0, r1);

  // Compute capacity mask.
  SmiToInteger32(r1,
                 FieldOperand(elements, NumberDictionary::kCapacityOffset));
  decl(r1);

  // Generate an unrolled loop that performs a few probes before giving up.
  const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Use r2 for index calculations and keep the hash intact in r0.
    movq(r2, r0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      addl(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
    }
    andl(r2, r1);

    // Scale the index by multiplying by the entry size.
    ASSERT(NumberDictionary::kEntrySize == 3);
    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3

    // Check if the key matches.
    cmpq(key, FieldOperand(elements,
                           r2,
                           times_pointer_size,
                           NumberDictionary::kElementsStartOffset));
    if (i != (kProbes - 1)) {
      j(equal, &done);
    } else {
      j(not_equal, miss);
    }
  }

  bind(&done);
  // Check that the value is a normal property.
  const int kDetailsOffset =
      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  ASSERT_EQ(NORMAL, 0);
  Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
       Smi::FromInt(PropertyDetails::TypeField::kMask));
  j(not_zero, miss);

  // Get the value at the masked, scaled index.
  const int kValueOffset =
      NumberDictionary::kElementsStartOffset + kPointerSize;
  movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
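

// A C++ sketch (not part of V8, names hypothetical) of the lookup arithmetic
// implemented above. The hash mirrors the step-by-step comments in the
// assembly (and ComputeIntegerHash in utils.h); the probe index mirrors the
// unrolled loop, which the comment above describes as (hash + i + i * i) &
// mask.
static inline uint32_t IntegerHashSketch(uint32_t hash) {
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}

static inline uint32_t ProbeIndexSketch(uint32_t hash, uint32_t i,
                                        uint32_t mask) {
  // mask is capacity - 1, so the result always stays inside the dictionary.
  return (hash + i + i * i) & mask;
}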


void MacroAssembler::LoadAllocationTopHelper(Register result,
                                             Register scratch,
                                             AllocationFlags flags) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Just return if allocation top is already known.
  if ((flags & RESULT_CONTAINS_TOP) != 0) {
    // No use of scratch if allocation top is provided.
    ASSERT(!scratch.is_valid());
#ifdef DEBUG
    // Assert that result actually contains top on entry.
    Operand top_operand = ExternalOperand(new_space_allocation_top);
    cmpq(result, top_operand);
    Check(equal, "Unexpected allocation top");
#endif
    return;
  }

  // Move address of new object to result. Use scratch register if available,
  // and keep address in scratch until call to UpdateAllocationTopHelper.
  if (scratch.is_valid()) {
    LoadAddress(scratch, new_space_allocation_top);
    movq(result, Operand(scratch, 0));
  } else {
    Load(result, new_space_allocation_top);
  }
}


void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               Register scratch) {
  if (emit_debug_code()) {
    testq(result_end, Immediate(kObjectAlignmentMask));
    Check(zero, "Unaligned allocation in new space");
  }

  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Update new top.
  if (scratch.is_valid()) {
    // Scratch already contains address of allocation top.
    movq(Operand(scratch, 0), result_end);
  } else {
    Store(new_space_allocation_top, result_end);
  }
}


void MacroAssembler::AllocateInNewSpace(int object_size,
                                        Register result,
                                        Register result_end,
                                        Register scratch,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      if (result_end.is_valid()) {
        movl(result_end, Immediate(0x7191));
      }
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());

  Register top_reg = result_end.is_valid() ? result_end : result;

  if (!top_reg.is(result)) {
    movq(top_reg, result);
  }
  addq(top_reg, Immediate(object_size));
  j(carry, gc_required);
  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
  cmpq(top_reg, limit_operand);
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(top_reg, scratch);

  if (top_reg.is(result)) {
    if ((flags & TAG_OBJECT) != 0) {
      subq(result, Immediate(object_size - kHeapObjectTag));
    } else {
      subq(result, Immediate(object_size));
    }
  } else if ((flags & TAG_OBJECT) != 0) {
    // Tag the result if requested.
    addq(result, Immediate(kHeapObjectTag));
  }
}


void MacroAssembler::AllocateInNewSpace(int header_size,
                                        ScaleFactor element_size,
                                        Register element_count,
                                        Register result,
                                        Register result_end,
                                        Register scratch,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      movl(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
      // Register element_count is not modified by the function.
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());

  // We assume that element_count * element_size + header_size does not
  // overflow.
  lea(result_end, Operand(element_count, element_size, header_size));
  addq(result_end, result);
  j(carry, gc_required);
  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
  cmpq(result_end, limit_operand);
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch);

  // Tag the result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    addq(result, Immediate(kHeapObjectTag));
  }
}


void MacroAssembler::AllocateInNewSpace(Register object_size,
                                        Register result,
                                        Register result_end,
                                        Register scratch,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      movl(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
      // object_size is left unchanged by this function.
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  if (!object_size.is(result_end)) {
    movq(result_end, object_size);
  }
  addq(result_end, result);
  j(carry, gc_required);
  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
  cmpq(result_end, limit_operand);
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch);

  // Tag the result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    addq(result, Immediate(kHeapObjectTag));
  }
}


void MacroAssembler::UndoAllocationInNewSpace(Register object) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  and_(object, Immediate(~kHeapObjectTagMask));
  Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
  cmpq(object, top_operand);
  Check(below, "Undo allocation of non-allocated memory");
#endif
  movq(top_operand, object);
}


void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch,
                                        Label* gc_required) {
  // Allocate heap number in new space.
  AllocateInNewSpace(HeapNumber::kSize,
                     result,
                     scratch,
                     no_reg,
                     gc_required,
                     TAG_OBJECT);

  // Set the map.
  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
                               kObjectAlignmentMask;
  ASSERT(kShortSize == 2);
  // scratch1 = length * 2 + kObjectAlignmentMask.
  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
                kHeaderAlignment));
  and_(scratch1, Immediate(~kObjectAlignmentMask));
  if (kHeaderAlignment > 0) {
    subq(scratch1, Immediate(kHeaderAlignment));
  }

  // Allocate two byte string in new space.
  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
                     times_1,
                     scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
  Integer32ToSmi(scratch1, length);
  movq(FieldOperand(result, String::kLengthOffset), scratch1);
  movq(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
}


void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
                               kObjectAlignmentMask;
  movl(scratch1, length);
  ASSERT(kCharSize == 1);
  addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
  and_(scratch1, Immediate(~kObjectAlignmentMask));
  if (kHeaderAlignment > 0) {
    subq(scratch1, Immediate(kHeaderAlignment));
  }

  // Allocate ASCII string in new space.
  AllocateInNewSpace(SeqAsciiString::kHeaderSize,
                     times_1,
                     scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
  Integer32ToSmi(scratch1, length);
  movq(FieldOperand(result, String::kLengthOffset), scratch1);
  movq(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
}


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate a cons string in new space.
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* gc_required) {
  // Allocate a cons string in new space.
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate a sliced string in new space.
  AllocateInNewSpace(SlicedString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateAsciiSlicedString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate a sliced string in new space.
  AllocateInNewSpace(SlicedString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Destination is incremented by length; source, length, and scratch are
// clobbered.
// A simpler loop is faster on small copies, but slower on large ones.
// The cld() instruction must have been emitted, to set the direction flag,
// before calling this function.
void MacroAssembler::CopyBytes(Register destination,
                               Register source,
                               Register length,
                               int min_length,
                               Register scratch) {
  ASSERT(min_length >= 0);
  if (FLAG_debug_code) {
    cmpl(length, Immediate(min_length));
    Assert(greater_equal, "Invalid min_length");
  }
  Label loop, done, short_string, short_loop;

  const int kLongStringLimit = 20;
  if (min_length <= kLongStringLimit) {
    cmpl(length, Immediate(kLongStringLimit));
    j(less_equal, &short_string);
  }

  ASSERT(source.is(rsi));
  ASSERT(destination.is(rdi));
  ASSERT(length.is(rcx));

  // Because source is 8-byte aligned in our uses of this function,
  // we keep source aligned for the rep movs operation by copying the odd
  // bytes at the end of the ranges.
  movq(scratch, length);
  shrl(length, Immediate(3));
  repmovsq();
  // Move remaining bytes of length.
  andl(scratch, Immediate(0x7));
  movq(length, Operand(source, scratch, times_1, -8));
  movq(Operand(destination, scratch, times_1, -8), length);
  addq(destination, scratch);

  if (min_length <= kLongStringLimit) {
    jmp(&done);

    bind(&short_string);
    if (min_length == 0) {
      testl(length, length);
      j(zero, &done);
    }
    lea(scratch, Operand(destination, length, times_1, 0));

    bind(&short_loop);
    movb(length, Operand(source, 0));
    movb(Operand(destination, 0), length);
    incq(source);
    incq(destination);
    cmpq(destination, scratch);
    j(not_equal, &short_loop);
  }

  bind(&done);
}
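

// A C++ sketch (not part of V8, names hypothetical; assumes memcpy from
// <string.h>) of the long-copy strategy above: move 8-byte words first (the
// rep movsq step), then re-copy the final 8 bytes ending at the true end of
// the range to pick up the 0-7 leftover bytes. The tail store overlaps the
// word copies, which is harmless because it rewrites the same data; it
// assumes n >= 8, which the kLongStringLimit check above guarantees on this
// path.
static inline void CopyBytesSketch(char* dst, const char* src, size_t n) {
  size_t words = n >> 3;
  for (size_t i = 0; i < words; i++) {   // Stands in for rep movsq.
    memcpy(dst + i * 8, src + i * 8, 8);
  }
  memcpy(dst + n - 8, src + n - 8, 8);   // Overlapping 8-byte tail copy.
}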


void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  jmp(&entry);
  bind(&loop);
  movq(Operand(start_offset, 0), filler);
  addq(start_offset, Immediate(kPointerSize));
  bind(&entry);
  cmpq(start_offset, end_offset);
  j(less, &loop);
}


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in rsi).
    movq(dst, rsi);
  }

  // We should not have found a with context by walking the context
  // chain (i.e., the static scope chain and runtime context chain do
  // not agree). A variable occurring in such a scope should have
  // slot type LOOKUP and not CONTEXT.
  if (emit_debug_code()) {
    CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
                Heap::kWithContextMapRootIndex);
    Check(not_equal, "Variable resolved to with context.");
  }
}


#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  movq(function, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  // Load the global context from the global or builtins object.
  movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
  // Load the function from the global context.
  movq(function, Operand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map. The global functions all have initial maps.
  movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort("Global functions must have initial map");
    bind(&ok);
  }
}


int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
  // On Windows 64 stack slots are reserved by the caller for all arguments
  // including the ones passed in registers, and space is always allocated for
  // the four register arguments even if the function takes fewer than four
  // arguments.
  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
  // and the caller does not reserve stack slots for them.
  ASSERT(num_arguments >= 0);
#ifdef _WIN64
  const int kMinimumStackSlots = kRegisterPassedArguments;
  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
  return num_arguments;
#else
  if (num_arguments < kRegisterPassedArguments) return 0;
  return num_arguments - kRegisterPassedArguments;
#endif
}
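
// Worked examples for ArgumentStackSlotsForCFunctionCall (hypothetical
// argument counts): on Windows, 3 arguments still reserve the 4-slot shadow
// space (returns 4) and 8 arguments reserve 8 slots; on the AMD64 ABI,
// 3 arguments need no stack slots (returns 0) and 8 arguments need
// 8 - 6 = 2 slots, since the first six travel in registers.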


void MacroAssembler::PrepareCallCFunction(int num_arguments) {
  int frame_alignment = OS::ActivationFrameAlignment();
  ASSERT(frame_alignment != 0);
  ASSERT(num_arguments >= 0);

  // Make stack end at alignment and allocate space for arguments and old rsp.
  movq(kScratchRegister, rsp);
  ASSERT(IsPowerOf2(frame_alignment));
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
  and_(rsp, Immediate(-frame_alignment));
  movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
}
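
// Example of the stack arithmetic above (hypothetical values): with 16-byte
// frame alignment and 3 argument slots, rsp is lowered by
// (3 + 1) * kPointerSize, rounded down to a 16-byte boundary, and the saved
// rsp is stored in the extra slot at [rsp + 3 * kPointerSize] so that
// CallCFunction can restore it afterwards.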


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  LoadAddress(rax, function);
  CallCFunction(rax, num_arguments);
}


void MacroAssembler::CallCFunction(Register function, int num_arguments) {
  ASSERT(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  ASSERT(OS::ActivationFrameAlignment() != 0);
  ASSERT(num_arguments >= 0);
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
}


bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
  if (r1.is(r2)) return true;
  if (r1.is(r3)) return true;
  if (r1.is(r4)) return true;
  if (r2.is(r3)) return true;
  if (r2.is(r4)) return true;
  if (r3.is(r4)) return true;
  return false;
}


CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(Isolate::Current(), address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to
  // patch. The size is adjusted with kGap in order for the assembler to
  // generate size bytes of instructions without failing with buffer size
  // constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  ASSERT(cc == zero || cc == not_zero);
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    movq(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
          Immediate(static_cast<uint8_t>(mask)));
  } else {
    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register bitmap_scratch,
                                 Register mask_scratch,
                                 Label* on_black,
                                 Label::Distance on_black_distance) {
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(object, bitmap_scratch, mask_scratch);

  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  // The mask_scratch register contains a 1 at the position of the first bit
  // and a 0 at all other positions, including the position of the second bit.
  movq(rcx, mask_scratch);
  // Make rcx into a mask that covers both marking bits using the operation
  // rcx = mask | (mask << 1).
  lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
  // Note that we are using a 4-byte aligned 8-byte load.
  and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  cmpq(mask_scratch, rcx);
  j(equal, on_black, on_black_distance);
}
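

// A C++ sketch (not part of V8, helper name hypothetical) of the mark-bit
// test above: an object is black when its first mark bit is 1 and its second
// is 0, so after masking the bitmap cell with mask | (mask << 1) (the lea
// with times_2 above) the remainder must equal the single-bit mask itself.
static inline bool IsBlackSketch(uint64_t cell, uint64_t mask) {
  return (cell & (mask | (mask << 1))) == mask;
}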


// Detect some, but not all, common pointer-free objects. This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(
    Register value,
    Register scratch,
    Label* not_data_object,
    Label::Distance not_data_object_distance) {
  Label is_data_object;
  movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  j(equal, &is_data_object, Label::kNear);
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
        Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, not_data_object, not_data_object_distance);
  bind(&is_data_object);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
  movq(bitmap_reg, addr_reg);
  // Sign extended 32 bit immediate.
  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  movq(rcx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shrl(rcx, Immediate(shift));
  and_(rcx,
       Immediate((Page::kPageAlignmentMask >> shift) &
                 ~(Bitmap::kBytesPerCell - 1)));

  addq(bitmap_reg, rcx);
  movq(rcx, addr_reg);
  shrl(rcx, Immediate(kPointerSizeLog2));
  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
  movl(mask_reg, Immediate(1));
  shl_cl(mask_reg);
}
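
// Index derivation performed above, in C-like pseudocode on the address bits
// (offset means addr & Page::kPageAlignmentMask):
//   cell = page_base + ((offset >> shift) & ~(Bitmap::kBytesPerCell - 1))
//   bit  = (addr >> kPointerSizeLog2) & ((1 << Bitmap::kBitsPerCellLog2) - 1)
//   mask = 1 << bit   // produced by movl(mask_reg, 1) + shl_cl above
// so every pointer-aligned word on the page maps to one bit in the page's
// mark bitmap.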


void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
  j(not_zero, &done, Label::kNear);

  if (FLAG_debug_code) {
    // Check for impossible bit pattern.
    Label ok;
    push(mask_scratch);
    // shl. May overflow making the check conservative.
    addq(mask_scratch, mask_scratch);
    testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    pop(mask_scratch);
  }

  // Value is white. We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = rcx;  // Holds map while checking type.
  Register length = rcx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap-number.
  movq(map, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(not_equal, &not_heap_number, Label::kNear);
  movq(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = rcx;
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  testb(instance_type, Immediate(kExternalStringTag));
  j(zero, &not_external, Label::kNear);
  movq(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either ASCII or UC16.
  ASSERT(kAsciiStringTag == 0x04);
  and_(length, Immediate(kStringEncodingMask));
  xor_(length, Immediate(kStringEncodingMask));
  addq(length, Immediate(0x04));
  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
  imul(length, FieldOperand(value, String::kLengthOffset));
  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  and_(length, Immediate(~kObjectAlignmentMask));
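
  // Worked example for the size computation above: the encoding bit of a
  // sequential ASCII string makes the xor/add sequence produce 4, UC16
  // produces 8, i.e. the character size shifted left by 2. Multiplying by
  // the smi-tagged length and shifting right by 2 + kSmiTagSize +
  // kSmiShiftSize therefore leaves length * char_size, which is then
  // rounded up to object alignment after adding the header size.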

  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);

  bind(&done);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64