1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "src/bootstrapper.h"
10 #include "src/codegen.h"
11 #include "src/cpu-profiler.h"
12 #include "src/x64/assembler-x64.h"
13 #include "src/x64/macro-assembler-x64.h"
14 #include "src/serialize.h"
15 #include "src/debug.h"
17 #include "src/isolate-inl.h"
22 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
23 : Assembler(arg_isolate, buffer, size),
24 generating_stub_(false),
26 root_array_available_(true) {
27 if (isolate() != NULL) {
28 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
34 static const int64_t kInvalidRootRegisterDelta = -1;
37 int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
38 if (predictable_code_size() &&
39 (other.address() < reinterpret_cast<Address>(isolate()) ||
40 other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
41 return kInvalidRootRegisterDelta;
43 Address roots_register_value = kRootRegisterBias +
44 reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
46 int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization.
47 if (kPointerSize == kInt64Size) {
48 delta = other.address() - roots_register_value;
50 // For x32, zero extend the address to 64-bit and calculate the delta.
51 uint64_t o = static_cast<uint32_t>(
52 reinterpret_cast<intptr_t>(other.address()));
53 uint64_t r = static_cast<uint32_t>(
54 reinterpret_cast<intptr_t>(roots_register_value));
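// Worked example (illustrative addresses): if the roots array starts at
// 0x2000 and kRootRegisterBias is 128, kRootRegister holds 0x2080. An
// external reference at 0x2480 then yields delta 0x2480 - 0x2080 = 0x400,
// which fits in a 32-bit displacement, so callers can reach it directly as
// Operand(kRootRegister, 0x400) without materializing a 64-bit address.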
61 Operand MacroAssembler::ExternalOperand(ExternalReference target,
63 if (root_array_available_ && !serializer_enabled()) {
64 int64_t delta = RootRegisterDelta(target);
65 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
66 return Operand(kRootRegister, static_cast<int32_t>(delta));
69 Move(scratch, target);
70 return Operand(scratch, 0);
74 void MacroAssembler::Load(Register destination, ExternalReference source) {
75 if (root_array_available_ && !serializer_enabled()) {
76 int64_t delta = RootRegisterDelta(source);
77 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
78 movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
83 if (destination.is(rax)) {
84 load_rax(source);
85 } else {
86 Move(kScratchRegister, source);
87 movp(destination, Operand(kScratchRegister, 0));
88 }
92 void MacroAssembler::Store(ExternalReference destination, Register source) {
93 if (root_array_available_ && !serializer_enabled()) {
94 int64_t delta = RootRegisterDelta(destination);
95 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
96 movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
101 if (source.is(rax)) {
102 store_rax(destination);
103 } else {
104 Move(kScratchRegister, destination);
105 movp(Operand(kScratchRegister, 0), source);
106 }
110 void MacroAssembler::LoadAddress(Register destination,
111 ExternalReference source) {
112 if (root_array_available_ && !serializer_enabled()) {
113 int64_t delta = RootRegisterDelta(source);
114 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
115 leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
120 Move(destination, source);
124 int MacroAssembler::LoadAddressSize(ExternalReference source) {
125 if (root_array_available_ && !serializer_enabled()) {
126 // This calculation depends on the internals of LoadAddress.
127 // Its correctness is ensured by the asserts in the Call
128 // instruction below.
129 int64_t delta = RootRegisterDelta(source);
130 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
131 // Operand is leap(scratch, Operand(kRootRegister, delta));
132 // Opcodes: REX.W 8D ModRM Disp8/Disp32, i.e. 4 or 7 bytes.
134 if (!is_int8(static_cast<int32_t>(delta))) {
135 size += 3; // Need full four-byte displacement in lea.
140 // Size of movp(destination, src);
141 return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
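// Size accounting, assuming the lea encoding sketched above: with an
// 8-bit displacement the instruction is REX.W + 8D + ModRM + disp8 =
// 4 bytes; a 32-bit displacement replaces disp8 with four bytes, giving 7.
// When the root-relative form is unavailable, the answer is the fixed
// length of the movp that loads a full 64-bit address into the scratch
// register.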
145 void MacroAssembler::PushAddress(ExternalReference source) {
146 int64_t address = reinterpret_cast<int64_t>(source.address());
147 if (is_int32(address) && !serializer_enabled()) {
148 if (emit_debug_code()) {
149 Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
151 Push(Immediate(static_cast<int32_t>(address)));
154 LoadAddress(kScratchRegister, source);
155 Push(kScratchRegister);
159 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
160 ASSERT(root_array_available_);
161 movp(destination, Operand(kRootRegister,
162 (index << kPointerSizeLog2) - kRootRegisterBias));
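// Example, assuming the usual kRootRegisterBias of 128: root index 5 with
// 8-byte pointers maps to Operand(kRootRegister, 5 * 8 - 128), i.e. a
// disp8 of -88. The bias exists so that a larger window of the roots
// array is reachable with 1-byte displacements on either side of
// kRootRegister.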
166 void MacroAssembler::LoadRootIndexed(Register destination,
167 Register variable_offset,
169 ASSERT(root_array_available_);
170 movp(destination,
171 Operand(kRootRegister,
172 variable_offset, times_pointer_size,
173 (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
177 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
178 ASSERT(root_array_available_);
179 movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
180 source);
184 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
185 ASSERT(root_array_available_);
186 Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
190 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
191 ASSERT(root_array_available_);
192 cmpp(with, Operand(kRootRegister,
193 (index << kPointerSizeLog2) - kRootRegisterBias));
197 void MacroAssembler::CompareRoot(const Operand& with,
198 Heap::RootListIndex index) {
199 ASSERT(root_array_available_);
200 ASSERT(!with.AddressUsesRegister(kScratchRegister));
201 LoadRoot(kScratchRegister, index);
202 cmpp(with, kScratchRegister);
206 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
209 SaveFPRegsMode save_fp,
210 RememberedSetFinalAction and_then) {
211 if (emit_debug_code()) {
213 JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
217 // Load store buffer top.
218 LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
219 // Store pointer to buffer.
220 movp(Operand(scratch, 0), addr);
221 // Increment buffer top.
222 addp(scratch, Immediate(kPointerSize));
223 // Write back new top of buffer.
224 StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
225 // Call stub on end of buffer.
227 // Check for end of buffer.
228 testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
229 if (and_then == kReturnAtEnd) {
230 Label buffer_overflowed;
231 j(not_equal, &buffer_overflowed, Label::kNear);
233 bind(&buffer_overflowed);
235 ASSERT(and_then == kFallThroughAtEnd);
236 j(equal, &done, Label::kNear);
238 StoreBufferOverflowStub store_buffer_overflow =
239 StoreBufferOverflowStub(isolate(), save_fp);
240 CallStub(&store_buffer_overflow);
241 if (and_then == kReturnAtEnd) {
244 ASSERT(and_then == kFallThroughAtEnd);
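// Note on the overflow test above (a sketch of the invariant, not a
// definition of the buffer layout): the store buffer is placed so that
// the top pointer acquires kStoreBufferOverflowBit exactly when it
// crosses the end of the buffer, letting a single testp stand in for a
// top-vs-limit comparison.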
250 void MacroAssembler::InNewSpace(Register object,
254 Label::Distance distance) {
255 if (serializer_enabled()) {
256 // Can't do arithmetic on external references if it might get serialized.
257 // The mask isn't really an address. We load it as an external reference in
258 // case the size of the new space is different between the snapshot maker
259 // and the running system.
260 if (scratch.is(object)) {
261 Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
262 andp(scratch, kScratchRegister);
264 Move(scratch, ExternalReference::new_space_mask(isolate()));
265 andp(scratch, object);
267 Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
268 cmpp(scratch, kScratchRegister);
269 j(cc, branch, distance);
271 ASSERT(kPointerSize == kInt64Size
272 ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
273 : kPointerSize == kInt32Size);
274 intptr_t new_space_start =
275 reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
276 Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
277 Assembler::RelocInfoNone());
278 if (scratch.is(object)) {
279 addp(scratch, kScratchRegister);
281 leap(scratch, Operand(object, kScratchRegister, times_1, 0));
284 Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
285 j(cc, branch, distance);
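// The non-serializer path relies on the identity
//   ((addr - new_space_start) & NewSpaceMask) == 0  iff  addr is in new
// space: adding the negated start and masking off the in-space offset
// bits turns the range check into one add/lea, one and, and a jump on
// the resulting flags.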
290 void MacroAssembler::RecordWriteField(
295 SaveFPRegsMode save_fp,
296 RememberedSetAction remembered_set_action,
298 PointersToHereCheck pointers_to_here_check_for_value) {
299 // First, check if a write barrier is even needed. The tests below
300 // catch stores of Smis.
303 // Skip barrier if writing a smi.
304 if (smi_check == INLINE_SMI_CHECK) {
305 JumpIfSmi(value, &done);
308 // Although the object register is tagged, the offset is relative to the start
309 // of the object, so the offset must be a multiple of kPointerSize.
310 ASSERT(IsAligned(offset, kPointerSize));
312 leap(dst, FieldOperand(object, offset));
313 if (emit_debug_code()) {
315 testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
316 j(zero, &ok, Label::kNear);
321 RecordWrite(object, dst, value, save_fp, remembered_set_action,
322 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
326 // Clobber clobbered input registers when running with the debug-code flag
327 // turned on to provoke errors.
328 if (emit_debug_code()) {
329 Move(value, kZapValue, Assembler::RelocInfoNone());
330 Move(dst, kZapValue, Assembler::RelocInfoNone());
335 void MacroAssembler::RecordWriteArray(
339 SaveFPRegsMode save_fp,
340 RememberedSetAction remembered_set_action,
342 PointersToHereCheck pointers_to_here_check_for_value) {
343 // First, check if a write barrier is even needed. The tests below
344 // catch stores of Smis.
347 // Skip barrier if writing a smi.
348 if (smi_check == INLINE_SMI_CHECK) {
349 JumpIfSmi(value, &done);
352 // Array access: calculate the destination address. Index is not a smi.
353 Register dst = index;
354 leap(dst, Operand(object, index, times_pointer_size,
355 FixedArray::kHeaderSize - kHeapObjectTag));
357 RecordWrite(object, dst, value, save_fp, remembered_set_action,
358 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
362 // Clobber clobbered input registers when running with the debug-code flag
363 // turned on to provoke errors.
364 if (emit_debug_code()) {
365 Move(value, kZapValue, Assembler::RelocInfoNone());
366 Move(index, kZapValue, Assembler::RelocInfoNone());
371 void MacroAssembler::RecordWriteForMap(Register object,
374 SaveFPRegsMode fp_mode) {
375 ASSERT(!object.is(kScratchRegister));
376 ASSERT(!object.is(map));
377 ASSERT(!object.is(dst));
378 ASSERT(!map.is(dst));
379 AssertNotSmi(object);
381 if (emit_debug_code()) {
383 if (map.is(kScratchRegister)) pushq(map);
384 CompareMap(map, isolate()->factory()->meta_map());
385 if (map.is(kScratchRegister)) popq(map);
386 j(equal, &ok, Label::kNear);
391 if (!FLAG_incremental_marking) {
395 if (emit_debug_code()) {
397 if (map.is(kScratchRegister)) pushq(map);
398 cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
399 if (map.is(kScratchRegister)) popq(map);
400 j(equal, &ok, Label::kNear);
405 // Compute the address.
406 leap(dst, FieldOperand(object, HeapObject::kMapOffset));
408 // Count number of write barriers in generated code.
409 isolate()->counters()->write_barriers_static()->Increment();
410 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
412 // First, check if a write barrier is even needed. The tests below
413 // catch stores of smis and stores into the young generation.
416 // A single check of the map page's interesting flag suffices, since the
417 // flag is only set during incremental collection, and then the from
418 // object's page's interesting flag is guaranteed to be set as well. This
419 // optimization relies on the fact that maps can never be in new space.
421 map, // Used as scratch.
422 MemoryChunk::kPointersToHereAreInterestingMask,
427 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
433 // Clobber clobbered registers when running with the debug-code flag
434 // turned on to provoke errors.
435 if (emit_debug_code()) {
436 Move(dst, kZapValue, Assembler::RelocInfoNone());
437 Move(map, kZapValue, Assembler::RelocInfoNone());
442 void MacroAssembler::RecordWrite(
446 SaveFPRegsMode fp_mode,
447 RememberedSetAction remembered_set_action,
449 PointersToHereCheck pointers_to_here_check_for_value) {
450 ASSERT(!object.is(value));
451 ASSERT(!object.is(address));
452 ASSERT(!value.is(address));
453 AssertNotSmi(object);
455 if (remembered_set_action == OMIT_REMEMBERED_SET &&
456 !FLAG_incremental_marking) {
460 if (emit_debug_code()) {
462 cmpp(value, Operand(address, 0));
463 j(equal, &ok, Label::kNear);
468 // Count number of write barriers in generated code.
469 isolate()->counters()->write_barriers_static()->Increment();
470 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
472 // First, check if a write barrier is even needed. The tests below
473 // catch stores of smis and stores into the young generation.
476 if (smi_check == INLINE_SMI_CHECK) {
477 // Skip barrier if writing a smi.
478 JumpIfSmi(value, &done);
481 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
483 value, // Used as scratch.
484 MemoryChunk::kPointersToHereAreInterestingMask,
490 CheckPageFlag(object,
491 value, // Used as scratch.
492 MemoryChunk::kPointersFromHereAreInterestingMask,
497 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
503 // Clobber clobbered registers when running with the debug-code flag
504 // turned on to provoke errors.
505 if (emit_debug_code()) {
506 Move(address, kZapValue, Assembler::RelocInfoNone());
507 Move(value, kZapValue, Assembler::RelocInfoNone());
512 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
513 if (emit_debug_code()) Check(cc, reason);
517 void MacroAssembler::AssertFastElements(Register elements) {
518 if (emit_debug_code()) {
520 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
521 Heap::kFixedArrayMapRootIndex);
522 j(equal, &ok, Label::kNear);
523 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
524 Heap::kFixedDoubleArrayMapRootIndex);
525 j(equal, &ok, Label::kNear);
526 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
527 Heap::kFixedCOWArrayMapRootIndex);
528 j(equal, &ok, Label::kNear);
529 Abort(kJSObjectWithFastElementsMapHasSlowElements);
535 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
536 Label L;
537 j(cc, &L, Label::kNear);
538 Abort(reason);
539 // Control will not return here.
540 bind(&L);
544 void MacroAssembler::CheckStackAlignment() {
545 int frame_alignment = OS::ActivationFrameAlignment();
546 int frame_alignment_mask = frame_alignment - 1;
547 if (frame_alignment > kPointerSize) {
548 ASSERT(IsPowerOf2(frame_alignment));
549 Label alignment_as_expected;
550 testp(rsp, Immediate(frame_alignment_mask));
551 j(zero, &alignment_as_expected, Label::kNear);
552 // Abort if stack is not aligned.
553 int3();
554 bind(&alignment_as_expected);
559 void MacroAssembler::NegativeZeroTest(Register result,
563 testl(result, result);
564 j(not_zero, &ok, Label::kNear);
571 void MacroAssembler::Abort(BailoutReason reason) {
573 const char* msg = GetBailoutReason(reason);
575 RecordComment("Abort message: ");
576 RecordComment(msg);
579 if (FLAG_trap_on_abort) {
580 int3();
581 return;
582 }
585 Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
586 Assembler::RelocInfoNone());
587 Push(kScratchRegister);
589 if (!has_frame_) {
590 // We don't actually want to generate a pile of code for this, so just
591 // claim there is a stack frame, without generating one.
592 FrameScope scope(this, StackFrame::NONE);
593 CallRuntime(Runtime::kAbort, 1);
594 } else {
595 CallRuntime(Runtime::kAbort, 1);
596 }
597 // Control will not return here.
602 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
603 ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
604 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
608 void MacroAssembler::TailCallStub(CodeStub* stub) {
609 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
613 void MacroAssembler::StubReturn(int argc) {
614 ASSERT(argc >= 1 && generating_stub());
615 ret((argc - 1) * kPointerSize);
619 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
620 return has_frame_ || !stub->SometimesSetsUpAFrame();
624 void MacroAssembler::IndexFromHash(Register hash, Register index) {
625 // The assert checks that the constants for the maximum number of digits
626 // for an array index cached in the hash field and the number of bits
627 // reserved for it do not conflict.
628 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
629 (1 << String::kArrayIndexValueBits));
630 if (!hash.is(index)) {
633 DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
637 void MacroAssembler::CallRuntime(const Runtime::Function* f,
639 SaveFPRegsMode save_doubles) {
640 // If the expected number of arguments of the runtime function is
641 // constant, we check that the actual number of arguments matches the
642 // expectation.
643 CHECK(f->nargs < 0 || f->nargs == num_arguments);
645 // TODO(1236192): Most runtime routines don't need the number of
646 // arguments passed in because it is constant. At some point we
647 // should remove this need and make the runtime routine entry code
648 // smarter.
649 Set(rax, num_arguments);
650 LoadAddress(rbx, ExternalReference(f, isolate()));
651 CEntryStub ces(isolate(), f->result_size, save_doubles);
656 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
658 Set(rax, num_arguments);
659 LoadAddress(rbx, ext);
661 CEntryStub stub(isolate(), 1);
666 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
669 // ----------- S t a t e -------------
670 // -- rsp[0] : return address
671 // -- rsp[8] : argument num_arguments - 1
673 // -- rsp[8 * num_arguments] : argument 0 (receiver)
674 // -----------------------------------
676 // TODO(1236192): Most runtime routines don't need the number of
677 // arguments passed in because it is constant. At some point we
678 // should remove this need and make the runtime routine entry code
679 // smarter.
680 Set(rax, num_arguments);
681 JumpToExternalReference(ext, result_size);
685 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
688 TailCallExternalReference(ExternalReference(fid, isolate()),
694 static int Offset(ExternalReference ref0, ExternalReference ref1) {
695 int64_t offset = (ref0.address() - ref1.address());
696 // Check that the offset fits into an int.
697 ASSERT(static_cast<int>(offset) == offset);
698 return static_cast<int>(offset);
702 void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
703 EnterApiExitFrame(arg_stack_space);
707 void MacroAssembler::CallApiFunctionAndReturn(
708 Register function_address,
709 ExternalReference thunk_ref,
710 Register thunk_last_arg,
712 Operand return_value_operand,
713 Operand* context_restore_operand) {
715 Label promote_scheduled_exception;
716 Label exception_handled;
717 Label delete_allocated_handles;
718 Label leave_exit_frame;
721 Factory* factory = isolate()->factory();
722 ExternalReference next_address =
723 ExternalReference::handle_scope_next_address(isolate());
724 const int kNextOffset = 0;
725 const int kLimitOffset = Offset(
726 ExternalReference::handle_scope_limit_address(isolate()),
728 const int kLevelOffset = Offset(
729 ExternalReference::handle_scope_level_address(isolate()),
731 ExternalReference scheduled_exception_address =
732 ExternalReference::scheduled_exception_address(isolate());
734 ASSERT(rdx.is(function_address) || r8.is(function_address));
735 // Allocate HandleScope in callee-save registers.
736 Register prev_next_address_reg = r14;
737 Register prev_limit_reg = rbx;
738 Register base_reg = r15;
739 Move(base_reg, next_address);
740 movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
741 movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
742 addl(Operand(base_reg, kLevelOffset), Immediate(1));
744 if (FLAG_log_timer_events) {
745 FrameScope frame(this, StackFrame::MANUAL);
746 PushSafepointRegisters();
747 PrepareCallCFunction(1);
748 LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
749 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
750 PopSafepointRegisters();
754 Label profiler_disabled;
755 Label end_profiler_check;
756 Move(rax, ExternalReference::is_profiling_address(isolate()));
757 cmpb(Operand(rax, 0), Immediate(0));
758 j(zero, &profiler_disabled);
760 // Third parameter is the address of the actual getter function.
761 Move(thunk_last_arg, function_address);
762 Move(rax, thunk_ref);
763 jmp(&end_profiler_check);
765 bind(&profiler_disabled);
766 // Call the api function!
767 Move(rax, function_address);
769 bind(&end_profiler_check);
771 // Call the api function!
772 call(rax);
774 if (FLAG_log_timer_events) {
775 FrameScope frame(this, StackFrame::MANUAL);
776 PushSafepointRegisters();
777 PrepareCallCFunction(1);
778 LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
779 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
780 PopSafepointRegisters();
783 // Load the value from ReturnValue
784 movp(rax, return_value_operand);
787 // No more valid handles (the result handle was the last one). Restore
788 // previous handle scope.
789 subl(Operand(base_reg, kLevelOffset), Immediate(1));
790 movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
791 cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
792 j(not_equal, &delete_allocated_handles);
793 bind(&leave_exit_frame);
795 // Check if the function scheduled an exception.
796 Move(rsi, scheduled_exception_address);
797 Cmp(Operand(rsi, 0), factory->the_hole_value());
798 j(not_equal, &promote_scheduled_exception);
799 bind(&exception_handled);
801 #if ENABLE_EXTRA_CHECKS
802 // Check if the function returned a valid JavaScript value.
803 Label ok;
804 Register return_value = rax;
805 Register map = rcx;
807 JumpIfSmi(return_value, &ok, Label::kNear);
808 movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
810 CmpInstanceType(map, FIRST_NONSTRING_TYPE);
811 j(below, &ok, Label::kNear);
813 CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
814 j(above_equal, &ok, Label::kNear);
816 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
817 j(equal, &ok, Label::kNear);
819 CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
820 j(equal, &ok, Label::kNear);
822 CompareRoot(return_value, Heap::kTrueValueRootIndex);
823 j(equal, &ok, Label::kNear);
825 CompareRoot(return_value, Heap::kFalseValueRootIndex);
826 j(equal, &ok, Label::kNear);
828 CompareRoot(return_value, Heap::kNullValueRootIndex);
829 j(equal, &ok, Label::kNear);
831 Abort(kAPICallReturnedInvalidObject);
836 bool restore_context = context_restore_operand != NULL;
837 if (restore_context) {
838 movp(rsi, *context_restore_operand);
840 LeaveApiExitFrame(!restore_context);
841 ret(stack_space * kPointerSize);
843 bind(&promote_scheduled_exception);
845 FrameScope frame(this, StackFrame::INTERNAL);
846 CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
848 jmp(&exception_handled);
850 // HandleScope limit has changed. Delete allocated extensions.
851 bind(&delete_allocated_handles);
852 movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
853 movp(prev_limit_reg, rax);
854 LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
855 LoadAddress(rax,
856 ExternalReference::delete_handle_scope_extensions(isolate()));
857 call(rax);
858 movp(rax, prev_limit_reg);
859 jmp(&leave_exit_frame);
863 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
865 // Set the entry point and jump to the C entry runtime stub.
866 LoadAddress(rbx, ext);
867 CEntryStub ces(isolate(), result_size);
868 jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
872 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
874 const CallWrapper& call_wrapper) {
875 // You can't call a builtin without a valid frame.
876 ASSERT(flag == JUMP_FUNCTION || has_frame());
878 // Rely on the assertion to check that the number of provided
879 // arguments matches the expected number of arguments. Fake a
880 // parameter count to avoid emitting code to do the check.
881 ParameterCount expected(0);
882 GetBuiltinEntry(rdx, id);
883 InvokeCode(rdx, expected, expected, flag, call_wrapper);
887 void MacroAssembler::GetBuiltinFunction(Register target,
888 Builtins::JavaScript id) {
889 // Load the builtins object into target register.
890 movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
891 movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
892 movp(target, FieldOperand(target,
893 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
897 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
898 ASSERT(!target.is(rdi));
899 // Load the JavaScript builtin function from the builtins object.
900 GetBuiltinFunction(rdi, id);
901 movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
905 #define REG(Name) { kRegister_ ## Name ## _Code }
907 static const Register saved_regs[] = {
908 REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
909 REG(r9), REG(r10), REG(r11)
914 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
917 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
920 Register exclusion3) {
921 // We don't allow a GC during a store buffer overflow so there is no need to
922 // store the registers in any particular way, but we do have to store and
923 // restore them.
924 for (int i = 0; i < kNumberOfSavedRegs; i++) {
925 Register reg = saved_regs[i];
926 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
927 pushq(reg);
928 }
930 // r12 to r15 are callee-save on all platforms.
931 if (fp_mode == kSaveFPRegs) {
932 subp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
933 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
934 XMMRegister reg = XMMRegister::from_code(i);
935 movups(Operand(rsp, i * kSIMD128Size), reg);
941 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
944 Register exclusion3) {
945 if (fp_mode == kSaveFPRegs) {
946 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
947 XMMRegister reg = XMMRegister::from_code(i);
948 movups(reg, Operand(rsp, i * kSIMD128Size));
950 addp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
952 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
953 Register reg = saved_regs[i];
954 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
955 popq(reg);
956 }
961 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
962 xorps(dst, dst);
963 cvtlsi2sd(dst, src);
964 }

967 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
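// Why the xorps: cvtlsi2sd only writes the low quadword of its
// destination, so clearing dst first breaks the false dependence on
// dst's previous contents that the convert would otherwise carry into
// later dependency chains; both overloads use the same pattern.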
973 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
974 ASSERT(!r.IsDouble());
975 if (r.IsInteger8()) {
977 } else if (r.IsUInteger8()) {
979 } else if (r.IsInteger16()) {
981 } else if (r.IsUInteger16()) {
983 } else if (r.IsInteger32()) {
991 void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
992 ASSERT(!r.IsDouble());
993 if (r.IsInteger8() || r.IsUInteger8()) {
995 } else if (r.IsInteger16() || r.IsUInteger16()) {
997 } else if (r.IsInteger32()) {
1000 if (r.IsHeapObject()) {
1002 } else if (r.IsSmi()) {
1010 void MacroAssembler::Set(Register dst, int64_t x) {
1013 } else if (is_uint32(x)) {
1014 movl(dst, Immediate(static_cast<uint32_t>(x)));
1015 } else if (is_int32(x)) {
1016 movq(dst, Immediate(static_cast<int32_t>(x)));
1023 void MacroAssembler::Set(const Operand& dst, intptr_t x) {
1024 if (kPointerSize == kInt64Size) {
1026 movp(dst, Immediate(static_cast<int32_t>(x)));
1028 Set(kScratchRegister, x);
1029 movp(dst, kScratchRegister);
1032 movp(dst, Immediate(static_cast<int32_t>(x)));
1037 // ----------------------------------------------------------------------------
1038 // Smi tagging, untagging and tag detection.
1040 bool MacroAssembler::IsUnsafeInt(const int32_t x) {
1041 static const int kMaxBits = 17;
1042 return !is_intn(x, kMaxBits);
1046 void MacroAssembler::SafeMove(Register dst, Smi* src) {
1047 ASSERT(!dst.is(kScratchRegister));
1048 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1049 if (SmiValuesAre32Bits()) {
1050 // JIT cookie can be converted to Smi.
1051 Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
1052 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1053 xorp(dst, kScratchRegister);
1055 ASSERT(SmiValuesAre31Bits());
1056 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
1057 movp(dst, Immediate(value ^ jit_cookie()));
1058 xorp(dst, Immediate(jit_cookie()));
1066 void MacroAssembler::SafePush(Smi* src) {
1067 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1068 if (SmiValuesAre32Bits()) {
1069 // JIT cookie can be converted to Smi.
1070 Push(Smi::FromInt(src->value() ^ jit_cookie()));
1071 Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1072 xorp(Operand(rsp, 0), kScratchRegister);
1074 ASSERT(SmiValuesAre31Bits());
1075 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
1076 Push(Immediate(value ^ jit_cookie()));
1077 xorp(Operand(rsp, 0), Immediate(jit_cookie()));
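// The cookie scheme, illustrated in pseudo-asm: for a "dangerous"
// constant c (one with enough attacker-controllable bits to be useful
// for JIT spraying), only c ^ jit_cookie() is embedded in the
// instruction stream and the cookie is re-applied at runtime:
//   mov dst, (c ^ cookie)   ; the emitted bits are not attacker-chosen
//   xor dst, cookie         ; dst == c again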
1085 Register MacroAssembler::GetSmiConstant(Smi* source) {
1086 int value = source->value();
1087 if (value == 0) {
1088 xorl(kScratchRegister, kScratchRegister);
1089 return kScratchRegister;
1090 }
1091 if (value == kSmiConstantRegisterValue) {
1092 return kSmiConstantRegister;
1093 }
1094 LoadSmiConstant(kScratchRegister, source);
1095 return kScratchRegister;
1099 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
1100 if (emit_debug_code()) {
1101 Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
1102 Assembler::RelocInfoNone());
1103 cmpq(dst, kSmiConstantRegister);
1104 Assert(equal, kUninitializedKSmiConstantRegister);
1106 int value = source->value();
1111 bool negative = value < 0;
1112 unsigned int uvalue = negative ? -value : value;
1117 Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
1121 leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
1125 leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
1129 Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
1133 Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
1137 Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
1140 movp(dst, kSmiConstantRegister);
1146 Move(dst, source, Assembler::RelocInfoNone());
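// A sketch of what the leas above achieve: kSmiConstantRegister
// permanently holds Smi::FromInt(1), so base+index addressing with
// scale factors synthesizes nearby smi constants, e.g. times_1 for 2,
// times_2 for 3, times_4 for 5 and times_8 for 9, avoiding the 10-byte
// movabs that a full 64-bit immediate would need for common constants.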
1155 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
1156 STATIC_ASSERT(kSmiTag == 0);
1157 if (!dst.is(src)) {
1158 movl(dst, src);
1159 }
1160 shlp(dst, Immediate(kSmiShift));
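// Tagging example: with 32-bit smi values (kSmiShift == 32) the int32 5
// becomes 0x0000000500000000, payload in the high word and the low word,
// including the tag bit, zero. With 31-bit smi values kSmiShift is 1 and
// the same shlp produces the familiar value << 1 encoding.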
1164 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
1165 if (emit_debug_code()) {
1166 testb(dst, Immediate(0x01));
1168 j(zero, &ok, Label::kNear);
1169 Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
1173 if (SmiValuesAre32Bits()) {
1174 ASSERT(kSmiShift % kBitsPerByte == 0);
1175 movl(Operand(dst, kSmiShift / kBitsPerByte), src);
1177 ASSERT(SmiValuesAre31Bits());
1178 Integer32ToSmi(kScratchRegister, src);
1179 movp(dst, kScratchRegister);
1184 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
1187 if (dst.is(src)) {
1188 addl(dst, Immediate(constant));
1189 } else {
1190 leal(dst, Operand(src, constant));
1191 }
1192 shlp(dst, Immediate(kSmiShift));
1196 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
1197 STATIC_ASSERT(kSmiTag == 0);
1198 if (!dst.is(src)) {
1199 movp(dst, src);
1200 }
1202 if (SmiValuesAre32Bits()) {
1203 shrp(dst, Immediate(kSmiShift));
1205 ASSERT(SmiValuesAre31Bits());
1206 sarl(dst, Immediate(kSmiShift));
1211 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
1212 if (SmiValuesAre32Bits()) {
1213 movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1215 ASSERT(SmiValuesAre31Bits());
1217 sarl(dst, Immediate(kSmiShift));
1222 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
1223 STATIC_ASSERT(kSmiTag == 0);
1227 sarp(dst, Immediate(kSmiShift));
1228 if (kPointerSize == kInt32Size) {
1229 // Sign extend to 64-bit.
1235 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
1236 if (SmiValuesAre32Bits()) {
1237 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
1239 ASSERT(SmiValuesAre31Bits());
1241 SmiToInteger64(dst, dst);
1246 void MacroAssembler::SmiTest(Register src) {
1252 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1259 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
1265 void MacroAssembler::Cmp(Register dst, Smi* src) {
1266 ASSERT(!dst.is(kScratchRegister));
1267 if (src->value() == 0) {
1270 Register constant_reg = GetSmiConstant(src);
1271 cmpp(dst, constant_reg);
1276 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
1283 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
1290 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
1292 if (SmiValuesAre32Bits()) {
1293 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1295 ASSERT(SmiValuesAre31Bits());
1296 cmpl(dst, Immediate(src));
1301 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
1302 // The Operand cannot use the smi register.
1303 Register smi_reg = GetSmiConstant(src);
1304 ASSERT(!dst.AddressUsesRegister(smi_reg));
1309 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
1310 if (SmiValuesAre32Bits()) {
1311 cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
1313 ASSERT(SmiValuesAre31Bits());
1314 SmiToInteger32(kScratchRegister, dst);
1315 cmpl(kScratchRegister, src);
1320 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
1326 SmiToInteger64(dst, src);
1332 if (power < kSmiShift) {
1333 sarp(dst, Immediate(kSmiShift - power));
1334 } else if (power > kSmiShift) {
1335 shlp(dst, Immediate(power - kSmiShift));
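// The two shifts fold untagging and the multiply into one instruction,
// e.g. for 32-bit smis (kSmiShift == 32) and power == 3:
//   sarp dst, 29   ; (value << 32) >> 29 == value * 8
// One arithmetic shift instead of an untag followed by a shift left
// (the helper's contract, per its name, is a positive smi input).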
1340 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
1343 ASSERT((0 <= power) && (power < 32));
1345 shrp(dst, Immediate(power + kSmiShift));
1347 UNIMPLEMENTED(); // Not used.
1352 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
1354 Label::Distance near_jump) {
1355 if (dst.is(src1) || dst.is(src2)) {
1356 ASSERT(!src1.is(kScratchRegister));
1357 ASSERT(!src2.is(kScratchRegister));
1358 movp(kScratchRegister, src1);
1359 orp(kScratchRegister, src2);
1360 JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
1361 movp(dst, kScratchRegister);
1365 JumpIfNotSmi(dst, on_not_smis, near_jump);
1370 Condition MacroAssembler::CheckSmi(Register src) {
1371 STATIC_ASSERT(kSmiTag == 0);
1372 testb(src, Immediate(kSmiTagMask));
1377 Condition MacroAssembler::CheckSmi(const Operand& src) {
1378 STATIC_ASSERT(kSmiTag == 0);
1379 testb(src, Immediate(kSmiTagMask));
1384 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
1385 STATIC_ASSERT(kSmiTag == 0);
1386 // Test that both bits of the mask 0x8000000000000001 are zero.
1387 movp(kScratchRegister, src);
1388 rolp(kScratchRegister, Immediate(1));
1389 testb(kScratchRegister, Immediate(3));
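// The rotate trick, spelled out: a non-negative smi has tag bit 0 and
// sign bit 63 both clear. rolp by 1 maps the sign bit to bit 0 and the
// tag bit to bit 1, so one testb against mask 3 checks "is a smi" and
// "is non-negative" simultaneously.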
1394 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
1395 if (first.is(second)) {
1396 return CheckSmi(first);
1398 STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
1399 if (SmiValuesAre32Bits()) {
1400 leal(kScratchRegister, Operand(first, second, times_1, 0));
1401 testb(kScratchRegister, Immediate(0x03));
1403 ASSERT(SmiValuesAre31Bits());
1404 movl(kScratchRegister, first);
1405 orl(kScratchRegister, second);
1406 testb(kScratchRegister, Immediate(kSmiTagMask));
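// Why the leal works for 64-bit smis: smi tags are 0 and heap-object
// tags are 1, so the low two bits of first + second are 00 only when
// both tags were 0 (one heap object sets bit 0; two set bit 1 via the
// carry). The 31-bit case gets the same combined tag test from orl of
// the two low words.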
1412 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
1414 if (first.is(second)) {
1415 return CheckNonNegativeSmi(first);
1417 movp(kScratchRegister, first);
1418 orp(kScratchRegister, second);
1419 rolp(kScratchRegister, Immediate(1));
1420 testl(kScratchRegister, Immediate(3));
1425 Condition MacroAssembler::CheckEitherSmi(Register first,
1428 if (first.is(second)) {
1429 return CheckSmi(first);
1431 if (scratch.is(second)) {
1432 andl(scratch, first);
1434 if (!scratch.is(first)) {
1435 movl(scratch, first);
1437 andl(scratch, second);
1439 testb(scratch, Immediate(kSmiTagMask));
1444 Condition MacroAssembler::CheckIsMinSmi(Register src) {
1445 ASSERT(!src.is(kScratchRegister));
1446 // If we overflow by subtracting one, it's the minimal smi value.
1447 cmpp(src, kSmiConstantRegister);
1452 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1453 if (SmiValuesAre32Bits()) {
1454 // A 32-bit integer value can always be converted to a smi.
1455 return always;
1457 ASSERT(SmiValuesAre31Bits());
1458 cmpl(src, Immediate(0xc0000000));
1464 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1465 if (SmiValuesAre32Bits()) {
1466 // An unsigned 32-bit integer value is valid as long as the high bit
1467 // is not set.
1471 ASSERT(SmiValuesAre31Bits());
1472 testl(src, Immediate(0xc0000000));
1478 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1479 if (dst.is(src)) {
1480 andl(dst, Immediate(kSmiTagMask));
1481 } else {
1482 movl(dst, Immediate(kSmiTagMask));
1483 andl(dst, src);
1484 }
1488 void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1489 if (!(src.AddressUsesRegister(dst))) {
1490 movl(dst, Immediate(kSmiTagMask));
1491 andl(dst, src);
1492 } else {
1493 movl(dst, src);
1494 andl(dst, Immediate(kSmiTagMask));
1495 }
1499 void MacroAssembler::JumpIfValidSmiValue(Register src,
1501 Label::Distance near_jump) {
1502 Condition is_valid = CheckInteger32ValidSmiValue(src);
1503 j(is_valid, on_valid, near_jump);
1507 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1509 Label::Distance near_jump) {
1510 Condition is_valid = CheckInteger32ValidSmiValue(src);
1511 j(NegateCondition(is_valid), on_invalid, near_jump);
1515 void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
1517 Label::Distance near_jump) {
1518 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1519 j(is_valid, on_valid, near_jump);
1523 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1525 Label::Distance near_jump) {
1526 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1527 j(NegateCondition(is_valid), on_invalid, near_jump);
1531 void MacroAssembler::JumpIfSmi(Register src,
1533 Label::Distance near_jump) {
1534 Condition smi = CheckSmi(src);
1535 j(smi, on_smi, near_jump);
1539 void MacroAssembler::JumpIfNotSmi(Register src,
1541 Label::Distance near_jump) {
1542 Condition smi = CheckSmi(src);
1543 j(NegateCondition(smi), on_not_smi, near_jump);
1547 void MacroAssembler::JumpUnlessNonNegativeSmi(
1548 Register src, Label* on_not_smi_or_negative,
1549 Label::Distance near_jump) {
1550 Condition non_negative_smi = CheckNonNegativeSmi(src);
1551 j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1555 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1558 Label::Distance near_jump) {
1559 SmiCompare(src, constant);
1560 j(equal, on_equals, near_jump);
1564 void MacroAssembler::JumpIfNotBothSmi(Register src1,
1566 Label* on_not_both_smi,
1567 Label::Distance near_jump) {
1568 Condition both_smi = CheckBothSmi(src1, src2);
1569 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1573 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1575 Label* on_not_both_smi,
1576 Label::Distance near_jump) {
1577 Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1578 j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1582 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1583 if (constant->value() == 0) {
1588 } else if (dst.is(src)) {
1589 ASSERT(!dst.is(kScratchRegister));
1590 switch (constant->value()) {
1592 addp(dst, kSmiConstantRegister);
1595 leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1598 leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1601 leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1604 Register constant_reg = GetSmiConstant(constant);
1605 addp(dst, constant_reg);
1609 switch (constant->value()) {
1611 leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
1614 leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1617 leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1620 leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1623 LoadSmiConstant(dst, constant);
1631 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1632 if (constant->value() != 0) {
1633 if (SmiValuesAre32Bits()) {
1634 addl(Operand(dst, kSmiShift / kBitsPerByte),
1635 Immediate(constant->value()));
1637 ASSERT(SmiValuesAre31Bits());
1638 addp(dst, Immediate(constant));
1644 void MacroAssembler::SmiAddConstant(Register dst,
1647 SmiOperationExecutionMode mode,
1648 Label* bailout_label,
1649 Label::Distance near_jump) {
1650 if (constant->value() == 0) {
1654 } else if (dst.is(src)) {
1655 ASSERT(!dst.is(kScratchRegister));
1656 LoadSmiConstant(kScratchRegister, constant);
1657 addp(dst, kScratchRegister);
1658 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
1659 j(no_overflow, bailout_label, near_jump);
1660 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
1661 subp(dst, kScratchRegister);
1662 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
1663 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
1665 j(no_overflow, &done, Label::kNear);
1666 subp(dst, kScratchRegister);
1667 jmp(bailout_label, near_jump);
1670 // Bailout if overflow without preserving src.
1671 j(overflow, bailout_label, near_jump);
1674 CHECK(mode.IsEmpty());
1677 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
1678 ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
1679 LoadSmiConstant(dst, constant);
1681 j(overflow, bailout_label, near_jump);
1686 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1687 if (constant->value() == 0) {
1691 } else if (dst.is(src)) {
1692 ASSERT(!dst.is(kScratchRegister));
1693 Register constant_reg = GetSmiConstant(constant);
1694 subp(dst, constant_reg);
1696 if (constant->value() == Smi::kMinValue) {
1697 LoadSmiConstant(dst, constant);
1698 // Adding and subtracting the min-value gives the same result, it only
1699 // differs on the overflow bit, which we don't check here.
1702 // Subtract by adding the negation.
1703 LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
1710 void MacroAssembler::SmiSubConstant(Register dst,
1713 SmiOperationExecutionMode mode,
1714 Label* bailout_label,
1715 Label::Distance near_jump) {
1716 if (constant->value() == 0) {
1720 } else if (dst.is(src)) {
1721 ASSERT(!dst.is(kScratchRegister));
1722 LoadSmiConstant(kScratchRegister, constant);
1723 subp(dst, kScratchRegister);
1724 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
1725 j(no_overflow, bailout_label, near_jump);
1726 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
1727 addp(dst, kScratchRegister);
1728 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
1729 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
1731 j(no_overflow, &done, Label::kNear);
1732 addp(dst, kScratchRegister);
1733 jmp(bailout_label, near_jump);
1736 // Bailout if overflow without preserving src.
1737 j(overflow, bailout_label, near_jump);
1740 CHECK(mode.IsEmpty());
1743 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
1744 ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
1745 if (constant->value() == Smi::kMinValue) {
1746 ASSERT(!dst.is(kScratchRegister));
1748 LoadSmiConstant(kScratchRegister, constant);
1749 subp(dst, kScratchRegister);
1750 j(overflow, bailout_label, near_jump);
1752 // Subtract by adding the negation.
1753 LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1755 j(overflow, bailout_label, near_jump);
1761 void MacroAssembler::SmiNeg(Register dst,
1763 Label* on_smi_result,
1764 Label::Distance near_jump) {
1766 ASSERT(!dst.is(kScratchRegister));
1767 movp(kScratchRegister, src);
1768 negp(dst); // Low 32 bits are retained as zero by negation.
1769 // Test if result is zero or Smi::kMinValue.
1770 cmpp(dst, kScratchRegister);
1771 j(not_equal, on_smi_result, near_jump);
1772 movp(src, kScratchRegister);
1777 // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1778 j(not_equal, on_smi_result, near_jump);
1784 static void SmiAddHelper(MacroAssembler* masm,
1788 Label* on_not_smi_result,
1789 Label::Distance near_jump) {
1792 masm->addp(dst, src2);
1793 masm->j(no_overflow, &done, Label::kNear);
1795 masm->subp(dst, src2);
1796 masm->jmp(on_not_smi_result, near_jump);
1799 masm->movp(dst, src1);
1800 masm->addp(dst, src2);
1801 masm->j(overflow, on_not_smi_result, near_jump);
1806 void MacroAssembler::SmiAdd(Register dst,
1809 Label* on_not_smi_result,
1810 Label::Distance near_jump) {
1811 ASSERT_NOT_NULL(on_not_smi_result);
1812 ASSERT(!dst.is(src2));
1813 SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1817 void MacroAssembler::SmiAdd(Register dst,
1819 const Operand& src2,
1820 Label* on_not_smi_result,
1821 Label::Distance near_jump) {
1822 ASSERT_NOT_NULL(on_not_smi_result);
1823 ASSERT(!src2.AddressUsesRegister(dst));
1824 SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1828 void MacroAssembler::SmiAdd(Register dst,
1831 // No overflow checking. Use only when it's known that
1832 // overflowing is impossible.
1833 if (!dst.is(src1)) {
1834 if (emit_debug_code()) {
1835 movp(kScratchRegister, src1);
1836 addp(kScratchRegister, src2);
1837 Check(no_overflow, kSmiAdditionOverflow);
1839 leap(dst, Operand(src1, src2, times_1, 0));
1842 Assert(no_overflow, kSmiAdditionOverflow);
1848 static void SmiSubHelper(MacroAssembler* masm,
1852 Label* on_not_smi_result,
1853 Label::Distance near_jump) {
1856 masm->subp(dst, src2);
1857 masm->j(no_overflow, &done, Label::kNear);
1859 masm->addp(dst, src2);
1860 masm->jmp(on_not_smi_result, near_jump);
1863 masm->movp(dst, src1);
1864 masm->subp(dst, src2);
1865 masm->j(overflow, on_not_smi_result, near_jump);
1870 void MacroAssembler::SmiSub(Register dst,
1873 Label* on_not_smi_result,
1874 Label::Distance near_jump) {
1875 ASSERT_NOT_NULL(on_not_smi_result);
1876 ASSERT(!dst.is(src2));
1877 SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1881 void MacroAssembler::SmiSub(Register dst,
1883 const Operand& src2,
1884 Label* on_not_smi_result,
1885 Label::Distance near_jump) {
1886 ASSERT_NOT_NULL(on_not_smi_result);
1887 ASSERT(!src2.AddressUsesRegister(dst));
1888 SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1893 static void SmiSubNoOverflowHelper(MacroAssembler* masm,
1897 // No overflow checking. Use only when it's known that
1898 // overflowing is impossible (e.g., subtracting two positive smis).
1899 if (!dst.is(src1)) {
1900 masm->movp(dst, src1);
1902 masm->subp(dst, src2);
1903 masm->Assert(no_overflow, kSmiSubtractionOverflow);
1907 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1908 ASSERT(!dst.is(src2));
1909 SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
1913 void MacroAssembler::SmiSub(Register dst,
1915 const Operand& src2) {
1916 SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
1920 void MacroAssembler::SmiMul(Register dst,
1923 Label* on_not_smi_result,
1924 Label::Distance near_jump) {
1925 ASSERT(!dst.is(src2));
1926 ASSERT(!dst.is(kScratchRegister));
1927 ASSERT(!src1.is(kScratchRegister));
1928 ASSERT(!src2.is(kScratchRegister));
1931 Label failure, zero_correct_result;
1932 movp(kScratchRegister, src1); // Create backup for later testing.
1933 SmiToInteger64(dst, src1);
1935 j(overflow, &failure, Label::kNear);
1937 // Check for negative zero result. If product is zero, and one
1938 // argument is negative, go to slow case.
1939 Label correct_result;
1941 j(not_zero, &correct_result, Label::kNear);
1943 movp(dst, kScratchRegister);
1945 // Result was positive zero.
1946 j(positive, &zero_correct_result, Label::kNear);
1948 bind(&failure); // Reused failure exit, restores src1.
1949 movp(src1, kScratchRegister);
1950 jmp(on_not_smi_result, near_jump);
1952 bind(&zero_correct_result);
1955 bind(&correct_result);
1957 SmiToInteger64(dst, src1);
1959 j(overflow, on_not_smi_result, near_jump);
1960 // Check for negative zero result. If product is zero, and one
1961 // argument is negative, go to slow case.
1962 Label correct_result;
1964 j(not_zero, &correct_result, Label::kNear);
1965 // One of src1 and src2 is zero; then check whether the other is
1966 // negative.
1967 movp(kScratchRegister, src1);
1968 xorp(kScratchRegister, src2);
1969 j(negative, on_not_smi_result, near_jump);
1970 bind(&correct_result);
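// Negative-zero rationale, with a concrete case: -3 * 0 is 0 as an
// integer, but as a JS number the result must be -0, which no smi can
// represent. Testing the sign of src1 ^ src2 when the product is zero
// catches exactly the "one factor negative" case and falls back to the
// slow path.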
1975 void MacroAssembler::SmiDiv(Register dst,
1978 Label* on_not_smi_result,
1979 Label::Distance near_jump) {
1980 ASSERT(!src1.is(kScratchRegister));
1981 ASSERT(!src2.is(kScratchRegister));
1982 ASSERT(!dst.is(kScratchRegister));
1983 ASSERT(!src2.is(rax));
1984 ASSERT(!src2.is(rdx));
1985 ASSERT(!src1.is(rdx));
1987 // Check for 0 divisor (result is +/-Infinity).
1989 j(zero, on_not_smi_result, near_jump);
1992 movp(kScratchRegister, src1);
1994 SmiToInteger32(rax, src1);
1995 // We need to rule out dividing Smi::kMinValue by -1, since that would
1996 // overflow in idiv and raise an exception.
1997 // We combine this with negative zero test (negative zero only happens
1998 // when dividing zero by a negative number).
2000 // We overshoot a little and go to slow case if we divide min-value
2001 // by any negative value, not just -1.
2003 testl(rax, Immediate(~Smi::kMinValue));
2004 j(not_zero, &safe_div, Label::kNear);
2007 j(positive, &safe_div, Label::kNear);
2008 movp(src1, kScratchRegister);
2009 jmp(on_not_smi_result, near_jump);
2011 j(negative, on_not_smi_result, near_jump);
2015 SmiToInteger32(src2, src2);
2016 // Sign extend src1 into edx:eax.
2017 cdq();
2018 idivl(src2);
2019 Integer32ToSmi(src2, src2);
2020 // Check that the remainder is zero.
2024 j(zero, &smi_result, Label::kNear);
2025 movp(src1, kScratchRegister);
2026 jmp(on_not_smi_result, near_jump);
2029 j(not_zero, on_not_smi_result, near_jump);
2031 if (!dst.is(src1) && src1.is(rax)) {
2032 movp(src1, kScratchRegister);
2034 Integer32ToSmi(dst, rax);
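// Edge case illustrated: -Smi::kMinValue exceeds the largest smi by
// one, so kMinValue / -1 has no smi result, and with 32-bit payloads
// idivl would fault outright on INT32_MIN / -1. The overshooting check
// above (min-value dividend with any negative divisor) routes all of
// this to on_not_smi_result.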
2038 void MacroAssembler::SmiMod(Register dst,
2041 Label* on_not_smi_result,
2042 Label::Distance near_jump) {
2043 ASSERT(!dst.is(kScratchRegister));
2044 ASSERT(!src1.is(kScratchRegister));
2045 ASSERT(!src2.is(kScratchRegister));
2046 ASSERT(!src2.is(rax));
2047 ASSERT(!src2.is(rdx));
2048 ASSERT(!src1.is(rdx));
2049 ASSERT(!src1.is(src2));
2052 j(zero, on_not_smi_result, near_jump);
2055 movp(kScratchRegister, src1);
2057 SmiToInteger32(rax, src1);
2058 SmiToInteger32(src2, src2);
2060 // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
2062 cmpl(rax, Immediate(Smi::kMinValue));
2063 j(not_equal, &safe_div, Label::kNear);
2064 cmpl(src2, Immediate(-1));
2065 j(not_equal, &safe_div, Label::kNear);
2066 // Retag inputs and go slow case.
2067 Integer32ToSmi(src2, src2);
2069 movp(src1, kScratchRegister);
2071 jmp(on_not_smi_result, near_jump);
2074 // Sign extend eax into edx:eax.
2075 cdq();
2076 idivl(src2);
2077 // Restore smi tags on inputs.
2078 Integer32ToSmi(src2, src2);
2080 movp(src1, kScratchRegister);
2082 // Check for a negative zero result. If the result is zero, and the
2083 // dividend is negative, go slow to return a floating point negative zero.
2086 j(not_zero, &smi_result, Label::kNear);
2088 j(negative, on_not_smi_result, near_jump);
2090 Integer32ToSmi(dst, rdx);
2094 void MacroAssembler::SmiNot(Register dst, Register src) {
2095 ASSERT(!dst.is(kScratchRegister));
2096 ASSERT(!src.is(kScratchRegister));
2097 if (SmiValuesAre32Bits()) {
2098 // Set tag and padding bits before negating, so that they are zero
2099 // afterwards.
2100 movl(kScratchRegister, Immediate(~0));
2102 ASSERT(SmiValuesAre31Bits());
2103 movl(kScratchRegister, Immediate(1));
2106 xorp(dst, kScratchRegister);
2108 leap(dst, Operand(src, kScratchRegister, times_1, 0));
2114 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
2115 ASSERT(!dst.is(src2));
2116 if (!dst.is(src1)) {
2123 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
2124 if (constant->value() == 0) {
2126 } else if (dst.is(src)) {
2127 ASSERT(!dst.is(kScratchRegister));
2128 Register constant_reg = GetSmiConstant(constant);
2129 andp(dst, constant_reg);
2131 LoadSmiConstant(dst, constant);
2137 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
2138 if (!dst.is(src1)) {
2139 ASSERT(!src1.is(src2));
2146 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
2148 ASSERT(!dst.is(kScratchRegister));
2149 Register constant_reg = GetSmiConstant(constant);
2150 orp(dst, constant_reg);
2152 LoadSmiConstant(dst, constant);
2158 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
2159 if (!dst.is(src1)) {
2160 ASSERT(!src1.is(src2));
2167 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
2169 ASSERT(!dst.is(kScratchRegister));
2170 Register constant_reg = GetSmiConstant(constant);
2171 xorp(dst, constant_reg);
2173 LoadSmiConstant(dst, constant);
2179 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
2182 ASSERT(is_uint5(shift_value));
2183 if (shift_value > 0) {
2185 sarp(dst, Immediate(shift_value + kSmiShift));
2186 shlp(dst, Immediate(kSmiShift));
2188 UNIMPLEMENTED(); // Not used.
2194 void MacroAssembler::SmiShiftLeftConstant(Register dst,
2197 Label* on_not_smi_result,
2198 Label::Distance near_jump) {
2199 if (SmiValuesAre32Bits()) {
2203 if (shift_value > 0) {
2204 // The shift amount is specified by the lower 5 bits, not six as for shl.
2205 shlq(dst, Immediate(shift_value & 0x1f));
2208 ASSERT(SmiValuesAre31Bits());
2210 UNIMPLEMENTED(); // Not used.
2212 SmiToInteger32(dst, src);
2213 shll(dst, Immediate(shift_value));
2214 JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
2215 Integer32ToSmi(dst, dst);
2221 void MacroAssembler::SmiShiftLogicalRightConstant(
2222 Register dst, Register src, int shift_value,
2223 Label* on_not_smi_result, Label::Distance near_jump) {
2224 // Logical right shift interprets its result as an *unsigned* number.
2226 UNIMPLEMENTED(); // Not used.
2228 if (shift_value == 0) {
2230 j(negative, on_not_smi_result, near_jump);
2232 if (SmiValuesAre32Bits()) {
2234 shrp(dst, Immediate(shift_value + kSmiShift));
2235 shlp(dst, Immediate(kSmiShift));
2237 ASSERT(SmiValuesAre31Bits());
2238 SmiToInteger32(dst, src);
2239 shrp(dst, Immediate(shift_value));
2240 JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
2241 Integer32ToSmi(dst, dst);
2247 void MacroAssembler::SmiShiftLeft(Register dst,
2250 Label* on_not_smi_result,
2251 Label::Distance near_jump) {
2252 if (SmiValuesAre32Bits()) {
2253 ASSERT(!dst.is(rcx));
2254 if (!dst.is(src1)) {
2257 // Untag shift amount.
2258 SmiToInteger32(rcx, src2);
2259 // The shift amount is specified by the lower 5 bits, not six as for shl.
2260 andp(rcx, Immediate(0x1f));
2263 ASSERT(SmiValuesAre31Bits());
2264 ASSERT(!dst.is(kScratchRegister));
2265 ASSERT(!src1.is(kScratchRegister));
2266 ASSERT(!src2.is(kScratchRegister));
2267 ASSERT(!dst.is(src2));
2268 ASSERT(!dst.is(rcx));
2270 if (src1.is(rcx) || src2.is(rcx)) {
2271 movq(kScratchRegister, rcx);
2274 UNIMPLEMENTED(); // Not used.
2277 SmiToInteger32(dst, src1);
2278 SmiToInteger32(rcx, src2);
2280 JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
2281 // As neither src1 nor src2 can be dst, we do not need to restore them
2282 // after clobbering dst.
2283 if (src1.is(rcx) || src2.is(rcx)) {
2285 movq(src1, kScratchRegister);
2287 movq(src2, kScratchRegister);
2290 jmp(on_not_smi_result, near_jump);
2291 bind(&valid_result);
2292 Integer32ToSmi(dst, dst);
2298 void MacroAssembler::SmiShiftLogicalRight(Register dst,
2301 Label* on_not_smi_result,
2302 Label::Distance near_jump) {
2303 ASSERT(!dst.is(kScratchRegister));
2304 ASSERT(!src1.is(kScratchRegister));
2305 ASSERT(!src2.is(kScratchRegister));
2306 ASSERT(!dst.is(src2));
2307 ASSERT(!dst.is(rcx));
2308 if (src1.is(rcx) || src2.is(rcx)) {
2309 movq(kScratchRegister, rcx);
2312 UNIMPLEMENTED(); // Not used.
2315 SmiToInteger32(dst, src1);
2316 SmiToInteger32(rcx, src2);
2318 JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
2319 // As neither src1 nor src2 can be dst, we do not need to restore them
2320 // after clobbering dst.
2321 if (src1.is(rcx) || src2.is(rcx)) {
2323 movq(src1, kScratchRegister);
2325 movq(src2, kScratchRegister);
2328 jmp(on_not_smi_result, near_jump);
2329 bind(&valid_result);
2330 Integer32ToSmi(dst, dst);
2335 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2338 ASSERT(!dst.is(kScratchRegister));
2339 ASSERT(!src1.is(kScratchRegister));
2340 ASSERT(!src2.is(kScratchRegister));
2341 ASSERT(!dst.is(rcx));
2343 SmiToInteger32(rcx, src2);
2344 if (!dst.is(src1)) {
2347 SmiToInteger32(dst, dst);
2349 Integer32ToSmi(dst, dst);
2353 void MacroAssembler::SelectNonSmi(Register dst,
2357 Label::Distance near_jump) {
2358 ASSERT(!dst.is(kScratchRegister));
2359 ASSERT(!src1.is(kScratchRegister));
2360 ASSERT(!src2.is(kScratchRegister));
2361 ASSERT(!dst.is(src1));
2362 ASSERT(!dst.is(src2));
2363 // Neither operand may be a smi.
2365 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2366 Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
2368 STATIC_ASSERT(kSmiTag == 0);
2369 ASSERT_EQ(0, Smi::FromInt(0));
2370 movl(kScratchRegister, Immediate(kSmiTagMask));
2371 andp(kScratchRegister, src1);
2372 testl(kScratchRegister, src2);
2373 // If non-zero then both are smis.
2374 j(not_zero, on_not_smis, near_jump);
2376 // Exactly one operand is a smi.
2377 ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
2378 // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
2379 subp(kScratchRegister, Immediate(1));
2380 // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
2383 andp(dst, kScratchRegister);
2384 // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2386 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
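// For illustration, the sequence above implements the classic branchless
// select dst = src1 ^ ((src1 ^ src2) & mask), where
// mask = (src1 & kSmiTagMask) - 1 is all 1s exactly when src1 is a smi;
// the expression then evaluates to src2 when src1 is the smi and to src1
// otherwise.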
2390 SmiIndex MacroAssembler::SmiToIndex(Register dst,
2393 if (SmiValuesAre32Bits()) {
2394 ASSERT(is_uint6(shift));
2395 // There is a possible optimization if shift is in the range 60-63, but that
2396 // will (and must) never happen.
2400 if (shift < kSmiShift) {
2401 sarp(dst, Immediate(kSmiShift - shift));
2403 shlp(dst, Immediate(shift - kSmiShift));
2405 return SmiIndex(dst, times_1);
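// The smi already stores its value shifted left by kSmiShift, so
// arithmetic-shifting right by (kSmiShift - shift), or left by
// (shift - kSmiShift), leaves value << shift in dst and no scale factor
// is needed in the resulting operand.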
2407 ASSERT(SmiValuesAre31Bits());
2408 ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2412 // We have to sign extend the index register to 64-bit, as the SMI might be negative.
2415 if (shift == times_1) {
2416 sarq(dst, Immediate(kSmiShift));
2417 return SmiIndex(dst, times_1);
2419 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2424 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2427 if (SmiValuesAre32Bits()) {
2428 // Register src holds a positive smi.
2429 ASSERT(is_uint6(shift));
2434 if (shift < kSmiShift) {
2435 sarp(dst, Immediate(kSmiShift - shift));
2437 shlp(dst, Immediate(shift - kSmiShift));
2439 return SmiIndex(dst, times_1);
2441 ASSERT(SmiValuesAre31Bits());
2442 ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2447 if (shift == times_1) {
2448 sarq(dst, Immediate(kSmiShift));
2449 return SmiIndex(dst, times_1);
2451 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2456 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2457 if (SmiValuesAre32Bits()) {
2458 ASSERT_EQ(0, kSmiShift % kBitsPerByte);
2459 addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2461 ASSERT(SmiValuesAre31Bits());
2462 SmiToInteger32(kScratchRegister, src);
2463 addl(dst, kScratchRegister);
2468 void MacroAssembler::Push(Smi* source) {
2469 intptr_t smi = reinterpret_cast<intptr_t>(source);
2470 if (is_int32(smi)) {
2471 Push(Immediate(static_cast<int32_t>(smi)));
2473 Register constant = GetSmiConstant(source);
2479 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
2480 ASSERT(!src.is(scratch));
2483 shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2484 shlp(src, Immediate(kSmiShift));
2487 shlp(scratch, Immediate(kSmiShift));
2492 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
2493 ASSERT(!dst.is(scratch));
2496 shrp(scratch, Immediate(kSmiShift));
2498 shrp(dst, Immediate(kSmiShift));
2500 shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2505 void MacroAssembler::Test(const Operand& src, Smi* source) {
2506 if (SmiValuesAre32Bits()) {
2507 testl(Operand(src, kIntSize), Immediate(source->value()));
2509 ASSERT(SmiValuesAre31Bits());
2510 testl(src, Immediate(source));
2515 // ----------------------------------------------------------------------------
2518 void MacroAssembler::LookupNumberStringCache(Register object,
2523 // Register usage: result is used as a temporary.
2524 Register number_string_cache = result;
2525 Register mask = scratch1;
2526 Register scratch = scratch2;
2528 // Load the number string cache.
2529 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2531 // Make the hash mask from the length of the number string cache. It
2532 // contains two elements (number and string) for each cache entry.
2534 mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2535 shrl(mask, Immediate(1));
2536 subp(mask, Immediate(1)); // Make mask.
2538 // Calculate the entry in the number string cache. The hash value in the
2539 // number string cache for smis is just the smi value, and the hash for
2540 // doubles is the xor of the upper and lower words. See
2541 // Heap::GetNumberStringCache.
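// Illustrative example: for the heap number 1.0, with bit pattern
// 0x3FF0000000000000, the hash is the xor of the two 32-bit halves,
// 0x3FF00000 ^ 0x00000000 = 0x3FF00000, which is then masked to a cache
// index below.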
2543 Label load_result_from_cache;
2544 JumpIfSmi(object, &is_smi);
2546 isolate()->factory()->heap_number_map(),
2550 STATIC_ASSERT(8 == kDoubleSize);
2551 movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2552 xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2553 andp(scratch, mask);
2554 // Each entry in the string cache consists of two pointer-sized fields,
2555 // but the times_twice_pointer_size (multiply-by-16) scale factor is
2556 // not supported by the x64 addressing modes.
2557 // So we premultiply the entry index before the lookup.
2558 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2560 Register index = scratch;
2561 Register probe = mask;
2563 FieldOperand(number_string_cache,
2566 FixedArray::kHeaderSize));
2567 JumpIfSmi(probe, not_found);
2568 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2569 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
2570 j(parity_even, not_found); // Bail out if NaN is involved.
2571 j(not_equal, not_found); // The cache did not contain this value.
2572 jmp(&load_result_from_cache);
2575 SmiToInteger32(scratch, object);
2576 andp(scratch, mask);
2577 // Each entry in the string cache consists of two pointer-sized fields,
2578 // but the times_twice_pointer_size (multiply-by-16) scale factor is
2579 // not supported by the x64 addressing modes.
2580 // So we premultiply the entry index before the lookup.
2581 shlp(scratch, Immediate(kPointerSizeLog2 + 1));
2583 // Check if the entry is the smi we are looking for.
2585 FieldOperand(number_string_cache,
2588 FixedArray::kHeaderSize));
2589 j(not_equal, not_found);
2591 // Get the result from the cache.
2592 bind(&load_result_from_cache);
2594 FieldOperand(number_string_cache,
2597 FixedArray::kHeaderSize + kPointerSize));
2598 IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
2602 void MacroAssembler::absps(XMMRegister dst) {
2603 static const struct V8_ALIGNED(16) {
2608 } float_absolute_constant =
2609 { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
2610 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_absolute_constant));
2611 andps(dst, Operand(kScratchRegister, 0));
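// The SSE helpers in this group all share one pattern: materialize the
// address of a 16-byte-aligned constant in kScratchRegister and combine
// it with dst. andps against 0x7FFFFFFF per lane clears the sign bits
// (absolute value), xorps against 0x80000000 flips them (negation), and
// xorps against all-ones is a bitwise NOT.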
2615 void MacroAssembler::abspd(XMMRegister dst) {
2616 static const struct V8_ALIGNED(16) {
2619 } double_absolute_constant =
2620 { V8_UINT64_C(0x7FFFFFFFFFFFFFFF), V8_UINT64_C(0x7FFFFFFFFFFFFFFF) };
2621 Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_absolute_constant));
2622 andpd(dst, Operand(kScratchRegister, 0));
2626 void MacroAssembler::negateps(XMMRegister dst) {
2627 static const struct V8_ALIGNED(16) {
2632 } float_negate_constant =
2633 { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
2634 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_negate_constant));
2635 xorps(dst, Operand(kScratchRegister, 0));
2639 void MacroAssembler::negatepd(XMMRegister dst) {
2640 static const struct V8_ALIGNED(16) {
2643 } double_absolute_constant =
2644 { V8_UINT64_C(0x8000000000000000), V8_UINT64_C(0x8000000000000000) };
2645 Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_absolute_constant));
2646 xorpd(dst, Operand(kScratchRegister, 0));
2650 void MacroAssembler::notps(XMMRegister dst) {
2651 static const struct V8_ALIGNED(16) {
2656 } float_not_constant =
2657 { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
2658 Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_not_constant));
2659 xorps(dst, Operand(kScratchRegister, 0));
2663 void MacroAssembler::pnegd(XMMRegister dst) {
2664 static const struct V8_ALIGNED(16) {
2669 } int32_one_constant = { 0x1, 0x1, 0x1, 0x1 };
2671 Set(kScratchRegister, reinterpret_cast<intptr_t>(&int32_one_constant));
2672 paddd(dst, Operand(kScratchRegister, 0));
2677 void MacroAssembler::JumpIfNotString(Register object,
2678 Register object_map,
2680 Label::Distance near_jump) {
2681 Condition is_smi = CheckSmi(object);
2682 j(is_smi, not_string, near_jump);
2683 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2684 j(above_equal, not_string, near_jump);
2688 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
2689 Register first_object,
2690 Register second_object,
2694 Label::Distance near_jump) {
2695 // Check that both objects are not smis.
2696 Condition either_smi = CheckEitherSmi(first_object, second_object);
2697 j(either_smi, on_fail, near_jump);
2699 // Load instance type for both strings.
2700 movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2701 movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2702 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2703 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2705 // Check that both are flat ASCII strings.
2706 ASSERT(kNotStringTag != 0);
2707 const int kFlatAsciiStringMask =
2708 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2709 const int kFlatAsciiStringTag =
2710 kStringTag | kOneByteStringTag | kSeqStringTag;
2712 andl(scratch1, Immediate(kFlatAsciiStringMask));
2713 andl(scratch2, Immediate(kFlatAsciiStringMask));
2714 // Interleave the bits to check both scratch1 and scratch2 in one test.
2715 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
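// The lea below computes scratch1 + scratch2 * 8. Since the mask and its
// 3-bit-shifted copy share no bits (checked by the ASSERT above), the two
// masked values occupy disjoint bit ranges in the sum, so a single cmpl
// against kFlatAsciiStringTag + (kFlatAsciiStringTag << 3) checks both
// strings at once.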
2716 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2718 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2719 j(not_equal, on_fail, near_jump);
2723 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
2724 Register instance_type,
2727 Label::Distance near_jump) {
2728 if (!scratch.is(instance_type)) {
2729 movl(scratch, instance_type);
2732 const int kFlatAsciiStringMask =
2733 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2735 andl(scratch, Immediate(kFlatAsciiStringMask));
2736 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2737 j(not_equal, failure, near_jump);
2741 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2742 Register first_object_instance_type,
2743 Register second_object_instance_type,
2747 Label::Distance near_jump) {
2748 // Load instance type for both strings.
2749 movp(scratch1, first_object_instance_type);
2750 movp(scratch2, second_object_instance_type);
2752 // Check that both are flat ASCII strings.
2753 ASSERT(kNotStringTag != 0);
2754 const int kFlatAsciiStringMask =
2755 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2756 const int kFlatAsciiStringTag =
2757 kStringTag | kOneByteStringTag | kSeqStringTag;
2759 andl(scratch1, Immediate(kFlatAsciiStringMask));
2760 andl(scratch2, Immediate(kFlatAsciiStringMask));
2761 // Interleave the bits to check both scratch1 and scratch2 in one test.
2762 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2763 leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2765 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2766 j(not_equal, on_fail, near_jump);
2771 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2772 T operand_or_register,
2773 Label* not_unique_name,
2774 Label::Distance distance) {
2775 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2777 masm->testb(operand_or_register,
2778 Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2779 masm->j(zero, &succeed, Label::kNear);
2780 masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2781 masm->j(not_equal, not_unique_name, distance);
2783 masm->bind(&succeed);
2787 void MacroAssembler::JumpIfNotUniqueName(Operand operand,
2788 Label* not_unique_name,
2789 Label::Distance distance) {
2790 JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2794 void MacroAssembler::JumpIfNotUniqueName(Register reg,
2795 Label* not_unique_name,
2796 Label::Distance distance) {
2797 JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2801 void MacroAssembler::Move(Register dst, Register src) {
2808 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2809 AllowDeferredHandleDereference smi_check;
2810 if (source->IsSmi()) {
2811 Move(dst, Smi::cast(*source));
2813 MoveHeapObject(dst, source);
2818 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2819 AllowDeferredHandleDereference smi_check;
2820 if (source->IsSmi()) {
2821 Move(dst, Smi::cast(*source));
2823 MoveHeapObject(kScratchRegister, source);
2824 movp(dst, kScratchRegister);
2829 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2830 AllowDeferredHandleDereference smi_check;
2831 if (source->IsSmi()) {
2832 Cmp(dst, Smi::cast(*source));
2834 MoveHeapObject(kScratchRegister, source);
2835 cmpp(dst, kScratchRegister);
2840 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2841 AllowDeferredHandleDereference smi_check;
2842 if (source->IsSmi()) {
2843 Cmp(dst, Smi::cast(*source));
2845 MoveHeapObject(kScratchRegister, source);
2846 cmpp(dst, kScratchRegister);
2851 void MacroAssembler::Push(Handle<Object> source) {
2852 AllowDeferredHandleDereference smi_check;
2853 if (source->IsSmi()) {
2854 Push(Smi::cast(*source));
2856 MoveHeapObject(kScratchRegister, source);
2857 Push(kScratchRegister);
2862 void MacroAssembler::MoveHeapObject(Register result,
2863 Handle<Object> object) {
2864 AllowDeferredHandleDereference using_raw_address;
2865 ASSERT(object->IsHeapObject());
2866 if (isolate()->heap()->InNewSpace(*object)) {
2867 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2868 Move(result, cell, RelocInfo::CELL);
2869 movp(result, Operand(result, 0));
2871 Move(result, object, RelocInfo::EMBEDDED_OBJECT);
2876 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
2878 AllowDeferredHandleDereference embedding_raw_address;
2879 load_rax(cell.location(), RelocInfo::CELL);
2881 Move(dst, cell, RelocInfo::CELL);
2882 movp(dst, Operand(dst, 0));
2887 void MacroAssembler::Drop(int stack_elements) {
2888 if (stack_elements > 0) {
2889 addp(rsp, Immediate(stack_elements * kPointerSize));
2894 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
2896 ASSERT(stack_elements > 0);
2897 if (kPointerSize == kInt64Size && stack_elements == 1) {
2898 popq(MemOperand(rsp, 0));
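// popq with a memory destination computes the destination address after
// the stack pointer has been incremented, so this single instruction
// moves the return address up one slot, dropping the one element that
// was beneath it.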
2902 PopReturnAddressTo(scratch);
2903 Drop(stack_elements);
2904 PushReturnAddressFrom(scratch);
2908 void MacroAssembler::Push(Register src) {
2909 if (kPointerSize == kInt64Size) {
2912 // x32 uses 64-bit push for rbp in the prologue.
2913 ASSERT(src.code() != rbp.code());
2914 leal(rsp, Operand(rsp, -4));
2915 movp(Operand(rsp, 0), src);
2920 void MacroAssembler::Push(const Operand& src) {
2921 if (kPointerSize == kInt64Size) {
2924 movp(kScratchRegister, src);
2925 leal(rsp, Operand(rsp, -4));
2926 movp(Operand(rsp, 0), kScratchRegister);
2931 void MacroAssembler::PushQuad(const Operand& src) {
2932 if (kPointerSize == kInt64Size) {
2935 movp(kScratchRegister, src);
2936 pushq(kScratchRegister);
2941 void MacroAssembler::Push(Immediate value) {
2942 if (kPointerSize == kInt64Size) {
2945 leal(rsp, Operand(rsp, -4));
2946 movp(Operand(rsp, 0), value);
2951 void MacroAssembler::PushImm32(int32_t imm32) {
2952 if (kPointerSize == kInt64Size) {
2955 leal(rsp, Operand(rsp, -4));
2956 movp(Operand(rsp, 0), Immediate(imm32));
2961 void MacroAssembler::Pop(Register dst) {
2962 if (kPointerSize == kInt64Size) {
2965 // x32 uses 64-bit pop for rbp in the epilogue.
2966 ASSERT(dst.code() != rbp.code());
2967 movp(dst, Operand(rsp, 0));
2968 leal(rsp, Operand(rsp, 4));
2973 void MacroAssembler::Pop(const Operand& dst) {
2974 if (kPointerSize == kInt64Size) {
2977 Register scratch = dst.AddressUsesRegister(kScratchRegister)
2978 ? kSmiConstantRegister : kScratchRegister;
2979 movp(scratch, Operand(rsp, 0));
2981 leal(rsp, Operand(rsp, 4));
2982 if (scratch.is(kSmiConstantRegister)) {
2983 // Restore kSmiConstantRegister.
2984 movp(kSmiConstantRegister,
2985 reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
2986 Assembler::RelocInfoNone());
2992 void MacroAssembler::PopQuad(const Operand& dst) {
2993 if (kPointerSize == kInt64Size) {
2996 popq(kScratchRegister);
2997 movp(dst, kScratchRegister);
3002 void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
3005 ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
3006 offset <= SharedFunctionInfo::kSize &&
3007 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
3008 if (kPointerSize == kInt64Size) {
3009 movsxlq(dst, FieldOperand(base, offset));
3011 movp(dst, FieldOperand(base, offset));
3012 SmiToInteger32(dst, dst);
3017 void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
3020 ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
3021 offset <= SharedFunctionInfo::kSize &&
3022 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
3023 if (kPointerSize == kInt32Size) {
3024 // On x32, this field is represented as a SMI.
3027 int byte_offset = bits / kBitsPerByte;
3028 int bit_in_byte = bits & (kBitsPerByte - 1);
3029 testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
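// Hypothetical example: for a bit position of 13, the code above tests
// byte offset 13 / 8 = 1 with mask 1 << (13 % 8) = 0x20, reaching any
// single bit of the field with one testb and no full-word load.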
3033 void MacroAssembler::Jump(ExternalReference ext) {
3034 LoadAddress(kScratchRegister, ext);
3035 jmp(kScratchRegister);
3039 void MacroAssembler::Jump(const Operand& op) {
3040 if (kPointerSize == kInt64Size) {
3043 movp(kScratchRegister, op);
3044 jmp(kScratchRegister);
3049 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
3050 Move(kScratchRegister, destination, rmode);
3051 jmp(kScratchRegister);
3055 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
3056 // TODO(X64): Inline this
3057 jmp(code_object, rmode);
3061 int MacroAssembler::CallSize(ExternalReference ext) {
3062 // The encoding of call kScratchRegister (r10) is: REX.B FF D2 (three bytes).
3063 return LoadAddressSize(ext) +
3064 Assembler::kCallScratchRegisterInstructionLength;
3068 void MacroAssembler::Call(ExternalReference ext) {
3070 int end_position = pc_offset() + CallSize(ext);
3072 LoadAddress(kScratchRegister, ext);
3073 call(kScratchRegister);
3075 CHECK_EQ(end_position, pc_offset());
3080 void MacroAssembler::Call(const Operand& op) {
3081 if (kPointerSize == kInt64Size) {
3084 movp(kScratchRegister, op);
3085 call(kScratchRegister);
3090 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
3092 int end_position = pc_offset() + CallSize(destination);
3094 Move(kScratchRegister, destination, rmode);
3095 call(kScratchRegister);
3097 CHECK_EQ(pc_offset(), end_position);
3102 void MacroAssembler::Call(Handle<Code> code_object,
3103 RelocInfo::Mode rmode,
3104 TypeFeedbackId ast_id) {
3106 int end_position = pc_offset() + CallSize(code_object);
3108 ASSERT(RelocInfo::IsCodeTarget(rmode) ||
3109 rmode == RelocInfo::CODE_AGE_SEQUENCE);
3110 call(code_object, rmode, ast_id);
3112 CHECK_EQ(end_position, pc_offset());
3117 void MacroAssembler::Pushad() {
3122 // Not pushing rsp or rbp.
3127 // r10 is kScratchRegister.
3129 // r12 is kSmiConstantRegister.
3130 // r13 is kRootRegister.
3133 STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
3134 // Use lea for symmetry with Popad.
3136 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3137 leap(rsp, Operand(rsp, -sp_delta));
3141 void MacroAssembler::Popad() {
3142 // Popad must not change the flags, so use lea instead of addq.
3144 (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3145 leap(rsp, Operand(rsp, sp_delta));
3160 void MacroAssembler::Dropad() {
3161 addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
3165 // Order in which general registers are pushed by Pushad:
3166 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
3168 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
3188 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
3189 const Immediate& imm) {
3190 movp(SafepointRegisterSlot(dst), imm);
3194 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
3195 movp(SafepointRegisterSlot(dst), src);
3199 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
3200 movp(dst, SafepointRegisterSlot(src));
3204 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
3205 return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
3209 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3210 int handler_index) {
3211 // Adjust this code if not the case.
3212 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3214 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3215 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3216 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3217 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3218 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3220 // We will build up the handler from the bottom by pushing on the stack.
3221 // First push the frame pointer and context.
3222 if (kind == StackHandler::JS_ENTRY) {
3223 // The frame pointer does not point to a JS frame so we save NULL for
3224 // rbp. We expect the code throwing an exception to check rbp before
3225 // dereferencing it to restore the context.
3226 pushq(Immediate(0)); // NULL frame pointer.
3227 Push(Smi::FromInt(0)); // No context.
3233 // Push the state and the code object.
3235 StackHandler::IndexField::encode(handler_index) |
3236 StackHandler::KindField::encode(kind);
3237 Push(Immediate(state));
3240 // Link the current handler as the next handler.
3241 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3242 Push(ExternalOperand(handler_address));
3243 // Set this new handler as the current one.
3244 movp(ExternalOperand(handler_address), rsp);
3248 void MacroAssembler::PopTryHandler() {
3249 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3250 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3251 Pop(ExternalOperand(handler_address));
3252 addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
3256 void MacroAssembler::JumpToHandlerEntry() {
3257 // Compute the handler entry address and jump to it. The handler table is
3258 // a fixed array of (smi-tagged) code offsets.
3259 // rax = exception, rdi = code object, rdx = state.
3260 movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
3261 shrp(rdx, Immediate(StackHandler::kKindWidth));
3263 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
3264 SmiToInteger64(rdx, rdx);
3265 leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
3270 void MacroAssembler::Throw(Register value) {
3271 // Adjust this code if not the case.
3272 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3274 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3275 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3276 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3277 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3278 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3280 // The exception is expected in rax.
3281 if (!value.is(rax)) {
3284 // Drop the stack pointer to the top of the top handler.
3285 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3286 movp(rsp, ExternalOperand(handler_address));
3287 // Restore the next handler.
3288 Pop(ExternalOperand(handler_address));
3290 // Remove the code object and state, compute the handler address in rdi.
3291 Pop(rdi); // Code object.
3292 Pop(rdx); // Offset and state.
3294 // Restore the context and frame pointer.
3295 Pop(rsi); // Context.
3296 popq(rbp); // Frame pointer.
3298 // If the handler is a JS frame, restore the context to the frame.
3299 // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either rbp or rsi.
3303 j(zero, &skip, Label::kNear);
3304 movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
3307 JumpToHandlerEntry();
3311 void MacroAssembler::ThrowUncatchable(Register value) {
3312 // Adjust this code if not the case.
3313 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
3315 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3316 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3317 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3318 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3319 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3321 // The exception is expected in rax.
3322 if (!value.is(rax)) {
3325 // Drop the stack pointer to the top of the top stack handler.
3326 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3327 Load(rsp, handler_address);
3329 // Unwind the handlers until the top ENTRY handler is found.
3330 Label fetch_next, check_kind;
3331 jmp(&check_kind, Label::kNear);
3333 movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
3336 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3337 testl(Operand(rsp, StackHandlerConstants::kStateOffset),
3338 Immediate(StackHandler::KindField::kMask));
3339 j(not_zero, &fetch_next);
3341 // Set the top handler address to the next handler past the top ENTRY handler.
3342 Pop(ExternalOperand(handler_address));
3344 // Remove the code object and state, compute the handler address in rdi.
3345 Pop(rdi); // Code object.
3346 Pop(rdx); // Offset and state.
3348 // Clear the context pointer and frame pointer (0 was saved in the handler).
3352 JumpToHandlerEntry();
3356 void MacroAssembler::Ret() {
3361 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
3362 if (is_uint16(bytes_dropped)) {
3365 PopReturnAddressTo(scratch);
3366 addp(rsp, Immediate(bytes_dropped));
3367 PushReturnAddressFrom(scratch);
3373 void MacroAssembler::FCmp() {
3379 void MacroAssembler::CmpObjectType(Register heap_object,
3382 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3383 CmpInstanceType(map, type);
3387 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3388 cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
3389 Immediate(static_cast<int8_t>(type)));
3393 void MacroAssembler::CheckFastElements(Register map,
3395 Label::Distance distance) {
3396 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3397 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3398 STATIC_ASSERT(FAST_ELEMENTS == 2);
3399 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3400 cmpb(FieldOperand(map, Map::kBitField2Offset),
3401 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3402 j(above, fail, distance);
3406 void MacroAssembler::CheckFastObjectElements(Register map,
3408 Label::Distance distance) {
3409 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3410 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3411 STATIC_ASSERT(FAST_ELEMENTS == 2);
3412 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3413 cmpb(FieldOperand(map, Map::kBitField2Offset),
3414 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3415 j(below_equal, fail, distance);
3416 cmpb(FieldOperand(map, Map::kBitField2Offset),
3417 Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3418 j(above, fail, distance);
3422 void MacroAssembler::CheckFastSmiElements(Register map,
3424 Label::Distance distance) {
3425 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3426 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3427 cmpb(FieldOperand(map, Map::kBitField2Offset),
3428 Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3429 j(above, fail, distance);
3433 void MacroAssembler::StoreNumberToDoubleElements(
3434 Register maybe_number,
3437 XMMRegister xmm_scratch,
3439 int elements_offset) {
3440 Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
3442 JumpIfSmi(maybe_number, &smi_value, Label::kNear);
3444 CheckMap(maybe_number,
3445 isolate()->factory()->heap_number_map(),
3449 // Double value, canonicalize NaN.
3450 uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
3451 cmpl(FieldOperand(maybe_number, offset),
3452 Immediate(kNaNOrInfinityLowerBoundUpper32));
3453 j(greater_equal, &maybe_nan, Label::kNear);
3456 movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
3457 bind(&have_double_value);
3458 movsd(FieldOperand(elements, index, times_8,
3459 FixedDoubleArray::kHeaderSize - elements_offset),
3464 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3465 // it's an Infinity, and the non-NaN code path applies.
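// Illustrative example: +Infinity has the upper word 0x7FF00000 and a
// zero lower word, so the equality case with a zero fraction falls
// through to the ordinary store, while any non-zero fraction bits mean
// NaN.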
3466 j(greater, &is_nan, Label::kNear);
3467 cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
3470 // Convert all NaNs to the same canonical NaN value when they are stored in
3471 // the double array.
3472 Set(kScratchRegister, BitCast<uint64_t>(
3473 FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
3474 movq(xmm_scratch, kScratchRegister);
3475 jmp(&have_double_value, Label::kNear);
3478 // Value is a smi. Convert it to a double and store.
3479 // Preserve original value.
3480 SmiToInteger32(kScratchRegister, maybe_number);
3481 Cvtlsi2sd(xmm_scratch, kScratchRegister);
3482 movsd(FieldOperand(elements, index, times_8,
3483 FixedDoubleArray::kHeaderSize - elements_offset),
3489 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3490 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3494 void MacroAssembler::CheckMap(Register obj,
3497 SmiCheckType smi_check_type) {
3498 if (smi_check_type == DO_SMI_CHECK) {
3499 JumpIfSmi(obj, fail);
3502 CompareMap(obj, map);
3507 void MacroAssembler::ClampUint8(Register reg) {
3509 testl(reg, Immediate(0xFFFFFF00));
3510 j(zero, &done, Label::kNear);
3511 setcc(negative, reg); // 1 if negative, 0 if positive.
3512 decb(reg); // 0 if negative, 255 if positive.
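// In other words: testing the top 24 bits detects any value outside
// [0, 255] in one instruction, and the setcc/decb pair then saturates
// negative inputs to 0 and too-large inputs to 255 without branching.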
3517 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3518 XMMRegister temp_xmm_reg,
3519 Register result_reg) {
3522 xorps(temp_xmm_reg, temp_xmm_reg);
3523 cvtsd2si(result_reg, input_reg);
3524 testl(result_reg, Immediate(0xFFFFFF00));
3525 j(zero, &done, Label::kNear);
3526 cmpl(result_reg, Immediate(1));
3527 j(overflow, &conv_failure, Label::kNear);
3528 movl(result_reg, Immediate(0));
3529 setcc(sign, result_reg);
3530 subl(result_reg, Immediate(1));
3531 andl(result_reg, Immediate(255));
3532 jmp(&done, Label::kNear);
3533 bind(&conv_failure);
3535 ucomisd(input_reg, temp_xmm_reg);
3536 j(below, &done, Label::kNear);
3537 Set(result_reg, 255);
3542 void MacroAssembler::LoadUint32(XMMRegister dst,
3544 if (FLAG_debug_code) {
3545 cmpq(src, Immediate(0xffffffff));
3546 Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3548 cvtqsi2sd(dst, src);
3552 void MacroAssembler::SlowTruncateToI(Register result_reg,
3555 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
3556 call(stub.GetCode(), RelocInfo::CODE_TARGET);
3560 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3561 Register input_reg) {
3563 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3564 cvttsd2siq(result_reg, xmm0);
3565 cmpq(result_reg, Immediate(1));
3566 j(no_overflow, &done, Label::kNear);
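// On overflow or NaN, cvttsd2siq produces the "integer indefinite" value
// 0x8000000000000000 (the minimal int64). cmpq against 1 computes
// result - 1, which signed-overflows only for that value, so the
// no_overflow branch is taken exactly when the truncation succeeded.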
3569 if (input_reg.is(result_reg)) {
3570 subp(rsp, Immediate(kDoubleSize));
3571 movsd(MemOperand(rsp, 0), xmm0);
3572 SlowTruncateToI(result_reg, rsp, 0);
3573 addp(rsp, Immediate(kDoubleSize));
3575 SlowTruncateToI(result_reg, input_reg);
3579 // Keep our invariant that the upper 32 bits are zero.
3580 movl(result_reg, result_reg);
3584 void MacroAssembler::TruncateDoubleToI(Register result_reg,
3585 XMMRegister input_reg) {
3587 cvttsd2siq(result_reg, input_reg);
3588 cmpq(result_reg, Immediate(1));
3589 j(no_overflow, &done, Label::kNear);
3591 subp(rsp, Immediate(kDoubleSize));
3592 movsd(MemOperand(rsp, 0), input_reg);
3593 SlowTruncateToI(result_reg, rsp, 0);
3594 addp(rsp, Immediate(kDoubleSize));
3597 // Keep our invariant that the upper 32 bits are zero.
3598 movl(result_reg, result_reg);
3602 void MacroAssembler::DoubleToI(Register result_reg,
3603 XMMRegister input_reg,
3604 XMMRegister scratch,
3605 MinusZeroMode minus_zero_mode,
3606 Label* conversion_failed,
3607 Label::Distance dst) {
3608 cvttsd2si(result_reg, input_reg);
3609 Cvtlsi2sd(xmm0, result_reg);
3610 ucomisd(xmm0, input_reg);
3611 j(not_equal, conversion_failed, dst);
3612 j(parity_even, conversion_failed, dst); // NaN.
3613 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3615 // The integer converted back is equal to the original. We
3616 // only have to test if we got -0 as an input.
3617 testl(result_reg, result_reg);
3618 j(not_zero, &done, Label::kNear);
3619 movmskpd(result_reg, input_reg);
3620 // Bit 0 contains the sign of the double in input_reg.
3621 // If input was positive, we are ok and return 0, otherwise
3622 // jump to conversion_failed.
3623 andl(result_reg, Immediate(1));
3624 j(not_zero, conversion_failed, dst);
3630 void MacroAssembler::TaggedToI(Register result_reg,
3633 MinusZeroMode minus_zero_mode,
3634 Label* lost_precision,
3635 Label::Distance dst) {
3637 ASSERT(!temp.is(xmm0));
3639 // Heap number map check.
3640 CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3641 Heap::kHeapNumberMapRootIndex);
3642 j(not_equal, lost_precision, dst);
3644 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3645 cvttsd2si(result_reg, xmm0);
3646 Cvtlsi2sd(temp, result_reg);
3647 ucomisd(xmm0, temp);
3648 RecordComment("Deferred TaggedToI: lost precision");
3649 j(not_equal, lost_precision, dst);
3650 RecordComment("Deferred TaggedToI: NaN");
3651 j(parity_even, lost_precision, dst); // NaN.
3652 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3653 testl(result_reg, result_reg);
3654 j(not_zero, &done, Label::kNear);
3655 movmskpd(result_reg, xmm0);
3656 andl(result_reg, Immediate(1));
3657 j(not_zero, lost_precision, dst);
3663 void MacroAssembler::LoadInstanceDescriptors(Register map,
3664 Register descriptors) {
3665 movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3669 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3670 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3671 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3675 void MacroAssembler::EnumLength(Register dst, Register map) {
3676 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3677 movl(dst, FieldOperand(map, Map::kBitField3Offset));
3678 andl(dst, Immediate(Map::EnumLengthBits::kMask));
3679 Integer32ToSmi(dst, dst);
3683 void MacroAssembler::DispatchMap(Register obj,
3686 Handle<Code> success,
3687 SmiCheckType smi_check_type) {
3689 if (smi_check_type == DO_SMI_CHECK) {
3690 JumpIfSmi(obj, &fail);
3692 Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3693 j(equal, success, RelocInfo::CODE_TARGET);
3699 void MacroAssembler::AssertNumber(Register object) {
3700 if (emit_debug_code()) {
3702 Condition is_smi = CheckSmi(object);
3703 j(is_smi, &ok, Label::kNear);
3704 Cmp(FieldOperand(object, HeapObject::kMapOffset),
3705 isolate()->factory()->heap_number_map());
3706 Check(equal, kOperandIsNotANumber);
3712 void MacroAssembler::AssertNotSmi(Register object) {
3713 if (emit_debug_code()) {
3714 Condition is_smi = CheckSmi(object);
3715 Check(NegateCondition(is_smi), kOperandIsASmi);
3720 void MacroAssembler::AssertSmi(Register object) {
3721 if (emit_debug_code()) {
3722 Condition is_smi = CheckSmi(object);
3723 Check(is_smi, kOperandIsNotASmi);
3728 void MacroAssembler::AssertSmi(const Operand& object) {
3729 if (emit_debug_code()) {
3730 Condition is_smi = CheckSmi(object);
3731 Check(is_smi, kOperandIsNotASmi);
3736 void MacroAssembler::AssertZeroExtended(Register int32_register) {
3737 if (emit_debug_code()) {
3738 ASSERT(!int32_register.is(kScratchRegister));
3739 movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3740 cmpq(kScratchRegister, int32_register);
3741 Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3746 void MacroAssembler::AssertString(Register object) {
3747 if (emit_debug_code()) {
3748 testb(object, Immediate(kSmiTagMask));
3749 Check(not_equal, kOperandIsASmiAndNotAString);
3751 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3752 CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3754 Check(below, kOperandIsNotAString);
3759 void MacroAssembler::AssertName(Register object) {
3760 if (emit_debug_code()) {
3761 testb(object, Immediate(kSmiTagMask));
3762 Check(not_equal, kOperandIsASmiAndNotAName);
3764 movp(object, FieldOperand(object, HeapObject::kMapOffset));
3765 CmpInstanceType(object, LAST_NAME_TYPE);
3767 Check(below_equal, kOperandIsNotAName);
3772 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3773 if (emit_debug_code()) {
3774 Label done_checking;
3775 AssertNotSmi(object);
3776 Cmp(object, isolate()->factory()->undefined_value());
3777 j(equal, &done_checking);
3778 Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3779 Assert(equal, kExpectedUndefinedOrCell);
3780 bind(&done_checking);
3785 void MacroAssembler::AssertRootValue(Register src,
3786 Heap::RootListIndex root_value_index,
3787 BailoutReason reason) {
3788 if (emit_debug_code()) {
3789 ASSERT(!src.is(kScratchRegister));
3790 LoadRoot(kScratchRegister, root_value_index);
3791 cmpp(src, kScratchRegister);
3792 Check(equal, reason);
3798 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3800 Register instance_type) {
3801 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3802 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3803 STATIC_ASSERT(kNotStringTag != 0);
3804 testb(instance_type, Immediate(kIsNotStringMask));
3809 Condition MacroAssembler::IsObjectNameType(Register heap_object,
3811 Register instance_type) {
3812 movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3813 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3814 cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
3819 void MacroAssembler::TryGetFunctionPrototype(Register function,
3822 bool miss_on_bound_function) {
3823 // Check that the receiver isn't a smi.
3824 testl(function, Immediate(kSmiTagMask));
3827 // Check that the function really is a function.
3828 CmpObjectType(function, JS_FUNCTION_TYPE, result);
3831 if (miss_on_bound_function) {
3832 movp(kScratchRegister,
3833 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3834 // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte field).
3836 TestBitSharedFunctionInfoSpecialField(kScratchRegister,
3837 SharedFunctionInfo::kCompilerHintsOffset,
3838 SharedFunctionInfo::kBoundFunction);
3842 // Make sure that the function has an instance prototype.
3844 testb(FieldOperand(result, Map::kBitFieldOffset),
3845 Immediate(1 << Map::kHasNonInstancePrototype));
3846 j(not_zero, &non_instance, Label::kNear);
3848 // Get the prototype or initial map from the function.
3850 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3852 // If the prototype or initial map is the hole, don't return it and
3853 // simply miss the cache instead. This will allow us to allocate a
3854 // prototype object on-demand in the runtime system.
3855 CompareRoot(result, Heap::kTheHoleValueRootIndex);
3858 // If the function does not have an initial map, we're done.
3860 CmpObjectType(result, MAP_TYPE, kScratchRegister);
3861 j(not_equal, &done, Label::kNear);
3863 // Get the prototype from the initial map.
3864 movp(result, FieldOperand(result, Map::kPrototypeOffset));
3865 jmp(&done, Label::kNear);
3867 // Non-instance prototype: fetch the prototype from the constructor field of the initial map.
3869 bind(&non_instance);
3870 movp(result, FieldOperand(result, Map::kConstructorOffset));
3877 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3878 if (FLAG_native_code_counters && counter->Enabled()) {
3879 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3880 movl(counter_operand, Immediate(value));
3885 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3887 if (FLAG_native_code_counters && counter->Enabled()) {
3888 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3890 incl(counter_operand);
3892 addl(counter_operand, Immediate(value));
3898 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3900 if (FLAG_native_code_counters && counter->Enabled()) {
3901 Operand counter_operand = ExternalOperand(ExternalReference(counter));
3903 decl(counter_operand);
3905 subl(counter_operand, Immediate(value));
3911 void MacroAssembler::DebugBreak() {
3912 Set(rax, 0); // No arguments.
3913 LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3914 CEntryStub ces(isolate(), 1);
3915 ASSERT(AllowThisStubCall(&ces));
3916 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3920 void MacroAssembler::InvokeCode(Register code,
3921 const ParameterCount& expected,
3922 const ParameterCount& actual,
3924 const CallWrapper& call_wrapper) {
3925 // You can't call a function without a valid frame.
3926 ASSERT(flag == JUMP_FUNCTION || has_frame());
3929 bool definitely_mismatches = false;
3930 InvokePrologue(expected,
3932 Handle<Code>::null(),
3935 &definitely_mismatches,
3939 if (!definitely_mismatches) {
3940 if (flag == CALL_FUNCTION) {
3941 call_wrapper.BeforeCall(CallSize(code));
3943 call_wrapper.AfterCall();
3945 ASSERT(flag == JUMP_FUNCTION);
3953 void MacroAssembler::InvokeFunction(Register function,
3954 const ParameterCount& actual,
3956 const CallWrapper& call_wrapper) {
3957 // You can't call a function without a valid frame.
3958 ASSERT(flag == JUMP_FUNCTION || has_frame());
3960 ASSERT(function.is(rdi));
3961 movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3962 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3963 LoadSharedFunctionInfoSpecialField(rbx, rdx,
3964 SharedFunctionInfo::kFormalParameterCountOffset);
3965 // Advances rdx to the end of the Code object header, to the start of
3966 // the executable code.
3967 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3969 ParameterCount expected(rbx);
3970 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3974 void MacroAssembler::InvokeFunction(Register function,
3975 const ParameterCount& expected,
3976 const ParameterCount& actual,
3978 const CallWrapper& call_wrapper) {
3979 // You can't call a function without a valid frame.
3980 ASSERT(flag == JUMP_FUNCTION || has_frame());
3982 ASSERT(function.is(rdi));
3983 movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3984 // Advances rdx to the end of the Code object header, to the start of
3985 // the executable code.
3986 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3988 InvokeCode(rdx, expected, actual, flag, call_wrapper);
3992 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3993 const ParameterCount& expected,
3994 const ParameterCount& actual,
3996 const CallWrapper& call_wrapper) {
3997 Move(rdi, function);
3998 InvokeFunction(rdi, expected, actual, flag, call_wrapper);
4002 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4003 const ParameterCount& actual,
4004 Handle<Code> code_constant,
4005 Register code_register,
4007 bool* definitely_mismatches,
4009 Label::Distance near_jump,
4010 const CallWrapper& call_wrapper) {
4011 bool definitely_matches = false;
4012 *definitely_mismatches = false;
4014 if (expected.is_immediate()) {
4015 ASSERT(actual.is_immediate());
4016 if (expected.immediate() == actual.immediate()) {
4017 definitely_matches = true;
4019 Set(rax, actual.immediate());
4020 if (expected.immediate() ==
4021 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
4022 // Don't worry about adapting arguments for built-ins that
4023 // don't want that done. Skip the adaptation code by making it look
4024 // like we have a match between the expected and actual number of arguments.
4026 definitely_matches = true;
4028 *definitely_mismatches = true;
4029 Set(rbx, expected.immediate());
4033 if (actual.is_immediate()) {
4034 // Expected is in register, actual is immediate. This is the
4035 // case when we invoke function values without going through the IC mechanism.
4037 cmpp(expected.reg(), Immediate(actual.immediate()));
4038 j(equal, &invoke, Label::kNear);
4039 ASSERT(expected.reg().is(rbx));
4040 Set(rax, actual.immediate());
4041 } else if (!expected.reg().is(actual.reg())) {
4042 // Both expected and actual are in (different) registers. This
4043 // is the case when we invoke functions using call and apply.
4044 cmpp(expected.reg(), actual.reg());
4045 j(equal, &invoke, Label::kNear);
4046 ASSERT(actual.reg().is(rax));
4047 ASSERT(expected.reg().is(rbx));
4051 if (!definitely_matches) {
4052 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
4053 if (!code_constant.is_null()) {
4054 Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
4055 addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
4056 } else if (!code_register.is(rdx)) {
4057 movp(rdx, code_register);
4060 if (flag == CALL_FUNCTION) {
4061 call_wrapper.BeforeCall(CallSize(adaptor));
4062 Call(adaptor, RelocInfo::CODE_TARGET);
4063 call_wrapper.AfterCall();
4064 if (!*definitely_mismatches) {
4065 jmp(done, near_jump);
4068 Jump(adaptor, RelocInfo::CODE_TARGET);
4075 void MacroAssembler::StubPrologue() {
4076 pushq(rbp); // Caller's frame pointer.
4078 Push(rsi); // Callee's context.
4079 Push(Smi::FromInt(StackFrame::STUB));
4083 void MacroAssembler::Prologue(bool code_pre_aging) {
4084 PredictableCodeSizeScope predictable_code_size_scope(this,
4085 kNoCodeAgeSequenceLength);
4086 if (code_pre_aging) {
4087 // Pre-age the code.
4088 Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
4089 RelocInfo::CODE_AGE_SEQUENCE);
4090 Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
4092 pushq(rbp); // Caller's frame pointer.
4094 Push(rsi); // Callee's context.
4095 Push(rdi); // Callee's JS function.
4100 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4103 Push(rsi); // Context.
4104 Push(Smi::FromInt(type));
4105 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4106 Push(kScratchRegister);
4107 if (emit_debug_code()) {
4108 Move(kScratchRegister,
4109 isolate()->factory()->undefined_value(),
4110 RelocInfo::EMBEDDED_OBJECT);
4111 cmpp(Operand(rsp, 0), kScratchRegister);
4112 Check(not_equal, kCodeObjectNotProperlyPatched);
4117 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4118 if (emit_debug_code()) {
4119 Move(kScratchRegister, Smi::FromInt(type));
4120 cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
4121 Check(equal, kStackFrameTypesMustMatch);
4128 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
4129 // Set up the frame structure on the stack.
4130 // All constants are relative to the frame pointer of the exit frame.
4131 ASSERT(ExitFrameConstants::kCallerSPDisplacement ==
4132 kFPOnStackSize + kPCOnStackSize);
4133 ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
4134 ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
4138 // Reserve room for entry stack pointer and push the code object.
4139 ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
4140 Push(Immediate(0)); // Saved entry sp, patched before call.
4141 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4142 Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
4144 // Save the frame pointer and the context in top.
4146 movp(r14, rax); // Back up rax in a callee-saved register.
4149 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
4150 Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
4154 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
4155 bool save_doubles) {
4157 const int kShadowSpace = 4;
4158 arg_stack_space += kShadowSpace;
4160 // Optionally save all XMM registers.
4162 int space = XMMRegister::kMaxNumAllocatableRegisters * kSIMD128Size +
4163 arg_stack_space * kRegisterSize;
4164 subp(rsp, Immediate(space));
4165 int offset = -2 * kPointerSize;
4166 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4167 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4168 movups(Operand(rbp, offset - ((i + 1) * kSIMD128Size)), reg);
4170 } else if (arg_stack_space > 0) {
4171 subp(rsp, Immediate(arg_stack_space * kRegisterSize));
4174 // Get the required frame alignment for the OS.
4175 const int kFrameAlignment = OS::ActivationFrameAlignment();
4176 if (kFrameAlignment > 0) {
4177 ASSERT(IsPowerOf2(kFrameAlignment));
4178 ASSERT(is_int8(kFrameAlignment));
4179 andp(rsp, Immediate(-kFrameAlignment));
4182 // Patch the saved entry sp.
4183 movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
4187 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
4188 EnterExitFramePrologue(true);
4190 // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
4191 // so it must be retained across the C-call.
4192 int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
4193 leap(r15, Operand(rbp, r14, times_pointer_size, offset));
4195 EnterExitFrameEpilogue(arg_stack_space, save_doubles);
4199 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
4200 EnterExitFramePrologue(false);
4201 EnterExitFrameEpilogue(arg_stack_space, false);
4205 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
4209 int offset = -2 * kPointerSize;
4210 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
4211 XMMRegister reg = XMMRegister::FromAllocationIndex(i);
4212 movups(reg, Operand(rbp, offset - ((i + 1) * kSIMD128Size)));
4215 // Get the return address from the stack and restore the frame pointer.
4216 movp(rcx, Operand(rbp, kFPOnStackSize));
4217 movp(rbp, Operand(rbp, 0 * kPointerSize));
4219 // Drop everything up to and including the arguments and the receiver
4220 // from the caller stack.
4221 leap(rsp, Operand(r15, 1 * kPointerSize));
4223 PushReturnAddressFrom(rcx);
4225 LeaveExitFrameEpilogue(true);
4229 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
4233 LeaveExitFrameEpilogue(restore_context);
4237 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
4238 // Restore current context from top and clear it in debug mode.
4239 ExternalReference context_address(Isolate::kContextAddress, isolate());
4240 Operand context_operand = ExternalOperand(context_address);
4241 if (restore_context) {
4242 movp(rsi, context_operand);
4245 movp(context_operand, Immediate(0));
4248 // Clear the top frame.
4249 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
4251 Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
4252 movp(c_entry_fp_operand, Immediate(0));
4256 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
4259 Label same_contexts;
4261 ASSERT(!holder_reg.is(scratch));
4262 ASSERT(!scratch.is(kScratchRegister));
4263 // Load current lexical context from the stack frame.
4264 movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
4266 // When generating debug code, make sure the lexical context is set.
4267 if (emit_debug_code()) {
4268 cmpp(scratch, Immediate(0));
4269 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
4271 // Load the native context of the current context.
4273 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
4274 movp(scratch, FieldOperand(scratch, offset));
4275 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4277 // Check the context is a native context.
4278 if (emit_debug_code()) {
4279 Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
4280 isolate()->factory()->native_context_map());
4281 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4284 // Check if both contexts are the same.
4285 cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4286 j(equal, &same_contexts);
4288 // Compare security tokens.
4289 // Check that the security token in the calling global object is
4290 // compatible with the security token in the receiving global object.
4293 // Check the context is a native context.
4294 if (emit_debug_code()) {
4295 // Preserve original value of holder_reg.
4298 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4299 CompareRoot(holder_reg, Heap::kNullValueRootIndex);
4300 Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
4302 // Read the first word and compare it to native_context_map().
4303 movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
4304 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
4305 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4309 movp(kScratchRegister,
4310 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4312 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
4313 movp(scratch, FieldOperand(scratch, token_offset));
4314 cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
4317 bind(&same_contexts);
4321 // Compute the hash code from the untagged key. This must be kept in sync with
4322 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
4323 // code-stub-hydrogen.cc
4324 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
4325 // First of all, load the hash seed into scratch.
4326 LoadRoot(scratch, Heap::kHashSeedRootIndex);
4327 SmiToInteger32(scratch, scratch);
4329 // Xor the original key with the seed.
4332 // Compute the hash code from the untagged key. This must be kept in sync
4333 // with ComputeIntegerHash in utils.h.
4335 // hash = ~hash + (hash << 15);
4338 shll(scratch, Immediate(15));
4340 // hash = hash ^ (hash >> 12);
4342 shrl(scratch, Immediate(12));
4344 // hash = hash + (hash << 2);
4345 leal(r0, Operand(r0, r0, times_4, 0));
4346 // hash = hash ^ (hash >> 4);
4348 shrl(scratch, Immediate(4));
4350 // hash = hash * 2057;
4351 imull(r0, r0, Immediate(2057));
4352 // hash = hash ^ (hash >> 16);
4354 shrl(scratch, Immediate(16));
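// For reference, the equivalent computation in plain C++ (a sketch that
// mirrors the step comments above; see ComputeIntegerHash in utils.h):
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;
//   hash = hash ^ (hash >> 16);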
4360 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
4369 // elements - holds the slow-case elements of the receiver on entry.
4370 // Unchanged unless 'result' is the same register.
4372 // key - holds the smi key on entry.
4373 // Unchanged unless 'result' is the same register.
4375 // Scratch registers:
4377 // r0 - holds the untagged key on entry and holds the hash once computed.
4379 // r1 - used to hold the capacity mask of the dictionary
4381 // r2 - used for the index into the dictionary.
4383 // result - holds the result on exit if the load succeeded.
4384 // Allowed to be the same as 'key' or 'result'.
4385 // Unchanged on bailout so 'key' or 'result' can be used
4386 // in further computation.
4390 GetNumberHash(r0, r1);
4392 // Compute capacity mask.
4393 SmiToInteger32(r1, FieldOperand(elements,
4394 SeededNumberDictionary::kCapacityOffset));
4397 // Generate an unrolled loop that performs a few probes before giving up.
4398 for (int i = 0; i < kNumberDictionaryProbes; i++) {
4399 // Use r2 for index calculations and keep the hash intact in r0.
4401 // Compute the masked index: (hash + i + i * i) & mask.
4403 addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4407 // Scale the index by multiplying by the entry size.
4408 ASSERT(SeededNumberDictionary::kEntrySize == 3);
4409 leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
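// Each dictionary entry spans three pointers (key, value, details), so
// the probe index is multiplied by 3 here and then scaled by
// times_pointer_size at the accesses below: the key sits at
// kElementsStartOffset, the value one pointer later, and the details two
// pointers later.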
4411 // Check if the key matches.
4412 cmpp(key, FieldOperand(elements,
4415 SeededNumberDictionary::kElementsStartOffset));
4416 if (i != (kNumberDictionaryProbes - 1)) {
4424 // Check that the value is a normal property.
4425 const int kDetailsOffset =
4426 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4427 ASSERT_EQ(NORMAL, 0);
4428 Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4429 Smi::FromInt(PropertyDetails::TypeField::kMask));
4432 // Get the value at the masked, scaled index.
4433 const int kValueOffset =
4434 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4435 movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
4439 void MacroAssembler::LoadAllocationTopHelper(Register result,
4441 AllocationFlags flags) {
4442 ExternalReference allocation_top =
4443 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4445 // Just return if allocation top is already known.
4446 if ((flags & RESULT_CONTAINS_TOP) != 0) {
4447 // No use of scratch if allocation top is provided.
4448 ASSERT(!scratch.is_valid());
4450 // Assert that result actually contains top on entry.
4451 Operand top_operand = ExternalOperand(allocation_top);
4452 cmpp(result, top_operand);
4453 Check(equal, kUnexpectedAllocationTop);
4458 // Move address of new object to result. Use scratch register if available,
4459 // and keep address in scratch until call to UpdateAllocationTopHelper.
4460 if (scratch.is_valid()) {
4461 LoadAddress(scratch, allocation_top);
4462 movp(result, Operand(scratch, 0));
4464 Load(result, allocation_top);
4469 void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
4472 AllocationFlags flags) {
4473 if (kPointerSize == kDoubleSize) {
4474 if (FLAG_debug_code) {
4475 testl(result, Immediate(kDoubleAlignmentMask));
4476 Check(zero, kAllocationIsNotDoubleAligned);
4479 // Align the next allocation. Storing the filler map without checking top
4480 // is safe in new-space because the limit of the heap is aligned there.
4481 ASSERT(kPointerSize * 2 == kDoubleSize);
4482 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
4483 ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
4484 // Make sure scratch is not clobbered by this function as it might be
4485 // used in UpdateAllocationTopHelper later.
4486 ASSERT(!scratch.is(kScratchRegister));
4488 testl(result, Immediate(kDoubleAlignmentMask));
4489 j(zero, &aligned, Label::kNear);
4490 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
4491 ExternalReference allocation_limit =
4492 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4493 cmpp(result, ExternalOperand(allocation_limit));
4494 j(above_equal, gc_required);
4496 LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
4497 movp(Operand(result, 0), kScratchRegister);
4498 addp(result, Immediate(kDoubleSize / 2));
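// In effect, on targets where kPointerSize * 2 == kDoubleSize the code
// above does the following (sketch):
//
//   if (result & kDoubleAlignmentMask) {    // top is only pointer-aligned
//     *result = one_pointer_filler_map;     // keep the heap iterable
//     result += kDoubleSize / 2;            // advance by one pointer
//   }
//
// The extra limit check is only emitted for old-data-space allocations,
// where storing the filler without checking top would not be safe.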
4504 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
4506 AllocationFlags flags) {
4507 if (emit_debug_code()) {
4508 testp(result_end, Immediate(kObjectAlignmentMask));
4509 Check(zero, kUnalignedAllocationInNewSpace);
4512 ExternalReference allocation_top =
4513 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4516 if (scratch.is_valid()) {
4517 // Scratch already contains address of allocation top.
4518 movp(Operand(scratch, 0), result_end);
4520 Store(allocation_top, result_end);
4525 void MacroAssembler::Allocate(int object_size,
4527 Register result_end,
4530 AllocationFlags flags) {
4531 ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
4532 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
4533 if (!FLAG_inline_new) {
4534 if (emit_debug_code()) {
4535 // Trash the registers to simulate an allocation failure.
4536 movl(result, Immediate(0x7091));
4537 if (result_end.is_valid()) {
4538 movl(result_end, Immediate(0x7191));
4540 if (scratch.is_valid()) {
4541 movl(scratch, Immediate(0x7291));
4547 ASSERT(!result.is(result_end));
4549 // Load address of new object into result.
4550 LoadAllocationTopHelper(result, scratch, flags);
4552 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4553 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4556 // Calculate new top and bail out if new space is exhausted.
4557 ExternalReference allocation_limit =
4558 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4560 Register top_reg = result_end.is_valid() ? result_end : result;
4562 if (!top_reg.is(result)) {
4563 movp(top_reg, result);
4565 addp(top_reg, Immediate(object_size));
4566 j(carry, gc_required);
4567 Operand limit_operand = ExternalOperand(allocation_limit);
4568 cmpp(top_reg, limit_operand);
4569 j(above, gc_required);
4571 // Update allocation top.
4572 UpdateAllocationTopHelper(top_reg, scratch, flags);
4574 bool tag_result = (flags & TAG_OBJECT) != 0;
4575 if (top_reg.is(result)) {
4577 subp(result, Immediate(object_size - kHeapObjectTag));
4579 subp(result, Immediate(object_size));
4581 } else if (tag_result) {
4582 // Tag the result if requested.
4583 ASSERT(kHeapObjectTag == 1);
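// Taken together, the fast path above is a classic bump-pointer
// allocation; schematically (sketch only):
//
//   top = *allocation_top;                     // LoadAllocationTopHelper
//   new_top = top + object_size;
//   if (overflow || new_top > *allocation_limit) goto gc_required;
//   *allocation_top = new_top;                 // UpdateAllocationTopHelper
//   result = top + kHeapObjectTag;             // if TAG_OBJECT is set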
4589 void MacroAssembler::Allocate(int header_size,
4590 ScaleFactor element_size,
4591 Register element_count,
4593 Register result_end,
4596 AllocationFlags flags) {
4597 ASSERT((flags & SIZE_IN_WORDS) == 0);
4598 leap(result_end, Operand(element_count, element_size, header_size));
4599 Allocate(result_end, result, result_end, scratch, gc_required, flags);
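// That is, the object size is folded into a single leap as
//
//   result_end = header_size + (element_count << element_size)
//
// and the register-sized Allocate overload below does the rest.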
4603 void MacroAssembler::Allocate(Register object_size,
4605 Register result_end,
4608 AllocationFlags flags) {
4609 ASSERT((flags & SIZE_IN_WORDS) == 0);
4610 if (!FLAG_inline_new) {
4611 if (emit_debug_code()) {
4612 // Trash the registers to simulate an allocation failure.
4613 movl(result, Immediate(0x7091));
4614 movl(result_end, Immediate(0x7191));
4615 if (scratch.is_valid()) {
4616 movl(scratch, Immediate(0x7291));
4618 // object_size is left unchanged by this function.
4623 ASSERT(!result.is(result_end));
4625 // Load address of new object into result.
4626 LoadAllocationTopHelper(result, scratch, flags);
4628 if ((flags & DOUBLE_ALIGNMENT) != 0) {
4629 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4632 // Calculate new top and bail out if new space is exhausted.
4633 ExternalReference allocation_limit =
4634 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4635 if (!object_size.is(result_end)) {
4636 movp(result_end, object_size);
4638 addp(result_end, result);
4639 j(carry, gc_required);
4640 Operand limit_operand = ExternalOperand(allocation_limit);
4641 cmpp(result_end, limit_operand);
4642 j(above, gc_required);
4644 // Update allocation top.
4645 UpdateAllocationTopHelper(result_end, scratch, flags);
4647 // Tag the result if requested.
4648 if ((flags & TAG_OBJECT) != 0) {
4649 addp(result, Immediate(kHeapObjectTag));
4654 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
4655 ExternalReference new_space_allocation_top =
4656 ExternalReference::new_space_allocation_top_address(isolate());
4658 // Make sure the object has no tag before resetting top.
4659 andp(object, Immediate(~kHeapObjectTagMask));
4660 Operand top_operand = ExternalOperand(new_space_allocation_top);
4662 cmpp(object, top_operand);
4663 Check(below, kUndoAllocationOfNonAllocatedMemory);
4665 movp(top_operand, object);
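// Sketch of the operation above: resetting top to the object's untagged
// address frees that object and everything allocated after it:
//
//   object &= ~kHeapObjectTagMask;
//   ASSERT(object < *new_space_allocation_top);
//   *new_space_allocation_top = object;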
4669 void MacroAssembler::AllocateHeapNumber(Register result,
4671 Label* gc_required) {
4672 // Allocate heap number in new space.
4673 Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4676 LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
4677 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4681 #define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
4682 V(Float32x4, float32x4) \
4683 V(Float64x2, float64x2) \
4686 #define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(TYPE, type) \
4687 void MacroAssembler::Allocate##TYPE(Register result, \
4688 Register scratch1, \
4689 Register scratch2, \
4690 Register scratch3, \
4691 Label* gc_required) { \
4692 /* Allocate SIMD128 object. */ \
4693 Allocate(TYPE::kSize, result, scratch1, no_reg, gc_required, TAG_OBJECT);\
4694 Handle<Map> simd128_map( \
4695 isolate()->native_context()->type##_function()->initial_map()); \
4696 MoveHeapObject(kScratchRegister, simd128_map); \
4697 movp(FieldOperand(result, JSObject::kMapOffset), \
4698 kScratchRegister); \
4699 MoveHeapObject(kScratchRegister, \
4700 isolate()->factory()->empty_fixed_array()); \
4701 movp(FieldOperand(result, JSObject::kPropertiesOffset), \
4702 kScratchRegister); \
4703 movp(FieldOperand(result, JSObject::kElementsOffset), \
4704 kScratchRegister); \
4705 /* Allocate FixedTypedArray object. */ \
4706 Allocate(FixedTypedArrayBase::kDataOffset + k##TYPE##Size, \
4707 scratch1, scratch2, no_reg, gc_required, TAG_OBJECT); \
4708 MoveHeapObject(kScratchRegister, \
4709 isolate()->factory()->fixed_##type##_array_map()); \
4710 movp(FieldOperand(scratch1, FixedTypedArrayBase::kMapOffset), \
4711 kScratchRegister); \
4712 movp(scratch3, Immediate(1)); \
4713 Integer32ToSmi(scratch2, scratch3); \
4714 movp(FieldOperand(scratch1, FixedTypedArrayBase::kLengthOffset), \
4716 /* Assign FixedTypedArray object to SIMD128 object. */ \
4717 movp(FieldOperand(result, TYPE::kValueOffset), scratch1); \
4720 SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
4723 void MacroAssembler::AllocateTwoByteString(Register result,
4728 Label* gc_required) {
4729 // Calculate the number of bytes needed for the characters in the string while
4730 // observing object alignment.
4731 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
4732 kObjectAlignmentMask;
4733 ASSERT(kShortSize == 2);
4734 // scratch1 = length * 2 + kObjectAlignmentMask.
4735 leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
4737 andp(scratch1, Immediate(~kObjectAlignmentMask));
4738 if (kHeaderAlignment > 0) {
4739 subp(scratch1, Immediate(kHeaderAlignment));
4742 // Allocate two byte string in new space.
4743 Allocate(SeqTwoByteString::kHeaderSize,
4752 // Set the map, length and hash field.
4753 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
4754 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4755 Integer32ToSmi(scratch1, length);
4756 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4757 movp(FieldOperand(result, String::kHashFieldOffset),
4758 Immediate(String::kEmptyHashField));
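// The size computation above amounts to (sketch; scratch1 ends up holding
// the size minus the header, which the Allocate call adds back in):
//
//   size = SeqTwoByteString::kHeaderSize + 2 * length;    // kShortSize == 2
//   size = (size + kObjectAlignmentMask) & ~kObjectAlignmentMask;
//
// AllocateAsciiString below is analogous with one byte per character.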
4762 void MacroAssembler::AllocateAsciiString(Register result,
4767 Label* gc_required) {
4768 // Calculate the number of bytes needed for the characters in the string while
4769 // observing object alignment.
4770 const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
4771 kObjectAlignmentMask;
4772 movl(scratch1, length);
4773 ASSERT(kCharSize == 1);
4774 addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
4775 andp(scratch1, Immediate(~kObjectAlignmentMask));
4776 if (kHeaderAlignment > 0) {
4777 subp(scratch1, Immediate(kHeaderAlignment));
4780 // Allocate ASCII string in new space.
4781 Allocate(SeqOneByteString::kHeaderSize,
4790 // Set the map, length and hash field.
4791 LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
4792 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4793 Integer32ToSmi(scratch1, length);
4794 movp(FieldOperand(result, String::kLengthOffset), scratch1);
4795 movp(FieldOperand(result, String::kHashFieldOffset),
4796 Immediate(String::kEmptyHashField));
4800 void MacroAssembler::AllocateTwoByteConsString(Register result,
4803 Label* gc_required) {
4804 // Allocate a two-byte cons string in new space.
4805 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4808 // Set the map. The other fields are left uninitialized.
4809 LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
4810 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4814 void MacroAssembler::AllocateAsciiConsString(Register result,
4817 Label* gc_required) {
4818 Allocate(ConsString::kSize,
4825 // Set the map. The other fields are left uninitialized.
4826 LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
4827 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4831 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4834 Label* gc_required) {
4835 // Allocate a two-byte sliced string in new space.
4836 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4839 // Set the map. The other fields are left uninitialized.
4840 LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
4841 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4845 void MacroAssembler::AllocateAsciiSlicedString(Register result,
4848 Label* gc_required) {
4849 // Allocate an ASCII sliced string in new space.
4850 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4853 // Set the map. The other fields are left uninitialized.
4854 LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
4855 movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4859 // Copy memory, byte-by-byte, from source to destination. Not optimized for
4860 // long or aligned copies. The contents of scratch and length are destroyed.
4861 // Destination is incremented by length; source, length and scratch are
4862 // clobbered.
4863 // A simpler loop is faster on small copies, but slower on large ones.
4864 // The cld() instruction must have been emitted, to set the direction flag,
4865 // before calling this function.
4866 void MacroAssembler::CopyBytes(Register destination,
4871 ASSERT(min_length >= 0);
4872 if (emit_debug_code()) {
4873 cmpl(length, Immediate(min_length));
4874 Assert(greater_equal, kInvalidMinLength);
4876 Label short_loop, len8, len16, len24, done, short_string;
4878 const int kLongStringLimit = 4 * kPointerSize;
4879 if (min_length <= kLongStringLimit) {
4880 cmpl(length, Immediate(kPointerSize));
4881 j(below, &short_string, Label::kNear);
4884 ASSERT(source.is(rsi));
4885 ASSERT(destination.is(rdi));
4886 ASSERT(length.is(rcx));
4888 if (min_length <= kLongStringLimit) {
4889 cmpl(length, Immediate(2 * kPointerSize));
4890 j(below_equal, &len8, Label::kNear);
4891 cmpl(length, Immediate(3 * kPointerSize));
4892 j(below_equal, &len16, Label::kNear);
4893 cmpl(length, Immediate(4 * kPointerSize));
4894 j(below_equal, &len24, Label::kNear);
4897 // Because source is 8-byte aligned in our uses of this function,
4898 // we keep source aligned for the rep movs operation by copying the odd bytes
4899 // at the end of the ranges.
4900 movp(scratch, length);
4901 shrl(length, Immediate(kPointerSizeLog2));
4903 // Move remaining bytes of length.
4904 andl(scratch, Immediate(kPointerSize - 1));
4905 movp(length, Operand(source, scratch, times_1, -kPointerSize));
4906 movp(Operand(destination, scratch, times_1, -kPointerSize), length);
4907 addp(destination, scratch);
4909 if (min_length <= kLongStringLimit) {
4910 jmp(&done, Label::kNear);
4912 movp(scratch, Operand(source, 2 * kPointerSize));
4913 movp(Operand(destination, 2 * kPointerSize), scratch);
4915 movp(scratch, Operand(source, kPointerSize));
4916 movp(Operand(destination, kPointerSize), scratch);
4918 movp(scratch, Operand(source, 0));
4919 movp(Operand(destination, 0), scratch);
4920 // Move remaining bytes of length.
4921 movp(scratch, Operand(source, length, times_1, -kPointerSize));
4922 movp(Operand(destination, length, times_1, -kPointerSize), scratch);
4923 addp(destination, length);
4924 jmp(&done, Label::kNear);
4926 bind(&short_string);
4927 if (min_length == 0) {
4928 testl(length, length);
4929 j(zero, &done, Label::kNear);
4933 movb(scratch, Operand(source, 0));
4934 movb(Operand(destination, 0), scratch);
4938 j(not_zero, &short_loop);
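// The word-copy paths above avoid a trailing byte loop: after the whole
// words are moved (rep movs or the unrolled len8/len16/len24 moves), the
// remaining 'length & (kPointerSize - 1)' bytes are copied with a single
// word-sized load/store that deliberately overlaps already-copied bytes,
// roughly:
//
//   memcpy(dst, src, len & ~(kPointerSize - 1));    // whole words
//   *(uintptr_t*)(dst + len - kPointerSize) =
//       *(uintptr_t*)(src + len - kPointerSize);    // overlapping tail
//
// This is valid because len >= kPointerSize on those paths.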
4945 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
4946 Register end_offset,
4951 movp(Operand(start_offset, 0), filler);
4952 addp(start_offset, Immediate(kPointerSize));
4954 cmpp(start_offset, end_offset);
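// Equivalent C sketch of the fill loop above:
//
//   for (Object** p = start_offset; p < end_offset; ++p) *p = filler;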
4959 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4960 if (context_chain_length > 0) {
4961 // Move up the chain of contexts to the context containing the slot.
4962 movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4963 for (int i = 1; i < context_chain_length; i++) {
4964 movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4967 // Slot is in the current function context. Move it into the
4968 // destination register in case we store into it (the write barrier
4969 // cannot be allowed to destroy the context in rsi).
4973 // We should not have found a with context by walking the context
4974 // chain (i.e., the static scope chain and runtime context chain do
4975 // not agree). A variable occurring in such a scope should have
4976 // slot type LOOKUP and not CONTEXT.
4977 if (emit_debug_code()) {
4978 CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
4979 Heap::kWithContextMapRootIndex);
4980 Check(not_equal, kVariableResolvedToWithContext);
4985 void MacroAssembler::LoadTransitionedArrayMapConditional(
4986 ElementsKind expected_kind,
4987 ElementsKind transitioned_kind,
4988 Register map_in_out,
4990 Label* no_map_match) {
4991 // Load the global or builtins object from the current context.
4993 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4994 movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4996 // Check that the function's map is the same as the expected cached map.
4997 movp(scratch, Operand(scratch,
4998 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
5000 int offset = expected_kind * kPointerSize +
5001 FixedArrayBase::kHeaderSize;
5002 cmpp(map_in_out, FieldOperand(scratch, offset));
5003 j(not_equal, no_map_match);
5005 // Use the transitioned cached map.
5006 offset = transitioned_kind * kPointerSize +
5007 FixedArrayBase::kHeaderSize;
5008 movp(map_in_out, FieldOperand(scratch, offset));
5013 static const int kRegisterPassedArguments = 4;
5015 static const int kRegisterPassedArguments = 6;
5018 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
5019 // Load the global or builtins object from the current context.
5021 Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
5022 // Load the native context from the global or builtins object.
5023 movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
5024 // Load the function from the native context.
5025 movp(function, Operand(function, Context::SlotOffset(index)));
5029 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
5031 // Load the initial map. The global functions all have initial maps.
5032 movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5033 if (emit_debug_code()) {
5035 CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
5038 Abort(kGlobalFunctionsMustHaveInitialMap);
5044 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
5045 // On Windows 64 stack slots are reserved by the caller for all arguments
5046 // including the ones passed in registers, and space is always allocated for
5047 // the four register arguments even if the function takes fewer than four
5048 // arguments.
5049 // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
5050 // and the caller does not reserve stack slots for them.
5051 ASSERT(num_arguments >= 0);
5053 const int kMinimumStackSlots = kRegisterPassedArguments;
5054 if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
5055 return num_arguments;
5057 if (num_arguments < kRegisterPassedArguments) return 0;
5058 return num_arguments - kRegisterPassedArguments;
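// In other words (sketch; kIsWin64 stands for the _WIN64 condition that
// selects kRegisterPassedArguments above):
//
//   slots = kIsWin64 ? Max(num_arguments, 4)       // shadow space
//                    : Max(num_arguments - 6, 0);  // six register args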
5063 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5066 uint32_t encoding_mask) {
5068 JumpIfNotSmi(string, &is_object);
5073 movp(value, FieldOperand(string, HeapObject::kMapOffset));
5074 movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
5076 andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
5077 cmpp(value, Immediate(encoding_mask));
5079 Check(equal, kUnexpectedStringType);
5081 // The index is assumed to be untagged coming in; tag it to compare with the
5082 // string length without using a temp register. It is restored at the end of
5083 // this function.
5084 Integer32ToSmi(index, index);
5085 SmiCompare(index, FieldOperand(string, String::kLengthOffset));
5086 Check(less, kIndexIsTooLarge);
5088 SmiCompare(index, Smi::FromInt(0));
5089 Check(greater_equal, kIndexIsNegative);
5091 // Restore the index.
5092 SmiToInteger32(index, index);
5096 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
5097 int frame_alignment = OS::ActivationFrameAlignment();
5098 ASSERT(frame_alignment != 0);
5099 ASSERT(num_arguments >= 0);
5101 // Make stack end at alignment and allocate space for arguments and old rsp.
5102 movp(kScratchRegister, rsp);
5103 ASSERT(IsPowerOf2(frame_alignment));
5104 int argument_slots_on_stack =
5105 ArgumentStackSlotsForCFunctionCall(num_arguments);
5106 subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
5107 andp(rsp, Immediate(-frame_alignment));
5108 movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
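// The resulting stack layout is (sketch; the stack grows downward):
//
//   [ old rsp ]         <- rsp + argument_slots_on_stack * kRegisterSize
//   [ argument slots ]
//   [ ... ]             <- rsp, aligned to frame_alignment
//
// CallCFunction below restores rsp from that saved slot after the call.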
5112 void MacroAssembler::CallCFunction(ExternalReference function,
5113 int num_arguments) {
5114 LoadAddress(rax, function);
5115 CallCFunction(rax, num_arguments);
5119 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
5120 ASSERT(has_frame());
5121 // Check stack alignment.
5122 if (emit_debug_code()) {
5123 CheckStackAlignment();
5127 ASSERT(OS::ActivationFrameAlignment() != 0);
5128 ASSERT(num_arguments >= 0);
5129 int argument_slots_on_stack =
5130 ArgumentStackSlotsForCFunctionCall(num_arguments);
5131 movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
5135 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5136 if (r1.is(r2)) return true;
5137 if (r1.is(r3)) return true;
5138 if (r1.is(r4)) return true;
5139 if (r2.is(r3)) return true;
5140 if (r2.is(r4)) return true;
5141 if (r3.is(r4)) return true;
5146 CodePatcher::CodePatcher(byte* address, int size)
5147 : address_(address),
5149 masm_(NULL, address, size + Assembler::kGap) {
5150 // Create a new macro assembler pointing to the address of the code to patch.
5151 // The size is adjusted with kGap in order for the assembler to generate size
5152 // bytes of instructions without failing with buffer size constraints.
5153 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5157 CodePatcher::~CodePatcher() {
5158 // Indicate that code has changed.
5159 CPU::FlushICache(address_, size_);
5161 // Check that the code was patched as expected.
5162 ASSERT(masm_.pc_ == address_ + size_);
5163 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5167 void MacroAssembler::CheckPageFlag(
5172 Label* condition_met,
5173 Label::Distance condition_met_distance) {
5174 ASSERT(cc == zero || cc == not_zero);
5175 if (scratch.is(object)) {
5176 andp(scratch, Immediate(~Page::kPageAlignmentMask));
5178 movp(scratch, Immediate(~Page::kPageAlignmentMask));
5179 andp(scratch, object);
5181 if (mask < (1 << kBitsPerByte)) {
5182 testb(Operand(scratch, MemoryChunk::kFlagsOffset),
5183 Immediate(static_cast<uint8_t>(mask)));
5185 testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
5187 j(cc, condition_met, condition_met_distance);
5191 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5193 Label* if_deprecated) {
5194 if (map->CanBeDeprecated()) {
5196 movl(scratch, FieldOperand(scratch, Map::kBitField3Offset));
5197 andl(scratch, Immediate(Map::Deprecated::kMask));
5198 j(not_zero, if_deprecated);
5203 void MacroAssembler::JumpIfBlack(Register object,
5204 Register bitmap_scratch,
5205 Register mask_scratch,
5207 Label::Distance on_black_distance) {
5208 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
5209 GetMarkBits(object, bitmap_scratch, mask_scratch);
5211 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5212 // The mask_scratch register contains a 1 at the position of the first bit
5213 // and a 0 at all other positions, including the position of the second bit.
5214 movp(rcx, mask_scratch);
5215 // Make rcx into a mask that covers both marking bits using the operation
5216 // rcx = mask | (mask << 1).
5217 leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
5218 // Note that we are using a 4-byte aligned 8-byte load.
5219 andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
5220 cmpp(mask_scratch, rcx);
5221 j(equal, on_black, on_black_distance);
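// Given the two-bit mark encoding asserted above (white = 00, black = 10,
// grey = 11), the test reduces to (sketch):
//
//   bits = bitmap_cell & (mask | (mask << 1));  // extract both mark bits
//   is_black = (bits == mask);                  // first bit set, second clear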
5225 // Detect some, but not all, common pointer-free objects. This is used by the
5226 // incremental write barrier which doesn't care about oddballs (they are always
5227 // marked black immediately so this code is not hit).
5228 void MacroAssembler::JumpIfDataObject(
5231 Label* not_data_object,
5232 Label::Distance not_data_object_distance) {
5233 Label is_data_object;
5234 movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
5235 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5236 j(equal, &is_data_object, Label::kNear);
5237 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5238 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5239 // If it's a string and it's not a cons string then it's an object containing
5240 // character data.
5241 testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
5242 Immediate(kIsIndirectStringMask | kIsNotStringMask));
5243 j(not_zero, not_data_object, not_data_object_distance);
5244 bind(&is_data_object);
5248 void MacroAssembler::GetMarkBits(Register addr_reg,
5249 Register bitmap_reg,
5250 Register mask_reg) {
5251 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
5252 movp(bitmap_reg, addr_reg);
5253 // Sign-extended 32-bit immediate.
5254 andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
5255 movp(rcx, addr_reg);
5257 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
5258 shrl(rcx, Immediate(shift));
5260 Immediate((Page::kPageAlignmentMask >> shift) &
5261 ~(Bitmap::kBytesPerCell - 1)));
5263 addp(bitmap_reg, rcx);
5264 movp(rcx, addr_reg);
5265 shrl(rcx, Immediate(kPointerSizeLog2));
5266 andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
5267 movl(mask_reg, Immediate(1));
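// Schematically, for an address 'addr' with one mark-bit pair per
// pointer-sized word (sketch; callers read the cell at offset
// MemoryChunk::kHeaderSize from bitmap_reg):
//
//   page       = addr & ~kPageAlignmentMask;
//   cell_bytes = ((addr & kPageAlignmentMask) >>
//                 (Bitmap::kBitsPerCellLog2 + kPointerSizeLog2)) *
//                Bitmap::kBytesPerCell;
//   bitmap_reg = page + cell_bytes;
//   mask_reg   = 1 << ((addr >> kPointerSizeLog2) &
//                      (Bitmap::kBitsPerCell - 1));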
5272 void MacroAssembler::EnsureNotWhite(
5274 Register bitmap_scratch,
5275 Register mask_scratch,
5276 Label* value_is_white_and_not_data,
5277 Label::Distance distance) {
5278 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
5279 GetMarkBits(value, bitmap_scratch, mask_scratch);
5281 // If the value is black or grey we don't need to do anything.
5282 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5283 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5284 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5285 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5289 // Since both black and grey have a 1 in the first position and white does
5290 // not have a 1 there we only need to check one bit.
5291 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5292 j(not_zero, &done, Label::kNear);
5294 if (emit_debug_code()) {
5295 // Check for impossible bit pattern.
5298 // Shift left by one via the addp below; may overflow, making the check conservative.
5299 addp(mask_scratch, mask_scratch);
5300 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5301 j(zero, &ok, Label::kNear);
5307 // Value is white. We check whether it is data that doesn't need scanning.
5308 // Currently only checks for HeapNumber and non-cons strings.
5309 Register map = rcx; // Holds map while checking type.
5310 Register length = rcx; // Holds length of object after checking type.
5311 Label not_heap_number;
5312 Label is_data_object;
5314 // Check for a heap number.
5315 movp(map, FieldOperand(value, HeapObject::kMapOffset));
5316 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
5317 j(not_equal, ¬_heap_number, Label::kNear);
5318 movp(length, Immediate(HeapNumber::kSize));
5319 jmp(&is_data_object, Label::kNear);
5321 bind(¬_heap_number);
5322 // Check for strings.
5323 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5324 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5325 // If it's a string and it's not a cons string then it's an object containing
5326 // character data.
5327 Register instance_type = rcx;
5328 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
5329 testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
5330 j(not_zero, value_is_white_and_not_data);
5331 // It's a non-indirect (non-cons and non-slice) string.
5332 // If it's external, the length is just ExternalString::kSize.
5333 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5335 // External strings are the only ones with the kExternalStringTag bit
5336 // set.
5337 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
5338 ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
5339 testb(instance_type, Immediate(kExternalStringTag));
5340 j(zero, ¬_external, Label::kNear);
5341 movp(length, Immediate(ExternalString::kSize));
5342 jmp(&is_data_object, Label::kNear);
5344 bind(¬_external);
5345 // Sequential string, either ASCII or UC16.
5346 ASSERT(kOneByteStringTag == 0x04);
5347 andp(length, Immediate(kStringEncodingMask));
5348 xorp(length, Immediate(kStringEncodingMask));
5349 addp(length, Immediate(0x04));
5350 // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
5351 imulp(length, FieldOperand(value, String::kLengthOffset));
5352 shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
5353 addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
5354 andp(length, Immediate(~kObjectAlignmentMask));
5356 bind(&is_data_object);
5357 // Value is a data object, and it is white. Mark it black. Since we know
5358 // that the object is white we can make it black by flipping one bit.
5359 orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5361 andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
5362 addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
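// The branch-free size computation above exploits the instance-type
// encoding (sketch):
//
//   t = (instance_type & kStringEncodingMask) ^ kStringEncodingMask;
//   t += 4;                        // 4 if ASCII, 8 if UC16: char size << 2
//   size = (t * smi_length) >> (2 + kSmiTagSize + kSmiShiftSize);
//                                  // == length * char_size
//   size = (size + SeqString::kHeaderSize + kObjectAlignmentMask) &
//          ~kObjectAlignmentMask;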
5368 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5370 Register empty_fixed_array_value = r8;
5371 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5374 // Check if the enum length field is properly initialized, indicating that
5375 // there is an enum cache.
5376 movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
5378 EnumLength(rdx, rbx);
5379 Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
5380 j(equal, call_runtime);
5386 movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
5388 // For all objects but the receiver, check that the cache is empty.
5389 EnumLength(rdx, rbx);
5390 Cmp(rdx, Smi::FromInt(0));
5391 j(not_equal, call_runtime);
5395 // Check that there are no elements. Register rcx contains the current JS
5396 // object we've reached through the prototype chain.
5398 cmpp(empty_fixed_array_value,
5399 FieldOperand(rcx, JSObject::kElementsOffset));
5400 j(equal, &no_elements);
5402 // Second chance, the object may be using the empty slow element dictionary.
5403 LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
5404 cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
5405 j(not_equal, call_runtime);
5408 movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
5409 cmpp(rcx, null_value);
5410 j(not_equal, &next);
5413 void MacroAssembler::TestJSArrayForAllocationMemento(
5414 Register receiver_reg,
5415 Register scratch_reg,
5416 Label* no_memento_found) {
5417 ExternalReference new_space_start =
5418 ExternalReference::new_space_start(isolate());
5419 ExternalReference new_space_allocation_top =
5420 ExternalReference::new_space_allocation_top_address(isolate());
5422 leap(scratch_reg, Operand(receiver_reg,
5423 JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5424 Move(kScratchRegister, new_space_start);
5425 cmpp(scratch_reg, kScratchRegister);
5426 j(less, no_memento_found);
5427 cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
5428 j(greater, no_memento_found);
5429 CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
5430 Heap::kAllocationMementoMapRootIndex);
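// Sketch of the check above: a memento, if present, sits immediately after
// the JSArray, and is only trusted when it lies inside the currently used
// part of new space:
//
//   candidate = receiver + JSArray::kSize;    // untagged end of the array
//   if (candidate + AllocationMemento::kSize is outside
//       [new_space_start, new_space_allocation_top]) goto no_memento_found;
//   found = (candidate->map == allocation_memento_map);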
5434 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5439 ASSERT(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
5440 ASSERT(!scratch1.is(scratch0));
5441 Register current = scratch0;
5444 movp(current, object);
5446 // Loop based on the map going up the prototype chain.
5448 movp(current, FieldOperand(current, HeapObject::kMapOffset));
5449 movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
5450 DecodeField<Map::ElementsKindBits>(scratch1);
5451 cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
5453 movp(current, FieldOperand(current, Map::kPrototypeOffset));
5454 CompareRoot(current, Heap::kNullValueRootIndex);
5455 j(not_equal, &loop_again);
5459 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
5460 ASSERT(!dividend.is(rax));
5461 ASSERT(!dividend.is(rdx));
5462 MultiplierAndShift ms(divisor);
5463 movl(rax, Immediate(ms.multiplier()));
5465 if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
5466 if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
5467 if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
5468 movl(rax, dividend);
5469 shrl(rax, Immediate(31));
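// This is the usual divide-by-constant-via-multiplication scheme (sketch;
// the quotient is left in rdx):
//
//   q = (int32_t)(((int64_t)ms.multiplier() * dividend) >> 32);
//   if (divisor > 0 && ms.multiplier() < 0) q += dividend;
//   if (divisor < 0 && ms.multiplier() > 0) q -= dividend;
//   q >>= ms.shift();
//   q += (uint32_t)dividend >> 31;   // add the sign bit to round toward zero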
5474 } } // namespace v8::internal
5476 #endif // V8_TARGET_ARCH_X64