1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #if V8_TARGET_ARCH_IA32
7 #include "src/base/bits.h"
8 #include "src/base/division-by-constant.h"
9 #include "src/bootstrapper.h"
10 #include "src/codegen.h"
11 #include "src/cpu-profiler.h"
12 #include "src/debug/debug.h"
13 #include "src/ia32/frames-ia32.h"
14 #include "src/ia32/macro-assembler-ia32.h"
15 #include "src/runtime/runtime.h"
20 // -------------------------------------------------------------------------
21 // MacroAssembler implementation.
23 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
24 : Assembler(arg_isolate, buffer, size),
25 generating_stub_(false),
27 if (isolate() != NULL) {
28 // TODO(titzer): should we just use a null handle here instead?
29 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
35 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
36 DCHECK(!r.IsDouble());
39 } else if (r.IsUInteger8()) {
41 } else if (r.IsInteger16()) {
43 } else if (r.IsUInteger16()) {
51 void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
52 DCHECK(!r.IsDouble());
53 if (r.IsInteger8() || r.IsUInteger8()) {
55 } else if (r.IsInteger16() || r.IsUInteger16()) {
58 if (r.IsHeapObject()) {
60 } else if (r.IsSmi()) {
68 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
69 if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
70 mov(destination, isolate()->heap()->root_handle(index));
73 ExternalReference roots_array_start =
74 ExternalReference::roots_array_start(isolate());
75 mov(destination, Immediate(index));
76 mov(destination, Operand::StaticArray(destination,
82 void MacroAssembler::StoreRoot(Register source,
84 Heap::RootListIndex index) {
85 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
86 ExternalReference roots_array_start =
87 ExternalReference::roots_array_start(isolate());
88 mov(scratch, Immediate(index));
89 mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
94 void MacroAssembler::CompareRoot(Register with,
96 Heap::RootListIndex index) {
97 ExternalReference roots_array_start =
98 ExternalReference::roots_array_start(isolate());
99 mov(scratch, Immediate(index));
100 cmp(with, Operand::StaticArray(scratch,
106 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
107 DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
108 cmp(with, isolate()->heap()->root_handle(index));
112 void MacroAssembler::CompareRoot(const Operand& with,
113 Heap::RootListIndex index) {
114 DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
115 cmp(with, isolate()->heap()->root_handle(index));
119 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
120 DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
121 Push(isolate()->heap()->root_handle(index));
125 void MacroAssembler::InNewSpace(
129 Label* condition_met,
130 Label::Distance condition_met_distance) {
131 DCHECK(cc == equal || cc == not_equal);
132 if (scratch.is(object)) {
133 and_(scratch, Immediate(~Page::kPageAlignmentMask));
135 mov(scratch, Immediate(~Page::kPageAlignmentMask));
136 and_(scratch, object);
138 // Check that we can use a test_b.
139 DCHECK(MemoryChunk::IN_FROM_SPACE < 8);
140 DCHECK(MemoryChunk::IN_TO_SPACE < 8);
141 int mask = (1 << MemoryChunk::IN_FROM_SPACE)
142 | (1 << MemoryChunk::IN_TO_SPACE);
143 // If non-zero, the page belongs to new-space.
144 test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
145 static_cast<uint8_t>(mask));
146 j(cc, condition_met, condition_met_distance);
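// A rough C++ sketch of the check emitted above (illustrative only; the flag
// access is paraphrased rather than real MemoryChunk API):
//
//   uintptr_t page = reinterpret_cast<uintptr_t>(object) &
//                    ~Page::kPageAlignmentMask;
//   int flags = *reinterpret_cast<uint8_t*>(page + MemoryChunk::kFlagsOffset);
//   bool in_new_space = (flags & ((1 << MemoryChunk::IN_FROM_SPACE) |
//                                 (1 << MemoryChunk::IN_TO_SPACE))) != 0;
//   // cc == not_equal jumps to |condition_met| when in_new_space is true,
//   // cc == equal jumps when it is false.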
150 void MacroAssembler::RememberedSetHelper(
151 Register object, // Only used for debug checks.
154 SaveFPRegsMode save_fp,
155 MacroAssembler::RememberedSetFinalAction and_then) {
157 if (emit_debug_code()) {
159 JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
163 // Load store buffer top.
164 ExternalReference store_buffer =
165 ExternalReference::store_buffer_top(isolate());
166 mov(scratch, Operand::StaticVariable(store_buffer));
167 // Store pointer to buffer.
168 mov(Operand(scratch, 0), addr);
169 // Increment buffer top.
170 add(scratch, Immediate(kPointerSize));
171 // Write back new top of buffer.
172 mov(Operand::StaticVariable(store_buffer), scratch);
173 // Call stub on end of buffer.
174 // Check for end of buffer.
175 test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
176 if (and_then == kReturnAtEnd) {
177 Label buffer_overflowed;
178 j(not_equal, &buffer_overflowed, Label::kNear);
180 bind(&buffer_overflowed);
182 DCHECK(and_then == kFallThroughAtEnd);
183 j(equal, &done, Label::kNear);
185 StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
186 CallStub(&store_buffer_overflow);
187 if (and_then == kReturnAtEnd) {
190 DCHECK(and_then == kFallThroughAtEnd);
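// What the fast path above amounts to, in C-like pseudo code (illustrative
// only):
//
//   *store_buffer_top = addr;             // record the written slot's address
//   store_buffer_top += kPointerSize;     // bump the buffer top
//   if (store_buffer_top & StoreBuffer::kStoreBufferOverflowBit) {
//     CallStub(StoreBufferOverflowStub);  // buffer needs draining
//   }
//   // kReturnAtEnd then returns to the caller; kFallThroughAtEnd falls
//   // through instead.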
196 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
197 XMMRegister scratch_reg,
198 Register result_reg) {
201 xorps(scratch_reg, scratch_reg);
202 cvtsd2si(result_reg, input_reg);
203 test(result_reg, Immediate(0xFFFFFF00));
204 j(zero, &done, Label::kNear);
205 cmp(result_reg, Immediate(0x1));
206 j(overflow, &conv_failure, Label::kNear);
207 mov(result_reg, Immediate(0));
208 setcc(sign, result_reg);
209 sub(result_reg, Immediate(1));
210 and_(result_reg, Immediate(255));
211 jmp(&done, Label::kNear);
213 Move(result_reg, Immediate(0));
214 ucomisd(input_reg, scratch_reg);
215 j(below, &done, Label::kNear);
216 Move(result_reg, Immediate(255));
221 void MacroAssembler::ClampUint8(Register reg) {
223 test(reg, Immediate(0xFFFFFF00));
224 j(zero, &done, Label::kNear);
225 setcc(negative, reg); // 1 if negative, 0 if positive.
226 dec_b(reg); // 0 if negative, 255 if positive.
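// The integer clamp above, as plain C++ for reference (illustrative only):
//
//   int32_t ClampToUint8(int32_t v) {
//     if ((v & 0xFFFFFF00) == 0) return v;  // already in [0, 255]
//     return v < 0 ? 0 : 255;               // saturate
//   }
//
// The setcc(negative)/dec_b pair computes the second case without a branch:
// 1 - 1 == 0 for negative inputs, 0 - 1 == 255 (as a byte) for large ones.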
231 void MacroAssembler::SlowTruncateToI(Register result_reg,
234 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
235 call(stub.GetCode(), RelocInfo::CODE_TARGET);
239 void MacroAssembler::TruncateDoubleToI(Register result_reg,
240 XMMRegister input_reg) {
242 cvttsd2si(result_reg, Operand(input_reg));
243 cmp(result_reg, 0x1);
244 j(no_overflow, &done, Label::kNear);
246 sub(esp, Immediate(kDoubleSize));
247 movsd(MemOperand(esp, 0), input_reg);
248 SlowTruncateToI(result_reg, esp, 0);
249 add(esp, Immediate(kDoubleSize));
254 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
256 MinusZeroMode minus_zero_mode,
257 Label* lost_precision, Label* is_nan,
258 Label* minus_zero, Label::Distance dst) {
259 DCHECK(!input_reg.is(scratch));
260 cvttsd2si(result_reg, Operand(input_reg));
261 Cvtsi2sd(scratch, Operand(result_reg));
262 ucomisd(scratch, input_reg);
263 j(not_equal, lost_precision, dst);
264 j(parity_even, is_nan, dst);
265 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
267 // The integer converted back is equal to the original. We
268 // only have to test if we got -0 as an input.
269 test(result_reg, Operand(result_reg));
270 j(not_zero, &done, Label::kNear);
271 movmskpd(result_reg, input_reg);
272 // Bit 0 contains the sign of the double in input_reg.
273 // If input was positive, we are ok and return 0, otherwise
274 // jump to minus_zero.
276 j(not_zero, minus_zero, dst);
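// The sequence of checks above, sketched in C++ (illustrative only):
//
//   int32_t result = static_cast<int32_t>(input);           // cvttsd2si
//   if (std::isnan(input)) goto is_nan;                      // parity_even
//   if (static_cast<double>(result) != input) goto lost_precision;
//   if (minus_zero_mode == FAIL_ON_MINUS_ZERO && result == 0 &&
//       std::signbit(input)) goto minus_zero;                // movmskpd bit 0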
282 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
283 Register input_reg) {
284 Label done, slow_case;
286 if (CpuFeatures::IsSupported(SSE3)) {
287 CpuFeatureScope scope(this, SSE3);
289 // Use more powerful conversion when sse3 is available.
290 // Load x87 register with heap number.
291 fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
292 // Get exponent alone and check for too-big exponent.
293 mov(result_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
294 and_(result_reg, HeapNumber::kExponentMask);
295 const uint32_t kTooBigExponent =
296 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
297 cmp(Operand(result_reg), Immediate(kTooBigExponent));
298 j(greater_equal, &slow_case, Label::kNear);
300 // Reserve space for 64 bit answer.
301 sub(Operand(esp), Immediate(kDoubleSize));
302 // Do conversion, which cannot fail because we checked the exponent.
303 fisttp_d(Operand(esp, 0));
304 mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
305 add(Operand(esp), Immediate(kDoubleSize));
306 jmp(&done, Label::kNear);
310 if (input_reg.is(result_reg)) {
311 // Input is clobbered. Restore number from fpu stack
312 sub(Operand(esp), Immediate(kDoubleSize));
313 fstp_d(Operand(esp, 0));
314 SlowTruncateToI(result_reg, esp, 0);
315 add(esp, Immediate(kDoubleSize));
318 SlowTruncateToI(result_reg, input_reg);
321 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
322 cvttsd2si(result_reg, Operand(xmm0));
323 cmp(result_reg, 0x1);
324 j(no_overflow, &done, Label::kNear);
325 // Check if the input was 0x80000000 (kMinInt).
326 // If not, the conversion overflowed and we fall back to the slow case.
327 ExternalReference min_int = ExternalReference::address_of_min_int();
328 ucomisd(xmm0, Operand::StaticVariable(min_int));
329 j(not_equal, &slow_case, Label::kNear);
330 j(parity_even, &slow_case, Label::kNear); // NaN.
331 jmp(&done, Label::kNear);
335 if (input_reg.is(result_reg)) {
336 // Input is clobbered. Restore number from double scratch.
337 sub(esp, Immediate(kDoubleSize));
338 movsd(MemOperand(esp, 0), xmm0);
339 SlowTruncateToI(result_reg, esp, 0);
340 add(esp, Immediate(kDoubleSize));
342 SlowTruncateToI(result_reg, input_reg);
349 void MacroAssembler::LoadUint32(XMMRegister dst, const Operand& src) {
351 cmp(src, Immediate(0));
352 ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
354 j(not_sign, &done, Label::kNear);
355 addsd(dst, Operand::StaticVariable(uint32_bias));
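// Idea behind the bias: cvtsi2sd treats the 32-bit source as signed, so
// values with the top bit set come out 2^32 too small; adding the uint32 bias
// constant (4294967296.0) corrects them. Roughly (illustrative only):
//
//   double d = static_cast<double>(static_cast<int32_t>(u));  // signed convert
//   if (static_cast<int32_t>(u) < 0) d += 4294967296.0;       // add 2^32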
360 void MacroAssembler::RecordWriteArray(
364 SaveFPRegsMode save_fp,
365 RememberedSetAction remembered_set_action,
367 PointersToHereCheck pointers_to_here_check_for_value) {
368 // First, check if a write barrier is even needed. The tests below
369 // catch stores of Smis.
372 // Skip barrier if writing a smi.
373 if (smi_check == INLINE_SMI_CHECK) {
374 DCHECK_EQ(0, kSmiTag);
375 test(value, Immediate(kSmiTagMask));
379 // Array access: calculate the destination address in the same manner as
380 // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
381 // into an array of words.
382 Register dst = index;
383 lea(dst, Operand(object, index, times_half_pointer_size,
384 FixedArray::kHeaderSize - kHeapObjectTag));
386 RecordWrite(object, dst, value, save_fp, remembered_set_action,
387 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
391 // Clobber clobbered input registers when running with the debug-code flag
392 // turned on to provoke errors.
393 if (emit_debug_code()) {
394 mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
395 mov(index, Immediate(bit_cast<int32_t>(kZapValue)));
400 void MacroAssembler::RecordWriteField(
405 SaveFPRegsMode save_fp,
406 RememberedSetAction remembered_set_action,
408 PointersToHereCheck pointers_to_here_check_for_value) {
409 // First, check if a write barrier is even needed. The tests below
410 // catch stores of Smis.
413 // Skip barrier if writing a smi.
414 if (smi_check == INLINE_SMI_CHECK) {
415 JumpIfSmi(value, &done, Label::kNear);
418 // Although the object register is tagged, the offset is relative to the start
419 // of the object, so the offset must be a multiple of kPointerSize.
420 DCHECK(IsAligned(offset, kPointerSize));
422 lea(dst, FieldOperand(object, offset));
423 if (emit_debug_code()) {
425 test_b(dst, (1 << kPointerSizeLog2) - 1);
426 j(zero, &ok, Label::kNear);
431 RecordWrite(object, dst, value, save_fp, remembered_set_action,
432 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
436 // Clobber clobbered input registers when running with the debug-code flag
437 // turned on to provoke errors.
438 if (emit_debug_code()) {
439 mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
440 mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
445 void MacroAssembler::RecordWriteForMap(
450 SaveFPRegsMode save_fp) {
453 Register address = scratch1;
454 Register value = scratch2;
455 if (emit_debug_code()) {
457 lea(address, FieldOperand(object, HeapObject::kMapOffset));
458 test_b(address, (1 << kPointerSizeLog2) - 1);
459 j(zero, &ok, Label::kNear);
464 DCHECK(!object.is(value));
465 DCHECK(!object.is(address));
466 DCHECK(!value.is(address));
467 AssertNotSmi(object);
469 if (!FLAG_incremental_marking) {
473 // Compute the address.
474 lea(address, FieldOperand(object, HeapObject::kMapOffset));
476 // A single check of the map page's interesting flag suffices, since it is
477 // only set during incremental collection, in which case the interesting
478 // flag of the object's page is guaranteed to be set as well. This
479 // optimization relies on the fact that maps can never be in new space.
480 DCHECK(!isolate()->heap()->InNewSpace(*map));
481 CheckPageFlagForMap(map,
482 MemoryChunk::kPointersToHereAreInterestingMask,
487 RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
493 // Count number of write barriers in generated code.
494 isolate()->counters()->write_barriers_static()->Increment();
495 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
497 // Clobber clobbered input registers when running with the debug-code flag
498 // turned on to provoke errors.
499 if (emit_debug_code()) {
500 mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
501 mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
502 mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
507 void MacroAssembler::RecordWrite(
511 SaveFPRegsMode fp_mode,
512 RememberedSetAction remembered_set_action,
514 PointersToHereCheck pointers_to_here_check_for_value) {
515 DCHECK(!object.is(value));
516 DCHECK(!object.is(address));
517 DCHECK(!value.is(address));
518 AssertNotSmi(object);
520 if (remembered_set_action == OMIT_REMEMBERED_SET &&
521 !FLAG_incremental_marking) {
525 if (emit_debug_code()) {
527 cmp(value, Operand(address, 0));
528 j(equal, &ok, Label::kNear);
533 // First, check if a write barrier is even needed. The tests below
534 // catch stores of Smis and stores into young gen.
537 if (smi_check == INLINE_SMI_CHECK) {
538 // Skip barrier if writing a smi.
539 JumpIfSmi(value, &done, Label::kNear);
542 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
544 value, // Used as scratch.
545 MemoryChunk::kPointersToHereAreInterestingMask,
550 CheckPageFlag(object,
551 value, // Used as scratch.
552 MemoryChunk::kPointersFromHereAreInterestingMask,
557 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
563 // Count number of write barriers in generated code.
564 isolate()->counters()->write_barriers_static()->Increment();
565 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
567 // Clobber clobbered registers when running with the debug-code flag
568 // turned on to provoke errors.
569 if (emit_debug_code()) {
570 mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
571 mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
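// Overall shape of the write barrier emitted above (illustrative sketch):
//
//   if (value is a smi) return;                            // INLINE_SMI_CHECK
//   if (!(page(value)->flags & kPointersToHereAreInterestingMask)) return;
//   if (!(page(object)->flags & kPointersFromHereAreInterestingMask)) return;
//   RecordWriteStub(object, value, address, ...);          // slow path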
576 void MacroAssembler::DebugBreak() {
577 Move(eax, Immediate(0));
578 mov(ebx, Immediate(ExternalReference(Runtime::kHandleDebuggerStatement,
580 CEntryStub ces(isolate(), 1);
581 call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
585 void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
591 bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
592 static const int kMaxImmediateBits = 17;
593 if (!RelocInfo::IsNone(x.rmode_)) return false;
594 return !is_intn(x.x_, kMaxImmediateBits);
598 void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
599 if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
600 Move(dst, Immediate(x.x_ ^ jit_cookie()));
601 xor_(dst, jit_cookie());
608 void MacroAssembler::SafePush(const Immediate& x) {
609 if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
610 push(Immediate(x.x_ ^ jit_cookie()));
611 xor_(Operand(esp, 0), Immediate(jit_cookie()));
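// Both SafeMove and SafePush keep large, potentially attacker-influenced
// constants from appearing verbatim in the instruction stream: the value is
// emitted XOR-ed with the JIT cookie and then un-XOR-ed in place, since
// (x ^ cookie) ^ cookie == x. For SafePush that is (illustrative):
//
//   push(Immediate(x ^ cookie));               // masked value in the code
//   xor_(Operand(esp, 0), Immediate(cookie));  // unmask it on the stack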
618 void MacroAssembler::CmpObjectType(Register heap_object,
621 mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
622 CmpInstanceType(map, type);
626 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
627 cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
628 static_cast<int8_t>(type));
632 void MacroAssembler::CheckFastElements(Register map,
634 Label::Distance distance) {
635 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
636 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
637 STATIC_ASSERT(FAST_ELEMENTS == 2);
638 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
639 cmpb(FieldOperand(map, Map::kBitField2Offset),
640 Map::kMaximumBitField2FastHoleyElementValue);
641 j(above, fail, distance);
645 void MacroAssembler::CheckFastObjectElements(Register map,
647 Label::Distance distance) {
648 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
649 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
650 STATIC_ASSERT(FAST_ELEMENTS == 2);
651 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
652 cmpb(FieldOperand(map, Map::kBitField2Offset),
653 Map::kMaximumBitField2FastHoleySmiElementValue);
654 j(below_equal, fail, distance);
655 cmpb(FieldOperand(map, Map::kBitField2Offset),
656 Map::kMaximumBitField2FastHoleyElementValue);
657 j(above, fail, distance);
661 void MacroAssembler::CheckFastSmiElements(Register map,
663 Label::Distance distance) {
664 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
665 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
666 cmpb(FieldOperand(map, Map::kBitField2Offset),
667 Map::kMaximumBitField2FastHoleySmiElementValue);
668 j(above, fail, distance);
672 void MacroAssembler::StoreNumberToDoubleElements(
673 Register maybe_number,
677 XMMRegister scratch2,
679 int elements_offset) {
680 Label smi_value, done;
681 JumpIfSmi(maybe_number, &smi_value, Label::kNear);
683 CheckMap(maybe_number,
684 isolate()->factory()->heap_number_map(),
688 // Double value, turn potential sNaN into qNaN.
690 mulsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
691 jmp(&done, Label::kNear);
694 // Value is a smi. Convert to a double and store.
695 // Preserve original value.
696 mov(scratch1, maybe_number);
698 Cvtsi2sd(scratch2, scratch1);
700 movsd(FieldOperand(elements, key, times_4,
701 FixedDoubleArray::kHeaderSize - elements_offset),
706 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
707 cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
711 void MacroAssembler::CheckMap(Register obj,
714 SmiCheckType smi_check_type) {
715 if (smi_check_type == DO_SMI_CHECK) {
716 JumpIfSmi(obj, fail);
719 CompareMap(obj, map);
724 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
725 Register scratch2, Handle<WeakCell> cell,
726 Handle<Code> success,
727 SmiCheckType smi_check_type) {
729 if (smi_check_type == DO_SMI_CHECK) {
730 JumpIfSmi(obj, &fail);
732 mov(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
733 CmpWeakValue(scratch1, cell, scratch2);
740 Condition MacroAssembler::IsObjectStringType(Register heap_object,
742 Register instance_type) {
743 mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
744 movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
745 STATIC_ASSERT(kNotStringTag != 0);
746 test(instance_type, Immediate(kIsNotStringMask));
751 Condition MacroAssembler::IsObjectNameType(Register heap_object,
753 Register instance_type) {
754 mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
755 movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
756 cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
761 void MacroAssembler::FCmp() {
767 void MacroAssembler::AssertNumber(Register object) {
768 if (emit_debug_code()) {
770 JumpIfSmi(object, &ok);
771 cmp(FieldOperand(object, HeapObject::kMapOffset),
772 isolate()->factory()->heap_number_map());
773 Check(equal, kOperandNotANumber);
779 void MacroAssembler::AssertSmi(Register object) {
780 if (emit_debug_code()) {
781 test(object, Immediate(kSmiTagMask));
782 Check(equal, kOperandIsNotASmi);
787 void MacroAssembler::AssertString(Register object) {
788 if (emit_debug_code()) {
789 test(object, Immediate(kSmiTagMask));
790 Check(not_equal, kOperandIsASmiAndNotAString);
792 mov(object, FieldOperand(object, HeapObject::kMapOffset));
793 CmpInstanceType(object, FIRST_NONSTRING_TYPE);
795 Check(below, kOperandIsNotAString);
800 void MacroAssembler::AssertName(Register object) {
801 if (emit_debug_code()) {
802 test(object, Immediate(kSmiTagMask));
803 Check(not_equal, kOperandIsASmiAndNotAName);
805 mov(object, FieldOperand(object, HeapObject::kMapOffset));
806 CmpInstanceType(object, LAST_NAME_TYPE);
808 Check(below_equal, kOperandIsNotAName);
813 void MacroAssembler::AssertFunction(Register object) {
814 if (emit_debug_code()) {
815 test(object, Immediate(kSmiTagMask));
816 Check(not_equal, kOperandIsASmiAndNotAFunction);
818 CmpObjectType(object, JS_FUNCTION_TYPE, object);
820 Check(not_equal, kOperandIsNotAFunction);
825 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
826 if (emit_debug_code()) {
828 AssertNotSmi(object);
829 cmp(object, isolate()->factory()->undefined_value());
830 j(equal, &done_checking);
831 cmp(FieldOperand(object, 0),
832 Immediate(isolate()->factory()->allocation_site_map()));
833 Assert(equal, kExpectedUndefinedOrCell);
834 bind(&done_checking);
839 void MacroAssembler::AssertNotSmi(Register object) {
840 if (emit_debug_code()) {
841 test(object, Immediate(kSmiTagMask));
842 Check(not_equal, kOperandIsASmi);
847 void MacroAssembler::StubPrologue() {
848 push(ebp); // Caller's frame pointer.
850 push(esi); // Callee's context.
851 push(Immediate(Smi::FromInt(StackFrame::STUB)));
855 void MacroAssembler::Prologue(bool code_pre_aging) {
856 PredictableCodeSizeScope predictable_code_size_scope(this,
857 kNoCodeAgeSequenceLength);
858 if (code_pre_aging) {
860 call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
861 RelocInfo::CODE_AGE_SEQUENCE);
862 Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
864 push(ebp); // Caller's frame pointer.
866 push(esi); // Callee's context.
867 push(edi); // Callee's JS function.
872 void MacroAssembler::EnterFrame(StackFrame::Type type,
873 bool load_constant_pool_pointer_reg) {
874 // Out-of-line constant pool not implemented on ia32.
879 void MacroAssembler::EnterFrame(StackFrame::Type type) {
883 push(Immediate(Smi::FromInt(type)));
884 push(Immediate(CodeObject()));
885 if (emit_debug_code()) {
886 cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
887 Check(not_equal, kCodeObjectNotProperlyPatched);
892 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
893 if (emit_debug_code()) {
894 cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
895 Immediate(Smi::FromInt(type)));
896 Check(equal, kStackFrameTypesMustMatch);
902 void MacroAssembler::EnterExitFramePrologue() {
903 // Set up the frame structure on the stack.
904 DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
905 DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
906 DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
910 // Reserve room for entry stack pointer and push the code object.
911 DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
912 push(Immediate(0)); // Saved entry sp, patched before call.
913 push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
915 // Save the frame pointer and the context in top.
916 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
917 ExternalReference context_address(Isolate::kContextAddress, isolate());
918 ExternalReference c_function_address(Isolate::kCFunctionAddress, isolate());
919 mov(Operand::StaticVariable(c_entry_fp_address), ebp);
920 mov(Operand::StaticVariable(context_address), esi);
921 mov(Operand::StaticVariable(c_function_address), ebx);
925 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
926 // Optionally save all XMM registers.
928 int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
930 sub(esp, Immediate(space));
931 const int offset = -2 * kPointerSize;
932 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
933 XMMRegister reg = XMMRegister::from_code(i);
934 movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
937 sub(esp, Immediate(argc * kPointerSize));
940 // Get the required frame alignment for the OS.
941 const int kFrameAlignment = base::OS::ActivationFrameAlignment();
942 if (kFrameAlignment > 0) {
943 DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
944 and_(esp, -kFrameAlignment);
947 // Patch the saved entry sp.
948 mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
952 void MacroAssembler::EnterExitFrame(bool save_doubles) {
953 EnterExitFramePrologue();
955 // Set up argc and argv in callee-saved registers.
956 int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
958 lea(esi, Operand(ebp, eax, times_4, offset));
960 // Reserve space for argc, argv and isolate.
961 EnterExitFrameEpilogue(3, save_doubles);
965 void MacroAssembler::EnterApiExitFrame(int argc) {
966 EnterExitFramePrologue();
967 EnterExitFrameEpilogue(argc, false);
971 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
972 // Optionally restore all XMM registers.
974 const int offset = -2 * kPointerSize;
975 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
976 XMMRegister reg = XMMRegister::from_code(i);
977 movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
981 // Get the return address from the stack and restore the frame pointer.
982 mov(ecx, Operand(ebp, 1 * kPointerSize));
983 mov(ebp, Operand(ebp, 0 * kPointerSize));
985 // Pop the arguments and the receiver from the caller stack.
986 lea(esp, Operand(esi, 1 * kPointerSize));
988 // Push the return address to get ready to return.
991 LeaveExitFrameEpilogue(true);
995 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
996 // Restore current context from top and clear it in debug mode.
997 ExternalReference context_address(Isolate::kContextAddress, isolate());
998 if (restore_context) {
999 mov(esi, Operand::StaticVariable(context_address));
1002 mov(Operand::StaticVariable(context_address), Immediate(0));
1005 // Clear the top frame.
1006 ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
1008 mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
1012 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
1016 LeaveExitFrameEpilogue(restore_context);
1020 void MacroAssembler::PushStackHandler() {
1021 // Adjust this code if not the case.
1022 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1023 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1025 // Link the current handler as the next handler.
1026 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
1027 push(Operand::StaticVariable(handler_address));
1029 // Set this new handler as the current one.
1030 mov(Operand::StaticVariable(handler_address), esp);
1034 void MacroAssembler::PopStackHandler() {
1035 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1036 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
1037 pop(Operand::StaticVariable(handler_address));
1038 add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
1042 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1046 Label same_contexts;
1048 DCHECK(!holder_reg.is(scratch1));
1049 DCHECK(!holder_reg.is(scratch2));
1050 DCHECK(!scratch1.is(scratch2));
1052 // Load current lexical context from the stack frame.
1053 mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
1055 // When generating debug code, make sure the lexical context is set.
1056 if (emit_debug_code()) {
1057 cmp(scratch1, Immediate(0));
1058 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
1060 // Load the native context of the current context.
1062 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1063 mov(scratch1, FieldOperand(scratch1, offset));
1064 mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));
1066 // Check the context is a native context.
1067 if (emit_debug_code()) {
1068 // Read the first word and compare to native_context_map.
1069 cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
1070 isolate()->factory()->native_context_map());
1071 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
1074 // Check if both contexts are the same.
1075 cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1076 j(equal, &same_contexts);
1078 // Compare security tokens, save holder_reg on the stack so we can use it
1079 // as a temporary register.
1081 // Check that the security token in the calling global object is
1082 // compatible with the security token in the receiving global
1085 FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1087 // Check the context is a native context.
1088 if (emit_debug_code()) {
1089 cmp(scratch2, isolate()->factory()->null_value());
1090 Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
1092 // Read the first word and compare to native_context_map().
1093 cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
1094 isolate()->factory()->native_context_map());
1095 Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
1098 int token_offset = Context::kHeaderSize +
1099 Context::SECURITY_TOKEN_INDEX * kPointerSize;
1100 mov(scratch1, FieldOperand(scratch1, token_offset));
1101 cmp(scratch1, FieldOperand(scratch2, token_offset));
1104 bind(&same_contexts);
1108 // Compute the hash code from the untagged key. This must be kept in sync with
1109 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1110 // code-stub-hydrogen.cc
1112 // Note: r0 will contain hash code
1113 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
1114 // Xor original key with a seed.
1115 if (serializer_enabled()) {
1116 ExternalReference roots_array_start =
1117 ExternalReference::roots_array_start(isolate());
1118 mov(scratch, Immediate(Heap::kHashSeedRootIndex));
1120 Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
1124 int32_t seed = isolate()->heap()->HashSeed();
1125 xor_(r0, Immediate(seed));
1128 // hash = ~hash + (hash << 15);
1133 // hash = hash ^ (hash >> 12);
1137 // hash = hash + (hash << 2);
1138 lea(r0, Operand(r0, r0, times_4, 0));
1139 // hash = hash ^ (hash >> 4);
1143 // hash = hash * 2057;
1145 // hash = hash ^ (hash >> 16);
1149 and_(r0, 0x3fffffff);
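// The steps spelled out in the comments above, collected into one plain C++
// sketch (mirrors ComputeIntegerHash in utils.h; the helper name is
// illustrative):
//
//   uint32_t IntegerHashSketch(uint32_t key, uint32_t seed) {
//     uint32_t hash = key ^ seed;
//     hash = ~hash + (hash << 15);
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;
//     hash = hash ^ (hash >> 16);
//     return hash & 0x3fffffff;
//   }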
1154 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1163 // elements - holds the slow-case elements of the receiver and is unchanged.
1165 // key - holds the smi key on entry and is unchanged.
1167 // Scratch registers:
1169 // r0 - holds the untagged key on entry and holds the hash once computed.
1171 // r1 - used to hold the capacity mask of the dictionary
1173 // r2 - used for the index into the dictionary.
1175 // result - holds the result on exit if the load succeeds and we fall through.
1179 GetNumberHash(r0, r1);
1181 // Compute capacity mask.
1182 mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
1183 shr(r1, kSmiTagSize); // convert smi to int
1186 // Generate an unrolled loop that performs a few probes before giving up.
1187 for (int i = 0; i < kNumberDictionaryProbes; i++) {
1188 // Use r2 for index calculations and keep the hash intact in r0.
1190 // Compute the masked index: (hash + i + i * i) & mask.
1192 add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
1196 // Scale the index by multiplying by the entry size.
1197 DCHECK(SeededNumberDictionary::kEntrySize == 3);
1198 lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
1200 // Check if the key matches.
1201 cmp(key, FieldOperand(elements,
1204 SeededNumberDictionary::kElementsStartOffset));
1205 if (i != (kNumberDictionaryProbes - 1)) {
1213 // Check that the value is a field property.
1214 const int kDetailsOffset =
1215 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1217 test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
1218 Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
1221 // Get the value at the masked, scaled index.
1222 const int kValueOffset =
1223 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1224 mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
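// The probe sequence above, sketched in C++ (illustrative only; kEntrySize is
// 3, i.e. key/value/details slots per entry):
//
//   uint32_t hash = GetNumberHash(key);
//   uint32_t mask = capacity - 1;                          // capacity is 2^n
//   for (int i = 0; i < kNumberDictionaryProbes; i++) {
//     uint32_t index =
//         (hash + SeededNumberDictionary::GetProbeOffset(i)) & mask;
//     uint32_t entry = index * SeededNumberDictionary::kEntrySize;
//     // compare |key| with the key slot at kElementsStartOffset +
//     // entry * kPointerSize; a mismatch on the last probe jumps to |miss|.
//   }
//   // On a match the details slot must describe a field property, and the
//   // value slot is loaded into |result|.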
1228 void MacroAssembler::LoadAllocationTopHelper(Register result,
1230 AllocationFlags flags) {
1231 ExternalReference allocation_top =
1232 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1234 // Just return if allocation top is already known.
1235 if ((flags & RESULT_CONTAINS_TOP) != 0) {
1236 // No use of scratch if allocation top is provided.
1237 DCHECK(scratch.is(no_reg));
1239 // Assert that result actually contains top on entry.
1240 cmp(result, Operand::StaticVariable(allocation_top));
1241 Check(equal, kUnexpectedAllocationTop);
1246 // Move address of new object to result. Use scratch register if available.
1247 if (scratch.is(no_reg)) {
1248 mov(result, Operand::StaticVariable(allocation_top));
1250 mov(scratch, Immediate(allocation_top));
1251 mov(result, Operand(scratch, 0));
1256 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
1258 AllocationFlags flags) {
1259 if (emit_debug_code()) {
1260 test(result_end, Immediate(kObjectAlignmentMask));
1261 Check(zero, kUnalignedAllocationInNewSpace);
1264 ExternalReference allocation_top =
1265 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1267 // Update new top. Use scratch if available.
1268 if (scratch.is(no_reg)) {
1269 mov(Operand::StaticVariable(allocation_top), result_end);
1271 mov(Operand(scratch, 0), result_end);
1276 void MacroAssembler::Allocate(int object_size,
1278 Register result_end,
1281 AllocationFlags flags) {
1282 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
1283 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1284 if (!FLAG_inline_new) {
1285 if (emit_debug_code()) {
1286 // Trash the registers to simulate an allocation failure.
1287 mov(result, Immediate(0x7091));
1288 if (result_end.is_valid()) {
1289 mov(result_end, Immediate(0x7191));
1291 if (scratch.is_valid()) {
1292 mov(scratch, Immediate(0x7291));
1298 DCHECK(!result.is(result_end));
1300 // Load address of new object into result.
1301 LoadAllocationTopHelper(result, scratch, flags);
1303 ExternalReference allocation_limit =
1304 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1306 // Align the next allocation. Storing the filler map without checking top is
1307 // safe in new-space because the limit of the heap is aligned there.
1308 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1309 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1311 test(result, Immediate(kDoubleAlignmentMask));
1312 j(zero, &aligned, Label::kNear);
1313 if ((flags & PRETENURE) != 0) {
1314 cmp(result, Operand::StaticVariable(allocation_limit));
1315 j(above_equal, gc_required);
1317 mov(Operand(result, 0),
1318 Immediate(isolate()->factory()->one_pointer_filler_map()));
1319 add(result, Immediate(kDoubleSize / 2));
1323 // Calculate new top and bail out if space is exhausted.
1324 Register top_reg = result_end.is_valid() ? result_end : result;
1325 if (!top_reg.is(result)) {
1326 mov(top_reg, result);
1328 add(top_reg, Immediate(object_size));
1329 j(carry, gc_required);
1330 cmp(top_reg, Operand::StaticVariable(allocation_limit));
1331 j(above, gc_required);
1333 // Update allocation top.
1334 UpdateAllocationTopHelper(top_reg, scratch, flags);
1336 // Tag result if requested.
1337 bool tag_result = (flags & TAG_OBJECT) != 0;
1338 if (top_reg.is(result)) {
1340 sub(result, Immediate(object_size - kHeapObjectTag));
1342 sub(result, Immediate(object_size));
1344 } else if (tag_result) {
1345 DCHECK(kHeapObjectTag == 1);
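// Fast-path inline allocation performed above, sketched (illustrative only):
//
//   Address top = *allocation_top;              // LoadAllocationTopHelper
//   Address new_top = top + object_size;
//   if (new_top < top /* carry */ || new_top > *allocation_limit)
//     goto gc_required;
//   *allocation_top = new_top;                  // UpdateAllocationTopHelper
//   result = top + kHeapObjectTag;              // when TAG_OBJECT is passed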
1351 void MacroAssembler::Allocate(int header_size,
1352 ScaleFactor element_size,
1353 Register element_count,
1354 RegisterValueType element_count_type,
1356 Register result_end,
1359 AllocationFlags flags) {
1360 DCHECK((flags & SIZE_IN_WORDS) == 0);
1361 if (!FLAG_inline_new) {
1362 if (emit_debug_code()) {
1363 // Trash the registers to simulate an allocation failure.
1364 mov(result, Immediate(0x7091));
1365 mov(result_end, Immediate(0x7191));
1366 if (scratch.is_valid()) {
1367 mov(scratch, Immediate(0x7291));
1369 // Register element_count is not modified by the function.
1374 DCHECK(!result.is(result_end));
1376 // Load address of new object into result.
1377 LoadAllocationTopHelper(result, scratch, flags);
1379 ExternalReference allocation_limit =
1380 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1382 // Align the next allocation. Storing the filler map without checking top is
1383 // safe in new-space because the limit of the heap is aligned there.
1384 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1385 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1387 test(result, Immediate(kDoubleAlignmentMask));
1388 j(zero, &aligned, Label::kNear);
1389 if ((flags & PRETENURE) != 0) {
1390 cmp(result, Operand::StaticVariable(allocation_limit));
1391 j(above_equal, gc_required);
1393 mov(Operand(result, 0),
1394 Immediate(isolate()->factory()->one_pointer_filler_map()));
1395 add(result, Immediate(kDoubleSize / 2));
1399 // Calculate new top and bail out if space is exhausted.
1400 // We assume that element_count*element_size + header_size does not overflow.
1402 if (element_count_type == REGISTER_VALUE_IS_SMI) {
1403 STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
1404 STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
1405 STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
1406 DCHECK(element_size >= times_2);
1407 DCHECK(kSmiTagSize == 1);
1408 element_size = static_cast<ScaleFactor>(element_size - 1);
1410 DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
1412 lea(result_end, Operand(element_count, element_size, header_size));
1413 add(result_end, result);
1414 j(carry, gc_required);
1415 cmp(result_end, Operand::StaticVariable(allocation_limit));
1416 j(above, gc_required);
1418 if ((flags & TAG_OBJECT) != 0) {
1419 DCHECK(kHeapObjectTag == 1);
1423 // Update allocation top.
1424 UpdateAllocationTopHelper(result_end, scratch, flags);
1428 void MacroAssembler::Allocate(Register object_size,
1430 Register result_end,
1433 AllocationFlags flags) {
1434 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
1435 if (!FLAG_inline_new) {
1436 if (emit_debug_code()) {
1437 // Trash the registers to simulate an allocation failure.
1438 mov(result, Immediate(0x7091));
1439 mov(result_end, Immediate(0x7191));
1440 if (scratch.is_valid()) {
1441 mov(scratch, Immediate(0x7291));
1443 // object_size is left unchanged by this function.
1448 DCHECK(!result.is(result_end));
1450 // Load address of new object into result.
1451 LoadAllocationTopHelper(result, scratch, flags);
1453 ExternalReference allocation_limit =
1454 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1456 // Align the next allocation. Storing the filler map without checking top is
1457 // safe in new-space because the limit of the heap is aligned there.
1458 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1459 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1461 test(result, Immediate(kDoubleAlignmentMask));
1462 j(zero, &aligned, Label::kNear);
1463 if ((flags & PRETENURE) != 0) {
1464 cmp(result, Operand::StaticVariable(allocation_limit));
1465 j(above_equal, gc_required);
1467 mov(Operand(result, 0),
1468 Immediate(isolate()->factory()->one_pointer_filler_map()));
1469 add(result, Immediate(kDoubleSize / 2));
1473 // Calculate new top and bail out if space is exhausted.
1474 if (!object_size.is(result_end)) {
1475 mov(result_end, object_size);
1477 add(result_end, result);
1478 j(carry, gc_required);
1479 cmp(result_end, Operand::StaticVariable(allocation_limit));
1480 j(above, gc_required);
1482 // Tag result if requested.
1483 if ((flags & TAG_OBJECT) != 0) {
1484 DCHECK(kHeapObjectTag == 1);
1488 // Update allocation top.
1489 UpdateAllocationTopHelper(result_end, scratch, flags);
1493 void MacroAssembler::AllocateHeapNumber(Register result,
1498 // Allocate heap number in new space.
1499 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
1502 Handle<Map> map = mode == MUTABLE
1503 ? isolate()->factory()->mutable_heap_number_map()
1504 : isolate()->factory()->heap_number_map();
1507 mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
1511 void MacroAssembler::AllocateTwoByteString(Register result,
1516 Label* gc_required) {
1517 // Calculate the number of bytes needed for the characters in the string while
1518 // observing object alignment.
1519 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1520 DCHECK(kShortSize == 2);
1521 // scratch1 = length * 2 + kObjectAlignmentMask.
1522 lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
1523 and_(scratch1, Immediate(~kObjectAlignmentMask));
1525 // Allocate two byte string in new space.
1526 Allocate(SeqTwoByteString::kHeaderSize,
1529 REGISTER_VALUE_IS_INT32,
1536 // Set the map, length and hash field.
1537 mov(FieldOperand(result, HeapObject::kMapOffset),
1538 Immediate(isolate()->factory()->string_map()));
1539 mov(scratch1, length);
1541 mov(FieldOperand(result, String::kLengthOffset), scratch1);
1542 mov(FieldOperand(result, String::kHashFieldOffset),
1543 Immediate(String::kEmptyHashField));
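// The size computation above, as plain arithmetic (illustrative): the
// character payload is rounded up to object alignment before allocation.
//
//   int payload = length * 2;                                  // kShortSize
//   int padded  = (payload + kObjectAlignmentMask) & ~kObjectAlignmentMask;
//   // allocated size = SeqTwoByteString::kHeaderSize + padded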
1547 void MacroAssembler::AllocateOneByteString(Register result, Register length,
1548 Register scratch1, Register scratch2,
1550 Label* gc_required) {
1551 // Calculate the number of bytes needed for the characters in the string while
1552 // observing object alignment.
1553 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1554 mov(scratch1, length);
1555 DCHECK(kCharSize == 1);
1556 add(scratch1, Immediate(kObjectAlignmentMask));
1557 and_(scratch1, Immediate(~kObjectAlignmentMask));
1559 // Allocate one-byte string in new space.
1560 Allocate(SeqOneByteString::kHeaderSize,
1563 REGISTER_VALUE_IS_INT32,
1570 // Set the map, length and hash field.
1571 mov(FieldOperand(result, HeapObject::kMapOffset),
1572 Immediate(isolate()->factory()->one_byte_string_map()));
1573 mov(scratch1, length);
1575 mov(FieldOperand(result, String::kLengthOffset), scratch1);
1576 mov(FieldOperand(result, String::kHashFieldOffset),
1577 Immediate(String::kEmptyHashField));
1581 void MacroAssembler::AllocateOneByteString(Register result, int length,
1582 Register scratch1, Register scratch2,
1583 Label* gc_required) {
1586 // Allocate one-byte string in new space.
1587 Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
1588 gc_required, TAG_OBJECT);
1590 // Set the map, length and hash field.
1591 mov(FieldOperand(result, HeapObject::kMapOffset),
1592 Immediate(isolate()->factory()->one_byte_string_map()));
1593 mov(FieldOperand(result, String::kLengthOffset),
1594 Immediate(Smi::FromInt(length)));
1595 mov(FieldOperand(result, String::kHashFieldOffset),
1596 Immediate(String::kEmptyHashField));
1600 void MacroAssembler::AllocateTwoByteConsString(Register result,
1603 Label* gc_required) {
1604 // Allocate the cons string object in new space.
1605 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1608 // Set the map. The other fields are left uninitialized.
1609 mov(FieldOperand(result, HeapObject::kMapOffset),
1610 Immediate(isolate()->factory()->cons_string_map()));
1614 void MacroAssembler::AllocateOneByteConsString(Register result,
1617 Label* gc_required) {
1618 Allocate(ConsString::kSize,
1625 // Set the map. The other fields are left uninitialized.
1626 mov(FieldOperand(result, HeapObject::kMapOffset),
1627 Immediate(isolate()->factory()->cons_one_byte_string_map()));
1631 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1634 Label* gc_required) {
1635 // Allocate the sliced string object in new space.
1636 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1639 // Set the map. The other fields are left uninitialized.
1640 mov(FieldOperand(result, HeapObject::kMapOffset),
1641 Immediate(isolate()->factory()->sliced_string_map()));
1645 void MacroAssembler::AllocateOneByteSlicedString(Register result,
1648 Label* gc_required) {
1649 // Allocate the sliced string object in new space.
1650 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1653 // Set the map. The other fields are left uninitialized.
1654 mov(FieldOperand(result, HeapObject::kMapOffset),
1655 Immediate(isolate()->factory()->sliced_one_byte_string_map()));
1659 // Copy memory, byte-by-byte, from source to destination. Not optimized for
1660 // long or aligned copies. The contents of scratch and length are destroyed.
1661 // Source and destination are incremented by length.
1662 // Many variants of movsb, loop unrolling, word moves, and indexed operands
1663 // have been tried here already, and this is fastest.
1664 // A simpler loop is faster on small copies, but 30% slower on large ones.
1665 // The cld() instruction must have been emitted, to clear the direction flag,
1666 // before calling this function.
1667 void MacroAssembler::CopyBytes(Register source,
1668 Register destination,
1671 Label short_loop, len4, len8, len12, done, short_string;
1672 DCHECK(source.is(esi));
1673 DCHECK(destination.is(edi));
1674 DCHECK(length.is(ecx));
1675 cmp(length, Immediate(4));
1676 j(below, &short_string, Label::kNear);
1678 // Because source is 4-byte aligned in our uses of this function,
1679 // we keep source aligned for the rep_movs call by copying the odd bytes
1680 // at the end of the ranges.
1681 mov(scratch, Operand(source, length, times_1, -4));
1682 mov(Operand(destination, length, times_1, -4), scratch);
1684 cmp(length, Immediate(8));
1685 j(below_equal, &len4, Label::kNear);
1686 cmp(length, Immediate(12));
1687 j(below_equal, &len8, Label::kNear);
1688 cmp(length, Immediate(16));
1689 j(below_equal, &len12, Label::kNear);
1694 and_(scratch, Immediate(0x3));
1695 add(destination, scratch);
1696 jmp(&done, Label::kNear);
1699 mov(scratch, Operand(source, 8));
1700 mov(Operand(destination, 8), scratch);
1702 mov(scratch, Operand(source, 4));
1703 mov(Operand(destination, 4), scratch);
1705 mov(scratch, Operand(source, 0));
1706 mov(Operand(destination, 0), scratch);
1707 add(destination, length);
1708 jmp(&done, Label::kNear);
1710 bind(&short_string);
1711 test(length, length);
1712 j(zero, &done, Label::kNear);
1715 mov_b(scratch, Operand(source, 0));
1716 mov_b(Operand(destination, 0), scratch);
1720 j(not_zero, &short_loop);
1726 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
1727 Register end_offset,
1732 mov(Operand(start_offset, 0), filler);
1733 add(start_offset, Immediate(kPointerSize));
1735 cmp(start_offset, end_offset);
1740 void MacroAssembler::BooleanBitTest(Register object,
1743 bit_index += kSmiTagSize + kSmiShiftSize;
1744 DCHECK(base::bits::IsPowerOfTwo32(kBitsPerByte));
1745 int byte_index = bit_index / kBitsPerByte;
1746 int byte_bit_index = bit_index & (kBitsPerByte - 1);
1747 test_b(FieldOperand(object, field_offset + byte_index),
1748 static_cast<byte>(1 << byte_bit_index));
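// Address arithmetic performed above, sketched (illustrative only):
//
//   bit_index += kSmiTagSize + kSmiShiftSize;       // skip over the smi tag
//   int byte_index     = bit_index / kBitsPerByte;
//   int byte_bit_index = bit_index % kBitsPerByte;
//   // test the single byte at field_offset + byte_index against
//   // (1 << byte_bit_index); callers branch on the resulting flags.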
1753 void MacroAssembler::NegativeZeroTest(Register result,
1755 Label* then_label) {
1757 test(result, result);
1760 j(sign, then_label);
1765 void MacroAssembler::NegativeZeroTest(Register result,
1769 Label* then_label) {
1771 test(result, result);
1775 j(sign, then_label);
1780 void MacroAssembler::GetMapConstructor(Register result, Register map,
1783 mov(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
1785 JumpIfSmi(result, &done, Label::kNear);
1786 CmpObjectType(result, MAP_TYPE, temp);
1787 j(not_equal, &done, Label::kNear);
1788 mov(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
1794 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
1795 Register scratch, Label* miss) {
1796 // Get the prototype or initial map from the function.
1798 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1800 // If the prototype or initial map is the hole, don't return it and
1801 // simply miss the cache instead. This will allow us to allocate a
1802 // prototype object on-demand in the runtime system.
1803 cmp(result, Immediate(isolate()->factory()->the_hole_value()));
1806 // If the function does not have an initial map, we're done.
1808 CmpObjectType(result, MAP_TYPE, scratch);
1809 j(not_equal, &done, Label::kNear);
1811 // Get the prototype from the initial map.
1812 mov(result, FieldOperand(result, Map::kPrototypeOffset));
1819 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
1820 DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
1821 call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
1825 void MacroAssembler::TailCallStub(CodeStub* stub) {
1826 jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
1830 void MacroAssembler::StubReturn(int argc) {
1831 DCHECK(argc >= 1 && generating_stub());
1832 ret((argc - 1) * kPointerSize);
1836 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
1837 return has_frame_ || !stub->SometimesSetsUpAFrame();
1841 void MacroAssembler::IndexFromHash(Register hash, Register index) {
1842 // The assert checks that the constants for the maximum number of digits
1843 // for an array index cached in the hash field and the number of bits
1844 // reserved for it do not conflict.
1845 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
1846 (1 << String::kArrayIndexValueBits));
1847 if (!index.is(hash)) {
1850 DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
1854 void MacroAssembler::CallRuntime(const Runtime::Function* f,
1856 SaveFPRegsMode save_doubles) {
1857 // If the expected number of arguments of the runtime function is
1858 // constant, we check that the actual number of arguments matches the expected one.
1860 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1862 // TODO(1236192): Most runtime routines don't need the number of
1863 // arguments passed in because it is constant. At some point we
1864 // should remove this need and make the runtime routine entry code smarter.
1866 Move(eax, Immediate(num_arguments));
1867 mov(ebx, Immediate(ExternalReference(f, isolate())));
1868 CEntryStub ces(isolate(), 1, save_doubles);
1873 void MacroAssembler::CallExternalReference(ExternalReference ref,
1874 int num_arguments) {
1875 mov(eax, Immediate(num_arguments));
1876 mov(ebx, Immediate(ref));
1878 CEntryStub stub(isolate(), 1);
1883 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
1886 // TODO(1236192): Most runtime routines don't need the number of
1887 // arguments passed in because it is constant. At some point we
1888 // should remove this need and make the runtime routine entry code smarter.
1890 Move(eax, Immediate(num_arguments));
1891 JumpToExternalReference(ext);
1895 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
1898 TailCallExternalReference(ExternalReference(fid, isolate()),
1904 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
1905 // Set the entry point and jump to the C entry runtime stub.
1906 mov(ebx, Immediate(ext));
1907 CEntryStub ces(isolate(), 1);
1908 jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
1912 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1913 const ParameterCount& actual,
1914 Handle<Code> code_constant,
1915 const Operand& code_operand,
1917 bool* definitely_mismatches,
1919 Label::Distance done_near,
1920 const CallWrapper& call_wrapper) {
1921 bool definitely_matches = false;
1922 *definitely_mismatches = false;
1924 if (expected.is_immediate()) {
1925 DCHECK(actual.is_immediate());
1926 if (expected.immediate() == actual.immediate()) {
1927 definitely_matches = true;
1929 mov(eax, actual.immediate());
1930 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1931 if (expected.immediate() == sentinel) {
1932 // Don't worry about adapting arguments for builtins that
1933 // don't want that done. Skip the adaptation code by making it look
1934 // like we have a match between the expected and actual number of arguments.
1936 definitely_matches = true;
1938 *definitely_mismatches = true;
1939 mov(ebx, expected.immediate());
1943 if (actual.is_immediate()) {
1944 // Expected is in register, actual is immediate. This is the
1945 // case when we invoke function values without going through the
1947 cmp(expected.reg(), actual.immediate());
1949 DCHECK(expected.reg().is(ebx));
1950 mov(eax, actual.immediate());
1951 } else if (!expected.reg().is(actual.reg())) {
1952 // Both expected and actual are in (different) registers. This
1953 // is the case when we invoke functions using call and apply.
1954 cmp(expected.reg(), actual.reg());
1956 DCHECK(actual.reg().is(eax));
1957 DCHECK(expected.reg().is(ebx));
1961 if (!definitely_matches) {
1962 Handle<Code> adaptor =
1963 isolate()->builtins()->ArgumentsAdaptorTrampoline();
1964 if (!code_constant.is_null()) {
1965 mov(edx, Immediate(code_constant));
1966 add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
1967 } else if (!code_operand.is_reg(edx)) {
1968 mov(edx, code_operand);
1971 if (flag == CALL_FUNCTION) {
1972 call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
1973 call(adaptor, RelocInfo::CODE_TARGET);
1974 call_wrapper.AfterCall();
1975 if (!*definitely_mismatches) {
1976 jmp(done, done_near);
1979 jmp(adaptor, RelocInfo::CODE_TARGET);
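// Decision made by the prologue above, summarized (illustrative):
//
//   expected == actual                       -> invoke the code directly
//   expected == kDontAdaptArgumentsSentinel  -> invoke directly (builtins that
//                                               opt out of adaptation)
//   otherwise                                -> call or jump through the
//       ArgumentsAdaptorTrampoline with eax = actual and ebx = expected,
//       which fixes up the frame before reaching the callee.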
1986 void MacroAssembler::InvokeCode(const Operand& code,
1987 const ParameterCount& expected,
1988 const ParameterCount& actual,
1990 const CallWrapper& call_wrapper) {
1991 // You can't call a function without a valid frame.
1992 DCHECK(flag == JUMP_FUNCTION || has_frame());
1995 bool definitely_mismatches = false;
1996 InvokePrologue(expected, actual, Handle<Code>::null(), code,
1997 &done, &definitely_mismatches, flag, Label::kNear,
1999 if (!definitely_mismatches) {
2000 if (flag == CALL_FUNCTION) {
2001 call_wrapper.BeforeCall(CallSize(code));
2003 call_wrapper.AfterCall();
2005 DCHECK(flag == JUMP_FUNCTION);
2013 void MacroAssembler::InvokeFunction(Register fun,
2014 const ParameterCount& actual,
2016 const CallWrapper& call_wrapper) {
2017 // You can't call a function without a valid frame.
2018 DCHECK(flag == JUMP_FUNCTION || has_frame());
2020 DCHECK(fun.is(edi));
2021 mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
2022 mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2023 mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
2026 ParameterCount expected(ebx);
2027 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2028 expected, actual, flag, call_wrapper);
2032 void MacroAssembler::InvokeFunction(Register fun,
2033 const ParameterCount& expected,
2034 const ParameterCount& actual,
2036 const CallWrapper& call_wrapper) {
2037 // You can't call a function without a valid frame.
2038 DCHECK(flag == JUMP_FUNCTION || has_frame());
2040 DCHECK(fun.is(edi));
2041 mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2043 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2044 expected, actual, flag, call_wrapper);
2048 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2049 const ParameterCount& expected,
2050 const ParameterCount& actual,
2052 const CallWrapper& call_wrapper) {
2053 LoadHeapObject(edi, function);
2054 InvokeFunction(edi, expected, actual, flag, call_wrapper);
2058 void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
2059 const CallWrapper& call_wrapper) {
2060 // You can't call a builtin without a valid frame.
2061 DCHECK(flag == JUMP_FUNCTION || has_frame());
2063 // Rely on the assertion to check that the number of provided
2064 // arguments matches the expected number of arguments. Fake a
2065 // parameter count to avoid emitting code to do the check.
2066 ParameterCount expected(0);
2067 GetBuiltinFunction(edi, native_context_index);
2068 InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2069 expected, expected, flag, call_wrapper);
2073 void MacroAssembler::GetBuiltinFunction(Register target,
2074 int native_context_index) {
2075 // Load the JavaScript builtin function from the builtins object.
2076 mov(target, GlobalObjectOperand());
2077 mov(target, FieldOperand(target, GlobalObject::kNativeContextOffset));
2078 mov(target, ContextOperand(target, native_context_index));
2082 void MacroAssembler::GetBuiltinEntry(Register target,
2083 int native_context_index) {
2084 DCHECK(!target.is(edi));
2085 // Load the JavaScript builtin function from the builtins object.
2086 GetBuiltinFunction(edi, native_context_index);
2087 // Load the code entry point from the function into the target register.
2088 mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
2092 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2093 if (context_chain_length > 0) {
2094 // Move up the chain of contexts to the context containing the slot.
2095 mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2096 for (int i = 1; i < context_chain_length; i++) {
2097 mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2100 // Slot is in the current function context. Move it into the
2101 // destination register in case we store into it (the write barrier
2102 // cannot be allowed to destroy the context in esi).
2106 // We should not have found a with context by walking the context chain
2107 // (i.e., the static scope chain and runtime context chain do not agree).
2108 // A variable occurring in such a scope should have slot type LOOKUP and
2109 // not CONTEXT.
2110 if (emit_debug_code()) {
2111 cmp(FieldOperand(dst, HeapObject::kMapOffset),
2112 isolate()->factory()->with_context_map());
2113 Check(not_equal, kVariableResolvedToWithContext);
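// For example, LoadContext(eax, 2) performs two PREVIOUS_INDEX hops from esi
// and leaves the grandparent context in eax, while a chain length of 0 simply
// copies the current context out of esi (see the comment above), so that a
// later store through the destination cannot clobber esi via the write
// barrier.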
2118 void MacroAssembler::LoadGlobalProxy(Register dst) {
2119 mov(dst, GlobalObjectOperand());
2120 mov(dst, FieldOperand(dst, GlobalObject::kGlobalProxyOffset));
2124 void MacroAssembler::LoadTransitionedArrayMapConditional(
2125 ElementsKind expected_kind,
2126 ElementsKind transitioned_kind,
2127 Register map_in_out,
2129 Label* no_map_match) {
2130 // Load the global or builtins object from the current context.
2131 mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2132 mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
2134 // Check that the function's map is the same as the expected cached map.
2135 mov(scratch, Operand(scratch,
2136 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2138 size_t offset = expected_kind * kPointerSize +
2139 FixedArrayBase::kHeaderSize;
2140 cmp(map_in_out, FieldOperand(scratch, offset));
2141 j(not_equal, no_map_match);
2143 // Use the transitioned cached map.
2144 offset = transitioned_kind * kPointerSize +
2145 FixedArrayBase::kHeaderSize;
2146 mov(map_in_out, FieldOperand(scratch, offset));
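// Worked example (a sketch using the standard ElementsKind values): for a
// FAST_SMI_ELEMENTS -> FAST_ELEMENTS transition, map_in_out is compared with
// the cached JSArray map stored at slot FAST_SMI_ELEMENTS of the native
// context's JS_ARRAY_MAPS array; on a match it is replaced by the cached
// FAST_ELEMENTS map, otherwise control transfers to no_map_match.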
2150 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2151 // Load the global or builtins object from the current context.
2153 Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2154 // Load the native context from the global or builtins object.
2156 FieldOperand(function, GlobalObject::kNativeContextOffset));
2157 // Load the function from the native context.
2158 mov(function, Operand(function, Context::SlotOffset(index)));
2162 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2164 // Load the initial map. The global functions all have initial maps.
2165 mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2166 if (emit_debug_code()) {
2168 CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
2171 Abort(kGlobalFunctionsMustHaveInitialMap);
2177 // Store the value in register src in the safepoint register stack
2178 // slot for register dst.
2179 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2180 mov(SafepointRegisterSlot(dst), src);
2184 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
2185 mov(SafepointRegisterSlot(dst), src);
2189 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2190 mov(dst, SafepointRegisterSlot(src));
2194 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
2195 return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
2199 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
2200 // The registers are pushed starting with the lowest encoding,
2201 // which means that the lowest encodings are furthest away from
2202 // the stack pointer.
2203 DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
2204 return kNumSafepointRegisters - reg_code - 1;
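// For instance, assuming kNumSafepointRegisters == 8 and the pushad push
// order used on ia32: eax (code 0) is pushed first and therefore lies
// deepest in the frame, at slot 8 - 0 - 1 == 7, while edi (code 7) ends up
// in slot 0, right next to esp.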
2208 void MacroAssembler::LoadHeapObject(Register result,
2209 Handle<HeapObject> object) {
2210 AllowDeferredHandleDereference embedding_raw_address;
2211 if (isolate()->heap()->InNewSpace(*object)) {
2212 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2213 mov(result, Operand::ForCell(cell));
2215 mov(result, object);
2220 void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
2221 AllowDeferredHandleDereference using_raw_address;
2222 if (isolate()->heap()->InNewSpace(*object)) {
2223 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2224 cmp(reg, Operand::ForCell(cell));
2231 void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
2232 AllowDeferredHandleDereference using_raw_address;
2233 if (isolate()->heap()->InNewSpace(*object)) {
2234 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2235 push(Operand::ForCell(cell));
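// A note on the Cell indirection used by LoadHeapObject, CmpHeapObject and
// PushHeapObject: a new-space object may move during a scavenge, so rather
// than embedding it directly the code allocates a Cell (which itself does
// not live in new space) to hold it and goes through Operand::ForCell at run
// time; objects outside new space are embedded as immediates.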
2242 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2245 cmp(value, FieldOperand(scratch, WeakCell::kValueOffset));
2249 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2251 mov(value, FieldOperand(value, WeakCell::kValueOffset));
2255 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2257 GetWeakValue(value, cell);
2258 JumpIfSmi(value, miss);
2262 void MacroAssembler::Ret() {
2267 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
2268 if (is_uint16(bytes_dropped)) {
2272 add(esp, Immediate(bytes_dropped));
2279 void MacroAssembler::Drop(int stack_elements) {
2280 if (stack_elements > 0) {
2281 add(esp, Immediate(stack_elements * kPointerSize));
2286 void MacroAssembler::Move(Register dst, Register src) {
2293 void MacroAssembler::Move(Register dst, const Immediate& x) {
2295 xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
2302 void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
2307 void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
2311 unsigned cnt = base::bits::CountPopulation32(src);
2312 unsigned nlz = base::bits::CountLeadingZeros32(src);
2313 unsigned ntz = base::bits::CountTrailingZeros32(src);
2314 if (nlz + cnt + ntz == 32) {
2317 psrld(dst, 32 - cnt);
2319 pslld(dst, 32 - cnt);
2320 if (nlz != 0) psrld(dst, nlz);
2324 mov(eax, Immediate(src));
2325 movd(dst, Operand(eax));
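// Worked example of the shift-based path above: src == 0x00FFFF00 gives
// cnt == 16, nlz == 8, ntz == 8, so nlz + cnt + ntz == 32 (a single run of
// ones). Starting from an all-ones register, pslld(dst, 32 - cnt) clears the
// low 16 bits and psrld(dst, nlz) clears the top 8, leaving 0x00FFFF00
// without a round trip through eax.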
2332 void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
2336 uint32_t lower = static_cast<uint32_t>(src);
2337 uint32_t upper = static_cast<uint32_t>(src >> 32);
2338 unsigned cnt = base::bits::CountPopulation64(src);
2339 unsigned nlz = base::bits::CountLeadingZeros64(src);
2340 unsigned ntz = base::bits::CountTrailingZeros64(src);
2341 if (nlz + cnt + ntz == 64) {
2344 psrlq(dst, 64 - cnt);
2346 psllq(dst, 64 - cnt);
2347 if (nlz != 0) psrlq(dst, nlz);
2349 } else if (lower == 0) {
2352 } else if (CpuFeatures::IsSupported(SSE4_1)) {
2353 CpuFeatureScope scope(this, SSE4_1);
2355 Move(eax, Immediate(lower));
2356 movd(dst, Operand(eax));
2357 Move(eax, Immediate(upper));
2358 pinsrd(dst, Operand(eax), 1);
2361 push(Immediate(upper));
2362 push(Immediate(lower));
2363 movsd(dst, Operand(esp, 0));
2364 add(esp, Immediate(kDoubleSize));
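// In the stack fallback just above, upper is pushed first and lower second,
// so on little-endian ia32 the eight bytes at esp spell out the 64-bit value
// (low half at the lower address); movsd loads it in one access and the add
// simply pops the temporary slot again.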
2370 void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
2376 if (CpuFeatures::IsSupported(SSE4_1)) {
2377 CpuFeatureScope sse_scope(this, SSE4_1);
2378 pextrd(dst, src, imm8);
2381 pshufd(xmm0, src, 1);
2386 void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
2387 DCHECK(imm8 == 0 || imm8 == 1);
2388 if (CpuFeatures::IsSupported(SSE4_1)) {
2389 CpuFeatureScope sse_scope(this, SSE4_1);
2390 pinsrd(dst, src, imm8);
2395 punpckldq(dst, xmm0);
2399 punpckldq(xmm0, dst);
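// Sketch of the pre-SSE4.1 fallbacks: for Pextrd, pshufd(xmm0, src, 1)
// copies lane 1 of src into lane 0 of xmm0 so a plain movd can read it; for
// Pinsrd, punpckldq interleaves the low dwords of its two operands, which
// places the new 32-bit value into lane 1 (first form) or lane 0 (second
// form) of the result, matching what pinsrd with imm8 == 1 or 0 would do.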
2405 void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
2406 if (CpuFeatures::IsSupported(LZCNT)) {
2407 CpuFeatureScope scope(this, LZCNT);
2413 j(not_zero, &not_zero_src, Label::kNear);
2414 Move(dst, Immediate(63)); // 63^31 == 32
2415 bind(&not_zero_src);
2416 xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
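// Worked example of this fallback: a bit-scan-reverse first stores the index
// of the highest set bit, so for src == 0x00000010 dst becomes 4 and
// 31 ^ 4 == 27, the leading-zero count. For src == 0 the scan finds no bit
// and dst is preset to 63, which the final xor turns into 63 ^ 31 == 32,
// the same result lzcnt would produce.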
2420 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
2421 if (FLAG_native_code_counters && counter->Enabled()) {
2422 mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
2427 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
2429 if (FLAG_native_code_counters && counter->Enabled()) {
2430 Operand operand = Operand::StaticVariable(ExternalReference(counter));
2434 add(operand, Immediate(value));
2440 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
2442 if (FLAG_native_code_counters && counter->Enabled()) {
2443 Operand operand = Operand::StaticVariable(ExternalReference(counter));
2447 sub(operand, Immediate(value));
2453 void MacroAssembler::IncrementCounter(Condition cc,
2454 StatsCounter* counter,
2457 if (FLAG_native_code_counters && counter->Enabled()) {
2459 j(NegateCondition(cc), &skip);
2461 IncrementCounter(counter, value);
2468 void MacroAssembler::DecrementCounter(Condition cc,
2469 StatsCounter* counter,
2472 if (FLAG_native_code_counters && counter->Enabled()) {
2474 j(NegateCondition(cc), &skip);
2476 DecrementCounter(counter, value);
2483 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
2484 if (emit_debug_code()) Check(cc, reason);
2488 void MacroAssembler::AssertFastElements(Register elements) {
2489 if (emit_debug_code()) {
2490 Factory* factory = isolate()->factory();
2492 cmp(FieldOperand(elements, HeapObject::kMapOffset),
2493 Immediate(factory->fixed_array_map()));
2495 cmp(FieldOperand(elements, HeapObject::kMapOffset),
2496 Immediate(factory->fixed_double_array_map()));
2498 cmp(FieldOperand(elements, HeapObject::kMapOffset),
2499 Immediate(factory->fixed_cow_array_map()));
2501 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2507 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
2511 // will not return here
2516 void MacroAssembler::CheckStackAlignment() {
2517 int frame_alignment = base::OS::ActivationFrameAlignment();
2518 int frame_alignment_mask = frame_alignment - 1;
2519 if (frame_alignment > kPointerSize) {
2520 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
2521 Label alignment_as_expected;
2522 test(esp, Immediate(frame_alignment_mask));
2523 j(zero, &alignment_as_expected);
2524 // Abort if stack is not aligned.
2526 bind(&alignment_as_expected);
2531 void MacroAssembler::Abort(BailoutReason reason) {
2533 const char* msg = GetBailoutReason(reason);
2535 RecordComment("Abort message: ");
2539 if (FLAG_trap_on_abort) {
2545 push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
2546 // Disable stub call restrictions to always allow calls to abort.
2548 // We don't actually want to generate a pile of code for this, so just
2549 // claim there is a stack frame, without generating one.
2550 FrameScope scope(this, StackFrame::NONE);
2551 CallRuntime(Runtime::kAbort, 1);
2553 CallRuntime(Runtime::kAbort, 1);
2555 // will not return here
2560 void MacroAssembler::LoadInstanceDescriptors(Register map,
2561 Register descriptors) {
2562 mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
2566 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
2567 mov(dst, FieldOperand(map, Map::kBitField3Offset));
2568 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
2572 void MacroAssembler::LoadAccessor(Register dst, Register holder,
2574 AccessorComponent accessor) {
2575 mov(dst, FieldOperand(holder, HeapObject::kMapOffset));
2576 LoadInstanceDescriptors(dst, dst);
2577 mov(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
2578 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
2579 : AccessorPair::kSetterOffset;
2580 mov(dst, FieldOperand(dst, offset));
2584 void MacroAssembler::LoadPowerOf2(XMMRegister dst,
2587 DCHECK(is_uintn(power + HeapNumber::kExponentBias,
2588 HeapNumber::kExponentBits));
2589 mov(scratch, Immediate(power + HeapNumber::kExponentBias));
2591 psllq(dst, HeapNumber::kMantissaBits);
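// For example, LoadPowerOf2(dst, scratch, 3) loads 3 + kExponentBias == 1026
// and, once the biased exponent has been shifted left by kMantissaBits (52),
// dst holds the IEEE-754 bit pattern of 8.0 with a zero mantissa.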
2595 void MacroAssembler::LookupNumberStringCache(Register object,
2600 // Use of registers. Register result is used as a temporary.
2601 Register number_string_cache = result;
2602 Register mask = scratch1;
2603 Register scratch = scratch2;
2605 // Load the number string cache.
2606 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2607 // Make the hash mask from the length of the number string cache. It
2608 // contains two elements (number and string) for each cache entry.
2609 mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2610 shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
2611 sub(mask, Immediate(1)); // Make mask.
2613 // Calculate the entry in the number string cache. The hash value in the
2614 // number string cache for smis is just the smi value, and the hash for
2615 // doubles is the xor of the upper and lower words. See
2616 // Heap::GetNumberStringCache.
2617 Label smi_hash_calculated;
2618 Label load_result_from_cache;
2620 STATIC_ASSERT(kSmiTag == 0);
2621 JumpIfNotSmi(object, &not_smi, Label::kNear);
2622 mov(scratch, object);
2624 jmp(&smi_hash_calculated, Label::kNear);
2626 cmp(FieldOperand(object, HeapObject::kMapOffset),
2627 isolate()->factory()->heap_number_map());
2628 j(not_equal, not_found);
2629 STATIC_ASSERT(8 == kDoubleSize);
2630 mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2631 xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2632 // Object is heap number and hash is now in scratch. Calculate cache index.
2633 and_(scratch, mask);
2634 Register index = scratch;
2635 Register probe = mask;
2637 FieldOperand(number_string_cache,
2639 times_twice_pointer_size,
2640 FixedArray::kHeaderSize));
2641 JumpIfSmi(probe, not_found);
2642 movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2643 ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
2644 j(parity_even, not_found); // Bail out if NaN is involved.
2645 j(not_equal, not_found); // The cache did not contain this value.
2646 jmp(&load_result_from_cache, Label::kNear);
2648 bind(&smi_hash_calculated);
2649 // Object is smi and hash is now in scratch. Calculate cache index.
2650 and_(scratch, mask);
2651 // Check if the entry is the smi we are looking for.
2653 FieldOperand(number_string_cache,
2655 times_twice_pointer_size,
2656 FixedArray::kHeaderSize));
2657 j(not_equal, not_found);
2659 // Get the result from the cache.
2660 bind(&load_result_from_cache);
2662 FieldOperand(number_string_cache,
2664 times_twice_pointer_size,
2665 FixedArray::kHeaderSize + kPointerSize));
2666 IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
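// Sketch of a probe, assuming a cache with 64 entries (mask == 63): for the
// smi 7 the hash is the untagged value itself, so the candidate key is read
// from FixedArray element 2 * 7 == 14, with its cached string at element 15;
// for a heap number the hash is the xor of the two 32-bit halves of its
// IEEE-754 representation, masked the same way.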
2670 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
2671 Register instance_type, Register scratch, Label* failure) {
2672 if (!scratch.is(instance_type)) {
2673 mov(scratch, instance_type);
2676 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
2677 cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
2678 j(not_equal, failure);
2682 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
2687 // Check that both objects are not smis.
2688 STATIC_ASSERT(kSmiTag == 0);
2689 mov(scratch1, object1);
2690 and_(scratch1, object2);
2691 JumpIfSmi(scratch1, failure);
2693 // Load instance type for both strings.
2694 mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
2695 mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
2696 movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2697 movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2699 // Check that both are flat one-byte strings.
2700 const int kFlatOneByteStringMask =
2701 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2702 const int kFlatOneByteStringTag =
2703 kStringTag | kOneByteStringTag | kSeqStringTag;
2704 // Interleave bits from both instance types and compare them in one check.
2705 DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2706 and_(scratch1, kFlatOneByteStringMask);
2707 and_(scratch2, kFlatOneByteStringMask);
2708 lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2709 cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
2710 j(not_equal, failure);
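// A note on the combined check above: the DCHECK guarantees that the mask
// does not collide with itself shifted left by 3, so the lea computes
// scratch1 + 8 * scratch2 without carries, packing both masked instance
// types into disjoint bit ranges; a single cmp against
// kFlatOneByteStringTag | (kFlatOneByteStringTag << 3) then validates both
// strings at once.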
2714 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
2715 Label* not_unique_name,
2716 Label::Distance distance) {
2717 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2719 test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2721 cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
2722 j(not_equal, not_unique_name, distance);
2728 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
2731 uint32_t encoding_mask) {
2733 JumpIfNotSmi(string, &is_object, Label::kNear);
2738 mov(value, FieldOperand(string, HeapObject::kMapOffset));
2739 movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
2741 and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
2742 cmp(value, Immediate(encoding_mask));
2744 Check(equal, kUnexpectedStringType);
2746 // The index is assumed to be untagged coming in; tag it to compare with the
2747 // string length without using a temp register. It is restored at the end of
2748 // this function.
2750 Check(no_overflow, kIndexIsTooLarge);
2752 cmp(index, FieldOperand(string, String::kLengthOffset));
2753 Check(less, kIndexIsTooLarge);
2755 cmp(index, Immediate(Smi::FromInt(0)));
2756 Check(greater_equal, kIndexIsNegative);
2758 // Restore the index
2763 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
2764 int frame_alignment = base::OS::ActivationFrameAlignment();
2765 if (frame_alignment != 0) {
2766 // Make stack end at alignment and make room for num_arguments words
2767 // and the original value of esp.
2769 sub(esp, Immediate((num_arguments + 1) * kPointerSize));
2770 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
2771 and_(esp, -frame_alignment);
2772 mov(Operand(esp, num_arguments * kPointerSize), scratch);
2774 sub(esp, Immediate(num_arguments * kPointerSize));
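// Sketch of the aligned path, assuming a 16-byte ABI alignment: the incoming
// esp is saved in scratch, esp is dropped by num_arguments + 1 words and
// rounded down with and_(esp, -16), and the saved esp is parked in the
// topmost reserved slot so that CallCFunction can restore it with a single
// load after the call returns.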
2779 void MacroAssembler::CallCFunction(ExternalReference function,
2780 int num_arguments) {
2781 // Trashing eax is ok as it will be the return value.
2782 mov(eax, Immediate(function));
2783 CallCFunction(eax, num_arguments);
2787 void MacroAssembler::CallCFunction(Register function,
2788 int num_arguments) {
2789 DCHECK(has_frame());
2790 // Check stack alignment.
2791 if (emit_debug_code()) {
2792 CheckStackAlignment();
2796 if (base::OS::ActivationFrameAlignment() != 0) {
2797 mov(esp, Operand(esp, num_arguments * kPointerSize));
2799 add(esp, Immediate(num_arguments * kPointerSize));
2805 bool AreAliased(Register reg1,
2813 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
2814 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
2815 reg7.is_valid() + reg8.is_valid();
2818 if (reg1.is_valid()) regs |= reg1.bit();
2819 if (reg2.is_valid()) regs |= reg2.bit();
2820 if (reg3.is_valid()) regs |= reg3.bit();
2821 if (reg4.is_valid()) regs |= reg4.bit();
2822 if (reg5.is_valid()) regs |= reg5.bit();
2823 if (reg6.is_valid()) regs |= reg6.bit();
2824 if (reg7.is_valid()) regs |= reg7.bit();
2825 if (reg8.is_valid()) regs |= reg8.bit();
2826 int n_of_non_aliasing_regs = NumRegs(regs);
2828 return n_of_valid_regs != n_of_non_aliasing_regs;
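// For example, a call in which eax is passed twice among otherwise distinct
// registers counts one more valid register than there are distinct bits in
// the accumulated mask, so AreAliased returns true; with pairwise-distinct
// registers the two counts agree and the result is false.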
2833 CodePatcher::CodePatcher(byte* address, int size)
2834 : address_(address),
2836 masm_(NULL, address, size + Assembler::kGap) {
2837 // Create a new macro assembler pointing to the address of the code to patch.
2838 // The size is adjusted with kGap in order for the assembler to generate size
2839 // bytes of instructions without failing with buffer size constraints.
2840 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2844 CodePatcher::~CodePatcher() {
2845 // Indicate that code has changed.
2846 CpuFeatures::FlushICache(address_, size_);
2848 // Check that the code was patched as expected.
2849 DCHECK(masm_.pc_ == address_ + size_);
2850 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2854 void MacroAssembler::CheckPageFlag(
2859 Label* condition_met,
2860 Label::Distance condition_met_distance) {
2861 DCHECK(cc == zero || cc == not_zero);
2862 if (scratch.is(object)) {
2863 and_(scratch, Immediate(~Page::kPageAlignmentMask));
2865 mov(scratch, Immediate(~Page::kPageAlignmentMask));
2866 and_(scratch, object);
2868 if (mask < (1 << kBitsPerByte)) {
2869 test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
2870 static_cast<uint8_t>(mask));
2872 test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
2874 j(cc, condition_met, condition_met_distance);
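// Worked example of the masking above, assuming 1 MB pages: an object at
// 0x08abc123 lies on the chunk starting at 0x08abc123 & ~kPageAlignmentMask
// == 0x08a00000, where the MemoryChunk header and its flags word live; masks
// that fit in one byte use the shorter test_b encoding against the low byte
// of that word.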
2878 void MacroAssembler::CheckPageFlagForMap(
2882 Label* condition_met,
2883 Label::Distance condition_met_distance) {
2884 DCHECK(cc == zero || cc == not_zero);
2885 Page* page = Page::FromAddress(map->address());
2886 DCHECK(!serializer_enabled()); // Serializer cannot match page_flags.
2887 ExternalReference reference(ExternalReference::page_flags(page));
2888 // The inlined static address check of the page's flags relies
2889 // on maps never being compacted.
2890 DCHECK(!isolate()->heap()->mark_compact_collector()->
2891 IsOnEvacuationCandidate(*map));
2892 if (mask < (1 << kBitsPerByte)) {
2893 test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
2895 test(Operand::StaticVariable(reference), Immediate(mask));
2897 j(cc, condition_met, condition_met_distance);
2901 void MacroAssembler::JumpIfBlack(Register object,
2905 Label::Distance on_black_near) {
2906 HasColor(object, scratch0, scratch1,
2907 on_black, on_black_near,
2908 1, 0); // kBlackBitPattern.
2909 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
2913 void MacroAssembler::HasColor(Register object,
2914 Register bitmap_scratch,
2915 Register mask_scratch,
2917 Label::Distance has_color_distance,
2920 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
2922 GetMarkBits(object, bitmap_scratch, mask_scratch);
2924 Label other_color, word_boundary;
2925 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
2926 j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
2927 add(mask_scratch, mask_scratch); // Shift left 1 by adding.
2928 j(zero, &word_boundary, Label::kNear);
2929 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
2930 j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
2931 jmp(&other_color, Label::kNear);
2933 bind(&word_boundary);
2934 test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
2936 j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
2941 void MacroAssembler::GetMarkBits(Register addr_reg,
2942 Register bitmap_reg,
2943 Register mask_reg) {
2944 DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
2945 mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
2946 and_(bitmap_reg, addr_reg);
2949 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
2952 (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
2954 add(bitmap_reg, ecx);
2956 shr(ecx, kPointerSizeLog2);
2957 and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
2958 mov(mask_reg, Immediate(1));
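// A sketch of the resulting values: bitmap_reg holds the page start plus the
// byte offset of the 32-bit bitmap cell covering addr_reg (the bitmap keeps
// one mark bit per pointer-sized word and begins at MemoryChunk::kHeaderSize,
// which is why callers add that offset), ecx holds the bit index of
// addr_reg's word within that cell, and mask_reg starts at 1 so that, once
// shifted left by that count, it selects the word's mark bit.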
2963 void MacroAssembler::EnsureNotWhite(
2965 Register bitmap_scratch,
2966 Register mask_scratch,
2967 Label* value_is_white_and_not_data,
2968 Label::Distance distance) {
2969 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
2970 GetMarkBits(value, bitmap_scratch, mask_scratch);
2972 // If the value is black or grey we don't need to do anything.
2973 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
2974 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
2975 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
2976 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
2980 // Since both black and grey have a 1 in the first position and white does
2981 // not have a 1 there we only need to check one bit.
2982 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
2983 j(not_zero, &done, Label::kNear);
2985 if (emit_debug_code()) {
2986 // Check for impossible bit pattern.
2989 // Shift left by one via the add below; it may overflow, making the check conservative.
2990 add(mask_scratch, mask_scratch);
2991 test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
2992 j(zero, &ok, Label::kNear);
2998 // Value is white. We check whether it is data that doesn't need scanning.
2999 // Currently only checks for HeapNumber and non-cons strings.
3000 Register map = ecx; // Holds map while checking type.
3001 Register length = ecx; // Holds length of object after checking type.
3002 Label not_heap_number;
3003 Label is_data_object;
3005 // Check for heap-number
3006 mov(map, FieldOperand(value, HeapObject::kMapOffset));
3007 cmp(map, isolate()->factory()->heap_number_map());
3008 j(not_equal, &not_heap_number, Label::kNear);
3009 mov(length, Immediate(HeapNumber::kSize));
3010 jmp(&is_data_object, Label::kNear);
3012 bind(&not_heap_number);
3013 // Check for strings.
3014 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3015 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3016 // If it's a string and it's not a cons string then it's an object containing
3017 // no GC pointers.
3018 Register instance_type = ecx;
3019 movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3020 test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
3021 j(not_zero, value_is_white_and_not_data);
3022 // It's a non-indirect (non-cons and non-slice) string.
3023 // If it's external, the length is just ExternalString::kSize.
3024 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3026 // External strings are the only ones with the kExternalStringTag bit
3027 // set.
3028 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
3029 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
3030 test_b(instance_type, kExternalStringTag);
3031 j(zero, &not_external, Label::kNear);
3032 mov(length, Immediate(ExternalString::kSize));
3033 jmp(&is_data_object, Label::kNear);
3035 bind(&not_external);
3036 // Sequential string, either Latin1 or UC16.
3037 DCHECK(kOneByteStringTag == 0x04);
3038 and_(length, Immediate(kStringEncodingMask));
3039 xor_(length, Immediate(kStringEncodingMask));
3040 add(length, Immediate(0x04));
3041 // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
3042 // by 2. If we multiply the string length as smi by this, it still
3043 // won't overflow a 32-bit value.
3044 DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
3045 DCHECK(SeqOneByteString::kMaxSize <=
3046 static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
3047 imul(length, FieldOperand(value, String::kLengthOffset));
3048 shr(length, 2 + kSmiTagSize + kSmiShiftSize);
3049 add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
3050 and_(length, Immediate(~kObjectAlignmentMask));
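// Worked example of the size computation above, assuming kStringEncodingMask
// equals kOneByteStringTag (0x04) as the DCHECK suggests: the and/xor/add
// sequence turns the instance type into 4 for a one-byte string and 8 for a
// two-byte string, i.e. the character size shifted left by 2. Multiplying by
// the smi-tagged length and shifting right by 2 + kSmiTagSize + kSmiShiftSize
// yields the payload size in bytes, which the add/and pair just above rounds
// up, together with the header, to the object alignment.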
3052 bind(&is_data_object);
3053 // Value is a data object, and it is white. Mark it black. Since we know
3054 // that the object is white we can make it black by flipping one bit.
3055 or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
3057 and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
3058 add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
3060 if (emit_debug_code()) {
3061 mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3062 cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
3063 Check(less_equal, kLiveBytesCountOverflowChunkSize);
3070 void MacroAssembler::EnumLength(Register dst, Register map) {
3071 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3072 mov(dst, FieldOperand(map, Map::kBitField3Offset));
3073 and_(dst, Immediate(Map::EnumLengthBits::kMask));
3078 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
3082 // Check if the enum length field is properly initialized, indicating that
3083 // there is an enum cache.
3084 mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
3086 EnumLength(edx, ebx);
3087 cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
3088 j(equal, call_runtime);
3093 mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
3095 // For all objects but the receiver, check that the cache is empty.
3096 EnumLength(edx, ebx);
3097 cmp(edx, Immediate(Smi::FromInt(0)));
3098 j(not_equal, call_runtime);
3102 // Check that there are no elements. Register ecx contains the current JS
3103 // object we've reached through the prototype chain.
3105 mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
3106 cmp(ecx, isolate()->factory()->empty_fixed_array());
3107 j(equal, &no_elements);
3109 // Second chance, the object may be using the empty slow element dictionary.
3110 cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
3111 j(not_equal, call_runtime);
3114 mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
3115 cmp(ecx, isolate()->factory()->null_value());
3116 j(not_equal, &next);
3120 void MacroAssembler::TestJSArrayForAllocationMemento(
3121 Register receiver_reg,
3122 Register scratch_reg,
3123 Label* no_memento_found) {
3124 ExternalReference new_space_start =
3125 ExternalReference::new_space_start(isolate());
3126 ExternalReference new_space_allocation_top =
3127 ExternalReference::new_space_allocation_top_address(isolate());
3129 lea(scratch_reg, Operand(receiver_reg,
3130 JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
3131 cmp(scratch_reg, Immediate(new_space_start));
3132 j(less, no_memento_found);
3133 cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
3134 j(greater, no_memento_found);
3135 cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
3136 Immediate(isolate()->factory()->allocation_memento_map()));
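// Sketch of the layout being probed: a memento, if present, sits directly
// behind the JSArray, so scratch_reg points just past
// receiver + JSArray::kSize + AllocationMemento::kSize. Only when that
// address lies between new_space_start and the current allocation top can a
// memento exist, and the map word AllocationMemento::kSize bytes back is
// then compared against the allocation_memento_map.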
3140 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
3145 DCHECK(!scratch1.is(scratch0));
3146 Factory* factory = isolate()->factory();
3147 Register current = scratch0;
3148 Label loop_again, end;
3150 // Walk up the prototype chain using current (scratch0), starting from the object.
3151 mov(current, object);
3152 mov(current, FieldOperand(current, HeapObject::kMapOffset));
3153 mov(current, FieldOperand(current, Map::kPrototypeOffset));
3154 cmp(current, Immediate(factory->null_value()));
3157 // Loop based on the map going up the prototype chain.
3159 mov(current, FieldOperand(current, HeapObject::kMapOffset));
3160 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
3161 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
3162 CmpInstanceType(current, JS_OBJECT_TYPE);
3164 mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
3165 DecodeField<Map::ElementsKindBits>(scratch1);
3166 cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
3168 mov(current, FieldOperand(current, Map::kPrototypeOffset));
3169 cmp(current, Immediate(factory->null_value()));
3170 j(not_equal, &loop_again);
3176 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
3177 DCHECK(!dividend.is(eax));
3178 DCHECK(!dividend.is(edx));
3179 base::MagicNumbersForDivision<uint32_t> mag =
3180 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
3181 mov(eax, Immediate(mag.multiplier));
3183 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
3184 if (divisor > 0 && neg) add(edx, dividend);
3185 if (divisor < 0 && !neg && mag.multiplier > 0) sub(edx, dividend);
3186 if (mag.shift > 0) sar(edx, mag.shift);
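// Worked example (a sketch; the full routine also applies a final sign
// fix-up): for divisor == 7 the computed magic multiplier is 0x92492493 with
// shift 2. edx receives the high half of dividend * multiplier; because that
// multiplier is negative as a signed 32-bit value, dividend is added back
// before the arithmetic shift by 2, producing the quotient truncated toward
// zero.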
3193 } // namespace internal
3196 #endif // V8_TARGET_ARCH_IA32