// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_IA32

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"

// -------------------------------------------------------------------------
// MacroAssembler implementation.

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
  if (isolate() != NULL) {
    // TODO(titzer): should we just use a null handle here instead?
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),


void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
  DCHECK(!r.IsDouble());
  } else if (r.IsUInteger8()) {
  } else if (r.IsInteger16()) {
  } else if (r.IsUInteger16()) {


void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    if (r.IsHeapObject()) {
    } else if (r.IsSmi()) {


void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
    Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
    mov(destination, value);
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(destination, Immediate(index));
  mov(destination, Operand::StaticArray(destination,


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(scratch, Immediate(index));
  mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),


void MacroAssembler::CompareRoot(Register with,
                                 Heap::RootListIndex index) {
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(scratch, Immediate(index));
  cmp(with, Operand::StaticArray(scratch,


void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);


void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);


void MacroAssembler::InNewSpace(
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == equal || cc == not_equal);
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
    mov(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  // Check that we can use a test_b.
  DCHECK(MemoryChunk::IN_FROM_SPACE < 8);
  DCHECK(MemoryChunk::IN_TO_SPACE < 8);
  int mask = (1 << MemoryChunk::IN_FROM_SPACE) |
             (1 << MemoryChunk::IN_TO_SPACE);
  // If non-zero, the page belongs to new-space.
  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
         static_cast<uint8_t>(mask));
  j(cc, condition_met, condition_met_distance);
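
  // A rough C sketch of the check above (illustrative only; the Page/flag
  // accessors are hypothetical): the object address is masked down to its
  // page header, whose flags word says whether the page is in new-space.
  //
  //   Page* page = reinterpret_cast<Page*>(object & ~Page::kPageAlignmentMask);
  //   bool in_new_space = (page->flags &
  //       ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;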


void MacroAssembler::RememberedSetHelper(
    Register object,  // Only used for debug checks.
    SaveFPRegsMode save_fp,
    MacroAssembler::RememberedSetFinalAction and_then) {
  if (emit_debug_code()) {
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(scratch, Operand::StaticVariable(store_buffer));
  // Store pointer to buffer.
  mov(Operand(scratch, 0), addr);
  // Increment buffer top.
  add(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  mov(Operand::StaticVariable(store_buffer), scratch);
  // Check for end of buffer, and call the stub on overflow.
  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    bind(&buffer_overflowed);
    DCHECK(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
  DCHECK(and_then == kFallThroughAtEnd);
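
  // In C terms the fast path above is a bump write into the store buffer
  // (a sketch; the field names are implied by the code above, not quoted
  // from the store buffer implementation):
  //
  //   *store_buffer_top++ = addr;
  //   if (reinterpret_cast<uintptr_t>(store_buffer_top) &
  //       StoreBuffer::kStoreBufferOverflowBit) {
  //     // Buffer is full: hand over to StoreBufferOverflowStub.
  //   }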


void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
                                        XMMRegister scratch_reg,
                                        Register result_reg) {
  xorps(scratch_reg, scratch_reg);
  cvtsd2si(result_reg, input_reg);
  test(result_reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  cmp(result_reg, Immediate(0x1));
  j(overflow, &conv_failure, Label::kNear);
  mov(result_reg, Immediate(0));
  setcc(sign, result_reg);
  sub(result_reg, Immediate(1));
  and_(result_reg, Immediate(255));
  jmp(&done, Label::kNear);
  Move(result_reg, Immediate(0));
  ucomisd(input_reg, scratch_reg);
  j(below, &done, Label::kNear);
  Move(result_reg, Immediate(255));


void MacroAssembler::ClampUint8(Register reg) {
  test(reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  setcc(negative, reg);  // 1 if negative, 0 if positive.
  dec_b(reg);            // 0 if negative, 255 if positive.
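
  // Branch-free clamp: once the value is known to be outside [0, 255], its
  // sign alone decides the result, since 1 - 1 == 0 and (0 - 1) & 0xFF == 255.
  // Equivalent C (a sketch):
  //
  //   if (x & 0xFFFFFF00) x = (x < 0) ? 0 : 255;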


void MacroAssembler::SlowTruncateToI(Register result_reg,
  DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
  call(stub.GetCode(), RelocInfo::CODE_TARGET);


void MacroAssembler::TruncateDoubleToI(Register result_reg,
                                       XMMRegister input_reg) {
  cvttsd2si(result_reg, Operand(input_reg));
  cmp(result_reg, 0x1);
  j(no_overflow, &done, Label::kNear);
  sub(esp, Immediate(kDoubleSize));
  movsd(MemOperand(esp, 0), input_reg);
  SlowTruncateToI(result_reg, esp, 0);
  add(esp, Immediate(kDoubleSize));


void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                               MinusZeroMode minus_zero_mode,
                               Label* lost_precision, Label* is_nan,
                               Label* minus_zero, Label::Distance dst) {
  DCHECK(!input_reg.is(scratch));
  cvttsd2si(result_reg, Operand(input_reg));
  Cvtsi2sd(scratch, Operand(result_reg));
  ucomisd(scratch, input_reg);
  j(not_equal, lost_precision, dst);
  j(parity_even, is_nan, dst);
  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
    // The integer converted back is equal to the original. We
    // only have to test if we got -0 as an input.
    test(result_reg, Operand(result_reg));
    j(not_zero, &done, Label::kNear);
    movmskpd(result_reg, input_reg);
    // Bit 0 contains the sign of the double in input_reg.
    // If input was positive, we are ok and return 0, otherwise
    // jump to minus_zero.
    j(not_zero, minus_zero, dst);
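
  // The round-trip above is the standard test for a lossy double->int
  // conversion. A sketch in C (std::signbit standing in for movmskpd):
  //
  //   int32_t i = static_cast<int32_t>(d);   // cvttsd2si
  //   if (static_cast<double>(i) != d) {
  //     // Lost precision, or NaN (the unordered compare sets parity).
  //   } else if (i == 0 && std::signbit(d)) {
  //     // The input was -0.0.
  //   }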


void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
                                           Register input_reg) {
  Label done, slow_case;

  if (CpuFeatures::IsSupported(SSE3)) {
    CpuFeatureScope scope(this, SSE3);
    // Use the more powerful conversion when SSE3 is available.
    // Load the x87 register with the heap number.
    fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
    // Get the exponent alone and check for a too-big exponent.
    mov(result_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
    and_(result_reg, HeapNumber::kExponentMask);
    const uint32_t kTooBigExponent =
        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
    cmp(Operand(result_reg), Immediate(kTooBigExponent));
    j(greater_equal, &slow_case, Label::kNear);

    // Reserve space for the 64-bit answer.
    sub(Operand(esp), Immediate(kDoubleSize));
    // Do the conversion, which cannot fail because we checked the exponent.
    fisttp_d(Operand(esp, 0));
    mov(result_reg, Operand(esp, 0));  // Low word of the answer is the result.
    add(Operand(esp), Immediate(kDoubleSize));
    jmp(&done, Label::kNear);
    if (input_reg.is(result_reg)) {
      // Input is clobbered. Restore the number from the FPU stack.
      sub(Operand(esp), Immediate(kDoubleSize));
      fstp_d(Operand(esp, 0));
      SlowTruncateToI(result_reg, esp, 0);
      add(esp, Immediate(kDoubleSize));
      SlowTruncateToI(result_reg, input_reg);
    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    cvttsd2si(result_reg, Operand(xmm0));
    cmp(result_reg, 0x1);
    j(no_overflow, &done, Label::kNear);
    // Check if the input was 0x80000000 (kMinInt).
    // If not, the conversion really overflowed and we must take the slow case.
    ExternalReference min_int = ExternalReference::address_of_min_int();
    ucomisd(xmm0, Operand::StaticVariable(min_int));
    j(not_equal, &slow_case, Label::kNear);
    j(parity_even, &slow_case, Label::kNear);  // NaN.
    jmp(&done, Label::kNear);
    if (input_reg.is(result_reg)) {
      // Input is clobbered. Restore the number from the double scratch.
      sub(esp, Immediate(kDoubleSize));
      movsd(MemOperand(esp, 0), xmm0);
      SlowTruncateToI(result_reg, esp, 0);
      add(esp, Immediate(kDoubleSize));
      SlowTruncateToI(result_reg, input_reg);


void MacroAssembler::LoadUint32(XMMRegister dst, const Operand& src) {
  cmp(src, Immediate(0));
  ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
  j(not_sign, &done, Label::kNear);
  addsd(dst, Operand::StaticVariable(uint32_bias));
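
  // Loading a uint32 through a signed 32-bit convert: when the top bit is
  // set, the signed interpretation is off by exactly 2^32, which is what
  // the bias corrects. A sketch (assuming uint32_bias holds 4294967296.0):
  //
  //   double d = static_cast<double>(static_cast<int32_t>(u));
  //   if (static_cast<int32_t>(u) < 0) d += 4294967296.0;  // 2^32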


void MacroAssembler::RecordWriteArray(
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  // Skip the barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    test(value, Immediate(kSmiTagMask));
  // Array access: calculate the destination address in the same manner as
  // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
  // into an array of words.
  Register dst = index;
  lea(dst, Operand(object, index, times_half_pointer_size,
                   FixedArray::kHeaderSize - kHeapObjectTag));
  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  // Clobber the clobbered input registers when running with the debug-code
  // flag turned on, to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(index, Immediate(bit_cast<int32_t>(kZapValue)));


void MacroAssembler::RecordWriteField(
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  // Skip the barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done, Label::kNear);
  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  lea(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    test_b(dst, (1 << kPointerSizeLog2) - 1);
    j(zero, &ok, Label::kNear);
  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  // Clobber the clobbered input registers when running with the debug-code
  // flag turned on, to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));


void MacroAssembler::RecordWriteForMap(
    SaveFPRegsMode save_fp) {
  Register address = scratch1;
  Register value = scratch2;
  if (emit_debug_code()) {
    lea(address, FieldOperand(object, HeapObject::kMapOffset));
    test_b(address, (1 << kPointerSizeLog2) - 1);
    j(zero, &ok, Label::kNear);
  DCHECK(!object.is(value));
  DCHECK(!object.is(address));
  DCHECK(!value.is(address));
  AssertNotSmi(object);

  if (!FLAG_incremental_marking) {
  // Compute the address.
  lea(address, FieldOperand(object, HeapObject::kMapOffset));

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set. This optimization
  // relies on the fact that maps can never be in new space.
  DCHECK(!isolate()->heap()->InNewSpace(*map));
  CheckPageFlagForMap(map,
                      MemoryChunk::kPointersToHereAreInterestingMask,
  RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,

  // Count the number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber the clobbered input registers when running with the debug-code
  // flag turned on, to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));


void MacroAssembler::RecordWrite(
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  DCHECK(!object.is(address));
  DCHECK(!value.is(address));
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
  if (emit_debug_code()) {
    cmp(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into the young generation.
  if (smi_check == INLINE_SMI_CHECK) {
    // Skip the barrier if writing a smi.
    JumpIfSmi(value, &done, Label::kNear);
  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,

  // Count the number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);

  // Clobber the clobbered registers when running with the debug-code flag
  // turned on, to provoke errors.
  if (emit_debug_code()) {
    mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
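
  // Taken together, the fast-path filters above amount to (a sketch of the
  // control flow; the jump targets themselves are emitted elsewhere):
  //
  //   if (is_smi(value)) goto done;  // optional inline check
  //   if (!page(value)->pointers_to_here_interesting) goto done;
  //   if (!page(object)->pointers_from_here_interesting) goto done;
  //   RecordWriteStub(...);          // slow path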


void MacroAssembler::DebugBreak() {
  Move(eax, Immediate(0));
  mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(isolate(), 1);
  call(ces.GetCode(), RelocInfo::DEBUG_BREAK);


void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {


bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
  static const int kMaxImmediateBits = 17;
  if (!RelocInfo::IsNone(x.rmode_)) return false;
  return !is_intn(x.x_, kMaxImmediateBits);


void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
    Move(dst, Immediate(x.x_ ^ jit_cookie()));
    xor_(dst, jit_cookie());


void MacroAssembler::SafePush(const Immediate& x) {
  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
    push(Immediate(x.x_ ^ jit_cookie()));
    xor_(Operand(esp, 0), Immediate(jit_cookie()));
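
  // SafeMove/SafePush split a large user-controlled immediate so it never
  // appears verbatim in the instruction stream (a JIT-spraying mitigation):
  // the value is first materialized XOR-ed with the JIT cookie and then
  // XOR-ed again, since (x ^ cookie) ^ cookie == x.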


void MacroAssembler::CmpObjectType(Register heap_object,
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  CmpInstanceType(map, type);


void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
       static_cast<int8_t>(type));


void MacroAssembler::CheckFastElements(Register map,
                                       Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleyElementValue);
  j(above, fail, distance);


void MacroAssembler::CheckFastObjectElements(Register map,
                                             Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleySmiElementValue);
  j(below_equal, fail, distance);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleyElementValue);
  j(above, fail, distance);
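
  // The two comparisons above implement an interval check on the elements
  // kind in bit field 2: anything at or below the fast (holey) smi range
  // fails, anything above the fast (holey) object range fails, so only
  // FAST_ELEMENTS and FAST_HOLEY_ELEMENTS (kinds 2 and 3 per the asserts)
  // fall through.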


void MacroAssembler::CheckFastSmiElements(Register map,
                                          Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleySmiElementValue);
  j(above, fail, distance);


void MacroAssembler::StoreNumberToDoubleElements(
    Register maybe_number,
    XMMRegister scratch2,
    int elements_offset) {
  Label smi_value, done;
  JumpIfSmi(maybe_number, &smi_value, Label::kNear);

  CheckMap(maybe_number,
           isolate()->factory()->heap_number_map(),

  // Double value, turn potential sNaN into qNaN.
  mulsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
  jmp(&done, Label::kNear);

  // Value is a smi. Convert to a double and store.
  // Preserve original value.
  mov(scratch1, maybe_number);
  Cvtsi2sd(scratch2, scratch1);
  movsd(FieldOperand(elements, key, times_4,
                     FixedDoubleArray::kHeaderSize - elements_offset),


void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);


void MacroAssembler::CheckMap(Register obj,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);

  CompareMap(obj, map);


void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
                                     Register scratch2, Handle<WeakCell> cell,
                                     Handle<Code> success,
                                     SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  mov(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
  CmpWeakValue(scratch1, cell, scratch2);


Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                             Register instance_type) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  test(instance_type, Immediate(kIsNotStringMask));


Condition MacroAssembler::IsObjectNameType(Register heap_object,
                                           Register instance_type) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));


void MacroAssembler::IsObjectJSObjectType(Register heap_object,
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);


void MacroAssembler::IsInstanceJSObjectType(Register map,
  movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
  sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
      LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
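
  // Range-check idiom: subtracting the lower bound first lets one unsigned
  // comparison against (upper - lower) test membership in
  // [FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, LAST_NONCALLABLE_SPEC_OBJECT_TYPE],
  // because any value below the lower bound wraps around to a large
  // unsigned number.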


void MacroAssembler::FCmp() {


void MacroAssembler::AssertNumber(Register object) {
  if (emit_debug_code()) {
    JumpIfSmi(object, &ok);
    cmp(FieldOperand(object, HeapObject::kMapOffset),
        isolate()->factory()->heap_number_map());
    Check(equal, kOperandNotANumber);


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(equal, kOperandIsNotASmi);


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAString);
    mov(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, FIRST_NONSTRING_TYPE);
    Check(below, kOperandIsNotAString);


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAName);
    mov(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, LAST_NAME_TYPE);
    Check(below_equal, kOperandIsNotAName);


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object);
    cmp(object, isolate()->factory()->undefined_value());
    j(equal, &done_checking);
    cmp(FieldOperand(object, 0),
        Immediate(isolate()->factory()->allocation_site_map()));
    Assert(equal, kExpectedUndefinedOrCell);
    bind(&done_checking);


void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmi);


void MacroAssembler::StubPrologue() {
  push(ebp);  // Caller's frame pointer.
  push(esi);  // Callee's context.
  push(Immediate(Smi::FromInt(StackFrame::STUB)));


void MacroAssembler::Prologue(bool code_pre_aging) {
  PredictableCodeSizeScope predictable_code_size_scope(this,
                                                       kNoCodeAgeSequenceLength);
  if (code_pre_aging) {
    call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
         RelocInfo::CODE_AGE_SEQUENCE);
    Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
  push(ebp);  // Caller's frame pointer.
  push(esi);  // Callee's context.
  push(edi);  // Callee's JS function.


void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // Out-of-line constant pool not implemented on ia32.


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  push(Immediate(Smi::FromInt(type)));
  push(Immediate(CodeObject()));
  if (emit_debug_code()) {
    cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
    Check(not_equal, kCodeObjectNotProperlyPatched);


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
        Immediate(Smi::FromInt(type)));
    Check(equal, kStackFrameTypesMustMatch);


void MacroAssembler::EnterExitFramePrologue() {
  // Set up the frame structure on the stack.
  DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
  DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
  DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);

  // Reserve room for entry stack pointer and push the code object.
  DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
  push(Immediate(0));  // Saved entry sp, patched before call.
  push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer and the context in top.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  ExternalReference c_function_address(Isolate::kCFunctionAddress, isolate());
  mov(Operand::StaticVariable(c_entry_fp_address), ebp);
  mov(Operand::StaticVariable(context_address), esi);
  mov(Operand::StaticVariable(c_function_address), ebx);
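
  // Resulting exit frame layout relative to ebp, per the asserts above:
  //   ebp + 2 * kPointerSize : caller SP
  //   ebp + 1 * kPointerSize : caller PC (return address)
  //   ebp                    : caller FP (saved ebp)
  //   ebp - 1 * kPointerSize : saved entry sp slot (patched below)
  //   ebp - 2 * kPointerSize : code object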


void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
  // Optionally save all XMM registers.
    int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
    sub(esp, Immediate(space));
    const int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
    sub(esp, Immediate(argc * kPointerSize));

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
    and_(esp, -kFrameAlignment);
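    // and_(esp, -kFrameAlignment) rounds esp down to the nearest multiple
    // of the (power-of-two) alignment, e.g. esp & -16 for 16-byte alignment.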

  // Patch the saved entry sp.
  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);


void MacroAssembler::EnterExitFrame(bool save_doubles) {
  EnterExitFramePrologue();

  // Set up argc and argv in callee-saved registers.
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
  lea(esi, Operand(ebp, eax, times_4, offset));

  // Reserve space for argc, argv and isolate.
  EnterExitFrameEpilogue(3, save_doubles);


void MacroAssembler::EnterApiExitFrame(int argc) {
  EnterExitFramePrologue();
  EnterExitFrameEpilogue(argc, false);


void MacroAssembler::LeaveExitFrame(bool save_doubles) {
  // Optionally restore all XMM registers.
    const int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));

  // Get the return address from the stack and restore the frame pointer.
  mov(ecx, Operand(ebp, 1 * kPointerSize));
  mov(ebp, Operand(ebp, 0 * kPointerSize));

  // Pop the arguments and the receiver from the caller stack.
  lea(esp, Operand(esi, 1 * kPointerSize));

  // Push the return address to get ready to return.
  LeaveExitFrameEpilogue(true);


void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  if (restore_context) {
    mov(esi, Operand::StaticVariable(context_address));
  mov(Operand::StaticVariable(context_address), Immediate(0));

  // Clear the top frame.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
  mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));


void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
  LeaveExitFrameEpilogue(restore_context);


void MacroAssembler::PushStackHandler() {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  // Link the current handler as the next handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  push(Operand::StaticVariable(handler_address));

  // Set this new handler as the current one.
  mov(Operand::StaticVariable(handler_address), esp);
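
  // The handler chain is a singly linked list threaded through the stack:
  // pushing the old list head and then storing esp makes this frame the new
  // head; PopStackHandler below reverses the operation.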


void MacroAssembler::PopStackHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  pop(Operand::StaticVariable(handler_address));
  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
  Label same_contexts;

  DCHECK(!holder_reg.is(scratch1));
  DCHECK(!holder_reg.is(scratch2));
  DCHECK(!scratch1.is(scratch2));

  // Load current lexical context from the stack frame.
  mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));

  // When generating debug code, make sure the lexical context is set.
  if (emit_debug_code()) {
    cmp(scratch1, Immediate(0));
    Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
  // Load the native context of the current context.
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  mov(scratch1, FieldOperand(scratch1, offset));
  mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Read the first word and compare to native_context_map.
    cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
        isolate()->factory()->native_context_map());
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
  // Check if both contexts are the same.
  cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  j(equal, &same_contexts);

  // Compare security tokens, save holder_reg on the stack so we can use it
  // as a temporary register.
  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
      FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    cmp(scratch2, isolate()->factory()->null_value());
    Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);

    // Read the first word and compare to native_context_map().
    cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
        isolate()->factory()->native_context_map());
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);

  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
  mov(scratch1, FieldOperand(scratch1, token_offset));
  cmp(scratch1, FieldOperand(scratch2, token_offset));

  bind(&same_contexts);


// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h and with KeyedLoadGenericStub in
// code-stubs-hydrogen.cc.
//
// Note: r0 will contain the hash code.
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
  // Xor original key with a seed.
  if (serializer_enabled()) {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(scratch, Immediate(Heap::kHashSeedRootIndex));
        Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
    int32_t seed = isolate()->heap()->HashSeed();
    xor_(r0, Immediate(seed));

  // hash = ~hash + (hash << 15);
  // hash = hash ^ (hash >> 12);
  // hash = hash + (hash << 2);
  lea(r0, Operand(r0, r0, times_4, 0));
  // hash = hash ^ (hash >> 4);
  // hash = hash * 2057;
  // hash = hash ^ (hash >> 16);
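
  // For reference, the full scalar computation implemented above (a sketch
  // assembled from the ComputeIntegerHash steps listed in the comments; the
  // lea with times_4 is the "hash + (hash << 2)" step):
  //
  //   uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  //     uint32_t hash = key ^ seed;
  //     hash = ~hash + (hash << 15);
  //     hash = hash ^ (hash >> 12);
  //     hash = hash + (hash << 2);
  //     hash = hash ^ (hash >> 4);
  //     hash = hash * 2057;
  //     hash = hash ^ (hash >> 16);
  //     return hash;
  //   }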


void MacroAssembler::LoadFromNumberDictionary(Label* miss,
  // elements - holds the slow-case elements of the receiver and is unchanged.
  // key      - holds the smi key on entry and is unchanged.
  //
  // Scratch registers:
  // r0 - holds the untagged key on entry and holds the hash once computed.
  // r1 - used to hold the capacity mask of the dictionary.
  // r2 - used for the index into the dictionary.
  // result - holds the result on exit if the load succeeds and we fall
  //          through.

  GetNumberHash(r0, r1);

  // Compute capacity mask.
  mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
  shr(r1, kSmiTagSize);  // convert smi to int

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Use r2 for index calculations and keep the hash intact in r0.
    // Compute the masked index: (hash + i + i * i) & mask.
    add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
    // Scale the index by multiplying by the entry size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
    // Check if the key matches.
    cmp(key, FieldOperand(elements,
                          SeededNumberDictionary::kElementsStartOffset));
    if (i != (kNumberDictionaryProbes - 1)) {

  // Check that the value is a field property.
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
       Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));

  // Get the value at the masked, scaled index.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
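
  // The probe sequence above is quadratic probing over a power-of-two
  // table, unrolled kNumberDictionaryProbes times. A sketch in C of one
  // lookup (entry layout key/value/details follows from the offsets above,
  // with kEntrySize == 3 words per entry):
  //
  //   for (int i = 0; i < kNumberDictionaryProbes; i++) {
  //     uint32_t index = (hash + i + i * i) & capacity_mask;
  //     Entry* entry = &elements[index * 3];
  //     if (entry->key == key) goto found;
  //   }
  //   goto miss;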


void MacroAssembler::LoadAllocationTopHelper(Register result,
                                             AllocationFlags flags) {
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Just return if allocation top is already known.
  if ((flags & RESULT_CONTAINS_TOP) != 0) {
    // No use of scratch if allocation top is provided.
    DCHECK(scratch.is(no_reg));
    // Assert that result actually contains top on entry.
    cmp(result, Operand::StaticVariable(allocation_top));
    Check(equal, kUnexpectedAllocationTop);

  // Move address of new object to result. Use scratch register if available.
  if (scratch.is(no_reg)) {
    mov(result, Operand::StaticVariable(allocation_top));
    mov(scratch, Immediate(allocation_top));
    mov(result, Operand(scratch, 0));


void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               AllocationFlags flags) {
  if (emit_debug_code()) {
    test(result_end, Immediate(kObjectAlignmentMask));
    Check(zero, kUnalignedAllocationInNewSpace);

  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Update new top. Use scratch if available.
  if (scratch.is(no_reg)) {
    mov(Operand::StaticVariable(allocation_top), result_end);
    mov(Operand(scratch, 0), result_end);


void MacroAssembler::Allocate(int object_size,
                              Register result_end,
                              AllocationFlags flags) {
  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      if (result_end.is_valid()) {
        mov(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));
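    // When the current top is only pointer-aligned, storing a one-pointer
    // filler object and bumping top by kDoubleSize / 2 keeps the heap
    // iterable while moving the allocation to a double-aligned boundary.
    // The same pattern recurs in the other Allocate overloads below.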

  // Calculate new top and bail out if space is exhausted.
  Register top_reg = result_end.is_valid() ? result_end : result;
  if (!top_reg.is(result)) {
    mov(top_reg, result);
  add(top_reg, Immediate(object_size));
  j(carry, gc_required);
  cmp(top_reg, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(top_reg, scratch, flags);

  // Tag result if requested.
  bool tag_result = (flags & TAG_OBJECT) != 0;
  if (top_reg.is(result)) {
      sub(result, Immediate(object_size - kHeapObjectTag));
      sub(result, Immediate(object_size));
  } else if (tag_result) {
    DCHECK(kHeapObjectTag == 1);


void MacroAssembler::Allocate(int header_size,
                              ScaleFactor element_size,
                              Register element_count,
                              RegisterValueType element_count_type,
                              Register result_end,
                              AllocationFlags flags) {
  DCHECK((flags & SIZE_IN_WORDS) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      mov(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      // Register element_count is not modified by the function.
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));

  // Calculate new top and bail out if space is exhausted.
  // We assume that element_count * element_size + header_size does not
  if (element_count_type == REGISTER_VALUE_IS_SMI) {
    STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
    STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
    STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
    DCHECK(element_size >= times_2);
    DCHECK(kSmiTagSize == 1);
    element_size = static_cast<ScaleFactor>(element_size - 1);
    DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
  lea(result_end, Operand(element_count, element_size, header_size));
  add(result_end, result);
  j(carry, gc_required);
  cmp(result_end, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  if ((flags & TAG_OBJECT) != 0) {
    DCHECK(kHeapObjectTag == 1);

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch, flags);


void MacroAssembler::Allocate(Register object_size,
                              Register result_end,
                              AllocationFlags flags) {
  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      mov(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      // object_size is left unchanged by this function.
  DCHECK(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));

  // Calculate new top and bail out if space is exhausted.
  if (!object_size.is(result_end)) {
    mov(result_end, object_size);
  add(result_end, result);
  j(carry, gc_required);
  cmp(result_end, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  // Tag result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    DCHECK(kHeapObjectTag == 1);

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch, flags);


void MacroAssembler::UndoAllocationInNewSpace(Register object) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  and_(object, Immediate(~kHeapObjectTagMask));
  cmp(object, Operand::StaticVariable(new_space_allocation_top));
  Check(below, kUndoAllocationOfNonAllocatedMemory);
  mov(Operand::StaticVariable(new_space_allocation_top), object);


void MacroAssembler::AllocateHeapNumber(Register result,
  // Allocate heap number in new space.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,

  Handle<Map> map = mode == MUTABLE
      ? isolate()->factory()->mutable_heap_number_map()
      : isolate()->factory()->heap_number_map();

  mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));


void MacroAssembler::AllocateTwoByteString(Register result,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  DCHECK(kShortSize == 2);
  // scratch1 = length * 2 + kObjectAlignmentMask.
  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
  and_(scratch1, Immediate(~kObjectAlignmentMask));
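  // The lea/and pair computes 2 * length rounded up to the next multiple
  // of the object alignment: (n + mask) & ~mask is the usual round-up
  // idiom for power-of-two alignments.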

  // Allocate the two-byte string in new space.
  Allocate(SeqTwoByteString::kHeaderSize,
           REGISTER_VALUE_IS_INT32,

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->string_map()));
  mov(scratch1, length);
  mov(FieldOperand(result, String::kLengthOffset), scratch1);
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));


void MacroAssembler::AllocateOneByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  mov(scratch1, length);
  DCHECK(kCharSize == 1);
  add(scratch1, Immediate(kObjectAlignmentMask));
  and_(scratch1, Immediate(~kObjectAlignmentMask));

  // Allocate the one-byte string in new space.
  Allocate(SeqOneByteString::kHeaderSize,
           REGISTER_VALUE_IS_INT32,

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->one_byte_string_map()));
  mov(scratch1, length);
  mov(FieldOperand(result, String::kLengthOffset), scratch1);
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));


void MacroAssembler::AllocateOneByteString(Register result, int length,
                                           Register scratch1, Register scratch2,
                                           Label* gc_required) {
  // Allocate the one-byte string in new space.
  Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
           gc_required, TAG_OBJECT);

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->one_byte_string_map()));
  mov(FieldOperand(result, String::kLengthOffset),
      Immediate(Smi::FromInt(length)));
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Label* gc_required) {
  // Allocate the cons string in new space.
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->cons_string_map()));


void MacroAssembler::AllocateOneByteConsString(Register result,
                                               Label* gc_required) {
  Allocate(ConsString::kSize,

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->cons_one_byte_string_map()));


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Label* gc_required) {
  // Allocate the sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->sliced_string_map()));


void MacroAssembler::AllocateOneByteSlicedString(Register result,
                                                 Label* gc_required) {
  // Allocate the sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->sliced_one_byte_string_map()));


// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Source and destination are incremented by length.
// Many variants of movsb, loop unrolling, word moves, and indexed operands
// have been tried here already, and this is the fastest.
// A simpler loop is faster on small copies, but 30% slower on large ones.
// The cld() instruction must have been emitted, to clear the direction flag,
// before calling this function.
void MacroAssembler::CopyBytes(Register source,
                               Register destination,
  Label short_loop, len4, len8, len12, done, short_string;
  DCHECK(source.is(esi));
  DCHECK(destination.is(edi));
  DCHECK(length.is(ecx));
  cmp(length, Immediate(4));
  j(below, &short_string, Label::kNear);

  // Because source is 4-byte aligned in our uses of this function,
  // we keep source aligned for the rep_movs call by copying the odd bytes
  // at the end of the ranges.
  mov(scratch, Operand(source, length, times_1, -4));
  mov(Operand(destination, length, times_1, -4), scratch);

  cmp(length, Immediate(8));
  j(below_equal, &len4, Label::kNear);
  cmp(length, Immediate(12));
  j(below_equal, &len8, Label::kNear);
  cmp(length, Immediate(16));
  j(below_equal, &len12, Label::kNear);

  and_(scratch, Immediate(0x3));
  add(destination, scratch);
  jmp(&done, Label::kNear);

  mov(scratch, Operand(source, 8));
  mov(Operand(destination, 8), scratch);
  mov(scratch, Operand(source, 4));
  mov(Operand(destination, 4), scratch);
  mov(scratch, Operand(source, 0));
  mov(Operand(destination, 0), scratch);
  add(destination, length);
  jmp(&done, Label::kNear);

  bind(&short_string);
  test(length, length);
  j(zero, &done, Label::kNear);

  mov_b(scratch, Operand(source, 0));
  mov_b(Operand(destination, 0), scratch);
  j(not_zero, &short_loop);


void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
  mov(Operand(start_offset, 0), filler);
  add(start_offset, Immediate(kPointerSize));
  cmp(start_offset, end_offset);


void MacroAssembler::BooleanBitTest(Register object,
  bit_index += kSmiTagSize + kSmiShiftSize;
  DCHECK(base::bits::IsPowerOfTwo32(kBitsPerByte));
  int byte_index = bit_index / kBitsPerByte;
  int byte_bit_index = bit_index & (kBitsPerByte - 1);
  test_b(FieldOperand(object, field_offset + byte_index),
         static_cast<byte>(1 << byte_bit_index));
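
  // The bit index is given relative to the untagged value, so it is first
  // shifted past the smi tag bits, then split into a byte offset plus a
  // bit within that byte so a single test_b suffices. E.g. with a one-bit
  // smi tag, logical bit 9 becomes bit 10, i.e. byte 1, bit 2.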


void MacroAssembler::NegativeZeroTest(Register result,
                                      Label* then_label) {
  test(result, result);
  j(sign, then_label);


void MacroAssembler::NegativeZeroTest(Register result,
                                      Label* then_label) {
  test(result, result);
  j(sign, then_label);


void MacroAssembler::GetMapConstructor(Register result, Register map,
  mov(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
  JumpIfSmi(result, &done);
  CmpObjectType(result, MAP_TYPE, temp);
  j(not_equal, &done);
  mov(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));


void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             bool miss_on_bound_function) {
  if (miss_on_bound_function) {
    // Check that the receiver isn't a smi.
    JumpIfSmi(function, miss);

    // Check that the function really is a function.
    CmpObjectType(function, JS_FUNCTION_TYPE, result);

    // If a bound function, go to miss label.
        FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
                   SharedFunctionInfo::kBoundFunction);

    // Make sure that the function has an instance prototype.
    movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
    test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
    j(not_zero, &non_instance);

  // Get the prototype or initial map from the function.
      FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  cmp(result, Immediate(isolate()->factory()->the_hole_value()));

  // If the function does not have an initial map, we're done.
  CmpObjectType(result, MAP_TYPE, scratch);
  j(not_equal, &done);

  // Get the prototype from the initial map.
  mov(result, FieldOperand(result, Map::kPrototypeOffset));

  if (miss_on_bound_function) {
    // Non-instance prototype: Fetch prototype from constructor field
    bind(&non_instance);
    GetMapConstructor(result, result, scratch);


void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
  call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);


void MacroAssembler::TailCallStub(CodeStub* stub) {
  jmp(stub->GetCode(), RelocInfo::CODE_TARGET);


void MacroAssembler::StubReturn(int argc) {
  DCHECK(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();


void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  if (!index.is(hash)) {
  DecodeFieldToSmi<String::ArrayIndexValueBits>(index);


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Move(eax, Immediate(num_arguments));
  mov(ebx, Immediate(ExternalReference(f, isolate())));
  CEntryStub ces(isolate(), 1, save_doubles);
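
  // Runtime calls follow a fixed register protocol on ia32, visible above
  // and in DebugBreak: eax carries the argument count, ebx the external
  // reference of the C++ entry, and CEntryStub performs the transition.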


void MacroAssembler::CallExternalReference(ExternalReference ref,
                                           int num_arguments) {
  mov(eax, Immediate(num_arguments));
  mov(ebx, Immediate(ref));

  CEntryStub stub(isolate(), 1);


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Move(eax, Immediate(num_arguments));
  JumpToExternalReference(ext);


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
  TailCallExternalReference(ExternalReference(fid, isolate()),


void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
  // Set the entry point and jump to the C entry runtime stub.
  mov(ebx, Immediate(ext));
  CEntryStub ces(isolate(), 1);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);


void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    const Operand& code_operand,
                                    bool* definitely_mismatches,
                                    Label::Distance done_near,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
      mov(eax, actual.immediate());
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip the adaptation code by making it look
        // like we have a match between the expected and actual number of
        // arguments.
        definitely_matches = true;
        *definitely_mismatches = true;
        mov(ebx, expected.immediate());
    if (actual.is_immediate()) {
      // Expected is in a register, actual is an immediate. This is the
      // case when we invoke function values without going through the
      cmp(expected.reg(), actual.immediate());
      DCHECK(expected.reg().is(ebx));
      mov(eax, actual.immediate());
    } else if (!expected.reg().is(actual.reg())) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmp(expected.reg(), actual.reg());
      DCHECK(actual.reg().is(eax));
      DCHECK(expected.reg().is(ebx));

  if (!definitely_matches) {
    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (!code_constant.is_null()) {
      mov(edx, Immediate(code_constant));
      add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
    } else if (!code_operand.is_reg(edx)) {
      mov(edx, code_operand);

    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
      call(adaptor, RelocInfo::CODE_TARGET);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        jmp(done, done_near);
      jmp(adaptor, RelocInfo::CODE_TARGET);
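
  // In short, the prologue resolves three cases: a static match (fall
  // through, no adaptor), a static mismatch (call or tail-jump into
  // ArgumentsAdaptorTrampoline with eax = actual and ebx = expected), and
  // a dynamic comparison when either count is only known in a register.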


void MacroAssembler::InvokeCode(const Operand& code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag, Label::kNear,
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      call_wrapper.AfterCall();
      DCHECK(flag == JUMP_FUNCTION);


void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  DCHECK(fun.is(edi));
  mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
  mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));

  ParameterCount expected(ebx);
  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, actual, flag, call_wrapper);


void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  DCHECK(fun.is(edi));
  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  LoadHeapObject(edi, function);
  InvokeFunction(edi, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinFunction(edi, id);
  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, expected, flag, call_wrapper);
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the JavaScript builtin function from the builtins object.
  mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  mov(target, FieldOperand(target,
                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  DCHECK(!target.is(edi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(edi, id);
  // Load the code entry point from the function into the target register.
  mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
}


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in esi).
    mov(dst, esi);
  }

  // We should not have found a with context by walking the context chain
  // (i.e., the static scope chain and runtime context chain do not agree).
  // A variable occurring in such a scope should have slot type LOOKUP and
  // not CONTEXT.
  if (emit_debug_code()) {
    cmp(FieldOperand(dst, HeapObject::kMapOffset),
        isolate()->factory()->with_context_map());
    Check(not_equal, kVariableResolvedToWithContext);
  }
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  mov(scratch, Operand(scratch,
                       Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));

  size_t offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  cmp(map_in_out, FieldOperand(scratch, offset));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  mov(map_in_out, FieldOperand(scratch, offset));
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  mov(function,
      Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  mov(function,
      FieldOperand(function, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  mov(function, Operand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map. The global functions all have initial maps.
  mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}


// Store the value in register src in the safepoint register stack
// slot for register dst.
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
  mov(SafepointRegisterSlot(dst), src);
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
  mov(SafepointRegisterSlot(dst), src);
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  mov(dst, SafepointRegisterSlot(src));
}


Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the lowest encoding,
  // which means that lowest encodings are furthest away from
  // the stack pointer.
  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return kNumSafepointRegisters - reg_code - 1;
}
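
// Worked example (a sketch, assuming ia32's kNumSafepointRegisters == 8 and
// the pushad push order eax, ecx, edx, ebx, esp, ebp, esi, edi): eax has
// register code 0, so its slot index is 8 - 0 - 1 == 7, and
// SafepointRegisterSlot(eax) resolves to Operand(esp, 7 * kPointerSize), the
// slot farthest from the stack pointer.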


void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  AllowDeferredHandleDereference embedding_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    mov(result, Operand::ForCell(cell));
  } else {
    mov(result, object);
  }
}


void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    cmp(reg, Operand::ForCell(cell));
  } else {
    cmp(reg, object);
  }
}


void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    push(Operand::ForCell(cell));
  } else {
    Push(object);
  }
}


void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
                                  Register scratch) {
  mov(scratch, cell);
  cmp(value, FieldOperand(scratch, WeakCell::kValueOffset));
}


void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
  mov(value, cell);
  mov(value, FieldOperand(value, WeakCell::kValueOffset));
}


void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
                                   Label* miss) {
  GetWeakValue(value, cell);
  JumpIfSmi(value, miss);
}


void MacroAssembler::Ret() {
  ret(0);
}


void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
  if (is_uint16(bytes_dropped)) {
    ret(bytes_dropped);
  } else {
    pop(scratch);
    add(esp, Immediate(bytes_dropped));
    push(scratch);
    ret(0);
  }
}


void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements > 0) {
    add(esp, Immediate(stack_elements * kPointerSize));
  }
}


void MacroAssembler::Move(Register dst, Register src) {
  if (!dst.is(src)) {
    mov(dst, src);
  }
}


void MacroAssembler::Move(Register dst, const Immediate& x) {
  if (x.is_zero()) {
    xor_(dst, dst);  // Shorter than mov of 32-bit immediate 0.
  } else {
    mov(dst, x);
  }
}


void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
  mov(dst, x);
}


void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
  if (src == 0) {
    pxor(dst, dst);
  } else {
    unsigned cnt = base::bits::CountPopulation32(src);
    unsigned nlz = base::bits::CountLeadingZeros32(src);
    unsigned ntz = base::bits::CountTrailingZeros32(src);
    if (nlz + cnt + ntz == 32) {
      pcmpeqd(dst, dst);
      if (ntz == 0) {
        psrld(dst, 32 - cnt);
      } else {
        pslld(dst, 32 - cnt);
        if (nlz != 0) psrld(dst, nlz);
      }
    } else {
      push(eax);
      mov(eax, Immediate(src));
      movd(dst, Operand(eax));
      pop(eax);
    }
  }
}
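
// Worked example of the single-run trick above (a sketch): for
// src == 0x00FFFF00, cnt == 16, nlz == 8, and ntz == 8, so the set bits form
// one contiguous run. pcmpeqd produces all ones, pslld by 32 - 16 leaves
// 0xFFFF0000, and psrld by nlz == 8 yields 0x00FFFF00 without touching a
// general-purpose register.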


void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
  uint32_t lower = static_cast<uint32_t>(src);
  uint32_t upper = static_cast<uint32_t>(src >> 32);
  if (upper == 0) {
    Move(dst, lower);
  } else {
    unsigned cnt = base::bits::CountPopulation64(src);
    unsigned nlz = base::bits::CountLeadingZeros64(src);
    unsigned ntz = base::bits::CountTrailingZeros64(src);
    if (nlz + cnt + ntz == 64) {
      pcmpeqd(dst, dst);
      if (ntz == 0) {
        psrlq(dst, 64 - cnt);
      } else {
        psllq(dst, 64 - cnt);
        if (nlz != 0) psrlq(dst, nlz);
      }
    } else if (lower == 0) {
      Move(dst, upper);
      psllq(dst, 32);
    } else if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatureScope scope(this, SSE4_1);
      push(eax);
      Move(eax, Immediate(lower));
      movd(dst, Operand(eax));
      Move(eax, Immediate(upper));
      pinsrd(dst, Operand(eax), 1);
      pop(eax);
    } else {
      push(Immediate(upper));
      push(Immediate(lower));
      movsd(dst, Operand(esp, 0));
      add(esp, Immediate(kDoubleSize));
    }
  }
}
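
// For example (a sketch): the double 42.0 is 0x4045000000000000, so lower == 0
// and upper == 0x40450000. The lower == 0 path materializes the upper word
// with the 32-bit Move and shifts it into place with psllq(dst, 32), avoiding
// any stack traffic.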


void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
  if (imm8 == 0) {
    movd(dst, src);
    return;
  }
  DCHECK_EQ(1, imm8);
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pextrd(dst, src, imm8);
    return;
  }
  pshufd(xmm0, src, 1);
  movd(dst, xmm0);
}


void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
  DCHECK(imm8 == 0 || imm8 == 1);
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
  if (imm8 == 1) {
    movd(xmm0, src);
    punpckldq(dst, xmm0);
  } else {
    movd(xmm0, src);
    psrlq(dst, 32);
    punpckldq(xmm0, dst);
    movaps(dst, xmm0);
  }
}
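
// SSE2 fallback sketch: punpckldq interleaves the low dwords of its two
// operands. For imm8 == 1, xmm0 holds [src, 0] and punpckldq(dst, xmm0)
// leaves [dst0, src] in the low quadword; for imm8 == 0, psrlq(dst, 32) first
// moves the old high dword down, so the interleave produces [src, dst1].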


void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
  // TODO(intel): Add support for LZCNT (with ABM/BMI1).
  Label not_zero_src;
  bsr(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  Move(dst, Immediate(63));  // 63^31 == 32
  bind(&not_zero_src);
  xor_(dst, Immediate(31));  // for x in [0..31], 31^x == 31-x.
}
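
// For example (a sketch): bsr returns the index of the highest set bit, so
// for src == 0x10 it writes 4 and the final xor gives 4 ^ 31 == 27, the
// correct count of leading zeros. For src == 0, bsr sets ZF and leaves dst
// undefined, so dst is forced to 63 and 63 ^ 31 == 32, matching lzcnt
// semantics.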


void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand operand = Operand::StaticVariable(ExternalReference(counter));
    if (value == 1) {
      inc(operand);
    } else {
      add(operand, Immediate(value));
    }
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand operand = Operand::StaticVariable(ExternalReference(counter));
    if (value == 1) {
      dec(operand);
    } else {
      sub(operand, Immediate(value));
    }
  }
}


void MacroAssembler::IncrementCounter(Condition cc,
                                      StatsCounter* counter,
                                      int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Label skip;
    j(NegateCondition(cc), &skip);
    pushfd();
    IncrementCounter(counter, value);
    popfd();
    bind(&skip);
  }
}


void MacroAssembler::DecrementCounter(Condition cc,
                                      StatsCounter* counter,
                                      int value) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Label skip;
    j(NegateCondition(cc), &skip);
    pushfd();
    DecrementCounter(counter, value);
    popfd();
    bind(&skip);
  }
}


void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Factory* factory = isolate()->factory();
    Label ok;
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_array_map()));
    j(equal, &ok);
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_double_array_map()));
    j(equal, &ok);
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_cow_array_map()));
    j(equal, &ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
  }
}


void MacroAssembler::Check(Condition cc, BailoutReason reason) {
  Label L;
  j(cc, &L);
  Abort(reason);
  // will not return here
  bind(&L);
}


void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    Label alignment_as_expected;
    test(esp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}
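
// For example (a sketch): on a platform where
// base::OS::ActivationFrameAlignment() returns 16, frame_alignment_mask is 15
// and the test requires esp % 16 == 0 at the point of a C call; a misaligned
// stack traps on the int3 rather than silently corrupting the callee.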


void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    int3();
    return;
  }
#endif

  push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 1);
  } else {
    CallRuntime(Runtime::kAbort, 1);
  }
  // will not return here
  int3();
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  mov(dst, FieldOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  mov(dst, FieldOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  mov(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  mov(dst, FieldOperand(dst, offset));
}


void MacroAssembler::LoadPowerOf2(XMMRegister dst,
                                  Register scratch,
                                  int power) {
  DCHECK(is_uintn(power + HeapNumber::kExponentBias,
                  HeapNumber::kExponentBits));
  mov(scratch, Immediate(power + HeapNumber::kExponentBias));
  movd(dst, scratch);
  psllq(dst, HeapNumber::kMantissaBits);
}
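
// Worked example (a sketch): for power == 3, scratch holds 3 + 1023 == 1026
// (0x402); shifting it left by the 52 mantissa bits yields the IEEE-754 bit
// pattern 0x4020000000000000, i.e. the double 8.0 == 2^3 with a zero
// mantissa.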


void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch1;
  Register scratch = scratch2;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
  shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
  sub(mask, Immediate(1));  // Make mask.
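
  // For example (a sketch): a cache with 64 entries has a FixedArray length
  // of 128, stored as the smi 256 on ia32; shifting right by
  // kSmiTagSize + 1 == 2 untags it and halves it to 64, and subtracting 1
  // gives the mask 63.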

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label smi_hash_calculated;
  Label load_result_from_cache;
  Label not_smi;
  STATIC_ASSERT(kSmiTag == 0);
  JumpIfNotSmi(object, &not_smi, Label::kNear);
  mov(scratch, object);
  SmiUntag(scratch);
  jmp(&smi_hash_calculated, Label::kNear);
  bind(&not_smi);
  cmp(FieldOperand(object, HeapObject::kMapOffset),
      isolate()->factory()->heap_number_map());
  j(not_equal, not_found);
  STATIC_ASSERT(8 == kDoubleSize);
  mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
  xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
  // Object is heap number and hash is now in scratch. Calculate cache index.
  and_(scratch, mask);
  Register index = scratch;
  Register probe = mask;
  mov(probe,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
  ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
  j(parity_even, not_found);  // Bail out if NaN is involved.
  j(not_equal, not_found);  // The cache did not contain this value.
  jmp(&load_result_from_cache, Label::kNear);

  bind(&smi_hash_calculated);
  // Object is smi and hash is now in scratch. Calculate cache index.
  and_(scratch, mask);
  // Check if the entry is the smi we are looking for.
  cmp(object,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize));
  j(not_equal, not_found);

  // Get the result from the cache.
  bind(&load_result_from_cache);
  mov(result,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
    Register instance_type, Register scratch, Label* failure) {
  if (!scratch.is(instance_type)) {
    mov(scratch, instance_type);
  }
  and_(scratch,
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
  cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
  j(not_equal, failure);
}


void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
                                                           Register object2,
                                                           Register scratch1,
                                                           Register scratch2,
                                                           Label* failure) {
  // Check that both objects are not smis.
  STATIC_ASSERT(kSmiTag == 0);
  mov(scratch1, object1);
  and_(scratch1, object2);
  JumpIfSmi(scratch1, failure);

  // Load instance type for both strings.
  mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
  mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
  movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
  movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));

  // Check that both are flat one-byte strings.
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  // Interleave bits from both instance types and compare them in one check.
  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
  and_(scratch1, kFlatOneByteStringMask);
  and_(scratch2, kFlatOneByteStringMask);
  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
  j(not_equal, failure);
}
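
// A sketch of the interleaving above, using tag values this file already
// asserts (kIsNotStringMask == 0x80, kOneByteStringTag == 0x04): the combined
// mask is 0x87, and 0x87 & (0x87 << 3) == 0, so the two masked instance types
// occupy disjoint bit ranges after the lea computes scratch1 + scratch2 * 8.
// Both strings are flat one-byte exactly when the packed value equals
// 0x04 | (0x04 << 3) == 0x24, checked with a single cmp.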


void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
                                                     Label* not_unique_name,
                                                     Label::Distance distance) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  j(zero, &succeed);
  cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
  j(not_equal, not_unique_name, distance);

  bind(&succeed);
}


void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  Label is_object;
  JumpIfNotSmi(string, &is_object, Label::kNear);
  Abort(kNonObject);
  bind(&is_object);

  push(value);
  mov(value, FieldOperand(string, HeapObject::kMapOffset));
  movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));

  and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
  cmp(value, Immediate(encoding_mask));
  pop(value);
  Check(equal, kUnexpectedStringType);

  // The index is assumed to be untagged coming in; tag it to compare with the
  // string length without using a temp register. It is restored at the end of
  // this function.
  SmiTag(index);
  Check(no_overflow, kIndexIsTooLarge);

  cmp(index, FieldOperand(string, String::kLengthOffset));
  Check(less, kIndexIsTooLarge);

  cmp(index, Immediate(Smi::FromInt(0)));
  Check(greater_equal, kIndexIsNegative);

  // Restore the index.
  SmiUntag(index);
}


void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  if (frame_alignment != 0) {
    // Make stack end at alignment and make room for num_arguments words
    // and the original value of esp.
    mov(scratch, esp);
    sub(esp, Immediate((num_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    and_(esp, -frame_alignment);
    mov(Operand(esp, num_arguments * kPointerSize), scratch);
  } else {
    sub(esp, Immediate(num_arguments * kPointerSize));
  }
}
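
// Worked example (a sketch): with num_arguments == 2 and a 16-byte
// frame_alignment, the code reserves (2 + 1) * 4 == 12 bytes, rounds esp down
// to a 16-byte boundary with and_(esp, -16), and stashes the original esp in
// the slot above the two argument words so CallCFunction can restore it.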


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  // Trashing eax is ok as it will be the return value.
  mov(eax, Immediate(function));
  CallCFunction(eax, num_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  DCHECK(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  if (base::OS::ActivationFrameAlignment() != 0) {
    mov(esp, Operand(esp, num_arguments * kPointerSize));
  } else {
    add(esp, Immediate(num_arguments * kPointerSize));
  }
}


#ifdef DEBUG
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif


CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(NULL, address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CpuFeatures::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    mov(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
           static_cast<uint8_t>(mask));
  } else {
    test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}


void MacroAssembler::CheckPageFlagForMap(
    Handle<Map> map,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  Page* page = Page::FromAddress(map->address());
  DCHECK(!serializer_enabled());  // Serializer cannot match page_flags.
  ExternalReference reference(ExternalReference::page_flags(page));
  // The inlined static address check of the page's flags relies
  // on maps never being compacted.
  DCHECK(!isolate()->heap()->mark_compact_collector()->
         IsOnEvacuationCandidate(*map));
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
  } else {
    test(Operand::StaticVariable(reference), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black,
                                 Label::Distance on_black_near) {
  HasColor(object, scratch0, scratch1,
           on_black, on_black_near,
           1, 0);  // kBlackBitPattern.
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
}


void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              Label::Distance has_color_distance,
                              int first_bit,
                              int second_bit) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
  add(mask_scratch, mask_scratch);  // Shift left 1 by adding.
  j(zero, &word_boundary, Label::kNear);
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
  jmp(&other_color, Label::kNear);

  bind(&word_boundary);
  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);

  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
  bind(&other_color);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  and_(bitmap_reg, addr_reg);
  mov(ecx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shr(ecx, shift);
  and_(ecx,
       (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));

  add(bitmap_reg, ecx);
  mov(ecx, addr_reg);
  shr(ecx, kPointerSizeLog2);
  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
  mov(mask_reg, Immediate(1));
  shl_cl(mask_reg);
}
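
// Worked example (a sketch, assuming 32-bit marking cells so shift is
// 5 + 2 - 2 == 5 on ia32): for an object at page offset 0x1340 (word index
// 1232), offset >> 5 masked down to a multiple of Bitmap::kBytesPerCell gives
// 0x98, the byte offset of marking cell 1232 / 32 == 38; the second half
// computes 1232 & 31 == 16, so mask_reg ends up as 1 << 16, the object's bit
// within that cell.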


void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(not_zero, &done, Label::kNear);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    push(mask_scratch);
    // Shift left by one (via add). May overflow, making the check
    // conservative.
    add(mask_scratch, mask_scratch);
    test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    pop(mask_scratch);
  }

  // Value is white. We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = ecx;  // Holds map while checking type.
  Register length = ecx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap-number.
  mov(map, FieldOperand(value, HeapObject::kMapOffset));
  cmp(map, isolate()->factory()->heap_number_map());
  j(not_equal, &not_heap_number, Label::kNear);
  mov(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = ecx;
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  test_b(instance_type, kExternalStringTag);
  j(zero, &not_external, Label::kNear);
  mov(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either Latin1 or UC16.
  DCHECK(kOneByteStringTag == 0x04);
  and_(length, Immediate(kStringEncodingMask));
  xor_(length, Immediate(kStringEncodingMask));
  add(length, Immediate(0x04));
  // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
  // by 2. If we multiply the string length as smi by this, it still
  // won't overflow a 32-bit value.
  DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
  DCHECK(SeqOneByteString::kMaxSize <=
         static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
  imul(length, FieldOperand(value, String::kLengthOffset));
  shr(length, 2 + kSmiTagSize + kSmiShiftSize);
  add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  and_(length, Immediate(~kObjectAlignmentMask));
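
  // For example (a sketch): a five-character Latin1 string has length smi 10
  // and char-size value 4, so imul gives 40 and the shift by
  // 2 + kSmiTagSize + kSmiShiftSize == 3 recovers the 5 payload bytes, which
  // are then rounded up with the header to the next object-aligned size.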

  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
      length);
  if (emit_debug_code()) {
    mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
    cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
    Check(less_equal, kLiveBytesCountOverflowChunkSize);
  }

  bind(&done);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  mov(dst, FieldOperand(map, Map::kBitField3Offset));
  and_(dst, Immediate(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}


void MacroAssembler::CheckEnumCache(Label* call_runtime) {
  Label next, start;
  mov(ecx, eax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));

  EnumLength(edx, ebx);
  cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);
  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(edx, ebx);
  cmp(edx, Immediate(Smi::FromInt(0)));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register ecx contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
  cmp(ecx, isolate()->factory()->empty_fixed_array());
  j(equal, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
  j(not_equal, call_runtime);

  bind(&no_elements);
  mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
  cmp(ecx, isolate()->factory()->null_value());
  j(not_equal, &next);
}


void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  lea(scratch_reg, Operand(receiver_reg,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  cmp(scratch_reg, Immediate(new_space_start));
  j(less, no_memento_found);
  cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
  j(greater, no_memento_found);
  cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
      Immediate(isolate()->factory()->allocation_memento_map()));
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // scratch contained elements pointer.
  mov(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  mov(current, FieldOperand(current, HeapObject::kMapOffset));
  mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  mov(current, FieldOperand(current, Map::kPrototypeOffset));
  cmp(current, Immediate(factory->null_value()));
  j(not_equal, &loop_again);
}


void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
  DCHECK(!dividend.is(eax));
  DCHECK(!dividend.is(edx));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  mov(eax, Immediate(mag.multiplier));
  imul(dividend);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) add(edx, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) sub(edx, dividend);
  if (mag.shift > 0) sar(edx, mag.shift);
  mov(eax, dividend);
  shr(eax, 31);
  add(edx, eax);
}
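
// Worked example (a sketch): for divisor == 3 the magic multiplier is
// 0x55555556 with shift 0. With dividend == 10, edx receives the high half of
// 10 * 0x55555556, i.e. 3; with dividend == -10 the high half is -4, and
// adding the dividend's sign bit (shr eax, 31 gives 1) corrects it to -3,
// matching truncating division.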


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32