// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_IA32

#include "bootstrapper.h"
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
#include "isolate-inl.h"
#include "runtime.h"
#include "serialize.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MacroAssembler implementation.

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      has_frame_(false) {
  if (isolate() != NULL) {
    // TODO(titzer): should we just use a null handle here instead?
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}

void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
    Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
    mov(destination, value);
    return;
  }
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(destination, Immediate(index));
  mov(destination, Operand::StaticArray(destination,
                                        times_pointer_size,
                                        roots_array_start));
}

void MacroAssembler::StoreRoot(Register source,
                               Register scratch,
                               Heap::RootListIndex index) {
  ASSERT(Heap::RootCanBeWrittenAfterInitialization(index));
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(scratch, Immediate(index));
  mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
      source);
}

void MacroAssembler::CompareRoot(Register with,
                                 Register scratch,
                                 Heap::RootListIndex index) {
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(scratch, Immediate(index));
  cmp(with, Operand::StaticArray(scratch,
                                 times_pointer_size,
                                 roots_array_start));
}

void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
  cmp(with, value);
}


void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
  cmp(with, value);
}

void MacroAssembler::InNewSpace(
    Register object,
    Register scratch,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  ASSERT(cc == equal || cc == not_equal);
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    mov(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  }
  // Check that we can use a test_b.
  ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
  ASSERT(MemoryChunk::IN_TO_SPACE < 8);
  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
           | (1 << MemoryChunk::IN_TO_SPACE);
  // If non-zero, the page belongs to new-space.
  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
         static_cast<uint8_t>(mask));
  j(cc, condition_met, condition_met_distance);
}

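// The new-space check above exploits page alignment: clearing the low
// kPageAlignmentMask bits of any interior pointer yields the owning page
// header, whose flags word says which space the page belongs to. A rough
// C++ equivalent of the emitted sequence (illustrative sketch only):
//
//   bool InNewSpace(Address addr) {
//     uintptr_t page = reinterpret_cast<uintptr_t>(addr) &
//                      ~static_cast<uintptr_t>(Page::kPageAlignmentMask);
//     int flags = *reinterpret_cast<int*>(page + MemoryChunk::kFlagsOffset);
//     return (flags & ((1 << MemoryChunk::IN_FROM_SPACE) |
//                      (1 << MemoryChunk::IN_TO_SPACE))) != 0;
//   }
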
void MacroAssembler::RememberedSetHelper(
    Register object,  // Only used for debug checks.
    Register addr,
    Register scratch,
    SaveFPRegsMode save_fp,
    MacroAssembler::RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(scratch, Operand::StaticVariable(store_buffer));
  // Store pointer to buffer.
  mov(Operand(scratch, 0), addr);
  // Increment buffer top.
  add(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  mov(Operand::StaticVariable(store_buffer), scratch);
  // Call stub on end of buffer.
  // Check for end of buffer.
  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    ret(0);
    bind(&buffer_overflowed);
  } else {
    ASSERT(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  }
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ret(0);
  } else {
    ASSERT(and_then == kFallThroughAtEnd);
    bind(&done);
  }
}

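// The store buffer is a bump array of slot addresses; its limit is chosen
// so that a full buffer sets kStoreBufferOverflowBit in the top pointer,
// letting a single test detect overflow. Hedged sketch of the fast path
// emitted above (not the actual StoreBuffer API):
//
//   void RememberedSetInsert(Address*& top, Address slot) {
//     *top++ = slot;  // Record the slot, bump the top pointer.
//     if (reinterpret_cast<uintptr_t>(top) &
//         StoreBuffer::kStoreBufferOverflowBit) {
//       // Buffer full: matches the CallStub(StoreBufferOverflowStub) path.
//     }
//   }
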
void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
                                        XMMRegister scratch_reg,
                                        Register result_reg) {
  Label done;
  Label conv_failure;
  pxor(scratch_reg, scratch_reg);
  cvtsd2si(result_reg, input_reg);
  test(result_reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  cmp(result_reg, Immediate(0x80000000));
  j(equal, &conv_failure, Label::kNear);
  mov(result_reg, Immediate(0));
  setcc(above, result_reg);
  sub(result_reg, Immediate(1));
  and_(result_reg, Immediate(255));
  jmp(&done, Label::kNear);
  bind(&conv_failure);
  Set(result_reg, Immediate(0));
  ucomisd(input_reg, scratch_reg);
  j(below, &done, Label::kNear);
  Set(result_reg, Immediate(255));
  bind(&done);
}


void MacroAssembler::ClampUint8(Register reg) {
  Label done;
  test(reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  setcc(negative, reg);  // 1 if negative, 0 if positive.
  dec_b(reg);            // 0 if negative, 255 if positive.
  bind(&done);
}

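// Both clamps are deliberately branch-poor. In ClampDoubleToUint8 the
// unsigned compare against 0x80000000 makes 'above' mean "negative when
// viewed as a signed int", so setcc/sub/and maps negative inputs to 0 and
// too-large positives to 255. Illustrative C++ equivalent of the integer
// clamp (sketch only):
//
//   uint8_t ClampToUint8(int32_t value) {
//     if ((value & 0xFFFFFF00) == 0) return static_cast<uint8_t>(value);
//     return value < 0 ? 0 : 255;
//   }
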
void MacroAssembler::SlowTruncateToI(Register result_reg,
                                     Register input_reg,
                                     int offset) {
  DoubleToIStub stub(input_reg, result_reg, offset, true);
  call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
}


void MacroAssembler::TruncateDoubleToI(Register result_reg,
                                       XMMRegister input_reg) {
  Label done;
  cvttsd2si(result_reg, Operand(input_reg));
  cmp(result_reg, 0x80000000u);
  j(not_equal, &done, Label::kNear);

  sub(esp, Immediate(kDoubleSize));
  movsd(MemOperand(esp, 0), input_reg);
  SlowTruncateToI(result_reg, esp, 0);
  add(esp, Immediate(kDoubleSize));
  bind(&done);
}

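// cvttsd2si signals every unconvertible input (NaN, infinities, values
// outside the int32 range) with the single sentinel 0x80000000, so one
// compare routes all hard cases to the DoubleToIStub slow path. Sketch of
// the dispatch, with hypothetical helper names:
//
//   int32_t TruncateDouble(double input) {
//     int32_t fast = FastTruncate(input);             // cvttsd2si
//     if (fast != static_cast<int32_t>(0x80000000)) return fast;
//     return SlowTruncate(input);                     // stub call
//   }
//
// Note that kMinInt itself also takes the slow path; the stub returns it
// unchanged.
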
void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
  sub(esp, Immediate(kDoubleSize));
  fst_d(MemOperand(esp, 0));
  SlowTruncateToI(result_reg, esp, 0);
  add(esp, Immediate(kDoubleSize));
}


void MacroAssembler::X87TOSToI(Register result_reg,
                               MinusZeroMode minus_zero_mode,
                               Label* conversion_failed,
                               Label::Distance dst) {
  Label done;
  sub(esp, Immediate(kPointerSize));
  fld(0);
  fist_s(MemOperand(esp, 0));
  fild_s(MemOperand(esp, 0));
  pop(result_reg);
  FCmp();
  j(not_equal, conversion_failed, dst);
  j(parity_even, conversion_failed, dst);
  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
    test(result_reg, Operand(result_reg));
    j(not_zero, &done, Label::kNear);
    // To check for minus zero, we load the value again as float, and check
    // if that is still 0.
    sub(esp, Immediate(kPointerSize));
    fst_s(MemOperand(esp, 0));
    pop(result_reg);
    test(result_reg, Operand(result_reg));
    j(not_zero, conversion_failed, dst);
  }
  bind(&done);
}

void MacroAssembler::DoubleToI(Register result_reg,
                               XMMRegister input_reg,
                               XMMRegister scratch,
                               MinusZeroMode minus_zero_mode,
                               Label* conversion_failed,
                               Label::Distance dst) {
  ASSERT(!input_reg.is(scratch));
  cvttsd2si(result_reg, Operand(input_reg));
  Cvtsi2sd(scratch, Operand(result_reg));
  ucomisd(scratch, input_reg);
  j(not_equal, conversion_failed, dst);
  j(parity_even, conversion_failed, dst);  // NaN.
  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
    Label done;
    // The integer converted back is equal to the original. We
    // only have to test if we got -0 as an input.
    test(result_reg, Operand(result_reg));
    j(not_zero, &done, Label::kNear);
    movmskpd(result_reg, input_reg);
    // Bit 0 contains the sign of the double in input_reg.
    // If input was positive, we are ok and return 0, otherwise
    // jump to conversion_failed.
    and_(result_reg, 1);
    j(not_zero, conversion_failed, dst);
    bind(&done);
  }
}

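// movmskpd gathers the sign bits of the two double lanes into the low bits
// of a GPR, so "converted == 0 && (sign bit set)" detects -0.0, which
// compares equal to +0.0 but must fail under FAIL_ON_MINUS_ZERO. Hedged
// C++ equivalent of the test:
//
//   bool IsMinusZero(double value) {
//     uint64_t bits;
//     memcpy(&bits, &value, sizeof(bits));  // from <cstring>
//     return bits == 0x8000000000000000ull;
//   }
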
void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
                                           Register input_reg) {
  Label done, slow_case;

  if (CpuFeatures::IsSupported(SSE3)) {
    CpuFeatureScope scope(this, SSE3);

    // Use more powerful conversion when sse3 is available.
    // Load x87 register with heap number.
    fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
    // Get exponent alone and check for too-big exponent.
    mov(result_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
    and_(result_reg, HeapNumber::kExponentMask);
    const uint32_t kTooBigExponent =
        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
    cmp(Operand(result_reg), Immediate(kTooBigExponent));
    j(greater_equal, &slow_case, Label::kNear);

    // Reserve space for 64 bit answer.
    sub(Operand(esp), Immediate(kDoubleSize));
    // Do conversion, which cannot fail because we checked the exponent.
    fisttp_d(Operand(esp, 0));
    mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
    add(Operand(esp), Immediate(kDoubleSize));
    jmp(&done, Label::kNear);

    // Slow case.
    bind(&slow_case);
    if (input_reg.is(result_reg)) {
      // Input is clobbered. Restore number from fpu stack.
      sub(Operand(esp), Immediate(kDoubleSize));
      fstp_d(Operand(esp, 0));
      SlowTruncateToI(result_reg, esp, 0);
      add(esp, Immediate(kDoubleSize));
    } else {
      fstp(0);
      SlowTruncateToI(result_reg, input_reg);
    }
  } else if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(this, SSE2);
    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    cvttsd2si(result_reg, Operand(xmm0));
    cmp(result_reg, 0x80000000u);
    j(not_equal, &done, Label::kNear);
    // Check if the input was 0x80000000 (kMinInt).
    // If not, then we got an overflow and we deoptimize.
    ExternalReference min_int = ExternalReference::address_of_min_int();
    ucomisd(xmm0, Operand::StaticVariable(min_int));
    j(not_equal, &slow_case, Label::kNear);
    j(parity_even, &slow_case, Label::kNear);  // NaN.
    jmp(&done, Label::kNear);

    // Slow case.
    bind(&slow_case);
    if (input_reg.is(result_reg)) {
      // Input is clobbered. Restore number from double scratch.
      sub(esp, Immediate(kDoubleSize));
      movsd(MemOperand(esp, 0), xmm0);
      SlowTruncateToI(result_reg, esp, 0);
      add(esp, Immediate(kDoubleSize));
    } else {
      SlowTruncateToI(result_reg, input_reg);
    }
  } else {
    SlowTruncateToI(result_reg, input_reg);
  }
  bind(&done);
}

void MacroAssembler::TaggedToI(Register result_reg,
                               Register input_reg,
                               XMMRegister temp,
                               MinusZeroMode minus_zero_mode,
                               Label* lost_precision) {
  Label done;
  ASSERT(!temp.is(xmm0));

  cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
      isolate()->factory()->heap_number_map());
  j(not_equal, lost_precision, Label::kNear);

  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
    ASSERT(!temp.is(no_xmm_reg));
    CpuFeatureScope scope(this, SSE2);

    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    cvttsd2si(result_reg, Operand(xmm0));
    Cvtsi2sd(temp, Operand(result_reg));
    ucomisd(xmm0, temp);
    RecordComment("Deferred TaggedToI: lost precision");
    j(not_equal, lost_precision, Label::kNear);
    RecordComment("Deferred TaggedToI: NaN");
    j(parity_even, lost_precision, Label::kNear);
    if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
      test(result_reg, Operand(result_reg));
      j(not_zero, &done, Label::kNear);
      movmskpd(result_reg, xmm0);
      and_(result_reg, 1);
      RecordComment("Deferred TaggedToI: minus zero");
      j(not_zero, lost_precision, Label::kNear);
    }
  } else {
    // TODO(olivf) Converting a number on the fpu is actually quite slow. We
    // should first try a fast conversion and then bailout to this slow case.
    Label lost_precision_pop, zero_check;
    Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO)
        ? &lost_precision_pop : lost_precision;
    sub(esp, Immediate(kPointerSize));
    fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
    if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0);
    fist_s(MemOperand(esp, 0));
    fild_s(MemOperand(esp, 0));
    FCmp();
    pop(result_reg);
    j(not_equal, lost_precision_int, Label::kNear);
    j(parity_even, lost_precision_int, Label::kNear);  // NaN.
    if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
      test(result_reg, Operand(result_reg));
      j(zero, &zero_check, Label::kNear);
      fstp(0);
      jmp(&done, Label::kNear);
      bind(&zero_check);
      // To check for minus zero, we load the value again as float, and check
      // if that is still 0.
      sub(esp, Immediate(kPointerSize));
      fstp_s(Operand(esp, 0));
      pop(result_reg);
      test(result_reg, Operand(result_reg));
      j(zero, &done, Label::kNear);
      jmp(lost_precision, Label::kNear);

      bind(&lost_precision_pop);
      fstp(0);
      jmp(lost_precision, Label::kNear);
    }
  }
  bind(&done);
}

void MacroAssembler::LoadUint32(XMMRegister dst,
                                Register src,
                                XMMRegister scratch) {
  Label done;
  cmp(src, Immediate(0));
  ExternalReference uint32_bias =
      ExternalReference::address_of_uint32_bias();
  movsd(scratch, Operand::StaticVariable(uint32_bias));
  Cvtsi2sd(dst, src);
  j(not_sign, &done, Label::kNear);
  addsd(dst, scratch);
  bind(&done);
}


void MacroAssembler::LoadUint32NoSSE2(Register src) {
  Label done;
  push(src);
  fild_s(Operand(esp, 0));
  cmp(src, Immediate(0));
  j(not_sign, &done, Label::kNear);
  ExternalReference uint32_bias =
      ExternalReference::address_of_uint32_bias();
  fld_d(Operand::StaticVariable(uint32_bias));
  faddp(1);
  bind(&done);
  add(esp, Immediate(kPointerSize));
}

void MacroAssembler::RecordWriteArray(Register object,
                                      Register value,
                                      Register index,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    ASSERT_EQ(0, kSmiTag);
    test(value, Immediate(kSmiTagMask));
    j(zero, &done);
  }

  // Array access: calculate the destination address in the same manner as
  // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
  // into an array of words.
  Register dst = index;
  lea(dst, Operand(object, index, times_half_pointer_size,
                   FixedArray::kHeaderSize - kHeapObjectTag));

  RecordWrite(
      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
    mov(index, Immediate(BitCast<int32_t>(kZapValue)));
  }
}

void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done, Label::kNear);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  lea(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    Label ok;
    test_b(dst, (1 << kPointerSizeLog2) - 1);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(
      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
    mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
  }
}

void MacroAssembler::RecordWriteForMap(
    Register object,
    Handle<Map> map,
    Register scratch1,
    Register scratch2,
    SaveFPRegsMode save_fp) {
  Label done;

  Register address = scratch1;
  Register value = scratch2;
  if (emit_debug_code()) {
    Label ok;
    lea(address, FieldOperand(object, HeapObject::kMapOffset));
    test_b(address, (1 << kPointerSizeLog2) - 1);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  ASSERT(!object.is(value));
  ASSERT(!object.is(address));
  ASSERT(!value.is(address));
  AssertNotSmi(object);

  if (!FLAG_incremental_marking) {
    return;
  }

  // A single check of the map page's interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set. This optimization
  // relies on the fact that maps can never be in new space.
  ASSERT(!isolate()->heap()->InNewSpace(*map));
  CheckPageFlagForMap(map,
                      MemoryChunk::kPointersToHereAreInterestingMask,
                      zero,
                      &done,
                      Label::kNear);

  // Delay the initialization of |address| and |value| for the stub until it's
  // known that they will be needed. Up until this point their values are not
  // needed since they are embedded in the operands of instructions that need
  // them.
  lea(address, FieldOperand(object, HeapObject::kMapOffset));
  mov(value, Immediate(map));
  RecordWriteStub stub(object, value, address, OMIT_REMEMBERED_SET, save_fp);
  CallStub(&stub);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
    mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
    mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
  }
}

void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register value,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  ASSERT(!object.is(value));
  ASSERT(!object.is(address));
  ASSERT(!value.is(address));
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    cmp(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done, Label::kNear);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
  CallStub(&stub);

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Immediate(BitCast<int32_t>(kZapValue)));
    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
  }
}

#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
  Set(eax, Immediate(0));
  mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(1);
  call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
}
#endif

void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
  xorps(dst, dst);  // Break the dependence on dst's previous value.
  cvtsi2sd(dst, src);
}


void MacroAssembler::Set(Register dst, const Immediate& x) {
  if (x.is_zero()) {
    xor_(dst, dst);  // Shorter than mov.
  } else {
    mov(dst, x);
  }
}


void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
  mov(dst, x);
}

bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
  static const int kMaxImmediateBits = 17;
  if (!RelocInfo::IsNone(x.rmode_)) return false;
  return !is_intn(x.x_, kMaxImmediateBits);
}


void MacroAssembler::SafeSet(Register dst, const Immediate& x) {
  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
    Set(dst, Immediate(x.x_ ^ jit_cookie()));
    xor_(dst, jit_cookie());
  } else {
    Set(dst, x);
  }
}


void MacroAssembler::SafePush(const Immediate& x) {
  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
    push(Immediate(x.x_ ^ jit_cookie()));
    xor_(Operand(esp, 0), Immediate(jit_cookie()));
  } else {
    push(x);
  }
}

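// With a nonzero JIT cookie, attacker-chosen immediates never appear
// verbatim in the instruction stream: the masked value is emitted and
// un-XORed at run time. Worked example (cookie and value are illustrative):
//
//   const int32_t kCookie = 0x12345678;      // jit_cookie()
//   int32_t masked = 0x41414141 ^ kCookie;   // constant stored in the code
//   // Emitted: mov dst, masked; xor dst, kCookie  =>  dst == 0x41414141.
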
void MacroAssembler::CmpObjectType(Register heap_object,
                                   InstanceType type,
                                   Register map) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  CmpInstanceType(map, type);
}


void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
       static_cast<int8_t>(type));
}


void MacroAssembler::CheckFastElements(Register map,
                                       Label* fail,
                                       Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleyElementValue);
  j(above, fail, distance);
}

void MacroAssembler::CheckFastObjectElements(Register map,
                                             Label* fail,
                                             Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleySmiElementValue);
  j(below_equal, fail, distance);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleyElementValue);
  j(above, fail, distance);
}


void MacroAssembler::CheckFastSmiElements(Register map,
                                          Label* fail,
                                          Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleySmiElementValue);
  j(above, fail, distance);
}

void MacroAssembler::StoreNumberToDoubleElements(
    Register maybe_number,
    Register elements,
    Register key,
    Register scratch1,
    XMMRegister scratch2,
    Label* fail,
    bool specialize_for_processor,
    int elements_offset) {
  Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
  JumpIfSmi(maybe_number, &smi_value, Label::kNear);

  CheckMap(maybe_number,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);

  // Double value, canonicalize NaN.
  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
  cmp(FieldOperand(maybe_number, offset),
      Immediate(kNaNOrInfinityLowerBoundUpper32));
  j(greater_equal, &maybe_nan, Label::kNear);

  bind(&not_nan);
  ExternalReference canonical_nan_reference =
      ExternalReference::address_of_canonical_non_hole_nan();
  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
    CpuFeatureScope use_sse2(this, SSE2);
    movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
    bind(&have_double_value);
    movsd(FieldOperand(elements, key, times_4,
                       FixedDoubleArray::kHeaderSize - elements_offset),
          scratch2);
  } else {
    fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
    bind(&have_double_value);
    fstp_d(FieldOperand(elements, key, times_4,
                        FixedDoubleArray::kHeaderSize - elements_offset));
  }
  jmp(&done, Label::kNear);

  bind(&maybe_nan);
  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
  // it's an Infinity, and the non-NaN code path applies.
  j(greater, &is_nan, Label::kNear);
  cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
  j(zero, &not_nan);
  bind(&is_nan);
  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
    CpuFeatureScope use_sse2(this, SSE2);
    movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
  } else {
    fld_d(Operand::StaticVariable(canonical_nan_reference));
  }
  jmp(&have_double_value, Label::kNear);

  bind(&smi_value);
  // Value is a smi. Convert to a double and store.
  // Preserve original value.
  mov(scratch1, maybe_number);
  SmiUntag(scratch1);
  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
    CpuFeatureScope fscope(this, SSE2);
    Cvtsi2sd(scratch2, scratch1);
    movsd(FieldOperand(elements, key, times_4,
                       FixedDoubleArray::kHeaderSize - elements_offset),
          scratch2);
  } else {
    push(scratch1);
    fild_s(Operand(esp, 0));
    pop(scratch1);
    fstp_d(FieldOperand(elements, key, times_4,
                        FixedDoubleArray::kHeaderSize - elements_offset));
  }
  bind(&done);
}

void MacroAssembler::CompareMap(Register obj,
                                Handle<Map> map,
                                Label* early_success) {
  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}


void MacroAssembler::CheckMap(Register obj,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }

  Label success;
  CompareMap(obj, map, &success);
  j(not_equal, fail);
  bind(&success);
}


void MacroAssembler::DispatchMap(Register obj,
                                 Register unused,
                                 Handle<Map> map,
                                 Handle<Code> success,
                                 SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
  j(equal, success);

  bind(&fail);
}

Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                             Register map,
                                             Register instance_type) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  test(instance_type, Immediate(kIsNotStringMask));
  return zero;
}


Condition MacroAssembler::IsObjectNameType(Register heap_object,
                                           Register map,
                                           Register instance_type) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
  return below_equal;
}


void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}


void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
  sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  cmp(scratch,
      LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
  j(above, fail);
}

void MacroAssembler::FCmp() {
  if (CpuFeatures::IsSupported(CMOV)) {
    fucomip();
    fstp(0);
  } else {
    fucompp();
    push(eax);
    fnstsw_ax();
    sahf();
    pop(eax);
  }
}

void MacroAssembler::AssertNumber(Register object) {
  if (emit_debug_code()) {
    Label ok;
    JumpIfSmi(object, &ok);
    cmp(FieldOperand(object, HeapObject::kMapOffset),
        isolate()->factory()->heap_number_map());
    Check(equal, kOperandNotANumber);
    bind(&ok);
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(equal, kOperandIsNotASmi);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAString);
    push(object);
    mov(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, FIRST_NONSTRING_TYPE);
    pop(object);
    Check(below, kOperandIsNotAString);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAName);
    push(object);
    mov(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, LAST_NAME_TYPE);
    pop(object);
    Check(below_equal, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmi);
  }
}

void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
  if (frame_mode == BUILD_STUB_FRAME) {
    push(ebp);  // Caller's frame pointer.
    mov(ebp, esp);
    push(esi);  // Callee's context.
    push(Immediate(Smi::FromInt(StackFrame::STUB)));
  } else {
    PredictableCodeSizeScope predictable_code_size_scope(this,
        kNoCodeAgeSequenceLength);
    if (isolate()->IsCodePreAgingActive()) {
      // Pre-age the code.
      call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
           RelocInfo::CODE_AGE_SEQUENCE);
      Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
    } else {
      push(ebp);  // Caller's frame pointer.
      mov(ebp, esp);
      push(esi);  // Callee's context.
      push(edi);  // Callee's JS function.
    }
  }
}

void MacroAssembler::EnterFrame(StackFrame::Type type) {
  push(ebp);
  mov(ebp, esp);
  push(esi);
  push(Immediate(Smi::FromInt(type)));
  push(Immediate(CodeObject()));
  if (emit_debug_code()) {
    cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
    Check(not_equal, kCodeObjectNotProperlyPatched);
  }
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
        Immediate(Smi::FromInt(type)));
    Check(equal, kStackFrameTypesMustMatch);
  }
  leave();
}

void MacroAssembler::EnterExitFramePrologue() {
  // Set up the frame structure on the stack.
  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
  ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
  push(ebp);
  mov(ebp, esp);

  // Reserve room for entry stack pointer and push the code object.
  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
  push(Immediate(0));  // Saved entry sp, patched before call.
  push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer and the context in top.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                       isolate());
  ExternalReference context_address(Isolate::kContextAddress,
                                    isolate());
  mov(Operand::StaticVariable(c_entry_fp_address), ebp);
  mov(Operand::StaticVariable(context_address), esi);
}

void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
  // Optionally save all XMM registers.
  if (save_doubles) {
    CpuFeatureScope scope(this, SSE2);
    int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
    sub(esp, Immediate(space));
    const int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
    }
  } else {
    sub(esp, Immediate(argc * kPointerSize));
  }

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    ASSERT(IsPowerOf2(kFrameAlignment));
    and_(esp, -kFrameAlignment);
  }

  // Patch the saved entry sp.
  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
}

void MacroAssembler::EnterExitFrame(bool save_doubles) {
  EnterExitFramePrologue();

  // Set up argc and argv in callee-saved registers.
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
  mov(edi, eax);
  lea(esi, Operand(ebp, eax, times_4, offset));

  // Reserve space for argc, argv and isolate.
  EnterExitFrameEpilogue(3, save_doubles);
}


void MacroAssembler::EnterApiExitFrame(int argc) {
  EnterExitFramePrologue();
  EnterExitFrameEpilogue(argc, false);
}

void MacroAssembler::LeaveExitFrame(bool save_doubles) {
  // Optionally restore all XMM registers.
  if (save_doubles) {
    CpuFeatureScope scope(this, SSE2);
    const int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
    }
  }

  // Get the return address from the stack and restore the frame pointer.
  mov(ecx, Operand(ebp, 1 * kPointerSize));
  mov(ebp, Operand(ebp, 0 * kPointerSize));

  // Pop the arguments and the receiver from the caller stack.
  lea(esp, Operand(esi, 1 * kPointerSize));

  // Push the return address to get ready to return.
  push(ecx);

  LeaveExitFrameEpilogue(true);
}

void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  if (restore_context) {
    mov(esi, Operand::StaticVariable(context_address));
  }
#ifdef DEBUG
  mov(Operand::StaticVariable(context_address), Immediate(0));
#endif

  // Clear the top frame.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                       isolate());
  mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
}


void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
  mov(esp, ebp);
  pop(ebp);

  LeaveExitFrameEpilogue(restore_context);
}

void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // We will build up the handler from the bottom by pushing on the stack.
  // First push the frame pointer and context.
  if (kind == StackHandler::JS_ENTRY) {
    // The frame pointer does not point to a JS frame so we save NULL for
    // ebp. We expect the code throwing an exception to check ebp before
    // dereferencing it to restore the context.
    push(Immediate(0));  // NULL frame pointer.
    push(Immediate(Smi::FromInt(0)));  // No context.
  } else {
    push(ebp);
    push(esi);
  }
  // Push the state and the code object.
  unsigned state =
      StackHandler::IndexField::encode(handler_index) |
      StackHandler::KindField::encode(kind);
  push(Immediate(state));
  push(Immediate(CodeObject()));

  // Link the current handler as the next handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  push(Operand::StaticVariable(handler_address));
  // Set this new handler as the current one.
  mov(Operand::StaticVariable(handler_address), esp);
}

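// Resulting handler layout, matching the STATIC_ASSERTed offsets above
// (lowest address, i.e. the last push, on top):
//
//   esp -> [ next handler  ]  kNextOffset    == 0
//          [ code object   ]  kCodeOffset    == 1 * kPointerSize
//          [ state         ]  kStateOffset   == 2 * kPointerSize
//          [ context       ]  kContextOffset == 3 * kPointerSize
//          [ frame pointer ]  kFPOffset      == 4 * kPointerSize
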
void MacroAssembler::PopTryHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  pop(Operand::StaticVariable(handler_address));
  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}


void MacroAssembler::JumpToHandlerEntry() {
  // Compute the handler entry address and jump to it. The handler table is
  // a fixed array of (smi-tagged) code offsets.
  // eax = exception, edi = code object, edx = state.
  mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
  shr(edx, StackHandler::kKindWidth);
  mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
  SmiUntag(edx);
  lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
  jmp(edi);
}

void MacroAssembler::Throw(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in eax.
  if (!value.is(eax)) {
    mov(eax, value);
  }
  // Drop the stack pointer to the top of the top handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  mov(esp, Operand::StaticVariable(handler_address));
  // Restore the next handler.
  pop(Operand::StaticVariable(handler_address));

  // Remove the code object and state, compute the handler address in edi.
  pop(edi);  // Code object.
  pop(edx);  // Index and state.

  // Restore the context and frame pointer.
  pop(esi);  // Context.
  pop(ebp);  // Frame pointer.

  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
  // ebp or esi.
  Label skip;
  test(esi, esi);
  j(zero, &skip, Label::kNear);
  mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
  bind(&skip);

  JumpToHandlerEntry();
}

void MacroAssembler::ThrowUncatchable(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in eax.
  if (!value.is(eax)) {
    mov(eax, value);
  }
  // Drop the stack pointer to the top of the top stack handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  mov(esp, Operand::StaticVariable(handler_address));

  // Unwind the handlers until the top ENTRY handler is found.
  Label fetch_next, check_kind;
  jmp(&check_kind, Label::kNear);
  bind(&fetch_next);
  mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));

  bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  test(Operand(esp, StackHandlerConstants::kStateOffset),
       Immediate(StackHandler::KindField::kMask));
  j(not_zero, &fetch_next);

  // Set the top handler address to next handler past the top ENTRY handler.
  pop(Operand::StaticVariable(handler_address));

  // Remove the code object and state, compute the handler address in edi.
  pop(edi);  // Code object.
  pop(edx);  // Index and state.

  // Clear the context pointer and frame pointer (0 was saved in the handler).
  pop(esi);
  pop(ebp);

  JumpToHandlerEntry();
}

void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch1));
  ASSERT(!holder_reg.is(scratch2));
  ASSERT(!scratch1.is(scratch2));

  // Load current lexical context from the stack frame.
  mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));

  // When generating debug code, make sure the lexical context is set.
  if (emit_debug_code()) {
    cmp(scratch1, Immediate(0));
    Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
  }
  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  mov(scratch1, FieldOperand(scratch1, offset));
  mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Read the first word and compare to native_context_map.
    cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
        isolate()->factory()->native_context_map());
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
  }

  // Check if both contexts are the same.
  cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  j(equal, &same_contexts);

  // Compare security tokens.
  //
  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  mov(scratch2,
      FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    cmp(scratch2, isolate()->factory()->null_value());
    Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);

    // Read the first word and compare to native_context_map().
    cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
        isolate()->factory()->native_context_map());
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
  }

  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
  mov(scratch1, FieldOperand(scratch1, token_offset));
  cmp(scratch1, FieldOperand(scratch2, token_offset));
  j(not_equal, miss);

  bind(&same_contexts);
}

// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// Note: r0 will contain the hash code.
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
  // Xor original key with a seed.
  if (Serializer::enabled()) {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(scratch, Immediate(Heap::kHashSeedRootIndex));
    mov(scratch,
        Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
    SmiUntag(scratch);
    xor_(r0, scratch);
  } else {
    int32_t seed = isolate()->heap()->HashSeed();
    xor_(r0, Immediate(seed));
  }

  // hash = ~hash + (hash << 15);
  mov(scratch, r0);
  not_(r0);
  shl(scratch, 15);
  add(r0, scratch);
  // hash = hash ^ (hash >> 12);
  mov(scratch, r0);
  shr(scratch, 12);
  xor_(r0, scratch);
  // hash = hash + (hash << 2);
  lea(r0, Operand(r0, r0, times_4, 0));
  // hash = hash ^ (hash >> 4);
  mov(scratch, r0);
  shr(scratch, 4);
  xor_(r0, scratch);
  // hash = hash * 2057;
  imul(r0, r0, 2057);
  // hash = hash ^ (hash >> 16);
  mov(scratch, r0);
  shr(scratch, 16);
  xor_(r0, scratch);
}

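// For reference, the C++ counterpart this code must stay in sync with
// (ComputeIntegerHash in utils.h; reproduced from memory, so verify against
// utils.h before relying on it):
//
//   uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
//     uint32_t hash = key ^ seed;
//     hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
//     hash = hash ^ (hash >> 16);
//     return hash;
//   }
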
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register r0,
                                              Register r1,
                                              Register r2,
                                              Register result) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver and is unchanged.
  // key      - holds the smi key on entry and is unchanged.
  //
  // Scratch registers:
  //
  // r0 - holds the untagged key on entry and holds the hash once computed.
  // r1 - used to hold the capacity mask of the dictionary.
  // r2 - used for the index into the dictionary.
  //
  // result - holds the result on exit if the load succeeds and we fall
  //          through.

  Label done;

  GetNumberHash(r0, r1);

  // Compute capacity mask.
  mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
  shr(r1, kSmiTagSize);  // convert smi to int
  dec(r1);

  // Generate an unrolled loop that performs a few probes before giving up.
  const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Use r2 for index calculations and keep the hash intact in r0.
    mov(r2, r0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(r2, r1);

    // Scale the index by multiplying by the entry size.
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3

    // Check if the key matches.
    cmp(key, FieldOperand(elements,
                          r2,
                          times_pointer_size,
                          SeededNumberDictionary::kElementsStartOffset));
    if (i != (kProbes - 1)) {
      j(equal, &done);
    } else {
      j(not_equal, miss);
    }
  }

  bind(&done);
  // Check that the value is a normal property.
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  ASSERT_EQ(NORMAL, 0);
  test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
       Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
  j(not_zero, miss);

  // Get the value at the masked, scaled index.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}

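// The unrolled loop above is open-addressed probing: probe i inspects entry
// (hash + GetProbeOffset(i)) & mask, and each entry occupies kEntrySize == 3
// words (key, value, details). Hedged C++ sketch of the same lookup:
//
//   int FindEntry(uint32_t hash, uint32_t mask, Object** table, Object* key) {
//     for (int i = 0; i < 4; i++) {  // kProbes
//       uint32_t index = (hash + GetProbeOffset(i)) & mask;
//       Object** entry = table + index * 3;  // kEntrySize
//       if (entry[0] == key) return static_cast<int>(index);
//     }
//     return -1;  // Not found within kProbes attempts: take the miss label.
//   }
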
void MacroAssembler::LoadAllocationTopHelper(Register result,
                                             Register scratch,
                                             AllocationFlags flags) {
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Just return if allocation top is already known.
  if ((flags & RESULT_CONTAINS_TOP) != 0) {
    // No use of scratch if allocation top is provided.
    ASSERT(scratch.is(no_reg));
#ifdef DEBUG
    // Assert that result actually contains top on entry.
    cmp(result, Operand::StaticVariable(allocation_top));
    Check(equal, kUnexpectedAllocationTop);
#endif
    return;
  }

  // Move address of new object to result. Use scratch register if available.
  if (scratch.is(no_reg)) {
    mov(result, Operand::StaticVariable(allocation_top));
  } else {
    mov(scratch, Immediate(allocation_top));
    mov(result, Operand(scratch, 0));
  }
}


void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               Register scratch,
                                               AllocationFlags flags) {
  if (emit_debug_code()) {
    test(result_end, Immediate(kObjectAlignmentMask));
    Check(zero, kUnalignedAllocationInNewSpace);
  }

  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Update new top. Use scratch if available.
  if (scratch.is(no_reg)) {
    mov(Operand::StaticVariable(allocation_top), result_end);
  } else {
    mov(Operand(scratch, 0), result_end);
  }
}

void MacroAssembler::Allocate(int object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      if (result_end.is_valid()) {
        mov(result_end, Immediate(0x7191));
      }
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      }
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    Label aligned;
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    }
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }

  // Calculate new top and bail out if space is exhausted.
  Register top_reg = result_end.is_valid() ? result_end : result;
  if (!top_reg.is(result)) {
    mov(top_reg, result);
  }
  add(top_reg, Immediate(object_size));
  j(carry, gc_required);
  cmp(top_reg, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(top_reg, scratch, flags);

  // Tag result if requested.
  bool tag_result = (flags & TAG_OBJECT) != 0;
  if (top_reg.is(result)) {
    if (tag_result) {
      sub(result, Immediate(object_size - kHeapObjectTag));
    } else {
      sub(result, Immediate(object_size));
    }
  } else if (tag_result) {
    ASSERT(kHeapObjectTag == 1);
    inc(result);
  }
}

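// Inline allocation is a bump-pointer fast path: advance top by the object
// size, compare against the limit, and hand anything that overflows to the
// GC. Hedged C++ sketch of the invariant the emitted code maintains:
//
//   void* TryAllocate(uintptr_t& top, uintptr_t limit, int size) {
//     uintptr_t new_top = top + size;
//     if (new_top < top || new_top > limit) return NULL;  // gc_required
//     void* result = reinterpret_cast<void*>(top);
//     top = new_top;  // Mirrors UpdateAllocationTopHelper.
//     return result;  // Caller adds kHeapObjectTag when TAG_OBJECT is set.
//   }
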
void MacroAssembler::Allocate(int header_size,
                              ScaleFactor element_size,
                              Register element_count,
                              RegisterValueType element_count_type,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  ASSERT((flags & SIZE_IN_WORDS) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      mov(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      }
      // Register element_count is not modified by the function.
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    Label aligned;
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    }
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }

  // Calculate new top and bail out if space is exhausted.
  // We assume that element_count * element_size + header_size does not
  // overflow.
  if (element_count_type == REGISTER_VALUE_IS_SMI) {
    STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
    STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
    STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
    ASSERT(element_size >= times_2);
    ASSERT(kSmiTagSize == 1);
    element_size = static_cast<ScaleFactor>(element_size - 1);
  } else {
    ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
  }
  lea(result_end, Operand(element_count, element_size, header_size));
  add(result_end, result);
  j(carry, gc_required);
  cmp(result_end, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  if ((flags & TAG_OBJECT) != 0) {
    ASSERT(kHeapObjectTag == 1);
    inc(result);
  }

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch, flags);
}

void MacroAssembler::Allocate(Register object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      mov(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      }
      // object_size is left unchanged by this function.
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    Label aligned;
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    }
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }

  // Calculate new top and bail out if space is exhausted.
  if (!object_size.is(result_end)) {
    mov(result_end, object_size);
  }
  add(result_end, result);
  j(carry, gc_required);
  cmp(result_end, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  // Tag result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    ASSERT(kHeapObjectTag == 1);
    inc(result);
  }

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch, flags);
}

void MacroAssembler::UndoAllocationInNewSpace(Register object) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  and_(object, Immediate(~kHeapObjectTagMask));
#ifdef DEBUG
  cmp(object, Operand::StaticVariable(new_space_allocation_top));
  Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
  mov(Operand::StaticVariable(new_space_allocation_top), object);
}

void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required) {
  // Allocate heap number in new space.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->heap_number_map()));
}

void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  ASSERT(kShortSize == 2);
  // scratch1 = length * 2 + kObjectAlignmentMask.
  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
  and_(scratch1, Immediate(~kObjectAlignmentMask));

  // Allocate two byte string in new space.
  Allocate(SeqTwoByteString::kHeaderSize,
           times_1,
           scratch1,
           REGISTER_VALUE_IS_INT32,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->string_map()));
  mov(scratch1, length);
  SmiTag(scratch1);
  mov(FieldOperand(result, String::kLengthOffset), scratch1);
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));
}

void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  mov(scratch1, length);
  ASSERT(kCharSize == 1);
  add(scratch1, Immediate(kObjectAlignmentMask));
  and_(scratch1, Immediate(~kObjectAlignmentMask));

  // Allocate ASCII string in new space.
  Allocate(SeqOneByteString::kHeaderSize,
           times_1,
           scratch1,
           REGISTER_VALUE_IS_INT32,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->ascii_string_map()));
  mov(scratch1, length);
  SmiTag(scratch1);
  mov(FieldOperand(result, String::kLengthOffset), scratch1);
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));
}

void MacroAssembler::AllocateAsciiString(Register result,
                                         int length,
                                         Register scratch1,
                                         Register scratch2,
                                         Label* gc_required) {
  ASSERT(length > 0);

  // Allocate ASCII string in new space.
  Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
           gc_required, TAG_OBJECT);

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->ascii_string_map()));
  mov(FieldOperand(result, String::kLengthOffset),
      Immediate(Smi::FromInt(length)));
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));
}

void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate the cons string object in new space.
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->cons_string_map()));
}

void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* gc_required) {
  Label allocate_new_space, install_map;
  AllocationFlags flags = TAG_OBJECT;

  ExternalReference high_promotion_mode = ExternalReference::
      new_space_high_promotion_mode_active_address(isolate());

  test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
  j(zero, &allocate_new_space);

  Allocate(ConsString::kSize,
           result,
           scratch1,
           scratch2,
           gc_required,
           static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
  jmp(&install_map);

  bind(&allocate_new_space);
  Allocate(ConsString::kSize,
           result,
           scratch1,
           scratch2,
           gc_required,
           flags);

  bind(&install_map);
  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->cons_ascii_string_map()));
}

void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate the sliced string object in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->sliced_string_map()));
}


void MacroAssembler::AllocateAsciiSlicedString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate the sliced string object in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->sliced_ascii_string_map()));
}

// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Source and destination are incremented by length.
// Many variants of movsb, loop unrolling, word moves, and indexed operands
// have been tried here already, and this is fastest.
// A simpler loop is faster on small copies, but 30% slower on large ones.
// The cld() instruction must have been emitted, to clear the direction flag,
// before calling this function.
void MacroAssembler::CopyBytes(Register source,
                               Register destination,
                               Register length,
                               Register scratch) {
  Label loop, done, short_string, short_loop;
  // Experimentation shows that the short string loop is faster if length < 10.
  cmp(length, Immediate(10));
  j(less_equal, &short_string);

  ASSERT(source.is(esi));
  ASSERT(destination.is(edi));
  ASSERT(length.is(ecx));

  // Because source is 4-byte aligned in our uses of this function,
  // we keep source aligned for the rep_movs call by copying the odd bytes
  // at the end of the ranges.
  mov(scratch, Operand(source, length, times_1, -4));
  mov(Operand(destination, length, times_1, -4), scratch);
  mov(scratch, ecx);
  shr(ecx, 2);
  rep_movs();
  and_(scratch, Immediate(0x3));
  add(destination, scratch);
  jmp(&done);

  bind(&short_string);
  test(length, length);
  j(zero, &done);

  bind(&short_loop);
  mov_b(scratch, Operand(source, 0));
  mov_b(Operand(destination, 0), scratch);
  inc(source);
  inc(destination);
  dec(length);
  j(not_zero, &short_loop);

  bind(&done);
}


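// A hedged usage sketch (hypothetical call site, not from this file): the
// rep_movs fast path above hard-codes the string-move register triple
// esi/edi/ecx, so a caller would look roughly like this, with eax as the
// scratch register the routine is allowed to destroy:
//
//   cld();                           // direction flag must be cleared first
//   CopyBytes(esi, edi, ecx, eax);   // copies ecx bytes; esi/edi advance

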
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  jmp(&entry);
  bind(&loop);
  mov(Operand(start_offset, 0), filler);
  add(start_offset, Immediate(kPointerSize));
  bind(&entry);
  cmp(start_offset, end_offset);
  j(less, &loop);
}


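// Usage sketch (hypothetical register assignment, as typical allocation code
// might do it): start_offset in edi, end_offset in ecx, and the undefined
// value as filler in edx:
//
//   mov(edx, Immediate(isolate()->factory()->undefined_value()));
//   InitializeFieldsWithFiller(edi, ecx, edx);  // fills [edi, ecx)

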
void MacroAssembler::BooleanBitTest(Register object,
                                    int field_offset,
                                    int bit_index) {
  bit_index += kSmiTagSize + kSmiShiftSize;
  ASSERT(IsPowerOf2(kBitsPerByte));
  int byte_index = bit_index / kBitsPerByte;
  int byte_bit_index = bit_index & (kBitsPerByte - 1);
  test_b(FieldOperand(object, field_offset + byte_index),
         static_cast<byte>(1 << byte_bit_index));
}


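// Worked example: on ia32, kSmiTagSize + kSmiShiftSize == 1, so asking for
// bit 9 of the untagged value tests bit 10 of the smi-tagged field. That is
// byte_index == 10 / 8 == 1 and byte_bit_index == 10 & 7 == 2, i.e. a
// one-byte test of (1 << 2) against the second byte of the field.

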
void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  test(result, result);
  j(not_zero, &ok);
  test(op, op);
  j(sign, then_label);
  bind(&ok);
}


void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op1,
                                      Register op2,
                                      Register scratch,
                                      Label* then_label) {
  Label ok;
  test(result, result);
  j(not_zero, &ok);
  mov(scratch, op1);
  or_(scratch, op2);
  j(sign, then_label);
  bind(&ok);
}


void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Register scratch,
                                             Label* miss,
                                             bool miss_on_bound_function) {
  // Check that the receiver isn't a smi.
  JumpIfSmi(function, miss);

  // Check that the function really is a function.
  CmpObjectType(function, JS_FUNCTION_TYPE, result);
  j(not_equal, miss);

  if (miss_on_bound_function) {
    // If a bound function, go to miss label.
    mov(scratch,
        FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
                   SharedFunctionInfo::kBoundFunction);
    j(not_zero, miss);
  }

  // Make sure that the function has an instance prototype.
  Label non_instance;
  movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
  test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
  j(not_zero, &non_instance);

  // Get the prototype or initial map from the function.
  mov(result,
      FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  cmp(result, Immediate(isolate()->factory()->the_hole_value()));
  j(equal, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CmpObjectType(result, MAP_TYPE, scratch);
  j(not_equal, &done);

  // Get the prototype from the initial map.
  mov(result, FieldOperand(result, Map::kPrototypeOffset));
  jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  bind(&non_instance);
  mov(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  bind(&done);
}


void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
  call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  ASSERT(allow_stub_calls_ ||
         stub->CompilingCallsToThisStubIsGCSafe(isolate()));
  jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}


void MacroAssembler::StubReturn(int argc) {
  ASSERT(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
}


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    add(esp, Immediate(num_arguments * kPointerSize));
  }
  mov(eax, Immediate(isolate()->factory()->undefined_value()));
}


void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
  // the low kHashShift bits.
  and_(hash, String::kArrayIndexValueMask);
  STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0);
  if (String::kHashShift > kSmiTagSize) {
    shr(hash, String::kHashShift - kSmiTagSize);
  }
  if (!index.is(hash)) {
    mov(index, hash);
  }
}


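// Worked example (assuming String::kHashShift == 2 and kSmiTagSize == 1,
// the usual ia32 values): a cached array index of 3 sits in the hash field
// as 3 << 2 == 0xC. Masking keeps 0xC, and shifting right by
// kHashShift - kSmiTagSize == 1 yields 0x6 == 3 << 1, which is exactly the
// smi encoding of 3, so no separate SmiTag step is needed.

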
void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments match the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(eax, Immediate(num_arguments));
  mov(ebx, Immediate(ExternalReference(f, isolate())));
  CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles
                                                   : kDontSaveFPRegs);
  CallStub(&ces);
}


void MacroAssembler::CallExternalReference(ExternalReference ref,
                                           int num_arguments) {
  mov(eax, Immediate(num_arguments));
  mov(ebx, Immediate(ref));

  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(eax, Immediate(num_arguments));
  JumpToExternalReference(ext);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}


Operand ApiParameterOperand(int index) {
  return Operand(esp, index * kPointerSize);
}


void MacroAssembler::PrepareCallApiFunction(int argc) {
  EnterApiExitFrame(argc);
  if (emit_debug_code()) {
    mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
  }
}


void MacroAssembler::CallApiFunctionAndReturn(
    Address function_address,
    Address thunk_address,
    Operand thunk_last_arg,
    int stack_space,
    Operand return_value_operand,
    Operand* context_restore_operand) {
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  ExternalReference limit_address =
      ExternalReference::handle_scope_limit_address(isolate());
  ExternalReference level_address =
      ExternalReference::handle_scope_level_address(isolate());

  // Allocate HandleScope in callee-save registers.
  mov(ebx, Operand::StaticVariable(next_address));
  mov(edi, Operand::StaticVariable(limit_address));
  add(Operand::StaticVariable(level_address), Immediate(1));

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, eax);
    mov(Operand(esp, 0),
        Immediate(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  Label profiler_disabled;
  Label end_profiler_check;
  bool* is_profiling_flag =
      isolate()->cpu_profiler()->is_profiling_address();
  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
  mov(eax, Immediate(reinterpret_cast<Address>(is_profiling_flag)));
  cmpb(Operand(eax, 0), 0);
  j(zero, &profiler_disabled);

  // Additional parameter is the address of the actual getter function.
  mov(thunk_last_arg, Immediate(function_address));
  // Call the api function.
  call(thunk_address, RelocInfo::RUNTIME_ENTRY);
  jmp(&end_profiler_check);

  bind(&profiler_disabled);
  // Call the api function.
  call(function_address, RelocInfo::RUNTIME_ENTRY);
  bind(&end_profiler_check);

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, eax);
    mov(Operand(esp, 0),
        Immediate(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  // Load the value from ReturnValue.
  mov(eax, return_value_operand);

  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  mov(Operand::StaticVariable(next_address), ebx);
  sub(Operand::StaticVariable(level_address), Immediate(1));
  Assert(above_equal, kInvalidHandleScopeLevel);
  cmp(edi, Operand::StaticVariable(limit_address));
  j(not_equal, &delete_allocated_handles);
  bind(&leave_exit_frame);

  // Check if the function scheduled an exception.
  ExternalReference scheduled_exception_address =
      ExternalReference::scheduled_exception_address(isolate());
  cmp(Operand::StaticVariable(scheduled_exception_address),
      Immediate(isolate()->factory()->the_hole_value()));
  j(not_equal, &promote_scheduled_exception);
  bind(&exception_handled);

#if ENABLE_EXTRA_CHECKS
  // Check if the function returned a valid JavaScript value.
  Label ok;
  Register return_value = eax;
  Register map = ecx;

  JumpIfSmi(return_value, &ok, Label::kNear);
  mov(map, FieldOperand(return_value, HeapObject::kMapOffset));

  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
  j(below, &ok, Label::kNear);

  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
  j(above_equal, &ok, Label::kNear);

  cmp(map, isolate()->factory()->heap_number_map());
  j(equal, &ok, Label::kNear);

  cmp(return_value, isolate()->factory()->undefined_value());
  j(equal, &ok, Label::kNear);

  cmp(return_value, isolate()->factory()->true_value());
  j(equal, &ok, Label::kNear);

  cmp(return_value, isolate()->factory()->false_value());
  j(equal, &ok, Label::kNear);

  cmp(return_value, isolate()->factory()->null_value());
  j(equal, &ok, Label::kNear);

  Abort(kAPICallReturnedInvalidObject);

  bind(&ok);
#endif

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    mov(esi, *context_restore_operand);
  }
  LeaveApiExitFrame(!restore_context);
  ret(stack_space * kPointerSize);

  bind(&promote_scheduled_exception);
  {
    FrameScope frame(this, StackFrame::INTERNAL);
    CallRuntime(Runtime::kPromoteScheduledException, 0);
  }
  jmp(&exception_handled);

  // HandleScope limit has changed. Delete allocated extensions.
  ExternalReference delete_extensions =
      ExternalReference::delete_handle_scope_extensions(isolate());
  bind(&delete_allocated_handles);
  mov(Operand::StaticVariable(limit_address), edi);
  mov(edi, eax);
  mov(Operand(esp, 0),
      Immediate(ExternalReference::isolate_address(isolate())));
  mov(eax, Immediate(delete_extensions));
  call(eax);
  mov(eax, edi);
  jmp(&leave_exit_frame);
}


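// The HandleScope bookkeeping above amounts to the following fast-path
// protocol (a comment-only sketch; the authoritative sequence is the
// generated code above):
//
//   next  = *next_address;  limit = *limit_address;  ++*level_address;
//   result = call API function (directly, or via the profiler thunk);
//   *next_address = next;  --*level_address;
//   if (*limit_address != limit) delete_handle_scope_extensions(isolate);

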
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
  // Set the entry point and jump to the C entry runtime stub.
  mov(ebx, Immediate(ext));
  CEntryStub ces(1);
  jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
}


void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
  // This macro takes the dst register to make the code more readable
  // at the call sites. However, the dst register has to be ecx to
  // follow the calling convention which requires the call type to be
  // in ecx.
  ASSERT(dst.is(ecx));
  if (call_kind == CALL_AS_FUNCTION) {
    // Set to some non-zero smi by updating the least significant
    // byte.
    mov_b(dst, 1 << kSmiTagSize);
  } else {
    // Set to smi zero by clearing the register.
    xor_(dst, dst);
  }
}


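// Usage sketch: every invocation funnels through here right before the jump
// or call, e.g. (hypothetical call site)
//
//   SetCallKind(ecx, CALL_AS_FUNCTION);     // ecx := non-zero smi
//   call(adaptor, RelocInfo::CODE_TARGET);
//
// so the callee can distinguish CALL_AS_FUNCTION from CALL_AS_METHOD by
// testing whether ecx holds a non-zero smi.

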
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    const Operand& code_operand,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    Label::Distance done_near,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label invoke;
  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(eax, actual.immediate());
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(ebx, expected.immediate());
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in register, actual is immediate. This is the
      // case when we invoke function values without going through the
      // argument adaption mechanism.
      cmp(expected.reg(), actual.immediate());
      j(equal, &invoke);
      ASSERT(expected.reg().is(ebx));
      mov(eax, actual.immediate());
    } else if (!expected.reg().is(actual.reg())) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmp(expected.reg(), actual.reg());
      j(equal, &invoke);
      ASSERT(actual.reg().is(eax));
      ASSERT(expected.reg().is(ebx));
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (!code_constant.is_null()) {
      mov(edx, Immediate(code_constant));
      add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
    } else if (!code_operand.is_reg(edx)) {
      mov(edx, code_operand);
    }

    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
      SetCallKind(ecx, call_kind);
      call(adaptor, RelocInfo::CODE_TARGET);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        jmp(done, done_near);
      }
    } else {
      SetCallKind(ecx, call_kind);
      jmp(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}


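// A hedged sketch of what the immediate/immediate mismatch case emits
// (assuming expected == 2, actual == 3, flag == CALL_FUNCTION):
//
//   mov(eax, 3);                      // actual argument count
//   mov(ebx, 2);                      // expected argument count
//   mov(edx, code_operand);           // code to run after adaption
//   SetCallKind(ecx, call_kind);
//   call(ArgumentsAdaptorTrampoline, RelocInfo::CODE_TARGET);
//
// Because the mismatch is definite, the jmp to done is omitted.

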
void MacroAssembler::InvokeCode(const Operand& code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper,
                                CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag, Label::kNear,
                 call_wrapper, call_kind);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      SetCallKind(ecx, call_kind);
      call(code);
      call_wrapper.AfterCall();
    } else {
      ASSERT(flag == JUMP_FUNCTION);
      SetCallKind(ecx, call_kind);
      jmp(code);
    }
    bind(&done);
  }
}


void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper,
                                CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  Label done;
  Operand dummy(eax, 0);
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, code, dummy, &done, &definitely_mismatches,
                 flag, Label::kNear, call_wrapper, call_kind);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code, rmode));
      SetCallKind(ecx, call_kind);
      call(code, rmode);
      call_wrapper.AfterCall();
    } else {
      ASSERT(flag == JUMP_FUNCTION);
      SetCallKind(ecx, call_kind);
      jmp(code, rmode);
    }
    bind(&done);
  }
}


void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  ASSERT(fun.is(edi));
  mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
  mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
  SmiUntag(ebx);

  ParameterCount expected(ebx);
  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, actual, flag, call_wrapper, call_kind);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Get the function and setup the context.
  LoadHeapObject(edi, function);
  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

  // We call indirectly through the code field in the function to
  // allow recompilation to take effect without changing any of the
  // call sites.
  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, actual, flag, call_wrapper, call_kind);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments match the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinFunction(edi, id);
  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, expected, flag, call_wrapper, CALL_AS_METHOD);
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the JavaScript builtin function from the builtins object.
  mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  mov(target, FieldOperand(target,
                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(edi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(edi, id);
  // Load the code entry point from the function into the target register.
  mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
}


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in esi).
    mov(dst, esi);
  }

  // We should not have found a with context by walking the context chain
  // (i.e., the static scope chain and runtime context chain do not agree).
  // A variable occurring in such a scope should have slot type LOOKUP and
  // not CONTEXT.
  if (emit_debug_code()) {
    cmp(FieldOperand(dst, HeapObject::kMapOffset),
        isolate()->factory()->with_context_map());
    Check(not_equal, kVariableResolvedToWithContext);
  }
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  mov(scratch, Operand(scratch,
                       Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));

  size_t offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  cmp(map_in_out, FieldOperand(scratch, offset));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  mov(map_in_out, FieldOperand(scratch, offset));
}


void MacroAssembler::LoadInitialArrayMap(
    Register function_in, Register scratch,
    Register map_out, bool can_have_holes) {
  ASSERT(!function_in.is(map_out));
  Label done;
  mov(map_out, FieldOperand(function_in,
                            JSFunction::kPrototypeOrInitialMapOffset));
  if (!FLAG_smi_only_arrays) {
    ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                        kind,
                                        map_out,
                                        scratch,
                                        &done);
  } else if (can_have_holes) {
    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                        FAST_HOLEY_SMI_ELEMENTS,
                                        map_out,
                                        scratch,
                                        &done);
  }
  bind(&done);
}


void MacroAssembler::LoadGlobalContext(Register global_context) {
  // Load the global or builtins object from the current context.
  mov(global_context,
      Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  mov(global_context,
      FieldOperand(global_context, GlobalObject::kNativeContextOffset));
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  mov(function,
      Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  mov(function,
      FieldOperand(function, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  mov(function, Operand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map. The global functions all have initial maps.
  mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}


// Store the value in register src in the safepoint register stack
// slot for register dst.
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
  mov(SafepointRegisterSlot(dst), src);
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
  mov(SafepointRegisterSlot(dst), src);
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  mov(dst, SafepointRegisterSlot(src));
}


Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the lowest encoding,
  // which means that lowest encodings are furthest away from
  // the stack pointer.
  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return kNumSafepointRegisters - reg_code - 1;
}


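// Worked example (assuming kNumSafepointRegisters == 8, the ia32 value):
// pushad pushes eax (code 0) first, so eax ends up deepest in the frame and
// SafepointRegisterStackIndex(0) == 8 - 0 - 1 == 7, i.e. seven slots above
// the stack pointer, while edi (code 7) maps to slot 0 right at esp.

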
void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  AllowDeferredHandleDereference embedding_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    mov(result, Operand::ForCell(cell));
  } else {
    mov(result, object);
  }
}


void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    cmp(reg, Operand::ForCell(cell));
  } else {
    cmp(reg, object);
  }
}


void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    push(Operand::ForCell(cell));
  } else {
    push(Immediate(object));
  }
}


void MacroAssembler::Ret() {
  ret(0);
}


void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
  if (is_uint16(bytes_dropped)) {
    ret(bytes_dropped);
  } else {
    pop(scratch);
    add(esp, Immediate(bytes_dropped));
    push(scratch);
    ret(0);
  }
}


void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
  // Make sure the floating point stack is either empty or has depth items.
  ASSERT(depth <= 7);
  // This is very expensive.
  ASSERT(FLAG_debug_code && FLAG_enable_slow_asserts);

  // The top-of-stack (tos) is 7 if there is one item pushed.
  int tos = (8 - depth) % 8;
  const int kTopMask = 0x3800;
  push(eax);
  fwait();
  fnstsw_ax();
  and_(eax, kTopMask);
  shr(eax, 11);
  cmp(eax, Immediate(tos));
  Check(equal, kUnexpectedFPUStackDepthAfterInstruction);
  fnclex();
  pop(eax);
}


void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements > 0) {
    add(esp, Immediate(stack_elements * kPointerSize));
  }
}


void MacroAssembler::Move(Register dst, Register src) {
  if (!dst.is(src)) {
    mov(dst, src);
  }
}


void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand operand = Operand::StaticVariable(ExternalReference(counter));
    if (value == 1) {
      inc(operand);
    } else {
      add(operand, Immediate(value));
    }
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand operand = Operand::StaticVariable(ExternalReference(counter));
    if (value == 1) {
      dec(operand);
    } else {
      sub(operand, Immediate(value));
    }
  }
}


void MacroAssembler::IncrementCounter(Condition cc,
                                      StatsCounter* counter,
                                      int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Label skip;
    j(NegateCondition(cc), &skip);
    pushfd();
    IncrementCounter(counter, value);
    popfd();
    bind(&skip);
  }
}


void MacroAssembler::DecrementCounter(Condition cc,
                                      StatsCounter* counter,
                                      int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Label skip;
    j(NegateCondition(cc), &skip);
    pushfd();
    DecrementCounter(counter, value);
    popfd();
    bind(&skip);
  }
}


void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Factory* factory = isolate()->factory();
    Label ok;
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_array_map()));
    j(equal, &ok);
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_double_array_map()));
    j(equal, &ok);
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_cow_array_map()));
    j(equal, &ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
  }
}


void MacroAssembler::Check(Condition cc, BailoutReason reason) {
  Label L;
  j(cc, &L);
  Abort(reason);
  // will not return here
  bind(&L);
}


void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    ASSERT(IsPowerOf2(frame_alignment));
    Label alignment_as_expected;
    test(esp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}


void MacroAssembler::Abort(BailoutReason reason) {
  // We want to pass the msg string like a smi to avoid GC
  // problems, however msg is not guaranteed to be aligned
  // properly. Instead, we pass an aligned pointer that is
  // a proper v8 smi, but also pass the alignment difference
  // from the real pointer as a smi.
  const char* msg = GetBailoutReason(reason);
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    int3();
    return;
  }
#endif

  push(Immediate(p0));
  push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 2);
  } else {
    CallRuntime(Runtime::kAbort, 2);
  }
  // will not return here
  int3();
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  mov(dst, FieldOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::LoadPowerOf2(XMMRegister dst,
                                  Register scratch,
                                  int power) {
  ASSERT(is_uintn(power + HeapNumber::kExponentBias,
                  HeapNumber::kExponentBits));
  mov(scratch, Immediate(power + HeapNumber::kExponentBias));
  movd(dst, scratch);
  psllq(dst, HeapNumber::kMantissaBits);
}


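// Worked example (IEEE-754 double layout, kExponentBias == 1023,
// kMantissaBits == 52): for power == 3 the scratch register receives
// 1026 == 0x402, and shifting that left by 52 builds the bit pattern
// 0x4020000000000000, which is exactly 2^3 == 8.0 as a double.

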
void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch1;
  Register scratch = scratch2;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
  shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
  sub(mask, Immediate(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label smi_hash_calculated;
  Label load_result_from_cache;
  Label not_smi;
  STATIC_ASSERT(kSmiTag == 0);
  JumpIfNotSmi(object, &not_smi, Label::kNear);
  mov(scratch, object);
  SmiUntag(scratch);
  jmp(&smi_hash_calculated, Label::kNear);
  bind(&not_smi);
  cmp(FieldOperand(object, HeapObject::kMapOffset),
      isolate()->factory()->heap_number_map());
  j(not_equal, not_found);
  STATIC_ASSERT(8 == kDoubleSize);
  mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
  xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
  // Object is heap number and hash is now in scratch. Calculate cache index.
  and_(scratch, mask);
  Register index = scratch;
  Register probe = mask;
  mov(probe,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope fscope(this, SSE2);
    movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
    ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
  } else {
    fld_d(FieldOperand(object, HeapNumber::kValueOffset));
    fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
    FCmp();
  }
  j(parity_even, not_found);  // Bail out if NaN is involved.
  j(not_equal, not_found);  // The cache did not contain this value.
  jmp(&load_result_from_cache, Label::kNear);

  bind(&smi_hash_calculated);
  // Object is smi and hash is now in scratch. Calculate cache index.
  and_(scratch, mask);
  // Check if the entry is the smi we are looking for.
  cmp(object,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize));
  j(not_equal, not_found);

  // Get the result from the cache.
  bind(&load_result_from_cache);
  mov(result,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
}


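// Worked example of the double hash above: the heap number 1.5 has bit
// pattern 0x3FF8000000000000, so the hash before masking is the xor of its
// two words, 0x00000000 ^ 0x3FF80000 == 0x3FF80000; a smi such as 42 simply
// hashes to its untagged value, 42.

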
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
    Register instance_type,
    Register scratch,
    Label* failure) {
  if (!scratch.is(instance_type)) {
    mov(scratch, instance_type);
  }
  and_(scratch,
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
  cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
  j(not_equal, failure);
}


void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
                                                         Register object2,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Label* failure) {
  // Check that both objects are not smis.
  STATIC_ASSERT(kSmiTag == 0);
  mov(scratch1, object1);
  and_(scratch1, object2);
  JumpIfSmi(scratch1, failure);

  // Load instance type for both strings.
  mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
  mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
  movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
  movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));

  // Check that both are flat ASCII strings.
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatAsciiStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  // Interleave bits from both instance types and compare them in one check.
  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
  and_(scratch1, kFlatAsciiStringMask);
  and_(scratch2, kFlatAsciiStringMask);
  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
  j(not_equal, failure);
}


void MacroAssembler::JumpIfNotUniqueName(Operand operand,
                                         Label* not_unique_name,
                                         Label::Distance distance) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  j(zero, &succeed);
  cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
  j(not_equal, not_unique_name, distance);

  bind(&succeed);
}


void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
  int frame_alignment = OS::ActivationFrameAlignment();
  if (frame_alignment != 0) {
    // Make stack end at alignment and make room for num_arguments words
    // and the original value of esp.
    mov(scratch, esp);
    sub(esp, Immediate((num_arguments + 1) * kPointerSize));
    ASSERT(IsPowerOf2(frame_alignment));
    and_(esp, -frame_alignment);
    mov(Operand(esp, num_arguments * kPointerSize), scratch);
  } else {
    sub(esp, Immediate(num_arguments * kPointerSize));
  }
}


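// Worked example (assuming OS::ActivationFrameAlignment() == 16 and
// num_arguments == 2): esp is saved in scratch, lowered by 3 * 4 bytes,
// rounded down to a 16-byte boundary by and_(esp, -16), and the saved esp
// is stored in the third slot, which is exactly where CallCFunction's
// mov(esp, Operand(esp, 2 * kPointerSize)) restores it from.

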
void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  // Trashing eax is ok as it will be the return value.
  mov(eax, Immediate(function));
  CallCFunction(eax, num_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  ASSERT(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  if (OS::ActivationFrameAlignment() != 0) {
    mov(esp, Operand(esp, num_arguments * kPointerSize));
  } else {
    add(esp, Immediate(num_arguments * kPointerSize));
  }
}


bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
  if (r1.is(r2)) return true;
  if (r1.is(r3)) return true;
  if (r1.is(r4)) return true;
  if (r2.is(r3)) return true;
  if (r2.is(r4)) return true;
  if (r3.is(r4)) return true;
  return false;
}


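// Usage sketch (mirroring the guards used by the marking helpers below):
//
//   ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));

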
CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(NULL, address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  ASSERT(cc == zero || cc == not_zero);
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    mov(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
           static_cast<uint8_t>(mask));
  } else {
    test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}


void MacroAssembler::CheckPageFlagForMap(
    Handle<Map> map,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  ASSERT(cc == zero || cc == not_zero);
  Page* page = Page::FromAddress(map->address());
  ExternalReference reference(ExternalReference::page_flags(page));
  // The inlined static address check of the page's flags relies
  // on maps never being compacted.
  ASSERT(!isolate()->heap()->mark_compact_collector()->
         IsOnEvacuationCandidate(*map));
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
  } else {
    test(Operand::StaticVariable(reference), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}


void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                        Register scratch,
                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    mov(scratch, map);
    mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
    and_(scratch, Immediate(Smi::FromInt(Map::Deprecated::kMask)));
    j(not_zero, if_deprecated);
  }
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black,
                                 Label::Distance on_black_near) {
  HasColor(object, scratch0, scratch1,
           on_black, on_black_near,
           1, 0);  // kBlackBitPattern.
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
}


void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              Label::Distance has_color_distance,
                              int first_bit,
                              int second_bit) {
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
  add(mask_scratch, mask_scratch);  // Shift left 1 by adding.
  j(zero, &word_boundary, Label::kNear);
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
  jmp(&other_color, Label::kNear);

  bind(&word_boundary);
  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);

  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
  bind(&other_color);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  and_(bitmap_reg, addr_reg);
  mov(ecx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shr(ecx, shift);
  and_(ecx,
       (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));

  add(bitmap_reg, ecx);
  mov(ecx, addr_reg);
  shr(ecx, kPointerSizeLog2);
  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
  mov(mask_reg, Immediate(1));
  shl_cl(mask_reg);
}


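// Worked example (assuming 32-bit mark-bit cells and 4-byte pointers, so
// shift == 5 + 2 - 2 == 5): an object at page offset 0x340 covers word
// 0x340 >> 2 == 208, so its cell lives at bitmap byte offset
// (0x340 >> 5) & ~3 == 24 and its mask is 1 << (208 & 31) == 1 << 16.

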
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(not_zero, &done, Label::kNear);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    push(mask_scratch);
    // shl. May overflow making the check conservative.
    add(mask_scratch, mask_scratch);
    test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    pop(mask_scratch);
  }

  // Value is white. We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = ecx;  // Holds map while checking type.
  Register length = ecx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap-number.
  mov(map, FieldOperand(value, HeapObject::kMapOffset));
  cmp(map, isolate()->factory()->heap_number_map());
  j(not_equal, &not_heap_number, Label::kNear);
  mov(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = ecx;
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  test_b(instance_type, kExternalStringTag);
  j(zero, &not_external, Label::kNear);
  mov(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either ASCII or UC16.
  ASSERT(kOneByteStringTag == 0x04);
  and_(length, Immediate(kStringEncodingMask));
  xor_(length, Immediate(kStringEncodingMask));
  add(length, Immediate(0x04));
  // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
  // by 2. If we multiply the string length as smi by this, it still
  // won't overflow a 32-bit value.
  ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
  ASSERT(SeqOneByteString::kMaxSize <=
         static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
  imul(length, FieldOperand(value, String::kLengthOffset));
  shr(length, 2 + kSmiTagSize + kSmiShiftSize);
  add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  and_(length, Immediate(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
      length);
  if (emit_debug_code()) {
    mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
    cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
    Check(less_equal, kLiveBytesCountOverflowChunkSize);
  }

  bind(&done);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  mov(dst, FieldOperand(map, Map::kBitField3Offset));
  and_(dst, Immediate(Smi::FromInt(Map::EnumLengthBits::kMask)));
}


void MacroAssembler::CheckEnumCache(Label* call_runtime) {
  Label next, start;
  mov(ecx, eax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));

  EnumLength(edx, ebx);
  cmp(edx, Immediate(Smi::FromInt(Map::kInvalidEnumCache)));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);
  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(edx, ebx);
  cmp(edx, Immediate(Smi::FromInt(0)));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register ecx contains the current JS
  // object we've reached through the prototype chain.
  mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
  cmp(ecx, isolate()->factory()->empty_fixed_array());
  j(not_equal, call_runtime);

  mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
  cmp(ecx, isolate()->factory()->null_value());
  j(not_equal, &next);
}


void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  lea(scratch_reg, Operand(receiver_reg,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  cmp(scratch_reg, Immediate(new_space_start));
  j(less, no_memento_found);
  cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
  j(greater, no_memento_found);
  cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
      Immediate(isolate()->factory()->allocation_memento_map()));
}


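// Layout sketch behind the arithmetic above: a memento, when present, sits
// immediately after the JSArray header, so scratch_reg is pointed at
//
//   receiver_reg + JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag
//
// i.e. the raw end address of the would-be memento; the two bounds checks
// then prove that address lies inside new space before the map word at
// scratch_reg - AllocationMemento::kSize is compared.

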
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32