1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include <limits.h> // For LONG_MIN, LONG_MAX.
32 #if V8_TARGET_ARCH_MIPS
34 #include "bootstrapper.h"
36 #include "cpu-profiler.h"
38 #include "isolate-inl.h"
44 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
45 : Assembler(arg_isolate, buffer, size),
46 generating_stub_(false),
48 if (isolate() != NULL) {
49 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
55 void MacroAssembler::Load(Register dst,
56 const MemOperand& src,
58 ASSERT(!r.IsDouble());
61 } else if (r.IsUInteger8()) {
63 } else if (r.IsInteger16()) {
65 } else if (r.IsUInteger16()) {
73 void MacroAssembler::Store(Register src,
74 const MemOperand& dst,
76 ASSERT(!r.IsDouble());
77 if (r.IsInteger8() || r.IsUInteger8()) {
79 } else if (r.IsInteger16() || r.IsUInteger16()) {
87 void MacroAssembler::LoadRoot(Register destination,
88 Heap::RootListIndex index) {
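  // s6 holds a pointer to the roots array, so a root value is a single lw at
  // a fixed offset from it.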
89 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
93 void MacroAssembler::LoadRoot(Register destination,
94 Heap::RootListIndex index,
96 Register src1, const Operand& src2) {
97 Branch(2, NegateCondition(cond), src1, src2);
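  // The branch above skips the root load below when the condition fails.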
98 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
102 void MacroAssembler::StoreRoot(Register source,
103 Heap::RootListIndex index) {
104 sw(source, MemOperand(s6, index << kPointerSizeLog2));
108 void MacroAssembler::StoreRoot(Register source,
109 Heap::RootListIndex index,
111 Register src1, const Operand& src2) {
112 Branch(2, NegateCondition(cond), src1, src2);
113 sw(source, MemOperand(s6, index << kPointerSizeLog2));
117 // Push and pop all registers that can hold pointers.
118 void MacroAssembler::PushSafepointRegisters() {
119 // Safepoints expect a block of kNumSafepointRegisters values on the
120 // stack, so adjust the stack for unsaved registers.
121 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
122 ASSERT(num_unsaved >= 0);
123 if (num_unsaved > 0) {
124 Subu(sp, sp, Operand(num_unsaved * kPointerSize));
126 MultiPush(kSafepointSavedRegisters);
130 void MacroAssembler::PopSafepointRegisters() {
131 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
132 MultiPop(kSafepointSavedRegisters);
133 if (num_unsaved > 0) {
134 Addu(sp, sp, Operand(num_unsaved * kPointerSize));
139 void MacroAssembler::PushSafepointRegistersAndDoubles() {
140 PushSafepointRegisters();
141 Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
142 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
143 FPURegister reg = FPURegister::FromAllocationIndex(i);
144 sdc1(reg, MemOperand(sp, i * kDoubleSize));
149 void MacroAssembler::PopSafepointRegistersAndDoubles() {
150 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
151 FPURegister reg = FPURegister::FromAllocationIndex(i);
152 ldc1(reg, MemOperand(sp, i * kDoubleSize));
154 Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
155 PopSafepointRegisters();
159 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
161 sw(src, SafepointRegistersAndDoublesSlot(dst));
165 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
166 sw(src, SafepointRegisterSlot(dst));
170 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
171 lw(dst, SafepointRegisterSlot(src));
175 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
176 // The registers are pushed starting with the highest encoding,
177 // which means that lowest encodings are closest to the stack pointer.
178 return kSafepointRegisterStackIndexMap[reg_code];
182 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
183 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
187 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
188 UNIMPLEMENTED_MIPS();
189 // General purpose registers are pushed last on the stack.
190 int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
191 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
192 return MemOperand(sp, doubles_size + register_offset);
196 void MacroAssembler::InNewSpace(Register object,
200 ASSERT(cc == eq || cc == ne);
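  // New space is a single contiguous, aligned region, so masking the object
  // address and comparing against the region start tests containment.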
201 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
202 Branch(branch, cc, scratch,
203 Operand(ExternalReference::new_space_start(isolate())));
207 void MacroAssembler::RecordWriteField(
213 SaveFPRegsMode save_fp,
214 RememberedSetAction remembered_set_action,
215 SmiCheck smi_check) {
216 ASSERT(!AreAliased(value, dst, t8, object));
217 // First, check if a write barrier is even needed. The tests below
218 // catch stores of Smis.
221 // Skip barrier if writing a smi.
222 if (smi_check == INLINE_SMI_CHECK) {
223 JumpIfSmi(value, &done);
  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
228 ASSERT(IsAligned(offset, kPointerSize));
230 Addu(dst, object, Operand(offset - kHeapObjectTag));
231 if (emit_debug_code()) {
233 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
234 Branch(&ok, eq, t8, Operand(zero_reg));
235 stop("Unaligned cell in write barrier");
244 remembered_set_action,
249 // Clobber clobbered input registers when running with the debug-code flag
250 // turned on to provoke errors.
251 if (emit_debug_code()) {
252 li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
253 li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
258 // Will clobber 4 registers: object, address, scratch, ip. The
259 // register 'object' contains a heap object pointer. The heap object
260 // tag is shifted away.
261 void MacroAssembler::RecordWrite(Register object,
265 SaveFPRegsMode fp_mode,
266 RememberedSetAction remembered_set_action,
267 SmiCheck smi_check) {
268 ASSERT(!AreAliased(object, address, value, t8));
269 ASSERT(!AreAliased(object, address, value, t9));
271 if (emit_debug_code()) {
272 lw(at, MemOperand(address));
274 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
277 // Count number of write barriers in generated code.
278 isolate()->counters()->write_barriers_static()->Increment();
279 // TODO(mstarzinger): Dynamic counter missing.
281 // First, check if a write barrier is even needed. The tests below
282 // catch stores of smis and stores into the young generation.
285 if (smi_check == INLINE_SMI_CHECK) {
286 ASSERT_EQ(0, kSmiTag);
287 JumpIfSmi(value, &done);
291 value, // Used as scratch.
292 MemoryChunk::kPointersToHereAreInterestingMask,
295 CheckPageFlag(object,
296 value, // Used as scratch.
297 MemoryChunk::kPointersFromHereAreInterestingMask,
301 // Record the actual write.
302 if (ra_status == kRAHasNotBeenSaved) {
305 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
307 if (ra_status == kRAHasNotBeenSaved) {
313 // Clobber clobbered registers when running with the debug-code flag
314 // turned on to provoke errors.
315 if (emit_debug_code()) {
316 li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
317 li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
322 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
325 SaveFPRegsMode fp_mode,
326 RememberedSetFinalAction and_then) {
328 if (emit_debug_code()) {
330 JumpIfNotInNewSpace(object, scratch, &ok);
331 stop("Remembered set pointer is in new space");
334 // Load store buffer top.
335 ExternalReference store_buffer =
336 ExternalReference::store_buffer_top(isolate());
337 li(t8, Operand(store_buffer));
338 lw(scratch, MemOperand(t8));
339 // Store pointer to buffer and increment buffer top.
340 sw(address, MemOperand(scratch));
341 Addu(scratch, scratch, kPointerSize);
342 // Write back new top of buffer.
343 sw(scratch, MemOperand(t8));
  // Check whether the buffer top has reached the end of the store buffer; if
  // so, the overflow stub below must be called to process the buffer.
346 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
347 if (and_then == kFallThroughAtEnd) {
348 Branch(&done, eq, t8, Operand(zero_reg));
350 ASSERT(and_then == kReturnAtEnd);
351 Ret(eq, t8, Operand(zero_reg));
354 StoreBufferOverflowStub store_buffer_overflow =
355 StoreBufferOverflowStub(fp_mode);
356 CallStub(&store_buffer_overflow);
359 if (and_then == kReturnAtEnd) {
365 // -----------------------------------------------------------------------------
366 // Allocation support.
369 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
374 ASSERT(!holder_reg.is(scratch));
375 ASSERT(!holder_reg.is(at));
376 ASSERT(!scratch.is(at));
378 // Load current lexical context from the stack frame.
379 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
380 // In debug mode, make sure the lexical context is set.
382 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
383 scratch, Operand(zero_reg));
386 // Load the native context of the current context.
388 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
389 lw(scratch, FieldMemOperand(scratch, offset));
390 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
392 // Check the context is a native context.
393 if (emit_debug_code()) {
394 push(holder_reg); // Temporarily save holder on the stack.
395 // Read the first word and compare to the native_context_map.
396 lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
397 LoadRoot(at, Heap::kNativeContextMapRootIndex);
398 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
399 holder_reg, Operand(at));
400 pop(holder_reg); // Restore holder.
403 // Check if both contexts are the same.
404 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
405 Branch(&same_contexts, eq, scratch, Operand(at));
407 // Check the context is a native context.
408 if (emit_debug_code()) {
409 push(holder_reg); // Temporarily save holder on the stack.
410 mov(holder_reg, at); // Move at to its holding place.
411 LoadRoot(at, Heap::kNullValueRootIndex);
412 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
413 holder_reg, Operand(at));
415 lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
416 LoadRoot(at, Heap::kNativeContextMapRootIndex);
417 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
418 holder_reg, Operand(at));
    // Restoring at is not needed; at is reloaded below.
420 pop(holder_reg); // Restore holder.
421 // Restore at to holder's context.
422 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
425 // Check that the security token in the calling global object is
426 // compatible with the security token in the receiving global
428 int token_offset = Context::kHeaderSize +
429 Context::SECURITY_TOKEN_INDEX * kPointerSize;
431 lw(scratch, FieldMemOperand(scratch, token_offset));
432 lw(at, FieldMemOperand(at, token_offset));
433 Branch(miss, ne, scratch, Operand(at));
435 bind(&same_contexts);
439 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
440 // First of all we assign the hash seed to scratch.
441 LoadRoot(scratch, Heap::kHashSeedRootIndex);
444 // Xor original key with a seed.
445 xor_(reg0, reg0, scratch);
447 // Compute the hash code from the untagged key. This must be kept in sync
448 // with ComputeIntegerHash in utils.h.
450 // hash = ~hash + (hash << 15);
451 nor(scratch, reg0, zero_reg);
453 addu(reg0, scratch, at);
455 // hash = hash ^ (hash >> 12);
457 xor_(reg0, reg0, at);
459 // hash = hash + (hash << 2);
461 addu(reg0, reg0, at);
463 // hash = hash ^ (hash >> 4);
465 xor_(reg0, reg0, at);
467 // hash = hash * 2057;
468 sll(scratch, reg0, 11);
470 addu(reg0, reg0, at);
471 addu(reg0, reg0, scratch);
473 // hash = hash ^ (hash >> 16);
475 xor_(reg0, reg0, at);
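  // Taken together, the sequence above computes, roughly in C:
  //   hash = ~hash + (hash << 15);
  //   hash = hash ^ (hash >> 12);
  //   hash = hash + (hash << 2);
  //   hash = hash ^ (hash >> 4);
  //   hash = hash * 2057;  // i.e. hash + (hash << 3) + (hash << 11)
  //   hash = hash ^ (hash >> 16);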
479 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
488 // elements - holds the slow-case elements of the receiver on entry.
489 // Unchanged unless 'result' is the same register.
491 // key - holds the smi key on entry.
492 // Unchanged unless 'result' is the same register.
495 // result - holds the result on exit if the load succeeded.
496 // Allowed to be the same as 'key' or 'result'.
497 // Unchanged on bailout so 'key' or 'result' can be used
498 // in further computation.
500 // Scratch registers:
502 // reg0 - holds the untagged key on entry and holds the hash once computed.
504 // reg1 - Used to hold the capacity mask of the dictionary.
506 // reg2 - Used for the index into the dictionary.
507 // at - Temporary (avoid MacroAssembler instructions also using 'at').
510 GetNumberHash(reg0, reg1);
512 // Compute the capacity mask.
513 lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
514 sra(reg1, reg1, kSmiTagSize);
515 Subu(reg1, reg1, Operand(1));
517 // Generate an unrolled loop that performs a few probes before giving up.
518 for (int i = 0; i < kNumberDictionaryProbes; i++) {
519 // Use reg2 for index calculations and keep the hash intact in reg0.
521 // Compute the masked index: (hash + i + i * i) & mask.
523 Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
525 and_(reg2, reg2, reg1);
527 // Scale the index by multiplying by the element size.
528 ASSERT(SeededNumberDictionary::kEntrySize == 3);
529 sll(at, reg2, 1); // 2x.
530 addu(reg2, reg2, at); // reg2 = reg2 * 3.
532 // Check if the key is identical to the name.
533 sll(at, reg2, kPointerSizeLog2);
534 addu(reg2, elements, at);
536 lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
537 if (i != kNumberDictionaryProbes - 1) {
538 Branch(&done, eq, key, Operand(at));
540 Branch(miss, ne, key, Operand(at));
545 // Check that the value is a normal property.
546 // reg2: elements + (index * kPointerSize).
547 const int kDetailsOffset =
548 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
549 lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
550 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
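  // Normal properties have a zero TypeField, so any set bits here send us to
  // the miss label.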
551 Branch(miss, ne, at, Operand(zero_reg));
553 // Get the value at the masked, scaled index and return.
554 const int kValueOffset =
555 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
556 lw(result, FieldMemOperand(reg2, kValueOffset));
560 // ---------------------------------------------------------------------------
561 // Instruction macros.
563 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
565 addu(rd, rs, rt.rm());
567 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
568 addiu(rd, rs, rt.imm32_);
570 // li handles the relocation.
579 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
581 subu(rd, rs, rt.rm());
583 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
584 addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
586 // li handles the relocation.
595 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
597 if (kArchVariant == kLoongson) {
601 mul(rd, rs, rt.rm());
604 // li handles the relocation.
607 if (kArchVariant == kLoongson) {
617 void MacroAssembler::Mult(Register rs, const Operand& rt) {
621 // li handles the relocation.
629 void MacroAssembler::Multu(Register rs, const Operand& rt) {
633 // li handles the relocation.
641 void MacroAssembler::Div(Register rs, const Operand& rt) {
645 // li handles the relocation.
653 void MacroAssembler::Divu(Register rs, const Operand& rt) {
657 // li handles the relocation.
665 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
667 and_(rd, rs, rt.rm());
669 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
670 andi(rd, rs, rt.imm32_);
672 // li handles the relocation.
681 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
683 or_(rd, rs, rt.rm());
685 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
686 ori(rd, rs, rt.imm32_);
688 // li handles the relocation.
697 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
699 xor_(rd, rs, rt.rm());
701 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
702 xori(rd, rs, rt.imm32_);
704 // li handles the relocation.
713 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
715 nor(rd, rs, rt.rm());
717 // li handles the relocation.
725 void MacroAssembler::Neg(Register rs, const Operand& rt) {
728 ASSERT(!at.is(rt.rm()));
730 xor_(rs, rt.rm(), at);
734 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
736 slt(rd, rs, rt.rm());
738 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
739 slti(rd, rs, rt.imm32_);
741 // li handles the relocation.
750 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
752 sltu(rd, rs, rt.rm());
754 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
755 sltiu(rd, rs, rt.imm32_);
757 // li handles the relocation.
766 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
767 if (kArchVariant == kMips32r2) {
769 rotrv(rd, rs, rt.rm());
771 rotr(rd, rs, rt.imm32_);
775 subu(at, zero_reg, rt.rm());
777 srlv(rd, rs, rt.rm());
780 if (rt.imm32_ == 0) {
783 srl(at, rs, rt.imm32_);
784 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
792 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
793 if (kArchVariant == kLoongson) {
801 //------------Pseudo-instructions-------------
803 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
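  // Unaligned word load: lwl (paired with a matching lwr) assembles a word
  // from a possibly misaligned address; the +3 offset selects the most
  // significant byte on little-endian targets.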
805 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
809 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
811 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
815 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
816 AllowDeferredHandleDereference smi_check;
817 if (value->IsSmi()) {
818 li(dst, Operand(value), mode);
820 ASSERT(value->IsHeapObject());
821 if (isolate()->heap()->InNewSpace(*value)) {
822 Handle<Cell> cell = isolate()->factory()->NewCell(value);
823 li(dst, Operand(cell));
824 lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
826 li(dst, Operand(value));
832 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
834 BlockTrampolinePoolScope block_trampoline_pool(this);
835 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
836 // Normal load of an immediate value which does not need Relocation Info.
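    // Illustrative values: 0x00001234 takes a single addiu, 0x0000ffff a
    // single ori, 0x12340000 a single lui, and 0x12345678 the full
    // lui + ori pair.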
837 if (is_int16(j.imm32_)) {
838 addiu(rd, zero_reg, j.imm32_);
839 } else if (!(j.imm32_ & kHiMask)) {
840 ori(rd, zero_reg, j.imm32_);
841 } else if (!(j.imm32_ & kImm16Mask)) {
842 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
844 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
845 ori(rd, rd, (j.imm32_ & kImm16Mask));
848 if (MustUseReg(j.rmode_)) {
849 RecordRelocInfo(j.rmode_, j.imm32_);
    // Always emit the same number of instructions: this code may later be
    // patched to load a different value, which may need 2 instructions.
853 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
854 ori(rd, rd, (j.imm32_ & kImm16Mask));
859 void MacroAssembler::MultiPush(RegList regs) {
860 int16_t num_to_push = NumberOfBitsSet(regs);
861 int16_t stack_offset = num_to_push * kPointerSize;
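  // The whole block is allocated up front and then filled from the highest
  // register number down, so the lowest-numbered register ends up closest to
  // the new sp.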
863 Subu(sp, sp, Operand(stack_offset));
864 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
865 if ((regs & (1 << i)) != 0) {
866 stack_offset -= kPointerSize;
867 sw(ToRegister(i), MemOperand(sp, stack_offset));
873 void MacroAssembler::MultiPushReversed(RegList regs) {
874 int16_t num_to_push = NumberOfBitsSet(regs);
875 int16_t stack_offset = num_to_push * kPointerSize;
877 Subu(sp, sp, Operand(stack_offset));
878 for (int16_t i = 0; i < kNumRegisters; i++) {
879 if ((regs & (1 << i)) != 0) {
880 stack_offset -= kPointerSize;
881 sw(ToRegister(i), MemOperand(sp, stack_offset));
887 void MacroAssembler::MultiPop(RegList regs) {
888 int16_t stack_offset = 0;
890 for (int16_t i = 0; i < kNumRegisters; i++) {
891 if ((regs & (1 << i)) != 0) {
892 lw(ToRegister(i), MemOperand(sp, stack_offset));
893 stack_offset += kPointerSize;
896 addiu(sp, sp, stack_offset);
900 void MacroAssembler::MultiPopReversed(RegList regs) {
901 int16_t stack_offset = 0;
903 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
904 if ((regs & (1 << i)) != 0) {
905 lw(ToRegister(i), MemOperand(sp, stack_offset));
906 stack_offset += kPointerSize;
909 addiu(sp, sp, stack_offset);
913 void MacroAssembler::MultiPushFPU(RegList regs) {
914 int16_t num_to_push = NumberOfBitsSet(regs);
915 int16_t stack_offset = num_to_push * kDoubleSize;
917 Subu(sp, sp, Operand(stack_offset));
918 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
919 if ((regs & (1 << i)) != 0) {
920 stack_offset -= kDoubleSize;
921 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
927 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
928 int16_t num_to_push = NumberOfBitsSet(regs);
929 int16_t stack_offset = num_to_push * kDoubleSize;
931 Subu(sp, sp, Operand(stack_offset));
932 for (int16_t i = 0; i < kNumRegisters; i++) {
933 if ((regs & (1 << i)) != 0) {
934 stack_offset -= kDoubleSize;
935 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
941 void MacroAssembler::MultiPopFPU(RegList regs) {
942 int16_t stack_offset = 0;
944 for (int16_t i = 0; i < kNumRegisters; i++) {
945 if ((regs & (1 << i)) != 0) {
946 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
947 stack_offset += kDoubleSize;
950 addiu(sp, sp, stack_offset);
954 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
955 int16_t stack_offset = 0;
957 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
958 if ((regs & (1 << i)) != 0) {
959 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
960 stack_offset += kDoubleSize;
963 addiu(sp, sp, stack_offset);
967 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
968 RegList saved_regs = kJSCallerSaved | ra.bit();
969 MultiPush(saved_regs);
970 AllowExternalCallThatCantCauseGC scope(this);
972 // Save to a0 in case address == t0.
974 PrepareCallCFunction(2, t0);
976 li(a1, instructions * kInstrSize);
977 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
978 MultiPop(saved_regs);
982 void MacroAssembler::Ext(Register rt,
987 ASSERT(pos + size < 33);
989 if (kArchVariant == kMips32r2) {
990 ext_(rt, rs, pos, size);
992 // Move rs to rt and shift it left then right to get the
993 // desired bitfield on the right side and zeroes on the left.
994 int shift_left = 32 - (pos + size);
995 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
997 int shift_right = 32 - size;
998 if (shift_right > 0) {
999 srl(rt, rt, shift_right);
1005 void MacroAssembler::Ins(Register rt,
1010 ASSERT(pos + size <= 32);
1013 if (kArchVariant == kMips32r2) {
1014 ins_(rt, rs, pos, size);
1016 ASSERT(!rt.is(t8) && !rs.is(t8));
1017 Subu(at, zero_reg, Operand(1));
1018 srl(at, at, 32 - size);
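    // at now holds a mask with the low 'size' bits set.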
1022 nor(at, at, zero_reg);
1029 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1031 FPURegister scratch) {
1032 // Move the data from fs to t8.
1034 Cvt_d_uw(fd, t8, scratch);
1038 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1040 FPURegister scratch) {
1041 // Convert rs to a FP value in fd (and fd + 1).
1042 // We do this by converting rs minus the MSB to avoid sign conversion,
1043 // then adding 2^31 to the result (if needed).
1045 ASSERT(!fd.is(scratch));
1049 // Save rs's MSB to t9.
1053 // Move the result to fd.
1056 // Convert fd to a real FP value.
1059 Label conversion_done;
1061 // If rs's MSB was 0, it's done.
1062 // Otherwise we need to add that to the FP register.
1063 Branch(&conversion_done, eq, t9, Operand(zero_reg));
1065 // Load 2^31 into f20 as its float representation.
1067 mtc1(at, FPURegister::from_code(scratch.code() + 1));
1068 mtc1(zero_reg, scratch);
1070 add_d(fd, fd, scratch);
1072 bind(&conversion_done);
1076 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1078 FPURegister scratch) {
1079 Trunc_uw_d(fs, t8, scratch);
1084 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1085 if (kArchVariant == kLoongson && fd.is(fs)) {
1086 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1088 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1095 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1096 if (kArchVariant == kLoongson && fd.is(fs)) {
1097 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1099 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1106 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1107 if (kArchVariant == kLoongson && fd.is(fs)) {
1108 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1110 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1117 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1118 if (kArchVariant == kLoongson && fd.is(fs)) {
1119 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1121 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1128 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1130 FPURegister scratch) {
1131 ASSERT(!fd.is(scratch));
1134 // Load 2^31 into scratch as its float representation.
1136 mtc1(at, FPURegister::from_code(scratch.code() + 1));
1137 mtc1(zero_reg, scratch);
1138 // Test if scratch > fd.
1139 // If fd < 2^31 we can convert it normally.
1140 Label simple_convert;
1141 BranchF(&simple_convert, NULL, lt, fd, scratch);
1143 // First we subtract 2^31 from fd, then trunc it to rs
1144 // and add 2^31 to rs.
1145 sub_d(scratch, fd, scratch);
1146 trunc_w_d(scratch, scratch);
1148 Or(rs, rs, 1 << 31);
1152 // Simple conversion.
1153 bind(&simple_convert);
1154 trunc_w_d(scratch, fd);
1161 void MacroAssembler::BranchF(Label* target,
1166 BranchDelaySlot bd) {
1167 BlockTrampolinePoolScope block_trampoline_pool(this);
1173 ASSERT(nan || target);
1174 // Check for unordered (NaN) cases.
1176 c(UN, D, cmp1, cmp2);
1181 // Here NaN cases were either handled by this function or are assumed to
1182 // have been handled by the caller.
  // Unsigned conditions are treated as their signed counterparts.
1186 c(OLT, D, cmp1, cmp2);
1190 c(ULE, D, cmp1, cmp2);
1194 c(ULT, D, cmp1, cmp2);
1198 c(OLE, D, cmp1, cmp2);
1202 c(EQ, D, cmp1, cmp2);
1206 c(UEQ, D, cmp1, cmp2);
1210 c(EQ, D, cmp1, cmp2);
1214 c(UEQ, D, cmp1, cmp2);
1222 if (bd == PROTECT) {
1228 void MacroAssembler::Move(FPURegister dst, double imm) {
1229 static const DoubleRepresentation minus_zero(-0.0);
1230 static const DoubleRepresentation zero(0.0);
1231 DoubleRepresentation value_rep(imm);
1232 // Handle special values first.
1233 bool force_load = dst.is(kDoubleRegZero);
1234 if (value_rep == zero && !force_load) {
1235 mov_d(dst, kDoubleRegZero);
1236 } else if (value_rep == minus_zero && !force_load) {
1237 neg_d(dst, kDoubleRegZero);
1240 DoubleAsTwoUInt32(imm, &lo, &hi);
1241 // Move the low part of the double into the lower of the corresponding FPU
1242 // register of FPU register pair.
1244 li(at, Operand(lo));
1247 mtc1(zero_reg, dst);
1249 // Move the high part of the double into the higher of the corresponding FPU
1250 // register of FPU register pair.
1252 li(at, Operand(hi));
1253 mtc1(at, dst.high());
1255 mtc1(zero_reg, dst.high());
1261 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1262 if (kArchVariant == kLoongson) {
1264 Branch(&done, ne, rt, Operand(zero_reg));
1273 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1274 if (kArchVariant == kLoongson) {
1276 Branch(&done, eq, rt, Operand(zero_reg));
1285 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1286 if (kArchVariant == kLoongson) {
1287 // Tests an FP condition code and then conditionally move rs to rd.
1288 // We do not currently use any FPU cc bit other than bit 0.
1290 ASSERT(!(rs.is(t8) || rd.is(t8)));
1292 Register scratch = t8;
    // We need to fetch the contents of the FCSR register and then test its cc
    // (floating point condition code) bit (for cc = 0, this is bit 23 of the
    // FCSR).
    cfc1(scratch, FCSR);
    // For the MIPS I, II and III architectures, the contents of scratch are
    // UNPREDICTABLE for the instruction immediately following CFC1.
1300 srl(scratch, scratch, 16);
1301 andi(scratch, scratch, 0x0080);
1302 Branch(&done, eq, scratch, Operand(zero_reg));
1311 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1312 if (kArchVariant == kLoongson) {
1313 // Tests an FP condition code and then conditionally move rs to rd.
1314 // We do not currently use any FPU cc bit other than bit 0.
1316 ASSERT(!(rs.is(t8) || rd.is(t8)));
1318 Register scratch = t8;
    // We need to fetch the contents of the FCSR register and then test its cc
    // (floating point condition code) bit (for cc = 0, this is bit 23 of the
    // FCSR).
    cfc1(scratch, FCSR);
    // For the MIPS I, II and III architectures, the contents of scratch are
    // UNPREDICTABLE for the instruction immediately following CFC1.
1326 srl(scratch, scratch, 16);
1327 andi(scratch, scratch, 0x0080);
1328 Branch(&done, ne, scratch, Operand(zero_reg));
1337 void MacroAssembler::Clz(Register rd, Register rs) {
1338 if (kArchVariant == kLoongson) {
1339 ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1341 Register scratch = t9;
1347 and_(scratch, at, mask);
1348 Branch(&end, ne, scratch, Operand(zero_reg));
1350 Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1359 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1361 DoubleRegister double_input,
1363 DoubleRegister double_scratch,
1364 Register except_flag,
1365 CheckForInexactConversion check_inexact) {
1366 ASSERT(!result.is(scratch));
1367 ASSERT(!double_input.is(double_scratch));
1368 ASSERT(!except_flag.is(scratch));
1372 // Clear the except flag (0 = no exception)
1373 mov(except_flag, zero_reg);
1375 // Test for values that can be exactly represented as a signed 32-bit integer.
1376 cvt_w_d(double_scratch, double_input);
1377 mfc1(result, double_scratch);
1378 cvt_d_w(double_scratch, double_scratch);
1379 BranchF(&done, NULL, eq, double_input, double_scratch);
1381 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1383 if (check_inexact == kDontCheckForInexactConversion) {
1384 // Ignore inexact exceptions.
1385 except_mask &= ~kFCSRInexactFlagMask;
1389 cfc1(scratch, FCSR);
1390 // Disable FPU exceptions.
1391 ctc1(zero_reg, FCSR);
1393 // Do operation based on rounding mode.
1394 switch (rounding_mode) {
1395 case kRoundToNearest:
1396 Round_w_d(double_scratch, double_input);
1399 Trunc_w_d(double_scratch, double_input);
1401 case kRoundToPlusInf:
1402 Ceil_w_d(double_scratch, double_input);
1404 case kRoundToMinusInf:
1405 Floor_w_d(double_scratch, double_input);
1407 } // End of switch-statement.
1410 cfc1(except_flag, FCSR);
1412 ctc1(scratch, FCSR);
1413 // Move the converted value into the result register.
1414 mfc1(result, double_scratch);
1416 // Check for fpu exceptions.
1417 And(except_flag, except_flag, Operand(except_mask));
1423 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1424 DoubleRegister double_input,
1426 DoubleRegister single_scratch = kLithiumScratchDouble.low();
1427 Register scratch = at;
1428 Register scratch2 = t9;
1430 // Clear cumulative exception flags and save the FCSR.
1431 cfc1(scratch2, FCSR);
1432 ctc1(zero_reg, FCSR);
1433 // Try a conversion to a signed integer.
1434 trunc_w_d(single_scratch, double_input);
1435 mfc1(result, single_scratch);
1436 // Retrieve and restore the FCSR.
1437 cfc1(scratch, FCSR);
1438 ctc1(scratch2, FCSR);
1439 // Check for overflow and NaNs.
1442 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1443 // If we had no exceptions we are done.
1444 Branch(done, eq, scratch, Operand(zero_reg));
1448 void MacroAssembler::TruncateDoubleToI(Register result,
1449 DoubleRegister double_input) {
1452 TryInlineTruncateDoubleToI(result, double_input, &done);
  // If we fell through, the inline version didn't succeed, so call the stub.
1456 Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
1457 sdc1(double_input, MemOperand(sp, 0));
1459 DoubleToIStub stub(sp, result, 0, true, true);
1462 Addu(sp, sp, Operand(kDoubleSize));
1469 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1471 DoubleRegister double_scratch = f12;
1472 ASSERT(!result.is(object));
1474 ldc1(double_scratch,
1475 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1476 TryInlineTruncateDoubleToI(result, double_scratch, &done);
  // If we fell through, the inline version didn't succeed, so call the stub.
1480 DoubleToIStub stub(object,
1482 HeapNumber::kValueOffset - kHeapObjectTag,
1492 void MacroAssembler::TruncateNumberToI(Register object,
1494 Register heap_number_map,
1496 Label* not_number) {
1498 ASSERT(!result.is(object));
1500 UntagAndJumpIfSmi(result, object, &done);
1501 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1502 TruncateHeapNumberToI(result, object);
1508 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1510 int num_least_bits) {
1511 Ext(dst, src, kSmiTagSize, num_least_bits);
1515 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1517 int num_least_bits) {
1518 And(dst, src, Operand((1 << num_least_bits) - 1));
// Emulated conditional branches do not emit a nop in the branch delay slot.
1524 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1525 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
1526 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1527 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1530 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1531 BranchShort(offset, bdslot);
1535 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1537 BranchDelaySlot bdslot) {
1538 BranchShort(offset, cond, rs, rt, bdslot);
1542 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1543 if (L->is_bound()) {
1545 BranchShort(L, bdslot);
1550 if (is_trampoline_emitted()) {
1553 BranchShort(L, bdslot);
1559 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1561 BranchDelaySlot bdslot) {
1562 if (L->is_bound()) {
1564 BranchShort(L, cond, rs, rt, bdslot);
1566 if (cond != cc_always) {
1568 Condition neg_cond = NegateCondition(cond);
1569 BranchShort(&skip, neg_cond, rs, rt);
1577 if (is_trampoline_emitted()) {
1578 if (cond != cc_always) {
1580 Condition neg_cond = NegateCondition(cond);
1581 BranchShort(&skip, neg_cond, rs, rt);
1588 BranchShort(L, cond, rs, rt, bdslot);
1594 void MacroAssembler::Branch(Label* L,
1597 Heap::RootListIndex index,
1598 BranchDelaySlot bdslot) {
1599 LoadRoot(at, index);
1600 Branch(L, cond, rs, Operand(at), bdslot);
1604 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1607 // Emit a nop in the branch delay slot if required.
1608 if (bdslot == PROTECT)
1613 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1615 BranchDelaySlot bdslot) {
1616 BRANCH_ARGS_CHECK(cond, rs, rt);
1617 ASSERT(!rs.is(zero_reg));
1618 Register r2 = no_reg;
1619 Register scratch = at;
1622 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1624 BlockTrampolinePoolScope block_trampoline_pool(this);
1631 beq(rs, r2, offset);
1634 bne(rs, r2, offset);
1636 // Signed comparison.
1638 if (r2.is(zero_reg)) {
1641 slt(scratch, r2, rs);
1642 bne(scratch, zero_reg, offset);
1646 if (r2.is(zero_reg)) {
1649 slt(scratch, rs, r2);
1650 beq(scratch, zero_reg, offset);
1654 if (r2.is(zero_reg)) {
1657 slt(scratch, rs, r2);
1658 bne(scratch, zero_reg, offset);
1662 if (r2.is(zero_reg)) {
1665 slt(scratch, r2, rs);
1666 beq(scratch, zero_reg, offset);
1669 // Unsigned comparison.
1671 if (r2.is(zero_reg)) {
1674 sltu(scratch, r2, rs);
1675 bne(scratch, zero_reg, offset);
1678 case Ugreater_equal:
1679 if (r2.is(zero_reg)) {
1682 sltu(scratch, rs, r2);
1683 beq(scratch, zero_reg, offset);
1687 if (r2.is(zero_reg)) {
1688 // No code needs to be emitted.
1691 sltu(scratch, rs, r2);
1692 bne(scratch, zero_reg, offset);
1696 if (r2.is(zero_reg)) {
1699 sltu(scratch, r2, rs);
1700 beq(scratch, zero_reg, offset);
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching
    // the target.
1710 BlockTrampolinePoolScope block_trampoline_pool(this);
1716 // We don't want any other register but scratch clobbered.
1717 ASSERT(!scratch.is(rs));
1720 beq(rs, r2, offset);
1723 // We don't want any other register but scratch clobbered.
1724 ASSERT(!scratch.is(rs));
1727 bne(rs, r2, offset);
1729 // Signed comparison.
1731 if (rt.imm32_ == 0) {
1736 slt(scratch, r2, rs);
1737 bne(scratch, zero_reg, offset);
1741 if (rt.imm32_ == 0) {
1743 } else if (is_int16(rt.imm32_)) {
1744 slti(scratch, rs, rt.imm32_);
1745 beq(scratch, zero_reg, offset);
1749 slt(scratch, rs, r2);
1750 beq(scratch, zero_reg, offset);
1754 if (rt.imm32_ == 0) {
1756 } else if (is_int16(rt.imm32_)) {
1757 slti(scratch, rs, rt.imm32_);
1758 bne(scratch, zero_reg, offset);
1762 slt(scratch, rs, r2);
1763 bne(scratch, zero_reg, offset);
1767 if (rt.imm32_ == 0) {
1772 slt(scratch, r2, rs);
1773 beq(scratch, zero_reg, offset);
1776 // Unsigned comparison.
1778 if (rt.imm32_ == 0) {
1783 sltu(scratch, r2, rs);
1784 bne(scratch, zero_reg, offset);
1787 case Ugreater_equal:
1788 if (rt.imm32_ == 0) {
1790 } else if (is_int16(rt.imm32_)) {
1791 sltiu(scratch, rs, rt.imm32_);
1792 beq(scratch, zero_reg, offset);
1796 sltu(scratch, rs, r2);
1797 beq(scratch, zero_reg, offset);
1801 if (rt.imm32_ == 0) {
1802 // No code needs to be emitted.
1804 } else if (is_int16(rt.imm32_)) {
1805 sltiu(scratch, rs, rt.imm32_);
1806 bne(scratch, zero_reg, offset);
1810 sltu(scratch, rs, r2);
1811 bne(scratch, zero_reg, offset);
1815 if (rt.imm32_ == 0) {
1820 sltu(scratch, r2, rs);
1821 beq(scratch, zero_reg, offset);
1828 // Emit a nop in the branch delay slot if required.
1829 if (bdslot == PROTECT)
1834 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
  // We pass branch_offset as an argument to the branch instruction so that it
  // is computed just before the branch instruction is generated, as required.
1838 b(shifted_branch_offset(L, false));
1840 // Emit a nop in the branch delay slot if required.
1841 if (bdslot == PROTECT)
1846 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1848 BranchDelaySlot bdslot) {
1849 BRANCH_ARGS_CHECK(cond, rs, rt);
1852 Register r2 = no_reg;
1853 Register scratch = at;
1855 BlockTrampolinePoolScope block_trampoline_pool(this);
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching
    // the target.
1862 offset = shifted_branch_offset(L, false);
1866 offset = shifted_branch_offset(L, false);
1867 beq(rs, r2, offset);
1870 offset = shifted_branch_offset(L, false);
1871 bne(rs, r2, offset);
1873 // Signed comparison.
1875 if (r2.is(zero_reg)) {
1876 offset = shifted_branch_offset(L, false);
1879 slt(scratch, r2, rs);
1880 offset = shifted_branch_offset(L, false);
1881 bne(scratch, zero_reg, offset);
1885 if (r2.is(zero_reg)) {
1886 offset = shifted_branch_offset(L, false);
1889 slt(scratch, rs, r2);
1890 offset = shifted_branch_offset(L, false);
1891 beq(scratch, zero_reg, offset);
1895 if (r2.is(zero_reg)) {
1896 offset = shifted_branch_offset(L, false);
1899 slt(scratch, rs, r2);
1900 offset = shifted_branch_offset(L, false);
1901 bne(scratch, zero_reg, offset);
1905 if (r2.is(zero_reg)) {
1906 offset = shifted_branch_offset(L, false);
1909 slt(scratch, r2, rs);
1910 offset = shifted_branch_offset(L, false);
1911 beq(scratch, zero_reg, offset);
1914 // Unsigned comparison.
1916 if (r2.is(zero_reg)) {
1917 offset = shifted_branch_offset(L, false);
1920 sltu(scratch, r2, rs);
1921 offset = shifted_branch_offset(L, false);
1922 bne(scratch, zero_reg, offset);
1925 case Ugreater_equal:
1926 if (r2.is(zero_reg)) {
1927 offset = shifted_branch_offset(L, false);
1930 sltu(scratch, rs, r2);
1931 offset = shifted_branch_offset(L, false);
1932 beq(scratch, zero_reg, offset);
1936 if (r2.is(zero_reg)) {
1937 // No code needs to be emitted.
1940 sltu(scratch, rs, r2);
1941 offset = shifted_branch_offset(L, false);
1942 bne(scratch, zero_reg, offset);
1946 if (r2.is(zero_reg)) {
1947 offset = shifted_branch_offset(L, false);
1950 sltu(scratch, r2, rs);
1951 offset = shifted_branch_offset(L, false);
1952 beq(scratch, zero_reg, offset);
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching
    // the target.
1962 BlockTrampolinePoolScope block_trampoline_pool(this);
1965 offset = shifted_branch_offset(L, false);
1969 ASSERT(!scratch.is(rs));
1972 offset = shifted_branch_offset(L, false);
1973 beq(rs, r2, offset);
1976 ASSERT(!scratch.is(rs));
1979 offset = shifted_branch_offset(L, false);
1980 bne(rs, r2, offset);
1982 // Signed comparison.
1984 if (rt.imm32_ == 0) {
1985 offset = shifted_branch_offset(L, false);
1988 ASSERT(!scratch.is(rs));
1991 slt(scratch, r2, rs);
1992 offset = shifted_branch_offset(L, false);
1993 bne(scratch, zero_reg, offset);
1997 if (rt.imm32_ == 0) {
1998 offset = shifted_branch_offset(L, false);
2000 } else if (is_int16(rt.imm32_)) {
2001 slti(scratch, rs, rt.imm32_);
2002 offset = shifted_branch_offset(L, false);
2003 beq(scratch, zero_reg, offset);
2005 ASSERT(!scratch.is(rs));
2008 slt(scratch, rs, r2);
2009 offset = shifted_branch_offset(L, false);
2010 beq(scratch, zero_reg, offset);
2014 if (rt.imm32_ == 0) {
2015 offset = shifted_branch_offset(L, false);
2017 } else if (is_int16(rt.imm32_)) {
2018 slti(scratch, rs, rt.imm32_);
2019 offset = shifted_branch_offset(L, false);
2020 bne(scratch, zero_reg, offset);
2022 ASSERT(!scratch.is(rs));
2025 slt(scratch, rs, r2);
2026 offset = shifted_branch_offset(L, false);
2027 bne(scratch, zero_reg, offset);
2031 if (rt.imm32_ == 0) {
2032 offset = shifted_branch_offset(L, false);
2035 ASSERT(!scratch.is(rs));
2038 slt(scratch, r2, rs);
2039 offset = shifted_branch_offset(L, false);
2040 beq(scratch, zero_reg, offset);
2043 // Unsigned comparison.
2045 if (rt.imm32_ == 0) {
2046 offset = shifted_branch_offset(L, false);
2049 ASSERT(!scratch.is(rs));
2052 sltu(scratch, r2, rs);
2053 offset = shifted_branch_offset(L, false);
2054 bne(scratch, zero_reg, offset);
2057 case Ugreater_equal:
2058 if (rt.imm32_ == 0) {
2059 offset = shifted_branch_offset(L, false);
2061 } else if (is_int16(rt.imm32_)) {
2062 sltiu(scratch, rs, rt.imm32_);
2063 offset = shifted_branch_offset(L, false);
2064 beq(scratch, zero_reg, offset);
2066 ASSERT(!scratch.is(rs));
2069 sltu(scratch, rs, r2);
2070 offset = shifted_branch_offset(L, false);
2071 beq(scratch, zero_reg, offset);
2075 if (rt.imm32_ == 0) {
2076 // No code needs to be emitted.
2078 } else if (is_int16(rt.imm32_)) {
2079 sltiu(scratch, rs, rt.imm32_);
2080 offset = shifted_branch_offset(L, false);
2081 bne(scratch, zero_reg, offset);
2083 ASSERT(!scratch.is(rs));
2086 sltu(scratch, rs, r2);
2087 offset = shifted_branch_offset(L, false);
2088 bne(scratch, zero_reg, offset);
2092 if (rt.imm32_ == 0) {
2093 offset = shifted_branch_offset(L, false);
2096 ASSERT(!scratch.is(rs));
2099 sltu(scratch, r2, rs);
2100 offset = shifted_branch_offset(L, false);
2101 beq(scratch, zero_reg, offset);
  // Check that the offset actually fits in an int16_t.
2109 ASSERT(is_int16(offset));
2110 // Emit a nop in the branch delay slot if required.
2111 if (bdslot == PROTECT)
2116 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2117 BranchAndLinkShort(offset, bdslot);
2121 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2123 BranchDelaySlot bdslot) {
2124 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2128 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2129 if (L->is_bound()) {
2131 BranchAndLinkShort(L, bdslot);
2136 if (is_trampoline_emitted()) {
2139 BranchAndLinkShort(L, bdslot);
2145 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2147 BranchDelaySlot bdslot) {
2148 if (L->is_bound()) {
2150 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2153 Condition neg_cond = NegateCondition(cond);
2154 BranchShort(&skip, neg_cond, rs, rt);
2159 if (is_trampoline_emitted()) {
2161 Condition neg_cond = NegateCondition(cond);
2162 BranchShort(&skip, neg_cond, rs, rt);
2166 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2172 // We need to use a bgezal or bltzal, but they can't be used directly with the
2173 // slt instructions. We could use sub or add instead but we would miss overflow
2174 // cases, so we keep slt and add an intermediate third instruction.
2175 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2176 BranchDelaySlot bdslot) {
2179 // Emit a nop in the branch delay slot if required.
2180 if (bdslot == PROTECT)
2185 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2186 Register rs, const Operand& rt,
2187 BranchDelaySlot bdslot) {
2188 BRANCH_ARGS_CHECK(cond, rs, rt);
2189 Register r2 = no_reg;
2190 Register scratch = at;
2194 } else if (cond != cc_always) {
2200 BlockTrampolinePoolScope block_trampoline_pool(this);
2216 // Signed comparison.
2218 slt(scratch, r2, rs);
2219 addiu(scratch, scratch, -1);
2220 bgezal(scratch, offset);
2223 slt(scratch, rs, r2);
2224 addiu(scratch, scratch, -1);
2225 bltzal(scratch, offset);
2228 slt(scratch, rs, r2);
2229 addiu(scratch, scratch, -1);
2230 bgezal(scratch, offset);
2233 slt(scratch, r2, rs);
2234 addiu(scratch, scratch, -1);
2235 bltzal(scratch, offset);
2238 // Unsigned comparison.
2240 sltu(scratch, r2, rs);
2241 addiu(scratch, scratch, -1);
2242 bgezal(scratch, offset);
2244 case Ugreater_equal:
2245 sltu(scratch, rs, r2);
2246 addiu(scratch, scratch, -1);
2247 bltzal(scratch, offset);
2250 sltu(scratch, rs, r2);
2251 addiu(scratch, scratch, -1);
2252 bgezal(scratch, offset);
2255 sltu(scratch, r2, rs);
2256 addiu(scratch, scratch, -1);
2257 bltzal(scratch, offset);
2264 // Emit a nop in the branch delay slot if required.
2265 if (bdslot == PROTECT)
2270 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2271 bal(shifted_branch_offset(L, false));
2273 // Emit a nop in the branch delay slot if required.
2274 if (bdslot == PROTECT)
2279 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2281 BranchDelaySlot bdslot) {
2282 BRANCH_ARGS_CHECK(cond, rs, rt);
2285 Register r2 = no_reg;
2286 Register scratch = at;
2289 } else if (cond != cc_always) {
2295 BlockTrampolinePoolScope block_trampoline_pool(this);
2298 offset = shifted_branch_offset(L, false);
2304 offset = shifted_branch_offset(L, false);
2310 offset = shifted_branch_offset(L, false);
2314 // Signed comparison.
2316 slt(scratch, r2, rs);
2317 addiu(scratch, scratch, -1);
2318 offset = shifted_branch_offset(L, false);
2319 bgezal(scratch, offset);
2322 slt(scratch, rs, r2);
2323 addiu(scratch, scratch, -1);
2324 offset = shifted_branch_offset(L, false);
2325 bltzal(scratch, offset);
2328 slt(scratch, rs, r2);
2329 addiu(scratch, scratch, -1);
2330 offset = shifted_branch_offset(L, false);
2331 bgezal(scratch, offset);
2334 slt(scratch, r2, rs);
2335 addiu(scratch, scratch, -1);
2336 offset = shifted_branch_offset(L, false);
2337 bltzal(scratch, offset);
2340 // Unsigned comparison.
2342 sltu(scratch, r2, rs);
2343 addiu(scratch, scratch, -1);
2344 offset = shifted_branch_offset(L, false);
2345 bgezal(scratch, offset);
2347 case Ugreater_equal:
2348 sltu(scratch, rs, r2);
2349 addiu(scratch, scratch, -1);
2350 offset = shifted_branch_offset(L, false);
2351 bltzal(scratch, offset);
2354 sltu(scratch, rs, r2);
2355 addiu(scratch, scratch, -1);
2356 offset = shifted_branch_offset(L, false);
2357 bgezal(scratch, offset);
2360 sltu(scratch, r2, rs);
2361 addiu(scratch, scratch, -1);
2362 offset = shifted_branch_offset(L, false);
2363 bltzal(scratch, offset);
  // Check that the offset actually fits in an int16_t.
2371 ASSERT(is_int16(offset));
2373 // Emit a nop in the branch delay slot if required.
2374 if (bdslot == PROTECT)
2379 void MacroAssembler::Jump(Register target,
2383 BranchDelaySlot bd) {
2384 BlockTrampolinePoolScope block_trampoline_pool(this);
2385 if (cond == cc_always) {
2388 BRANCH_ARGS_CHECK(cond, rs, rt);
2389 Branch(2, NegateCondition(cond), rs, rt);
2392 // Emit a nop in the branch delay slot if required.
2398 void MacroAssembler::Jump(intptr_t target,
2399 RelocInfo::Mode rmode,
2403 BranchDelaySlot bd) {
2405 if (cond != cc_always) {
2406 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2408 // The first instruction of 'li' may be placed in the delay slot.
2409 // This is not an issue, t9 is expected to be clobbered anyway.
2410 li(t9, Operand(target, rmode));
2411 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2416 void MacroAssembler::Jump(Address target,
2417 RelocInfo::Mode rmode,
2421 BranchDelaySlot bd) {
2422 ASSERT(!RelocInfo::IsCodeTarget(rmode));
2423 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2427 void MacroAssembler::Jump(Handle<Code> code,
2428 RelocInfo::Mode rmode,
2432 BranchDelaySlot bd) {
2433 ASSERT(RelocInfo::IsCodeTarget(rmode));
2434 AllowDeferredHandleDereference embedding_raw_address;
2435 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2439 int MacroAssembler::CallSize(Register target,
2443 BranchDelaySlot bd) {
2446 if (cond == cc_always) {
2455 return size * kInstrSize;
// Note: To call gcc-compiled C code on MIPS, you must call through t9.
2460 void MacroAssembler::Call(Register target,
2464 BranchDelaySlot bd) {
2465 BlockTrampolinePoolScope block_trampoline_pool(this);
2468 if (cond == cc_always) {
2471 BRANCH_ARGS_CHECK(cond, rs, rt);
2472 Branch(2, NegateCondition(cond), rs, rt);
2475 // Emit a nop in the branch delay slot if required.
2479 ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2480 SizeOfCodeGeneratedSince(&start));
2484 int MacroAssembler::CallSize(Address target,
2485 RelocInfo::Mode rmode,
2489 BranchDelaySlot bd) {
2490 int size = CallSize(t9, cond, rs, rt, bd);
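  // Loading the 32-bit target address into t9 takes a fixed two instructions
  // (lui + ori) on top of the register-call sequence.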
2491 return size + 2 * kInstrSize;
2495 void MacroAssembler::Call(Address target,
2496 RelocInfo::Mode rmode,
2500 BranchDelaySlot bd) {
2501 BlockTrampolinePoolScope block_trampoline_pool(this);
2504 int32_t target_int = reinterpret_cast<int32_t>(target);
2505 // Must record previous source positions before the
2506 // li() generates a new code target.
2507 positions_recorder()->WriteRecordedPositions();
2508 li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
2509 Call(t9, cond, rs, rt, bd);
2510 ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2511 SizeOfCodeGeneratedSince(&start));
2515 int MacroAssembler::CallSize(Handle<Code> code,
2516 RelocInfo::Mode rmode,
2517 TypeFeedbackId ast_id,
2521 BranchDelaySlot bd) {
2522 AllowDeferredHandleDereference using_raw_address;
2523 return CallSize(reinterpret_cast<Address>(code.location()),
2524 rmode, cond, rs, rt, bd);
2528 void MacroAssembler::Call(Handle<Code> code,
2529 RelocInfo::Mode rmode,
2530 TypeFeedbackId ast_id,
2534 BranchDelaySlot bd) {
2535 BlockTrampolinePoolScope block_trampoline_pool(this);
2538 ASSERT(RelocInfo::IsCodeTarget(rmode));
2539 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
2540 SetRecordedAstId(ast_id);
2541 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2543 AllowDeferredHandleDereference embedding_raw_address;
2544 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2545 ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2546 SizeOfCodeGeneratedSince(&start));
2550 void MacroAssembler::Ret(Condition cond,
2553 BranchDelaySlot bd) {
2554 Jump(ra, cond, rs, rt, bd);
2558 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2559 BlockTrampolinePoolScope block_trampoline_pool(this);
2562 imm28 = jump_address(L);
2563 imm28 &= kImm28Mask;
2564 { BlockGrowBufferScope block_buf_growth(this);
2565 // Buffer growth (and relocation) must be blocked for internal references
2566 // until associated instructions are emitted and available to be patched.
2567 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2570 // Emit a nop in the branch delay slot if required.
2571 if (bdslot == PROTECT)
2576 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2577 BlockTrampolinePoolScope block_trampoline_pool(this);
2580 imm32 = jump_address(L);
2581 { BlockGrowBufferScope block_buf_growth(this);
2582 // Buffer growth (and relocation) must be blocked for internal references
2583 // until associated instructions are emitted and available to be patched.
2584 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2585 lui(at, (imm32 & kHiMask) >> kLuiShift);
2586 ori(at, at, (imm32 & kImm16Mask));
2590 // Emit a nop in the branch delay slot if required.
2591 if (bdslot == PROTECT)
2596 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2597 BlockTrampolinePoolScope block_trampoline_pool(this);
2600 imm32 = jump_address(L);
2601 { BlockGrowBufferScope block_buf_growth(this);
2602 // Buffer growth (and relocation) must be blocked for internal references
2603 // until associated instructions are emitted and available to be patched.
2604 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2605 lui(at, (imm32 & kHiMask) >> kLuiShift);
2606 ori(at, at, (imm32 & kImm16Mask));
2610 // Emit a nop in the branch delay slot if required.
2611 if (bdslot == PROTECT)
2616 void MacroAssembler::DropAndRet(int drop) {
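  // The stack adjustment below executes in the branch delay slot of the Ret.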
2617 Ret(USE_DELAY_SLOT);
2618 addiu(sp, sp, drop * kPointerSize);
2621 void MacroAssembler::DropAndRet(int drop,
2624 const Operand& r2) {
2625 // Both Drop and Ret need to be conditional.
2627 if (cond != cc_always) {
2628 Branch(&skip, NegateCondition(cond), r1, r2);
2634 if (cond != cc_always) {
2640 void MacroAssembler::Drop(int count,
2643 const Operand& op) {
2651 Branch(&skip, NegateCondition(cond), reg, op);
2654 addiu(sp, sp, count * kPointerSize);
2663 void MacroAssembler::Swap(Register reg1,
2666 if (scratch.is(no_reg)) {
2667 Xor(reg1, reg1, Operand(reg2));
2668 Xor(reg2, reg2, Operand(reg1));
2669 Xor(reg1, reg1, Operand(reg2));
2678 void MacroAssembler::Call(Label* target) {
2679 BranchAndLink(target);
2683 void MacroAssembler::Push(Handle<Object> handle) {
2684 li(at, Operand(handle));
2689 #ifdef ENABLE_DEBUGGER_SUPPORT
2691 void MacroAssembler::DebugBreak() {
2692 PrepareCEntryArgs(0);
2693 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
2695 ASSERT(AllowThisStubCall(&ces));
2696 Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
2699 #endif // ENABLE_DEBUGGER_SUPPORT
2702 // ---------------------------------------------------------------------------
2703 // Exception handling.
2705 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2706 int handler_index) {
2707 // Adjust this code if not the case.
2708 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2709 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2710 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2711 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2712 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2713 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
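  // Handler layout on the stack, from lowest address: next handler, code
  // object, state, context, frame pointer.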
2715 // For the JSEntry handler, we must preserve a0-a3 and s0.
2716 // t1-t3 are available. We will build up the handler from the bottom by
2717 // pushing on the stack.
2718 // Set up the code object (t1) and the state (t2) for pushing.
2719 unsigned state =
2720 StackHandler::IndexField::encode(handler_index) |
2721 StackHandler::KindField::encode(kind);
2722 li(t1, Operand(CodeObject()), CONSTANT_SIZE);
2723 li(t2, Operand(state));
2725 // Push the frame pointer, context, state, and code object.
2726 if (kind == StackHandler::JS_ENTRY) {
2727 ASSERT_EQ(Smi::FromInt(0), 0);
2728 // The second zero_reg indicates no context.
2729 // The first zero_reg is the NULL frame pointer.
2730 // The operands are reversed to match the order of MultiPush/Pop.
2731 Push(zero_reg, zero_reg, t2, t1);
2732 } else {
2733 MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
2734 }
2736 // Link the current handler as the next handler.
2737 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2738 lw(t1, MemOperand(t2));
2739 push(t1);
2740 // Set this new handler as the current one.
2741 sw(sp, MemOperand(t2));
2745 void MacroAssembler::PopTryHandler() {
2746 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2747 pop(a1);
2748 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2749 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2750 sw(a1, MemOperand(at));
2754 void MacroAssembler::JumpToHandlerEntry() {
2755 // Compute the handler entry address and jump to it. The handler table is
2756 // a fixed array of (smi-tagged) code offsets.
2757 // v0 = exception, a1 = code object, a2 = state.
2758 lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset)); // Handler table.
2759 Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2760 srl(a2, a2, StackHandler::kKindWidth); // Handler index.
2761 sll(a2, a2, kPointerSizeLog2);
2762 Addu(a2, a2, a3);
2763 lw(a2, MemOperand(a2)); // Smi-tagged offset.
2764 Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
2765 sra(t9, a2, kSmiTagSize);
2766 Addu(t9, t9, a1);
2767 Jump(t9); // Jump.
2768 }
2771 void MacroAssembler::Throw(Register value) {
2772 // Adjust this code if not the case.
2773 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2774 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2775 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2776 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2777 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2778 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2780 // The exception is expected in v0.
2781 Move(v0, value);
2783 // Drop the stack pointer to the top of the top handler.
2784 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
2785 isolate())));
2786 lw(sp, MemOperand(a3));
2788 // Restore the next handler.
2789 pop(a2);
2790 sw(a2, MemOperand(a3));
2792 // Get the code object (a1) and state (a2). Restore the context and frame
2794 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2796 // If the handler is a JS frame, restore the context to the frame.
2797 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
2798 // or cp.
2799 Label done;
2800 Branch(&done, eq, cp, Operand(zero_reg));
2801 sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2802 bind(&done);
2804 JumpToHandlerEntry();
2805 }
2808 void MacroAssembler::ThrowUncatchable(Register value) {
2809 // Adjust this code if not the case.
2810 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2811 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2812 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2813 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2814 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2815 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2817 // The exception is expected in v0.
2818 if (!value.is(v0)) {
2819 mov(v0, value);
2820 }
2821 // Drop the stack pointer to the top of the top stack handler.
2822 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2823 lw(sp, MemOperand(a3));
2825 // Unwind the handlers until the ENTRY handler is found.
2826 Label fetch_next, check_kind;
2827 jmp(&check_kind);
2828 bind(&fetch_next);
2829 lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
2831 bind(&check_kind);
2832 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2833 lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
2834 And(a2, a2, Operand(StackHandler::KindField::kMask));
2835 Branch(&fetch_next, ne, a2, Operand(zero_reg));
2837 // Set the top handler address to next handler past the top ENTRY handler.
2838 pop(a2);
2839 sw(a2, MemOperand(a3));
2841 // Get the code object (a1) and state (a2). Clear the context and frame
2842 // pointer (0 was saved in the handler).
2843 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2845 JumpToHandlerEntry();
2849 void MacroAssembler::Allocate(int object_size,
2850 Register result,
2851 Register scratch1,
2852 Register scratch2,
2853 Label* gc_required,
2854 AllocationFlags flags) {
2855 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
2856 if (!FLAG_inline_new) {
2857 if (emit_debug_code()) {
2858 // Trash the registers to simulate an allocation failure.
2859 li(result, 0x7091);
2860 li(scratch1, 0x7191);
2861 li(scratch2, 0x7291);
2862 }
2863 jmp(gc_required);
2864 return;
2865 }
2867 ASSERT(!result.is(scratch1));
2868 ASSERT(!result.is(scratch2));
2869 ASSERT(!scratch1.is(scratch2));
2870 ASSERT(!scratch1.is(t9));
2871 ASSERT(!scratch2.is(t9));
2872 ASSERT(!result.is(t9));
2874 // Make object size into bytes.
2875 if ((flags & SIZE_IN_WORDS) != 0) {
2876 object_size *= kPointerSize;
2877 }
2878 ASSERT_EQ(0, object_size & kObjectAlignmentMask);
2880 // Check relative positions of allocation top and limit addresses.
2881 // ARM adds additional checks to make sure the ldm instruction can be
2882 // used. On MIPS we don't have ldm so we don't need additional checks either.
2883 ExternalReference allocation_top =
2884 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2885 ExternalReference allocation_limit =
2886 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2888 intptr_t top =
2889 reinterpret_cast<intptr_t>(allocation_top.address());
2890 intptr_t limit =
2891 reinterpret_cast<intptr_t>(allocation_limit.address());
2892 ASSERT((limit - top) == kPointerSize);
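// Top and limit live in adjacent words, so the limit can always be reached
// with a single-word offset (limit - top) from the top address loaded below.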
2894 // Set up allocation top address and object size registers.
2895 Register topaddr = scratch1;
2896 li(topaddr, Operand(allocation_top));
2898 // This code stores a temporary value in t9.
2899 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2900 // Load allocation top into result and allocation limit into t9.
2901 lw(result, MemOperand(topaddr));
2902 lw(t9, MemOperand(topaddr, kPointerSize));
2903 } else {
2904 if (emit_debug_code()) {
2905 // Assert that result actually contains top on entry. t9 is used
2906 // immediately below so this use of t9 does not cause difference with
2907 // respect to register content between debug and release mode.
2908 lw(t9, MemOperand(topaddr));
2909 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2910 }
2911 // Load allocation limit into t9. Result already contains allocation top.
2912 lw(t9, MemOperand(topaddr, limit - top));
2913 }
2915 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2916 // Align the next allocation. Storing the filler map without checking top is
2917 // safe in new-space because the limit of the heap is aligned there.
2918 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
2919 ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2920 And(scratch2, result, Operand(kDoubleAlignmentMask));
2921 Label aligned;
2922 Branch(&aligned, eq, scratch2, Operand(zero_reg));
2923 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
2924 Branch(gc_required, Ugreater_equal, result, Operand(t9));
2925 }
2926 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
2927 sw(scratch2, MemOperand(result));
2928 Addu(result, result, Operand(kDoubleSize / 2));
2929 bind(&aligned);
2930 }
2932 // Calculate new top and bail out if new space is exhausted. Use result
2933 // to calculate the new top.
2934 Addu(scratch2, result, Operand(object_size));
2935 Branch(gc_required, Ugreater, scratch2, Operand(t9));
2936 sw(scratch2, MemOperand(topaddr));
2938 // Tag object if requested.
2939 if ((flags & TAG_OBJECT) != 0) {
2940 Addu(result, result, Operand(kHeapObjectTag));
2945 void MacroAssembler::Allocate(Register object_size,
2946 Register result,
2947 Register scratch1,
2948 Register scratch2,
2949 Label* gc_required,
2950 AllocationFlags flags) {
2951 if (!FLAG_inline_new) {
2952 if (emit_debug_code()) {
2953 // Trash the registers to simulate an allocation failure.
2954 li(result, 0x7091);
2955 li(scratch1, 0x7191);
2956 li(scratch2, 0x7291);
2957 }
2958 jmp(gc_required);
2959 return;
2960 }
2962 ASSERT(!result.is(scratch1));
2963 ASSERT(!result.is(scratch2));
2964 ASSERT(!scratch1.is(scratch2));
2965 ASSERT(!object_size.is(t9));
2966 ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
2968 // Check relative positions of allocation top and limit addresses.
2969 // ARM adds additional checks to make sure the ldm instruction can be
2970 // used. On MIPS we don't have ldm so we don't need additional checks either.
2971 ExternalReference allocation_top =
2972 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2973 ExternalReference allocation_limit =
2974 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2975 intptr_t top =
2976 reinterpret_cast<intptr_t>(allocation_top.address());
2977 intptr_t limit =
2978 reinterpret_cast<intptr_t>(allocation_limit.address());
2979 ASSERT((limit - top) == kPointerSize);
2981 // Set up allocation top address and object size registers.
2982 Register topaddr = scratch1;
2983 li(topaddr, Operand(allocation_top));
2985 // This code stores a temporary value in t9.
2986 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2987 // Load allocation top into result and allocation limit into t9.
2988 lw(result, MemOperand(topaddr));
2989 lw(t9, MemOperand(topaddr, kPointerSize));
2990 } else {
2991 if (emit_debug_code()) {
2992 // Assert that result actually contains top on entry. t9 is used
2993 // immediately below so this use of t9 does not cause difference with
2994 // respect to register content between debug and release mode.
2995 lw(t9, MemOperand(topaddr));
2996 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2997 }
2998 // Load allocation limit into t9. Result already contains allocation top.
2999 lw(t9, MemOperand(topaddr, limit - top));
3000 }
3002 if ((flags & DOUBLE_ALIGNMENT) != 0) {
3003 // Align the next allocation. Storing the filler map without checking top is
3004 // safe in new-space because the limit of the heap is aligned there.
3005 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
3006 ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
3007 And(scratch2, result, Operand(kDoubleAlignmentMask));
3008 Label aligned;
3009 Branch(&aligned, eq, scratch2, Operand(zero_reg));
3010 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
3011 Branch(gc_required, Ugreater_equal, result, Operand(t9));
3012 }
3013 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3014 sw(scratch2, MemOperand(result));
3015 Addu(result, result, Operand(kDoubleSize / 2));
3016 bind(&aligned);
3017 }
3019 // Calculate new top and bail out if new space is exhausted. Use result
3020 // to calculate the new top. Object size may be in words so a shift is
3021 // required to get the number of bytes.
3022 if ((flags & SIZE_IN_WORDS) != 0) {
3023 sll(scratch2, object_size, kPointerSizeLog2);
3024 Addu(scratch2, result, scratch2);
3025 } else {
3026 Addu(scratch2, result, Operand(object_size));
3027 }
3028 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3030 // Update allocation top. result temporarily holds the new top.
3031 if (emit_debug_code()) {
3032 And(t9, scratch2, Operand(kObjectAlignmentMask));
3033 Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3034 }
3035 sw(scratch2, MemOperand(topaddr));
3037 // Tag object if requested.
3038 if ((flags & TAG_OBJECT) != 0) {
3039 Addu(result, result, Operand(kHeapObjectTag));
3044 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3045 Register scratch) {
3046 ExternalReference new_space_allocation_top =
3047 ExternalReference::new_space_allocation_top_address(isolate());
3049 // Make sure the object has no tag before resetting top.
3050 And(object, object, Operand(~kHeapObjectTagMask));
3052 // Check that the object un-allocated is below the current top.
3053 li(scratch, Operand(new_space_allocation_top));
3054 lw(scratch, MemOperand(scratch));
3055 Check(less, kUndoAllocationOfNonAllocatedMemory,
3056 object, Operand(scratch));
3058 // Write the address of the object to un-allocate as the current top.
3059 li(scratch, Operand(new_space_allocation_top));
3060 sw(object, MemOperand(scratch));
3064 void MacroAssembler::AllocateTwoByteString(Register result,
3065 Register length,
3066 Register scratch1,
3067 Register scratch2,
3068 Register scratch3,
3069 Label* gc_required) {
3070 // Calculate the number of bytes needed for the characters in the string while
3071 // observing object alignment.
3072 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3073 sll(scratch1, length, 1); // Length in bytes, not chars.
3074 addiu(scratch1, scratch1,
3075 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3076 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
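// scratch1 now holds the allocation size in bytes (header plus characters),
// rounded up to the object alignment.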
3078 // Allocate two-byte string in new space.
3086 // Set the map, length and hash field.
3087 InitializeNewString(result,
3089 Heap::kStringMapRootIndex,
3095 void MacroAssembler::AllocateAsciiString(Register result,
3096 Register length,
3097 Register scratch1,
3098 Register scratch2,
3099 Register scratch3,
3100 Label* gc_required) {
3101 // Calculate the number of bytes needed for the characters in the string
3102 // while observing object alignment.
3103 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3104 ASSERT(kCharSize == 1);
3105 addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3106 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3108 // Allocate ASCII string in new space.
3116 // Set the map, length and hash field.
3117 InitializeNewString(result,
3119 Heap::kAsciiStringMapRootIndex,
3125 void MacroAssembler::AllocateTwoByteConsString(Register result,
3129 Label* gc_required) {
3130 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3132 InitializeNewString(result,
3134 Heap::kConsStringMapRootIndex,
3140 void MacroAssembler::AllocateAsciiConsString(Register result,
3144 Label* gc_required) {
3145 Label allocate_new_space, install_map;
3146 AllocationFlags flags = TAG_OBJECT;
3148 ExternalReference high_promotion_mode = ExternalReference::
3149 new_space_high_promotion_mode_active_address(isolate());
3150 li(scratch1, Operand(high_promotion_mode));
3151 lw(scratch1, MemOperand(scratch1, 0));
3152 Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg));
3154 Allocate(ConsString::kSize,
3159 static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
3163 bind(&allocate_new_space);
3164 Allocate(ConsString::kSize,
3173 InitializeNewString(result,
3175 Heap::kConsAsciiStringMapRootIndex,
3181 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3185 Label* gc_required) {
3186 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3189 InitializeNewString(result,
3191 Heap::kSlicedStringMapRootIndex,
3197 void MacroAssembler::AllocateAsciiSlicedString(Register result,
3201 Label* gc_required) {
3202 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3205 InitializeNewString(result,
3207 Heap::kSlicedAsciiStringMapRootIndex,
3213 void MacroAssembler::JumpIfNotUniqueName(Register reg,
3214 Label* not_unique_name) {
3215 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
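// Internalized strings have both the kIsNotStringMask and kIsNotInternalizedMask
// bits clear, so one masked test accepts them; any other unique name must be a
// Symbol, which is checked next.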
3216 Label succeed;
3217 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3218 Branch(&succeed, eq, at, Operand(zero_reg));
3219 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3221 bind(&succeed);
3222 }
3225 // Allocates a heap number or jumps to the label if the young space is full and
3226 // a scavenge is needed.
3227 void MacroAssembler::AllocateHeapNumber(Register result,
3228 Register scratch1,
3229 Register scratch2,
3230 Register heap_number_map,
3231 Label* need_gc,
3232 TaggingMode tagging_mode) {
3233 // Allocate an object in the heap for the heap number and tag it as a heap
3235 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3236 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3238 // Store heap number map in the allocated object.
3239 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3240 if (tagging_mode == TAG_RESULT) {
3241 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3242 } else {
3243 sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3244 }
3245 }
3248 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3252 Label* gc_required) {
3253 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3254 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3255 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3259 // Copies a fixed number of fields of heap objects from src to dst.
3260 void MacroAssembler::CopyFields(Register dst,
3261 Register src,
3262 RegList temps,
3263 int field_count) {
3264 ASSERT((temps & dst.bit()) == 0);
3265 ASSERT((temps & src.bit()) == 0);
3266 // Primitive implementation using only one temporary register.
3268 Register tmp = no_reg;
3269 // Find a temp register in temps list.
3270 for (int i = 0; i < kNumRegisters; i++) {
3271 if ((temps & (1 << i)) != 0) {
3272 tmp.code_ = i;
3273 break;
3274 }
3275 }
3276 ASSERT(!tmp.is(no_reg));
3278 for (int i = 0; i < field_count; i++) {
3279 lw(tmp, FieldMemOperand(src, i * kPointerSize));
3280 sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3285 void MacroAssembler::CopyBytes(Register src,
3286 Register dst,
3287 Register length,
3288 Register scratch) {
3289 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3291 // Align src before copying in word size chunks.
3292 Branch(&byte_loop, le, length, Operand(kPointerSize));
3293 bind(&align_loop_1);
3294 And(scratch, src, kPointerSize - 1);
3295 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3296 lbu(scratch, MemOperand(src));
3297 Addu(src, src, 1);
3298 sb(scratch, MemOperand(dst));
3299 Addu(dst, dst, 1);
3300 Subu(length, length, Operand(1));
3301 Branch(&align_loop_1, ne, length, Operand(zero_reg));
3303 // Copy bytes in word size chunks.
3304 bind(&word_loop);
3305 if (emit_debug_code()) {
3306 And(scratch, src, kPointerSize - 1);
3307 Assert(eq, kExpectingAlignmentForCopyBytes,
3308 scratch, Operand(zero_reg));
3310 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3311 lw(scratch, MemOperand(src));
3312 Addu(src, src, kPointerSize);
3314 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3315 // Can't use unaligned access - copy byte by byte.
3316 sb(scratch, MemOperand(dst, 0));
3317 srl(scratch, scratch, 8);
3318 sb(scratch, MemOperand(dst, 1));
3319 srl(scratch, scratch, 8);
3320 sb(scratch, MemOperand(dst, 2));
3321 srl(scratch, scratch, 8);
3322 sb(scratch, MemOperand(dst, 3));
3325 Subu(length, length, Operand(kPointerSize));
3328 // Copy the last bytes if any left.
3329 bind(&byte_loop);
3330 Branch(&done, eq, length, Operand(zero_reg));
3331 bind(&byte_loop_1);
3332 lbu(scratch, MemOperand(src));
3333 Addu(src, src, 1);
3334 sb(scratch, MemOperand(dst));
3335 Addu(dst, dst, 1);
3336 Subu(length, length, Operand(1));
3337 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3338 bind(&done);
3339 }
3342 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3343 Register end_offset,
3344 Register filler) {
3345 Label loop, entry;
3346 Branch(&entry);
3347 bind(&loop);
3348 sw(filler, MemOperand(start_offset));
3349 Addu(start_offset, start_offset, kPointerSize);
3350 bind(&entry);
3351 Branch(&loop, lt, start_offset, Operand(end_offset));
3352 }
3355 void MacroAssembler::CheckFastElements(Register map,
3356 Register scratch,
3357 Label* fail) {
3358 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3359 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3360 STATIC_ASSERT(FAST_ELEMENTS == 2);
3361 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3362 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3363 Branch(fail, hi, scratch,
3364 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3368 void MacroAssembler::CheckFastObjectElements(Register map,
3369 Register scratch,
3370 Label* fail) {
3371 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3372 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3373 STATIC_ASSERT(FAST_ELEMENTS == 2);
3374 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3375 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3376 Branch(fail, ls, scratch,
3377 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3378 Branch(fail, hi, scratch,
3379 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3383 void MacroAssembler::CheckFastSmiElements(Register map,
3384 Register scratch,
3385 Label* fail) {
3386 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3387 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3388 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3389 Branch(fail, hi, scratch,
3390 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3394 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3395 Register key_reg,
3396 Register elements_reg,
3397 Register scratch1,
3398 Register scratch2,
3399 Register scratch3,
3400 Label* fail,
3401 int elements_offset) {
3402 Label smi_value, maybe_nan, have_double_value, is_nan, done;
3403 Register mantissa_reg = scratch2;
3404 Register exponent_reg = scratch3;
3406 // Handle smi values specially.
3407 JumpIfSmi(value_reg, &smi_value);
3409 // Ensure that the object is a heap number
3412 Heap::kHeapNumberMapRootIndex,
3416 // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
3418 li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3419 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3420 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3422 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3424 bind(&have_double_value);
3425 sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3426 Addu(scratch1, scratch1, elements_reg);
3427 sw(mantissa_reg, FieldMemOperand(
3428 scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
3429 uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
3430 sizeof(kHoleNanLower32);
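// The exponent word of the double sits one 32-bit word above the mantissa
// word, hence the extra sizeof(kHoleNanLower32) added to the offset.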
3431 sw(exponent_reg, FieldMemOperand(scratch1, offset));
3432 jmp(&done);
3434 bind(&maybe_nan);
3435 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3436 // it's an Infinity, and the non-NaN code path applies.
3437 Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3438 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3439 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3440 bind(&is_nan);
3441 // Load canonical NaN for storing into the double array.
3442 LoadRoot(at, Heap::kNanValueRootIndex);
3443 lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
3444 lw(exponent_reg, FieldMemOperand(at, HeapNumber::kValueOffset + 4));
3445 jmp(&have_double_value);
3447 bind(&smi_value);
3448 Addu(scratch1, elements_reg,
3449 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3451 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3452 Addu(scratch1, scratch1, scratch2);
3453 // scratch1 is now effective address of the double element
3455 Register untagged_value = elements_reg;
3456 SmiUntag(untagged_value, value_reg);
3457 mtc1(untagged_value, f2);
3458 cvt_d_w(f0, f2);
3459 sdc1(f0, MemOperand(scratch1, 0));
3460 bind(&done);
3461 }
3464 void MacroAssembler::CompareMapAndBranch(Register obj,
3465 Register scratch,
3466 Handle<Map> map,
3467 Label* early_success,
3468 Condition cond,
3469 Label* branch_to) {
3470 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3471 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3475 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3476 Handle<Map> map,
3477 Label* early_success,
3478 Condition cond,
3479 Label* branch_to) {
3480 Branch(branch_to, cond, obj_map, Operand(map));
3484 void MacroAssembler::CheckMap(Register obj,
3485 Register scratch,
3486 Handle<Map> map,
3487 Label* fail,
3488 SmiCheckType smi_check_type) {
3489 if (smi_check_type == DO_SMI_CHECK) {
3490 JumpIfSmi(obj, fail);
3491 }
3492 Label success;
3493 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3494 bind(&success);
3495 }
3498 void MacroAssembler::DispatchMap(Register obj,
3499 Register scratch,
3500 Handle<Map> map,
3501 Handle<Code> success,
3502 SmiCheckType smi_check_type) {
3503 Label fail;
3504 if (smi_check_type == DO_SMI_CHECK) {
3505 JumpIfSmi(obj, &fail);
3507 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3508 Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3513 void MacroAssembler::CheckMap(Register obj,
3514 Register scratch,
3515 Heap::RootListIndex index,
3516 Label* fail,
3517 SmiCheckType smi_check_type) {
3518 if (smi_check_type == DO_SMI_CHECK) {
3519 JumpIfSmi(obj, fail);
3521 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3522 LoadRoot(at, index);
3523 Branch(fail, ne, scratch, Operand(at));
3527 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
3528 if (IsMipsSoftFloatABI) {
3529 Move(dst, v0, v1);
3530 } else {
3531 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3532 }
3533 }
3536 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
3537 if (IsMipsSoftFloatABI) {
3538 Move(dst, a0, a1);
3539 } else {
3540 Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
3541 }
3542 }
3545 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
3546 if (!IsMipsSoftFloatABI) {
3547 Move(f12, src);
3548 } else {
3549 Move(a0, a1, src);
3550 }
3551 }
3554 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
3555 if (!IsMipsSoftFloatABI) {
3556 Move(f0, src);
3557 } else {
3558 Move(v0, v1, src);
3559 }
3560 }
3563 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3564 DoubleRegister src2) {
3565 if (!IsMipsSoftFloatABI) {
3567 ASSERT(!src1.is(f14));
3581 // -----------------------------------------------------------------------------
3582 // JavaScript invokes.
3584 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3585 const ParameterCount& actual,
3586 Handle<Code> code_constant,
3587 Register code_reg,
3588 Label* done,
3589 bool* definitely_mismatches,
3590 InvokeFlag flag,
3591 const CallWrapper& call_wrapper) {
3592 bool definitely_matches = false;
3593 *definitely_mismatches = false;
3594 Label regular_invoke;
3596 // Check whether the expected and actual arguments count match. If not,
3597 // setup registers according to contract with ArgumentsAdaptorTrampoline:
3598 // a0: actual arguments count
3599 // a1: function (passed through to callee)
3600 // a2: expected arguments count
3602 // The code below is made a lot easier because the calling code already sets
3603 // up actual and expected registers according to the contract if values are
3604 // passed in registers.
3605 ASSERT(actual.is_immediate() || actual.reg().is(a0));
3606 ASSERT(expected.is_immediate() || expected.reg().is(a2));
3607 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3609 if (expected.is_immediate()) {
3610 ASSERT(actual.is_immediate());
3611 if (expected.immediate() == actual.immediate()) {
3612 definitely_matches = true;
3614 li(a0, Operand(actual.immediate()));
3615 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3616 if (expected.immediate() == sentinel) {
3617 // Don't worry about adapting arguments for builtins that
3618 // don't want that done. Skip adaption code by making it look
3619 // like we have a match between expected and actual number of
3620 // arguments.
3621 definitely_matches = true;
3622 } else {
3623 *definitely_mismatches = true;
3624 li(a2, Operand(expected.immediate()));
3625 }
3626 }
3627 } else if (actual.is_immediate()) {
3628 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3629 li(a0, Operand(actual.immediate()));
3630 } else {
3631 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
3632 }
3634 if (!definitely_matches) {
3635 if (!code_constant.is_null()) {
3636 li(a3, Operand(code_constant));
3637 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3640 Handle<Code> adaptor =
3641 isolate()->builtins()->ArgumentsAdaptorTrampoline();
3642 if (flag == CALL_FUNCTION) {
3643 call_wrapper.BeforeCall(CallSize(adaptor));
3644 Call(adaptor, RelocInfo::CODE_TARGET);
3645 call_wrapper.AfterCall();
3646 if (!*definitely_mismatches) {
3647 Branch(done);
3648 }
3649 } else {
3650 Jump(adaptor, RelocInfo::CODE_TARGET);
3651 }
3652 bind(&regular_invoke);
3653 }
3654 }
3657 void MacroAssembler::InvokeCode(Register code,
3658 const ParameterCount& expected,
3659 const ParameterCount& actual,
3661 const CallWrapper& call_wrapper) {
3662 // You can't call a function without a valid frame.
3663 ASSERT(flag == JUMP_FUNCTION || has_frame());
3667 bool definitely_mismatches = false;
3668 InvokePrologue(expected, actual, Handle<Code>::null(), code,
3669 &done, &definitely_mismatches, flag,
3671 if (!definitely_mismatches) {
3672 if (flag == CALL_FUNCTION) {
3673 call_wrapper.BeforeCall(CallSize(code));
3674 Call(code);
3675 call_wrapper.AfterCall();
3676 } else {
3677 ASSERT(flag == JUMP_FUNCTION);
3678 Jump(code);
3679 }
3680 // Continue here if InvokePrologue does handle the invocation due to
3681 // mismatched parameter counts.
3687 void MacroAssembler::InvokeFunction(Register function,
3688 const ParameterCount& actual,
3690 const CallWrapper& call_wrapper) {
3691 // You can't call a function without a valid frame.
3692 ASSERT(flag == JUMP_FUNCTION || has_frame());
3694 // Contract with called JS functions requires that function is passed in a1.
3695 ASSERT(function.is(a1));
3696 Register expected_reg = a2;
3697 Register code_reg = a3;
3699 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3700 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3702 FieldMemOperand(code_reg,
3703 SharedFunctionInfo::kFormalParameterCountOffset));
3704 sra(expected_reg, expected_reg, kSmiTagSize);
3705 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3707 ParameterCount expected(expected_reg);
3708 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
3712 void MacroAssembler::InvokeFunction(Register function,
3713 const ParameterCount& expected,
3714 const ParameterCount& actual,
3716 const CallWrapper& call_wrapper) {
3717 // You can't call a function without a valid frame.
3718 ASSERT(flag == JUMP_FUNCTION || has_frame());
3720 // Contract with called JS functions requires that function is passed in a1.
3721 ASSERT(function.is(a1));
3723 // Get the function and setup the context.
3724 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3726 // We call indirectly through the code field in the function to
3727 // allow recompilation to take effect without changing any of the
3729 lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3730 InvokeCode(a3, expected, actual, flag, call_wrapper);
3734 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3735 const ParameterCount& expected,
3736 const ParameterCount& actual,
3737 InvokeFlag flag,
3738 const CallWrapper& call_wrapper) {
3739 li(a1, function);
3740 InvokeFunction(a1, expected, actual, flag, call_wrapper);
3744 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3748 lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3749 IsInstanceJSObjectType(map, scratch, fail);
3753 void MacroAssembler::IsInstanceJSObjectType(Register map,
3756 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
3757 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3758 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
3762 void MacroAssembler::IsObjectJSStringType(Register object,
3765 ASSERT(kNotStringTag != 0);
3767 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3768 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3769 And(scratch, scratch, Operand(kIsNotStringMask));
3770 Branch(fail, ne, scratch, Operand(zero_reg));
3774 void MacroAssembler::IsObjectNameType(Register object,
3777 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3778 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3779 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
3783 // ---------------------------------------------------------------------------
3784 // Support functions.
3787 void MacroAssembler::TryGetFunctionPrototype(Register function,
3788 Register result,
3789 Register scratch,
3790 Label* miss,
3791 bool miss_on_bound_function) {
3792 // Check that the receiver isn't a smi.
3793 JumpIfSmi(function, miss);
3795 // Check that the function really is a function. Load map into result reg.
3796 GetObjectType(function, result, scratch);
3797 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3799 if (miss_on_bound_function) {
3801 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3803 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3804 And(scratch, scratch,
3805 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
3806 Branch(miss, ne, scratch, Operand(zero_reg));
3809 // Make sure that the function has an instance prototype.
3810 Label non_instance;
3811 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3812 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3813 Branch(&non_instance, ne, scratch, Operand(zero_reg));
3815 // Get the prototype or initial map from the function.
3817 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3819 // If the prototype or initial map is the hole, don't return it and
3820 // simply miss the cache instead. This will allow us to allocate a
3821 // prototype object on-demand in the runtime system.
3822 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3823 Branch(miss, eq, result, Operand(t8));
3825 // If the function does not have an initial map, we're done.
3826 Label done;
3827 GetObjectType(result, scratch, scratch);
3828 Branch(&done, ne, scratch, Operand(MAP_TYPE));
3830 // Get the prototype from the initial map.
3831 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3832 jmp(&done);
3834 // Non-instance prototype: Fetch prototype from constructor field
3835 // in initial map.
3836 bind(&non_instance);
3837 lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3844 void MacroAssembler::GetObjectType(Register object,
3846 Register type_reg) {
3847 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3848 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3852 // -----------------------------------------------------------------------------
3855 void MacroAssembler::CallStub(CodeStub* stub,
3856 TypeFeedbackId ast_id,
3857 Condition cond,
3858 Register r1,
3859 const Operand& r2,
3860 BranchDelaySlot bd) {
3861 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
3862 Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id,
3863 cond, r1, r2, bd);
3864 }
3867 void MacroAssembler::TailCallStub(CodeStub* stub,
3868 Condition cond,
3869 Register r1,
3870 const Operand& r2,
3871 BranchDelaySlot bd) {
3872 Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
3876 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3877 return ref0.address() - ref1.address();
3881 void MacroAssembler::CallApiFunctionAndReturn(
3882 Register function_address,
3883 ExternalReference thunk_ref,
3884 int stack_space,
3885 MemOperand return_value_operand,
3886 MemOperand* context_restore_operand) {
3887 ExternalReference next_address =
3888 ExternalReference::handle_scope_next_address(isolate());
3889 const int kNextOffset = 0;
3890 const int kLimitOffset = AddressOffset(
3891 ExternalReference::handle_scope_limit_address(isolate()),
3892 next_address);
3893 const int kLevelOffset = AddressOffset(
3894 ExternalReference::handle_scope_level_address(isolate()),
3897 ASSERT(function_address.is(a1) || function_address.is(a2));
3899 Label profiler_disabled;
3900 Label end_profiler_check;
3901 bool* is_profiling_flag =
3902 isolate()->cpu_profiler()->is_profiling_address();
3903 STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
3904 li(t9, reinterpret_cast<int32_t>(is_profiling_flag));
3905 lb(t9, MemOperand(t9, 0));
3906 Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
3908 // Additional parameter is the address of the actual callback.
3909 li(t9, Operand(thunk_ref));
3910 jmp(&end_profiler_check);
3912 bind(&profiler_disabled);
3913 mov(t9, function_address);
3914 bind(&end_profiler_check);
3916 // Allocate HandleScope in callee-save registers.
3917 li(s3, Operand(next_address));
3918 lw(s0, MemOperand(s3, kNextOffset));
3919 lw(s1, MemOperand(s3, kLimitOffset));
3920 lw(s2, MemOperand(s3, kLevelOffset));
3921 Addu(s2, s2, Operand(1));
3922 sw(s2, MemOperand(s3, kLevelOffset));
3924 if (FLAG_log_timer_events) {
3925 FrameScope frame(this, StackFrame::MANUAL);
3926 PushSafepointRegisters();
3927 PrepareCallCFunction(1, a0);
3928 li(a0, Operand(ExternalReference::isolate_address(isolate())));
3929 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
3930 PopSafepointRegisters();
3933 // Native call returns to the DirectCEntry stub which redirects to the
3934 // return address pushed on stack (could have moved after GC).
3935 // DirectCEntry stub itself is generated early and never moves.
3936 DirectCEntryStub stub;
3937 stub.GenerateCall(this, t9);
3939 if (FLAG_log_timer_events) {
3940 FrameScope frame(this, StackFrame::MANUAL);
3941 PushSafepointRegisters();
3942 PrepareCallCFunction(1, a0);
3943 li(a0, Operand(ExternalReference::isolate_address(isolate())));
3944 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
3945 PopSafepointRegisters();
3948 Label promote_scheduled_exception;
3949 Label exception_handled;
3950 Label delete_allocated_handles;
3951 Label leave_exit_frame;
3952 Label return_value_loaded;
3954 // Load value from ReturnValue.
3955 lw(v0, return_value_operand);
3956 bind(&return_value_loaded);
3958 // No more valid handles (the result handle was the last one). Restore
3959 // previous handle scope.
3960 sw(s0, MemOperand(s3, kNextOffset));
3961 if (emit_debug_code()) {
3962 lw(a1, MemOperand(s3, kLevelOffset));
3963 Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
3965 Subu(s2, s2, Operand(1));
3966 sw(s2, MemOperand(s3, kLevelOffset));
3967 lw(at, MemOperand(s3, kLimitOffset));
3968 Branch(&delete_allocated_handles, ne, s1, Operand(at));
3970 // Check if the function scheduled an exception.
3971 bind(&leave_exit_frame);
3972 LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3973 li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3974 lw(t1, MemOperand(at));
3975 Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3976 bind(&exception_handled);
3978 bool restore_context = context_restore_operand != NULL;
3979 if (restore_context) {
3980 lw(cp, *context_restore_operand);
3982 li(s0, Operand(stack_space));
3983 LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
3985 bind(&promote_scheduled_exception);
3987 FrameScope frame(this, StackFrame::INTERNAL);
3988 CallExternalReference(
3989 ExternalReference(Runtime::kPromoteScheduledException, isolate()),
3992 jmp(&exception_handled);
3994 // HandleScope limit has changed. Delete allocated extensions.
3995 bind(&delete_allocated_handles);
3996 sw(s1, MemOperand(s3, kLimitOffset));
3999 PrepareCallCFunction(1, s1);
4000 li(a0, Operand(ExternalReference::isolate_address(isolate())));
4001 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
4004 jmp(&leave_exit_frame);
4008 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4009 return has_frame_ || !stub->SometimesSetsUpAFrame();
4013 void MacroAssembler::IllegalOperation(int num_arguments) {
4014 if (num_arguments > 0) {
4015 addiu(sp, sp, num_arguments * kPointerSize);
4016 }
4017 LoadRoot(v0, Heap::kUndefinedValueRootIndex);
4021 void MacroAssembler::IndexFromHash(Register hash,
4022 Register index) {
4023 // If the hash field contains an array index pick it out. The assert checks
4024 // that the constants for the maximum number of digits for an array index
4025 // cached in the hash field and the number of bits reserved for it does not
4027 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
4028 (1 << String::kArrayIndexValueBits));
4029 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
4030 // the low kHashShift bits.
4031 STATIC_ASSERT(kSmiTag == 0);
4032 Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
4033 sll(index, hash, kSmiTagSize);
4037 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4038 FPURegister result,
4039 Register scratch1,
4040 Register scratch2,
4041 Register heap_number_map,
4042 Label* not_number,
4043 ObjectToDoubleFlags flags) {
4044 Label done;
4045 if ((flags & OBJECT_NOT_SMI) == 0) {
4046 Label not_smi;
4047 JumpIfNotSmi(object, &not_smi);
4048 // Remove smi tag and convert to double.
4049 sra(scratch1, object, kSmiTagSize);
4050 mtc1(scratch1, result);
4051 cvt_d_w(result, result);
4055 // Check for heap number and load double value from it.
4056 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4057 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4059 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4060 // If exponent is all ones the number is either a NaN or +/-Infinity.
4061 Register exponent = scratch1;
4062 Register mask_reg = scratch2;
4063 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4064 li(mask_reg, HeapNumber::kExponentMask);
4066 And(exponent, exponent, mask_reg);
4067 Branch(not_number, eq, exponent, Operand(mask_reg));
4069 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4074 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4076 Register scratch1) {
4077 sra(scratch1, smi, kSmiTagSize);
4078 mtc1(scratch1, value);
4079 cvt_d_w(value, value);
4083 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4084 Register left,
4085 Register right,
4086 Register overflow_dst,
4087 Register scratch) {
4088 ASSERT(!dst.is(overflow_dst));
4089 ASSERT(!dst.is(scratch));
4090 ASSERT(!overflow_dst.is(scratch));
4091 ASSERT(!overflow_dst.is(left));
4092 ASSERT(!overflow_dst.is(right));
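// Addition overflows exactly when both operands have the same sign and the
// result's sign differs; equivalently, (dst ^ left) & (dst ^ right) has its
// sign bit set, which is what the xor/and sequences below leave in overflow_dst.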
4094 if (left.is(right) && dst.is(left)) {
4095 ASSERT(!dst.is(t9));
4096 ASSERT(!scratch.is(t9));
4097 ASSERT(!left.is(t9));
4098 ASSERT(!right.is(t9));
4099 ASSERT(!overflow_dst.is(t9));
4105 mov(scratch, left); // Preserve left.
4106 addu(dst, left, right); // Left is overwritten.
4107 xor_(scratch, dst, scratch); // Original left.
4108 xor_(overflow_dst, dst, right);
4109 and_(overflow_dst, overflow_dst, scratch);
4110 } else if (dst.is(right)) {
4111 mov(scratch, right); // Preserve right.
4112 addu(dst, left, right); // Right is overwritten.
4113 xor_(scratch, dst, scratch); // Original right.
4114 xor_(overflow_dst, dst, left);
4115 and_(overflow_dst, overflow_dst, scratch);
4117 addu(dst, left, right);
4118 xor_(overflow_dst, dst, left);
4119 xor_(scratch, dst, right);
4120 and_(overflow_dst, scratch, overflow_dst);
4125 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4126 Register left,
4127 Register right,
4128 Register overflow_dst,
4129 Register scratch) {
4130 ASSERT(!dst.is(overflow_dst));
4131 ASSERT(!dst.is(scratch));
4132 ASSERT(!overflow_dst.is(scratch));
4133 ASSERT(!overflow_dst.is(left));
4134 ASSERT(!overflow_dst.is(right));
4135 ASSERT(!scratch.is(left));
4136 ASSERT(!scratch.is(right));
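// Subtraction overflows exactly when the operands have different signs and the
// result's sign differs from the left operand's; (dst ^ left) & (left ^ right)
// has its sign bit set in that case.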
4138 // This happens with some crankshaft code. Since Subu works fine if
4139 // left == right, let's not make that restriction here.
4140 if (left.is(right)) {
4141 mov(dst, zero_reg);
4142 mov(overflow_dst, zero_reg);
4143 return;
4144 }
4146 if (dst.is(left)) {
4147 mov(scratch, left); // Preserve left.
4148 subu(dst, left, right); // Left is overwritten.
4149 xor_(overflow_dst, dst, scratch); // scratch is original left.
4150 xor_(scratch, scratch, right); // scratch is original left.
4151 and_(overflow_dst, scratch, overflow_dst);
4152 } else if (dst.is(right)) {
4153 mov(scratch, right); // Preserve right.
4154 subu(dst, left, right); // Right is overwritten.
4155 xor_(overflow_dst, dst, left);
4156 xor_(scratch, left, scratch); // Original right.
4157 and_(overflow_dst, scratch, overflow_dst);
4159 subu(dst, left, right);
4160 xor_(overflow_dst, dst, left);
4161 xor_(scratch, left, right);
4162 and_(overflow_dst, scratch, overflow_dst);
4167 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4168 int num_arguments,
4169 SaveFPRegsMode save_doubles) {
4170 // All parameters are on the stack. v0 has the return value after call.
4172 // If the expected number of arguments of the runtime function is
4173 // constant, we check that the actual number of arguments match the
4175 if (f->nargs >= 0 && f->nargs != num_arguments) {
4176 IllegalOperation(num_arguments);
4177 return;
4178 }
4180 // TODO(1236192): Most runtime routines don't need the number of
4181 // arguments passed in because it is constant. At some point we
4182 // should remove this need and make the runtime routine entry code
4184 PrepareCEntryArgs(num_arguments);
4185 PrepareCEntryFunction(ExternalReference(f, isolate()));
4186 CEntryStub stub(1, save_doubles);
4187 CallStub(&stub);
4188 }
4191 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4192 int num_arguments,
4193 BranchDelaySlot bd) {
4194 PrepareCEntryArgs(num_arguments);
4195 PrepareCEntryFunction(ext);
4197 CEntryStub stub(1);
4198 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4199 }
4202 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4203 int num_arguments,
4204 int result_size) {
4205 // TODO(1236192): Most runtime routines don't need the number of
4206 // arguments passed in because it is constant. At some point we
4207 // should remove this need and make the runtime routine entry code
4209 PrepareCEntryArgs(num_arguments);
4210 JumpToExternalReference(ext);
4214 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4215 int num_arguments,
4216 int result_size) {
4217 TailCallExternalReference(ExternalReference(fid, isolate()),
4218 num_arguments,
4219 result_size);
4220 }
4223 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4224 BranchDelaySlot bd) {
4225 PrepareCEntryFunction(builtin);
4226 CEntryStub stub(1);
4227 Jump(stub.GetCode(isolate()),
4228 RelocInfo::CODE_TARGET,
4236 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4237 InvokeFlag flag,
4238 const CallWrapper& call_wrapper) {
4239 // You can't call a builtin without a valid frame.
4240 ASSERT(flag == JUMP_FUNCTION || has_frame());
4242 GetBuiltinEntry(t9, id);
4243 if (flag == CALL_FUNCTION) {
4244 call_wrapper.BeforeCall(CallSize(t9));
4245 Call(t9);
4246 call_wrapper.AfterCall();
4247 } else {
4248 ASSERT(flag == JUMP_FUNCTION);
4249 Jump(t9);
4250 }
4251 }
4254 void MacroAssembler::GetBuiltinFunction(Register target,
4255 Builtins::JavaScript id) {
4256 // Load the builtins object into target register.
4257 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4258 lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4259 // Load the JavaScript builtin function from the builtins object.
4260 lw(target, FieldMemOperand(target,
4261 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4265 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4266 ASSERT(!target.is(a1));
4267 GetBuiltinFunction(a1, id);
4268 // Load the code entry point from the builtins object.
4269 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4273 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4274 Register scratch1, Register scratch2) {
4275 if (FLAG_native_code_counters && counter->Enabled()) {
4276 li(scratch1, Operand(value));
4277 li(scratch2, Operand(ExternalReference(counter)));
4278 sw(scratch1, MemOperand(scratch2));
4283 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4284 Register scratch1, Register scratch2) {
4286 if (FLAG_native_code_counters && counter->Enabled()) {
4287 li(scratch2, Operand(ExternalReference(counter)));
4288 lw(scratch1, MemOperand(scratch2));
4289 Addu(scratch1, scratch1, Operand(value));
4290 sw(scratch1, MemOperand(scratch2));
4295 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4296 Register scratch1, Register scratch2) {
4298 if (FLAG_native_code_counters && counter->Enabled()) {
4299 li(scratch2, Operand(ExternalReference(counter)));
4300 lw(scratch1, MemOperand(scratch2));
4301 Subu(scratch1, scratch1, Operand(value));
4302 sw(scratch1, MemOperand(scratch2));
4307 // -----------------------------------------------------------------------------
4308 // Debugging.
4310 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4311 Register rs, Operand rt) {
4312 if (emit_debug_code())
4313 Check(cc, reason, rs, rt);
4317 void MacroAssembler::AssertFastElements(Register elements) {
4318 if (emit_debug_code()) {
4319 ASSERT(!elements.is(at));
4320 Label ok;
4321 push(elements);
4322 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4323 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4324 Branch(&ok, eq, elements, Operand(at));
4325 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4326 Branch(&ok, eq, elements, Operand(at));
4327 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4328 Branch(&ok, eq, elements, Operand(at));
4329 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4330 bind(&ok);
4331 pop(elements);
4332 }
4333 }
4336 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4337 Register rs, Operand rt) {
4338 Label L;
4339 Branch(&L, cc, rs, rt);
4340 Abort(reason);
4341 // Will not return here.
4342 bind(&L);
4343 }
4346 void MacroAssembler::Abort(BailoutReason reason) {
4347 Label abort_start;
4348 bind(&abort_start);
4349 // We want to pass the msg string like a smi to avoid GC
4350 // problems, however msg is not guaranteed to be aligned
4351 // properly. Instead, we pass an aligned pointer that is
4352 // a proper v8 smi, but also pass the alignment difference
4353 // from the real pointer as a smi.
4354 const char* msg = GetBailoutReason(reason);
4355 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
4356 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
4357 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
4360 RecordComment("Abort message: ");
4364 if (FLAG_trap_on_abort) {
4365 stop(msg);
4366 return;
4367 }
4368 #endif
4370 li(a0, Operand(p0));
4371 push(a0);
4372 li(a0, Operand(Smi::FromInt(p1 - p0)));
4373 push(a0);
4374 // Disable stub call restrictions to always allow calls to abort.
4375 if (!has_frame_) {
4376 // We don't actually want to generate a pile of code for this, so just
4377 // claim there is a stack frame, without generating one.
4378 FrameScope scope(this, StackFrame::NONE);
4379 CallRuntime(Runtime::kAbort, 2);
4380 } else {
4381 CallRuntime(Runtime::kAbort, 2);
4382 }
4383 // Will not return here.
4384 if (is_trampoline_pool_blocked()) {
4385 // If the calling code cares about the exact number of
4386 // instructions generated, we insert padding here to keep the size
4387 // of the Abort macro constant.
4388 // Currently in debug mode with debug_code enabled the number of
4389 // generated instructions is 14, so we use this as a maximum value.
4390 static const int kExpectedAbortInstructions = 14;
4391 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4392 ASSERT(abort_instructions <= kExpectedAbortInstructions);
4393 while (abort_instructions++ < kExpectedAbortInstructions) {
4400 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4401 if (context_chain_length > 0) {
4402 // Move up the chain of contexts to the context containing the slot.
4403 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4404 for (int i = 1; i < context_chain_length; i++) {
4405 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4406 }
4407 } else {
4408 // Slot is in the current function context. Move it into the
4409 // destination register in case we store into it (the write barrier
4410 // cannot be allowed to destroy the context in esi).
4416 void MacroAssembler::LoadTransitionedArrayMapConditional(
4417 ElementsKind expected_kind,
4418 ElementsKind transitioned_kind,
4419 Register map_in_out,
4420 Register scratch,
4421 Label* no_map_match) {
4422 // Load the global or builtins object from the current context.
4423 lw(scratch,
4424 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4425 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4427 // Check that the function's map is the same as the expected cached map.
4430 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4431 size_t offset = expected_kind * kPointerSize +
4432 FixedArrayBase::kHeaderSize;
4433 lw(at, FieldMemOperand(scratch, offset));
4434 Branch(no_map_match, ne, map_in_out, Operand(at));
4436 // Use the transitioned cached map.
4437 offset = transitioned_kind * kPointerSize +
4438 FixedArrayBase::kHeaderSize;
4439 lw(map_in_out, FieldMemOperand(scratch, offset));
4443 void MacroAssembler::LoadInitialArrayMap(
4444 Register function_in, Register scratch,
4445 Register map_out, bool can_have_holes) {
4446 ASSERT(!function_in.is(map_out));
4448 lw(map_out, FieldMemOperand(function_in,
4449 JSFunction::kPrototypeOrInitialMapOffset));
4450 if (!FLAG_smi_only_arrays) {
4451 ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4452 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4457 } else if (can_have_holes) {
4458 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4459 FAST_HOLEY_SMI_ELEMENTS,
4468 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4469 // Load the global or builtins object from the current context.
4471 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4472 // Load the native context from the global or builtins object.
4473 lw(function, FieldMemOperand(function,
4474 GlobalObject::kNativeContextOffset));
4475 // Load the function from the native context.
4476 lw(function, MemOperand(function, Context::SlotOffset(index)));
4480 void MacroAssembler::LoadArrayFunction(Register function) {
4481 // Load the global or builtins object from the current context.
4483 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4484 // Load the global context from the global or builtins object.
4486 FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
4487 // Load the array function from the native context.
4489 MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
4493 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4494 Register map,
4495 Register scratch) {
4496 // Load the initial map. The global functions all have initial maps.
4497 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4498 if (emit_debug_code()) {
4499 Label ok, fail;
4500 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4501 Branch(&ok);
4502 bind(&fail);
4503 Abort(kGlobalFunctionsMustHaveInitialMap);
4504 bind(&ok);
4505 }
4506 }
4509 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
4510 if (frame_mode == BUILD_STUB_FRAME) {
4512 Push(Smi::FromInt(StackFrame::STUB));
4513 // Adjust FP to point to saved FP.
4514 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4515 } else {
4516 PredictableCodeSizeScope predictible_code_size_scope(
4517 this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
4518 // The following three instructions must remain together and unmodified
4519 // for code aging to work properly.
4520 if (isolate()->IsCodePreAgingActive()) {
4521 // Pre-age the code.
4522 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4523 nop(Assembler::CODE_AGE_MARKER_NOP);
4524 // Load the stub address to t9 and call it,
4525 // GetCodeAgeAndParity() extracts the stub address from this instruction.
4526 li(t9,
4527 Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
4528 CONSTANT_SIZE);
4529 nop(); // Prevent jalr to jal optimization.
4530 jalr(t9, a0);
4531 nop(); // Branch delay slot nop.
4532 nop(); // Pad the empty space.
4533 } else {
4534 Push(ra, fp, cp, a1);
4535 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4536 // Adjust fp to point to caller's fp.
4537 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4543 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4544 addiu(sp, sp, -5 * kPointerSize);
4545 li(t8, Operand(Smi::FromInt(type)));
4546 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4547 sw(ra, MemOperand(sp, 4 * kPointerSize));
4548 sw(fp, MemOperand(sp, 3 * kPointerSize));
4549 sw(cp, MemOperand(sp, 2 * kPointerSize));
4550 sw(t8, MemOperand(sp, 1 * kPointerSize));
4551 sw(t9, MemOperand(sp, 0 * kPointerSize));
4552 // Adjust FP to point to saved FP.
4553 Addu(fp, sp,
4554 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4555 }
4558 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4559 mov(sp, fp);
4560 lw(fp, MemOperand(sp, 0 * kPointerSize));
4561 lw(ra, MemOperand(sp, 1 * kPointerSize));
4562 addiu(sp, sp, 2 * kPointerSize);
4566 void MacroAssembler::EnterExitFrame(bool save_doubles,
4567 int stack_space) {
4568 // Set up the frame structure on the stack.
4569 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4570 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4571 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4573 // This is how the stack will look:
4574 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4575 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4576 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4577 // [fp - 1 (==kSPOffset)] - sp of the called function
4578 // [fp - 2 (==kCodeOffset)] - CodeObject
4579 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4580 // new stack (will contain saved ra)
4583 addiu(sp, sp, -4 * kPointerSize);
4584 sw(ra, MemOperand(sp, 3 * kPointerSize));
4585 sw(fp, MemOperand(sp, 2 * kPointerSize));
4586 addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4588 if (emit_debug_code()) {
4589 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4592 // Accessed from ExitFrame::code_slot.
4593 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4594 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4596 // Save the frame pointer and the context in top.
4597 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4598 sw(fp, MemOperand(t8));
4599 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4600 sw(cp, MemOperand(t8));
4602 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4603 if (save_doubles) {
4604 // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4605 ASSERT(kDoubleSize == frame_alignment);
4606 if (frame_alignment > 0) {
4607 ASSERT(IsPowerOf2(frame_alignment));
4608 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4610 int space = FPURegister::kMaxNumRegisters * kDoubleSize;
4611 Subu(sp, sp, Operand(space));
4612 // Remember: we only need to save every 2nd double FPU value.
4613 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4614 FPURegister reg = FPURegister::from_code(i);
4615 sdc1(reg, MemOperand(sp, i * kDoubleSize));
4619 // Reserve place for the return address, stack space and an optional slot
4620 // (used by the DirectCEntryStub to hold the return value if a struct is
4621 // returned) and align the frame preparing for calling the runtime function.
4622 ASSERT(stack_space >= 0);
4623 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4624 if (frame_alignment > 0) {
4625 ASSERT(IsPowerOf2(frame_alignment));
4626 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4629 // Set the exit frame sp value to point just before the return address
4631 addiu(at, sp, kPointerSize);
4632 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4636 void MacroAssembler::LeaveExitFrame(bool save_doubles,
4637 Register argument_count,
4638 bool restore_context,
4639 bool do_return) {
4640 // Optionally restore all double registers.
4641 if (save_doubles) {
4642 // Remember: we only need to restore every 2nd double FPU value.
4643 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4644 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4645 FPURegister reg = FPURegister::from_code(i);
4646 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4651 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4652 sw(zero_reg, MemOperand(t8));
4654 // Restore current context from top and clear it in debug mode.
4655 if (restore_context) {
4656 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4657 lw(cp, MemOperand(t8));
4660 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4661 sw(a3, MemOperand(t8));
4664 // Pop the arguments, restore registers, and return.
4665 mov(sp, fp); // Respect ABI stack constraint.
4666 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4667 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4669 if (argument_count.is_valid()) {
4670 sll(t8, argument_count, kPointerSizeLog2);
4675 Ret(USE_DELAY_SLOT);
4676 // If returning, the instruction in the delay slot will be the addiu below.
4682 void MacroAssembler::InitializeNewString(Register string,
4684 Heap::RootListIndex map_index,
4686 Register scratch2) {
4687 sll(scratch1, length, kSmiTagSize);
4688 LoadRoot(scratch2, map_index);
4689 sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
4690 li(scratch1, Operand(String::kEmptyHashField));
4691 sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4692 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4696 int MacroAssembler::ActivationFrameAlignment() {
4697 #if V8_HOST_ARCH_MIPS
4698 // Running on the real platform. Use the alignment as mandated by the local environment.
4700 // Note: This will break if we ever start generating snapshots on one Mips
4701 // platform for another Mips platform with a different alignment.
4702 return OS::ActivationFrameAlignment();
4703 #else // V8_HOST_ARCH_MIPS
4704 // If we are using the simulator then we should always align to the expected
4705 // alignment. As the simulator is used to generate snapshots we do not know
4706 // if the target platform will need alignment, so this is controlled from a flag.
4708 return FLAG_sim_stack_alignment;
4709 #endif // V8_HOST_ARCH_MIPS
4713 void MacroAssembler::AssertStackIsAligned() {
4714 if (emit_debug_code()) {
4715 const int frame_alignment = ActivationFrameAlignment();
4716 const int frame_alignment_mask = frame_alignment - 1;
4718 if (frame_alignment > kPointerSize) {
4719 Label alignment_as_expected;
4720 ASSERT(IsPowerOf2(frame_alignment));
4721 andi(at, sp, frame_alignment_mask);
4722 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4723 // Don't use Check here, as it will call Runtime_Abort, re-entering here.
4724 stop("Unexpected stack alignment");
4725 bind(&alignment_as_expected);
4731 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4734 Label* not_power_of_two_or_zero) {
4735 Subu(scratch, reg, Operand(1));
4736 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4737 scratch, Operand(zero_reg));
4738 and_(at, scratch, reg); // In the delay slot.
4739 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
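// The check above uses the standard bit trick: for reg > 0, reg is a power of
// two exactly when (reg - 1) & reg == 0, because subtracting one clears the
// lowest set bit and sets every bit below it. A minimal stand-alone sketch
// (illustrative only, not V8 API):
static inline bool IsPowerOfTwoNonZeroSketch(unsigned reg) {
  return reg != 0 && ((reg - 1) & reg) == 0;
}
// e.g. reg == 8:  (7 & 8)   == 0      -> power of two.
//      reg == 12: (11 & 12) == 8 != 0 -> not a power of two.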
4743 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4744 ASSERT(!reg.is(overflow));
4745 mov(overflow, reg); // Save original value.
4747 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
4751 void MacroAssembler::SmiTagCheckOverflow(Register dst,
4753 Register overflow) {
4755 // Fall back to slower case.
4756 SmiTagCheckOverflow(dst, overflow);
4758 ASSERT(!dst.is(src));
4759 ASSERT(!dst.is(overflow));
4760 ASSERT(!src.is(overflow));
4762 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
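// Smi tagging on 32-bit MIPS shifts the value left by one, so it overflows
// exactly when the result's sign bit differs from the original, which is what
// (value ^ 2 * value) < 0 detects. A minimal stand-alone sketch (illustrative
// only, not V8 API):
static inline bool SmiTagOverflowsSketch(int value) {
  // Tagging doubles the value; do the shift on the unsigned bit pattern to
  // keep the sketch itself free of signed-overflow issues.
  int tagged = static_cast<int>(static_cast<unsigned>(value) << 1);
  return (value ^ tagged) < 0;  // Sign bit changed => value does not fit.
}
// e.g. value = 0x50000000: tagged = 0xA0000000, xor is negative -> overflow;
//      value = 0x1234 tags to 0x2468 with no sign change -> fits in a smi.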
4767 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
4770 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
4775 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
4777 Label* non_smi_case) {
4778 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
4782 void MacroAssembler::JumpIfSmi(Register value,
4785 BranchDelaySlot bd) {
4786 ASSERT_EQ(0, kSmiTag);
4787 andi(scratch, value, kSmiTagMask);
4788 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4791 void MacroAssembler::JumpIfNotSmi(Register value,
4792 Label* not_smi_label,
4794 BranchDelaySlot bd) {
4795 ASSERT_EQ(0, kSmiTag);
4796 andi(scratch, value, kSmiTagMask);
4797 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4801 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4803 Label* on_not_both_smi) {
4804 STATIC_ASSERT(kSmiTag == 0);
4805 ASSERT_EQ(1, kSmiTagMask);
4806 or_(at, reg1, reg2);
4807 JumpIfNotSmi(at, on_not_both_smi);
4811 void MacroAssembler::JumpIfEitherSmi(Register reg1,
4813 Label* on_either_smi) {
4814 STATIC_ASSERT(kSmiTag == 0);
4815 ASSERT_EQ(1, kSmiTagMask);
4816 // Both tag bits must be 1 (i.e. neither value is a smi) for the AND not to look like a smi.
4817 and_(at, reg1, reg2);
4818 JumpIfSmi(at, on_either_smi);
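// With kSmiTag == 0 and kSmiTagMask == 1, a smi has a clear low bit and a heap
// object pointer has it set. OR-ing two values therefore produces a smi-looking
// result only when both are smis, while AND-ing produces one when at least one
// is. A minimal stand-alone sketch of both checks (illustrative only, not V8 API):
static inline bool BothAreSmisSketch(unsigned a, unsigned b) {
  return ((a | b) & 1u) == 0;  // Low bit clear in both values.
}
static inline bool EitherIsSmiSketch(unsigned a, unsigned b) {
  return ((a & b) & 1u) == 0;  // Low bit clear in at least one value.
}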
4822 void MacroAssembler::AssertNotSmi(Register object) {
4823 if (emit_debug_code()) {
4824 STATIC_ASSERT(kSmiTag == 0);
4825 andi(at, object, kSmiTagMask);
4826 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
4831 void MacroAssembler::AssertSmi(Register object) {
4832 if (emit_debug_code()) {
4833 STATIC_ASSERT(kSmiTag == 0);
4834 andi(at, object, kSmiTagMask);
4835 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
4840 void MacroAssembler::AssertString(Register object) {
4841 if (emit_debug_code()) {
4842 STATIC_ASSERT(kSmiTag == 0);
4844 Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
4846 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4847 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4848 Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
4854 void MacroAssembler::AssertName(Register object) {
4855 if (emit_debug_code()) {
4856 STATIC_ASSERT(kSmiTag == 0);
4858 Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
4860 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4861 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4862 Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
4868 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
4869 if (emit_debug_code()) {
4870 ASSERT(!reg.is(at));
4871 LoadRoot(at, index);
4872 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
4877 void MacroAssembler::JumpIfNotHeapNumber(Register object,
4878 Register heap_number_map,
4880 Label* on_not_heap_number) {
4881 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4882 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4883 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4887 void MacroAssembler::LookupNumberStringCache(Register object,
4893 // Use of registers. Register result is used as a temporary.
4894 Register number_string_cache = result;
4895 Register mask = scratch3;
4897 // Load the number string cache.
4898 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
4900 // Make the hash mask from the length of the number string cache. It
4901 // contains two elements (number and string) for each cache entry.
4902 lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
4903 // Divide length by two (length is a smi).
4904 sra(mask, mask, kSmiTagSize + 1);
4905 Addu(mask, mask, -1); // Make mask.
4907 // Calculate the entry in the number string cache. The hash value in the
4908 // number string cache for smis is just the smi value, and the hash for
4909 // doubles is the xor of the upper and lower words. See
4910 // Heap::GetNumberStringCache.
4912 Label load_result_from_cache;
4913 JumpIfSmi(object, &is_smi);
4916 Heap::kHeapNumberMapRootIndex,
4920 STATIC_ASSERT(8 == kDoubleSize);
4923 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
4924 lw(scratch2, MemOperand(scratch1, kPointerSize));
4925 lw(scratch1, MemOperand(scratch1, 0));
4926 Xor(scratch1, scratch1, Operand(scratch2));
4927 And(scratch1, scratch1, Operand(mask));
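// Worked example of the hash above (arbitrary illustrative words, not a
// particular double): for halves lo = 0x12345678 and hi = 0x9ABCDEF0 and a
// 64-entry cache (mask = 63), the xor is 0x88888888 and the index is
// 0x88888888 & 63 = 8. Each entry holds a (number, string) pair in two
// pointer-sized slots, so the sll by (kPointerSizeLog2 + 1) below turns the
// index into a byte offset within the cache array.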
4929 // Calculate address of entry in string cache: each entry consists
4930 // of two pointer sized fields.
4931 sll(scratch1, scratch1, kPointerSizeLog2 + 1);
4932 Addu(scratch1, number_string_cache, scratch1);
4934 Register probe = mask;
4935 lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
4936 JumpIfSmi(probe, not_found);
4937 ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
4938 ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
4939 BranchF(&load_result_from_cache, NULL, eq, f12, f14);
4943 Register scratch = scratch1;
4944 sra(scratch, object, 1); // Shift away the tag.
4945 And(scratch, mask, Operand(scratch));
4947 // Calculate address of entry in string cache: each entry consists
4948 // of two pointer sized fields.
4949 sll(scratch, scratch, kPointerSizeLog2 + 1);
4950 Addu(scratch, number_string_cache, scratch);
4952 // Check if the entry is the smi we are looking for.
4953 lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
4954 Branch(not_found, ne, object, Operand(probe));
4956 // Get the result from the cache.
4957 bind(&load_result_from_cache);
4958 lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
4960 IncrementCounter(isolate()->counters()->number_to_string_native(),
4967 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
4973 // Test that both first and second are sequential ASCII strings.
4974 // Assume that they are non-smis.
4975 lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
4976 lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
4977 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
4978 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
4980 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
4988 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
4993 // Check that neither is a smi.
4994 STATIC_ASSERT(kSmiTag == 0);
4995 And(scratch1, first, Operand(second));
4996 JumpIfSmi(scratch1, failure);
4997 JumpIfNonSmisNotBothSequentialAsciiStrings(first,
5005 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
5011 const int kFlatAsciiStringMask =
5012 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5013 const int kFlatAsciiStringTag =
5014 kStringTag | kOneByteStringTag | kSeqStringTag;
5015 ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
5016 andi(scratch1, first, kFlatAsciiStringMask);
5017 Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
5018 andi(scratch2, second, kFlatAsciiStringMask);
5019 Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
5023 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
5026 const int kFlatAsciiStringMask =
5027 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5028 const int kFlatAsciiStringTag =
5029 kStringTag | kOneByteStringTag | kSeqStringTag;
5030 And(scratch, type, Operand(kFlatAsciiStringMask));
5031 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
5035 static const int kRegisterPassedArguments = 4;
5037 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5038 int num_double_arguments) {
5039 int stack_passed_words = 0;
5040 num_reg_arguments += 2 * num_double_arguments;
5042 // Up to four simple arguments are passed in registers a0..a3.
5043 if (num_reg_arguments > kRegisterPassedArguments) {
5044 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5046 stack_passed_words += kCArgSlotCount;
5047 return stack_passed_words;
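// Worked example for the calculation above, assuming the O32 convention used
// here with kCArgSlotCount == 4: three integer arguments plus one double count
// as 3 + 2 = 5 register-sized arguments, so one word spills to the stack and
// four argument slots are always reserved, giving (5 - 4) + 4 = 5 stack words.
// Two integer arguments and no doubles spill nothing, leaving just the 4 slots.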
5051 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5055 uint32_t encoding_mask) {
5058 Check(ne, kNonObject, at, Operand(zero_reg));
5060 lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
5061 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5063 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5064 li(scratch, Operand(encoding_mask));
5065 Check(eq, kUnexpectedStringType, at, Operand(scratch));
5067 // The index is assumed to be untagged coming in; tag it to compare with the
5068 // string length without using a temp register. It is restored at the end of this function.
5070 Label index_tag_ok, index_tag_bad;
5071 TrySmiTag(index, scratch, &index_tag_bad);
5072 Branch(&index_tag_ok);
5073 bind(&index_tag_bad);
5074 Abort(kIndexIsTooLarge);
5075 bind(&index_tag_ok);
5077 lw(at, FieldMemOperand(string, String::kLengthOffset));
5078 Check(lt, kIndexIsTooLarge, index, Operand(at));
5080 ASSERT(Smi::FromInt(0) == 0);
5081 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5083 SmiUntag(index, index);
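// A smi stores value * 2 (tag bit 0), so an untagged index can be compared
// against the smi-encoded string length simply by tagging the index first:
// both sides end up scaled by the same factor. A minimal stand-alone sketch
// (illustrative only, not V8 API):
static inline bool SmiIndexInRangeSketch(int untagged_index, int smi_length) {
  int smi_index = untagged_index << 1;  // Assumes the index fits in a smi.
  return smi_index >= 0 && smi_index < smi_length;
}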
5087 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5088 int num_double_arguments,
5090 int frame_alignment = ActivationFrameAlignment();
5092 // Up to four simple arguments are passed in registers a0..a3.
5093 // Those four arguments must have reserved argument slots on the stack for
5094 // mips, even though those argument slots are not normally used.
5095 // Remaining arguments are pushed on the stack, above (higher address than)
5096 // the argument slots.
5097 int stack_passed_arguments = CalculateStackPassedWords(
5098 num_reg_arguments, num_double_arguments);
5099 if (frame_alignment > kPointerSize) {
5100 // Make stack end at alignment and make room for num_arguments - 4 words
5101 // and the original value of sp.
5103 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5104 ASSERT(IsPowerOf2(frame_alignment));
5105 And(sp, sp, Operand(-frame_alignment));
5106 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5108 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5113 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5115 PrepareCallCFunction(num_reg_arguments, 0, scratch);
5119 void MacroAssembler::CallCFunction(ExternalReference function,
5120 int num_reg_arguments,
5121 int num_double_arguments) {
5122 li(t8, Operand(function));
5123 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5127 void MacroAssembler::CallCFunction(Register function,
5128 int num_reg_arguments,
5129 int num_double_arguments) {
5130 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5134 void MacroAssembler::CallCFunction(ExternalReference function,
5135 int num_arguments) {
5136 CallCFunction(function, num_arguments, 0);
5140 void MacroAssembler::CallCFunction(Register function,
5141 int num_arguments) {
5142 CallCFunction(function, num_arguments, 0);
5146 void MacroAssembler::CallCFunctionHelper(Register function,
5147 int num_reg_arguments,
5148 int num_double_arguments) {
5149 ASSERT(has_frame());
5150 // Make sure that the stack is aligned before calling a C function unless
5151 // running in the simulator. The simulator has its own alignment check which
5152 // provides more information.
5153 // The argument slots are presumed to have been set up by
5154 // PrepareCallCFunction. The C function must be called via t9, per the MIPS ABI.
5156 #if V8_HOST_ARCH_MIPS
5157 if (emit_debug_code()) {
5158 int frame_alignment = OS::ActivationFrameAlignment();
5159 int frame_alignment_mask = frame_alignment - 1;
5160 if (frame_alignment > kPointerSize) {
5161 ASSERT(IsPowerOf2(frame_alignment));
5162 Label alignment_as_expected;
5163 And(at, sp, Operand(frame_alignment_mask));
5164 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5165 // Don't use Check here, as it will call Runtime_Abort possibly
5166 // re-entering here.
5167 stop("Unexpected alignment in CallCFunction");
5168 bind(&alignment_as_expected);
5171 #endif // V8_HOST_ARCH_MIPS
5173 // Just call directly. The function called cannot cause a GC, or
5174 // allow preemption, so the return address in the link register stays correct.
5177 if (!function.is(t9)) {
5184 int stack_passed_arguments = CalculateStackPassedWords(
5185 num_reg_arguments, num_double_arguments);
5187 if (OS::ActivationFrameAlignment() > kPointerSize) {
5188 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5190 Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5195 #undef BRANCH_ARGS_CHECK
5198 void MacroAssembler::PatchRelocatedValue(Register li_location,
5200 Register new_value) {
5201 lw(scratch, MemOperand(li_location));
5202 // At this point scratch is a lui(at, ...) instruction.
5203 if (emit_debug_code()) {
5204 And(scratch, scratch, kOpcodeMask);
5205 Check(eq, kTheInstructionToPatchShouldBeALui,
5206 scratch, Operand(LUI));
5207 lw(scratch, MemOperand(li_location));
5209 srl(t9, new_value, kImm16Bits);
5210 Ins(scratch, t9, 0, kImm16Bits);
5211 sw(scratch, MemOperand(li_location));
5213 lw(scratch, MemOperand(li_location, kInstrSize));
5214 // scratch is now ori(at, ...).
5215 if (emit_debug_code()) {
5216 And(scratch, scratch, kOpcodeMask);
5217 Check(eq, kTheInstructionToPatchShouldBeAnOri,
5218 scratch, Operand(ORI));
5219 lw(scratch, MemOperand(li_location, kInstrSize));
5221 Ins(scratch, new_value, 0, kImm16Bits);
5222 sw(scratch, MemOperand(li_location, kInstrSize));
5224 // Update the I-cache so the new lui and ori can be executed.
5225 FlushICache(li_location, 2);
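// A 32-bit constant materialized by a lui/ori pair is split into halves: lui
// supplies bits 31..16 and ori fills in bits 15..0. A minimal stand-alone
// sketch of the split that the patching above rewrites (illustrative only,
// not V8 API):
static inline void SplitForLuiOriSketch(unsigned value,
                                        unsigned* upper16,
                                        unsigned* lower16) {
  *upper16 = value >> 16;      // Immediate for the lui instruction.
  *lower16 = value & 0xFFFFu;  // Immediate for the ori instruction.
}
// e.g. value = 0x12345678 gives lui 0x1234 / ori 0x5678; GetRelocatedValue
// below performs the inverse: (upper16 << 16) | lower16.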
5228 void MacroAssembler::GetRelocatedValue(Register li_location,
5231 lw(value, MemOperand(li_location));
5232 if (emit_debug_code()) {
5233 And(value, value, kOpcodeMask);
5234 Check(eq, kTheInstructionShouldBeALui,
5235 value, Operand(LUI));
5236 lw(value, MemOperand(li_location));
5239 // value now holds a lui instruction. Extract the immediate.
5240 sll(value, value, kImm16Bits);
5242 lw(scratch, MemOperand(li_location, kInstrSize));
5243 if (emit_debug_code()) {
5244 And(scratch, scratch, kOpcodeMask);
5245 Check(eq, kTheInstructionShouldBeAnOri,
5246 scratch, Operand(ORI));
5247 lw(scratch, MemOperand(li_location, kInstrSize));
5249 // "scratch" now holds an ori instruction. Extract the immediate.
5250 andi(scratch, scratch, kImm16Mask);
5252 // Merge the results.
5253 or_(value, value, scratch);
5257 void MacroAssembler::CheckPageFlag(
5262 Label* condition_met) {
5263 And(scratch, object, Operand(~Page::kPageAlignmentMask));
5264 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5265 And(scratch, scratch, Operand(mask));
5266 Branch(condition_met, cc, scratch, Operand(zero_reg));
5270 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5272 Label* if_deprecated) {
5273 if (map->CanBeDeprecated()) {
5274 li(scratch, Operand(map));
5275 lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
5276 And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
5277 Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5282 void MacroAssembler::JumpIfBlack(Register object,
5286 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5287 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5291 void MacroAssembler::HasColor(Register object,
5292 Register bitmap_scratch,
5293 Register mask_scratch,
5297 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5298 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5300 GetMarkBits(object, bitmap_scratch, mask_scratch);
5302 Label other_color, word_boundary;
5303 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5304 And(t8, t9, Operand(mask_scratch));
5305 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5306 // Shift the mask left by 1 by adding it to itself.
5307 Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5308 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5309 And(t8, t9, Operand(mask_scratch));
5310 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5313 bind(&word_boundary);
5314 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5315 And(t9, t9, Operand(1));
5316 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5321 // Detect some, but not all, common pointer-free objects. This is used by the
5322 // incremental write barrier which doesn't care about oddballs (they are always
5323 // marked black immediately so this code is not hit).
5324 void MacroAssembler::JumpIfDataObject(Register value,
5326 Label* not_data_object) {
5327 ASSERT(!AreAliased(value, scratch, t8, no_reg));
5328 Label is_data_object;
5329 lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5330 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5331 Branch(&is_data_object, eq, t8, Operand(scratch));
5332 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5333 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5334 // If it's a string and it's not a cons string then it's an object containing no GC pointers.
5336 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5337 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5338 Branch(not_data_object, ne, t8, Operand(zero_reg));
5339 bind(&is_data_object);
5343 void MacroAssembler::GetMarkBits(Register addr_reg,
5344 Register bitmap_reg,
5345 Register mask_reg) {
5346 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5347 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5348 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5349 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5350 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5351 sll(t8, t8, kPointerSizeLog2);
5352 Addu(bitmap_reg, bitmap_reg, t8);
5353 li(t8, Operand(1));
5354 sllv(mask_reg, t8, mask_reg);
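// GetMarkBits maps a heap address to a (cell, bit) pair in the page's marking
// bitmap: the address bits just above the pointer-size bits select the bit
// within a cell, and the bits above those select the cell. A minimal
// stand-alone sketch with 4-byte pointers, 32-bit cells and 1 MB pages
// (illustrative values only, not guaranteed to match V8's constants):
static inline void MarkBitPositionSketch(unsigned addr,
                                         unsigned* cell_index,
                                         unsigned* bit_mask) {
  const unsigned kPointerBits = 2;                  // log2(4-byte pointers).
  const unsigned kBitsPerCellLog2 = 5;              // 32 mark bits per cell.
  const unsigned kPageOffsetMask = (1u << 20) - 1;  // 1 MB pages.
  unsigned bit = (addr >> kPointerBits) & ((1u << kBitsPerCellLog2) - 1);
  *cell_index = (addr & kPageOffsetMask) >> (kPointerBits + kBitsPerCellLog2);
  *bit_mask = 1u << bit;
}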
5358 void MacroAssembler::EnsureNotWhite(
5360 Register bitmap_scratch,
5361 Register mask_scratch,
5362 Register load_scratch,
5363 Label* value_is_white_and_not_data) {
5364 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5365 GetMarkBits(value, bitmap_scratch, mask_scratch);
5367 // If the value is black or grey we don't need to do anything.
5368 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5369 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5370 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5371 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5375 // Since both black and grey have a 1 in the first position and white does
5376 // not have a 1 there we only need to check one bit.
5377 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5378 And(t8, mask_scratch, load_scratch);
5379 Branch(&done, ne, t8, Operand(zero_reg));
5381 if (emit_debug_code()) {
5382 // Check for impossible bit pattern.
5384 // sll may overflow, making the check conservative.
5385 sll(t8, mask_scratch, 1);
5386 And(t8, load_scratch, t8);
5387 Branch(&ok, eq, t8, Operand(zero_reg));
5388 stop("Impossible marking bit pattern");
5392 // Value is white. We check whether it is data that doesn't need scanning.
5393 // Currently only checks for HeapNumber and non-cons strings.
5394 Register map = load_scratch; // Holds map while checking type.
5395 Register length = load_scratch; // Holds length of object after testing type.
5396 Label is_data_object;
5398 // Check for heap-number
5399 lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5400 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5403 Branch(&skip, ne, t8, Operand(map));
5404 li(length, HeapNumber::kSize);
5405 Branch(&is_data_object);
5409 // Check for strings.
5410 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5411 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5412 // If it's a string and it's not a cons string then it's an object containing no GC pointers.
5414 Register instance_type = load_scratch;
5415 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5416 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5417 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5418 // It's a non-indirect (non-cons and non-slice) string.
5419 // If it's external, the length is just ExternalString::kSize.
5420 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5421 // External strings are the only ones with the kExternalStringTag bit set.
5423 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
5424 ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
5425 And(t8, instance_type, Operand(kExternalStringTag));
5428 Branch(&skip, eq, t8, Operand(zero_reg));
5429 li(length, ExternalString::kSize);
5430 Branch(&is_data_object);
5434 // Sequential string, either ASCII or UC16.
5435 // For ASCII (char-size of 1) we shift the smi tag away to get the length.
5436 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5437 // getting the length multiplied by 2.
5438 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5439 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5440 lw(t9, FieldMemOperand(value, String::kLengthOffset));
5441 And(t8, instance_type, Operand(kStringEncodingMask));
5444 Branch(&skip, eq, t8, Operand(zero_reg));
5448 Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5449 And(length, length, Operand(~kObjectAlignmentMask));
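// The Addu/And pair above rounds the string's byte size up to the allocation
// granularity: size = (header + payload + kObjectAlignmentMask) &
// ~kObjectAlignmentMask. Worked example with 8-byte alignment and a
// hypothetical 12-byte sequential string header: a 13-character one-byte
// string gives 12 + 13 = 25 bytes, and (25 + 7) & ~7 = 32 bytes of live data.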
5451 bind(&is_data_object);
5452 // Value is a data object, and it is white. Mark it black. Since we know
5453 // that the object is white we can make it black by flipping one bit.
5454 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5455 Or(t8, t8, Operand(mask_scratch));
5456 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5458 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5459 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5460 Addu(t8, t8, Operand(length));
5461 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5467 void MacroAssembler::Throw(BailoutReason reason) {
5471 const char* msg = GetBailoutReason(reason);
5473 RecordComment("Throw message: ");
5478 li(a0, Operand(Smi::FromInt(reason)));
5480 // Disable stub call restrictions to always allow calls to throw.
5482 // We don't actually want to generate a pile of code for this, so just
5483 // claim there is a stack frame, without generating one.
5484 FrameScope scope(this, StackFrame::NONE);
5485 CallRuntime(Runtime::kThrowMessage, 1);
5487 CallRuntime(Runtime::kThrowMessage, 1);
5489 // will not return here
5490 if (is_trampoline_pool_blocked()) {
5491 // If the calling code cares about the exact number of
5492 // instructions generated, we insert padding here to keep the size
5493 // of the ThrowMessage macro constant.
5494 // Currently in debug mode with debug_code enabled the number of
5495 // generated instructions is 14, so we use this as a maximum value.
5496 static const int kExpectedThrowMessageInstructions = 14;
5497 int throw_instructions = InstructionsGeneratedSince(&throw_start);
5498 ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
5499 while (throw_instructions++ < kExpectedThrowMessageInstructions) {
5506 void MacroAssembler::ThrowIf(Condition cc,
5507 BailoutReason reason,
5511 Branch(&L, NegateCondition(cc), rs, rt);
5512 Throw(reason);
5513 // will not return here
5514 bind(&L);
5518 void MacroAssembler::LoadInstanceDescriptors(Register map,
5519 Register descriptors) {
5520 lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5524 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5525 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5526 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5530 void MacroAssembler::EnumLength(Register dst, Register map) {
5531 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5532 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5533 And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
5537 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5538 Register empty_fixed_array_value = t2;
5539 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5543 // Check if the enum length field is properly initialized, indicating that
5544 // there is an enum cache.
5545 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5549 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5554 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5556 // For all objects but the receiver, check that the cache is empty.
5558 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5562 // Check that there are no elements. Register a2 contains the current JS
5563 // object we've reached through the prototype chain.
5565 lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5566 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5568 // Second chance, the object may be using the empty slow element dictionary.
5569 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5570 Branch(call_runtime, ne, a2, Operand(at));
5573 lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5574 Branch(&next, ne, a2, Operand(null_value));
5578 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5579 ASSERT(!output_reg.is(input_reg));
5581 li(output_reg, Operand(255));
5582 // Normal branch: nop in delay slot.
5583 Branch(&done, gt, input_reg, Operand(output_reg));
5584 // Use delay slot in this branch.
5585 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5586 mov(output_reg, zero_reg); // In delay slot.
5587 mov(output_reg, input_reg); // Value is in range 0..255.
5592 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5593 DoubleRegister input_reg,
5594 DoubleRegister temp_double_reg) {
5599 Move(temp_double_reg, 0.0);
5600 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5602 // Double value is not above zero (<= 0, -Inf or NaN): return 0.
5603 mov(result_reg, zero_reg);
5606 // Double value is above 255: return 255.
5608 Move(temp_double_reg, 255.0);
5609 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5610 li(result_reg, Operand(255));
5613 // In 0-255 range, round and truncate.
5615 cvt_w_d(temp_double_reg, input_reg);
5616 mfc1(result_reg, temp_double_reg);
5621 void MacroAssembler::TestJSArrayForAllocationMemento(
5622 Register receiver_reg,
5623 Register scratch_reg,
5624 Label* no_memento_found,
5626 Label* allocation_memento_present) {
5627 ExternalReference new_space_start =
5628 ExternalReference::new_space_start(isolate());
5629 ExternalReference new_space_allocation_top =
5630 ExternalReference::new_space_allocation_top_address(isolate());
5631 Addu(scratch_reg, receiver_reg,
5632 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5633 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5634 li(at, Operand(new_space_allocation_top));
5635 lw(at, MemOperand(at));
5636 Branch(no_memento_found, gt, scratch_reg, Operand(at));
5637 lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5638 if (allocation_memento_present) {
5639 Branch(allocation_memento_present, cond, scratch_reg,
5640 Operand(isolate()->factory()->allocation_memento_map()));
5645 Register GetRegisterThatIsNotOneOf(Register reg1,
5652 if (reg1.is_valid()) regs |= reg1.bit();
5653 if (reg2.is_valid()) regs |= reg2.bit();
5654 if (reg3.is_valid()) regs |= reg3.bit();
5655 if (reg4.is_valid()) regs |= reg4.bit();
5656 if (reg5.is_valid()) regs |= reg5.bit();
5657 if (reg6.is_valid()) regs |= reg6.bit();
5659 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
5660 Register candidate = Register::FromAllocationIndex(i);
5661 if (regs & candidate.bit()) continue;
5669 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5674 ASSERT(!scratch1.is(scratch0));
5675 Factory* factory = isolate()->factory();
5676 Register current = scratch0;
5679 // scratch0 is reused here to hold the current object while walking the prototype chain.
5680 Move(current, object);
5682 // Loop based on the map going up the prototype chain.
5684 lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
5685 lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
5686 Ext(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
5687 Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
5688 lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
5689 Branch(&loop_again, ne, current, Operand(factory->null_value()));
5693 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5694 if (r1.is(r2)) return true;
5695 if (r1.is(r3)) return true;
5696 if (r1.is(r4)) return true;
5697 if (r2.is(r3)) return true;
5698 if (r2.is(r4)) return true;
5699 if (r3.is(r4)) return true;
5704 CodePatcher::CodePatcher(byte* address, int instructions)
5705 : address_(address),
5706 size_(instructions * Assembler::kInstrSize),
5707 masm_(NULL, address, size_ + Assembler::kGap) {
5708 // Create a new macro assembler pointing to the address of the code to patch.
5709 // The size is adjusted with kGap in order for the assembler to generate size
5710 // bytes of instructions without failing with buffer size constraints.
5711 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5715 CodePatcher::~CodePatcher() {
5716 // Indicate that code has changed.
5717 CPU::FlushICache(address_, size_);
5719 // Check that the code was patched as expected.
5720 ASSERT(masm_.pc_ == address_ + size_);
5721 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5725 void CodePatcher::Emit(Instr instr) {
5726 masm()->emit(instr);
5730 void CodePatcher::Emit(Address addr) {
5731 masm()->emit(reinterpret_cast<Instr>(addr));
5735 void CodePatcher::ChangeBranchCondition(Condition cond) {
5736 Instr instr = Assembler::instr_at(masm_.pc_);
5737 ASSERT(Assembler::IsBranch(instr));
5738 uint32_t opcode = Assembler::GetOpcodeField(instr);
5739 // Currently only the 'eq' and 'ne' cond values are supported and the simple
5740 // branch instructions (with opcode being the branch type).
5741 // There are some special cases (see Assembler::IsBranch()) so extending this would be tricky.
5743 ASSERT(opcode == BEQ ||
5751 opcode = (cond == eq) ? BEQ : BNE;
5752 instr = (instr & ~kOpcodeMask) | opcode;
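// Changing the condition amounts to clearing the 6-bit opcode field of the
// branch instruction word and OR-ing in the (already positioned) BEQ or BNE
// encoding. A minimal stand-alone sketch of that bit surgery (illustrative
// only; on MIPS the opcode occupies bits 31..26):
static inline unsigned ReplaceOpcodeSketch(unsigned instr, unsigned opcode_bits) {
  const unsigned kOpcodeFieldMask = 0x3Fu << 26;  // Bits 31..26.
  return (instr & ~kOpcodeFieldMask) | (opcode_bits & kOpcodeFieldMask);
}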
5757 } } // namespace v8::internal
5759 #endif // V8_TARGET_ARCH_MIPS