1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include <limits.h> // For LONG_MIN, LONG_MAX.
32 #if V8_TARGET_ARCH_MIPS
34 #include "bootstrapper.h"
36 #include "cpu-profiler.h"
38 #include "isolate-inl.h"
44 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
45 : Assembler(arg_isolate, buffer, size),
46 generating_stub_(false),
47 allow_stub_calls_(true),
49 if (isolate() != NULL) {
50 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
56 void MacroAssembler::LoadRoot(Register destination,
57 Heap::RootListIndex index) {
58 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
62 void MacroAssembler::LoadRoot(Register destination,
63 Heap::RootListIndex index,
65 Register src1, const Operand& src2) {
66 Branch(2, NegateCondition(cond), src1, src2);
67 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
71 void MacroAssembler::StoreRoot(Register source,
72 Heap::RootListIndex index) {
73 sw(source, MemOperand(s6, index << kPointerSizeLog2));
77 void MacroAssembler::StoreRoot(Register source,
78 Heap::RootListIndex index,
80 Register src1, const Operand& src2) {
81 Branch(2, NegateCondition(cond), src1, src2);
82 sw(source, MemOperand(s6, index << kPointerSizeLog2));
86 void MacroAssembler::LoadHeapObject(Register result,
87 Handle<HeapObject> object) {
88 AllowDeferredHandleDereference using_raw_address;
89 if (isolate()->heap()->InNewSpace(*object)) {
90 Handle<Cell> cell = isolate()->factory()->NewCell(object);
91 li(result, Operand(cell));
92 lw(result, FieldMemOperand(result, Cell::kValueOffset));
94 li(result, Operand(object));
99 // Push and pop all registers that can hold pointers.
100 void MacroAssembler::PushSafepointRegisters() {
101 // Safepoints expect a block of kNumSafepointRegisters values on the
102 // stack, so adjust the stack for unsaved registers.
103 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
104 ASSERT(num_unsaved >= 0);
105 if (num_unsaved > 0) {
106 Subu(sp, sp, Operand(num_unsaved * kPointerSize));
108 MultiPush(kSafepointSavedRegisters);
112 void MacroAssembler::PopSafepointRegisters() {
113 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
114 MultiPop(kSafepointSavedRegisters);
115 if (num_unsaved > 0) {
116 Addu(sp, sp, Operand(num_unsaved * kPointerSize));
121 void MacroAssembler::PushSafepointRegistersAndDoubles() {
122 PushSafepointRegisters();
123 Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
124 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
125 FPURegister reg = FPURegister::FromAllocationIndex(i);
126 sdc1(reg, MemOperand(sp, i * kDoubleSize));
131 void MacroAssembler::PopSafepointRegistersAndDoubles() {
132 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
133 FPURegister reg = FPURegister::FromAllocationIndex(i);
134 ldc1(reg, MemOperand(sp, i * kDoubleSize));
136 Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
137 PopSafepointRegisters();
141 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
143 sw(src, SafepointRegistersAndDoublesSlot(dst));
147 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
148 sw(src, SafepointRegisterSlot(dst));
152 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
153 lw(dst, SafepointRegisterSlot(src));
157 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
158 // The registers are pushed starting with the highest encoding,
159 // which means that the lowest encodings are closest to the stack pointer.
160 return kSafepointRegisterStackIndexMap[reg_code];
164 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
165 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
169 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
170 UNIMPLEMENTED_MIPS();
171 // General purpose registers are pushed last on the stack.
172 int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
173 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
174 return MemOperand(sp, doubles_size + register_offset);
178 void MacroAssembler::InNewSpace(Register object,
182 ASSERT(cc == eq || cc == ne);
183 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
184 Branch(branch, cc, scratch,
185 Operand(ExternalReference::new_space_start(isolate())));
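  // A rough sketch of the check above, assuming new space occupies a single
  // aligned block of memory: an address lies in new space exactly when
  //   (addr & new_space_mask) == new_space_start,
  // i.e. masking off the low bits of the address recovers the block start.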
189 void MacroAssembler::RecordWriteField(
195 SaveFPRegsMode save_fp,
196 RememberedSetAction remembered_set_action,
197 SmiCheck smi_check) {
198 ASSERT(!AreAliased(value, dst, t8, object));
199 // First, check if a write barrier is even needed. The tests below
200 // catch stores of Smis.
203 // Skip barrier if writing a smi.
204 if (smi_check == INLINE_SMI_CHECK) {
205 JumpIfSmi(value, &done);
208 // Although the object register is tagged, the offset is relative to the start
209 // of the object, so the offset must be a multiple of kPointerSize.
210 ASSERT(IsAligned(offset, kPointerSize));
212 Addu(dst, object, Operand(offset - kHeapObjectTag));
213 if (emit_debug_code()) {
215 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
216 Branch(&ok, eq, t8, Operand(zero_reg));
217 stop("Unaligned cell in write barrier");
226 remembered_set_action,
231 // Clobber clobbered input registers when running with the debug-code flag
232 // turned on to provoke errors.
233 if (emit_debug_code()) {
234 li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
235 li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
240 // Will clobber 4 registers: object, address, scratch, ip. The
241 // register 'object' contains a heap object pointer. The heap object
242 // tag is shifted away.
243 void MacroAssembler::RecordWrite(Register object,
247 SaveFPRegsMode fp_mode,
248 RememberedSetAction remembered_set_action,
249 SmiCheck smi_check) {
250 ASSERT(!AreAliased(object, address, value, t8));
251 ASSERT(!AreAliased(object, address, value, t9));
253 if (emit_debug_code()) {
254 lw(at, MemOperand(address));
256 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
261 if (smi_check == INLINE_SMI_CHECK) {
262 ASSERT_EQ(0, kSmiTag);
263 JumpIfSmi(value, &done);
267 value, // Used as scratch.
268 MemoryChunk::kPointersToHereAreInterestingMask,
271 CheckPageFlag(object,
272 value, // Used as scratch.
273 MemoryChunk::kPointersFromHereAreInterestingMask,
277 // Record the actual write.
278 if (ra_status == kRAHasNotBeenSaved) {
281 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
283 if (ra_status == kRAHasNotBeenSaved) {
289 // Clobber clobbered registers when running with the debug-code flag
290 // turned on to provoke errors.
291 if (emit_debug_code()) {
292 li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
293 li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
298 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
301 SaveFPRegsMode fp_mode,
302 RememberedSetFinalAction and_then) {
304 if (emit_debug_code()) {
306 JumpIfNotInNewSpace(object, scratch, &ok);
307 stop("Remembered set pointer is in new space");
310 // Load store buffer top.
311 ExternalReference store_buffer =
312 ExternalReference::store_buffer_top(isolate());
313 li(t8, Operand(store_buffer));
314 lw(scratch, MemOperand(t8));
315 // Store pointer to buffer and increment buffer top.
316 sw(address, MemOperand(scratch));
317 Addu(scratch, scratch, kPointerSize);
318 // Write back new top of buffer.
319 sw(scratch, MemOperand(t8));
320 // Call stub on end of buffer.
321 // Check for end of buffer.
322 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
323 if (and_then == kFallThroughAtEnd) {
324 Branch(&done, eq, t8, Operand(zero_reg));
326 ASSERT(and_then == kReturnAtEnd);
327 Ret(eq, t8, Operand(zero_reg));
330 StoreBufferOverflowStub store_buffer_overflow =
331 StoreBufferOverflowStub(fp_mode);
332 CallStub(&store_buffer_overflow);
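  // C-like sketch of the fast path above (illustrative, not part of the
  // original source):
  //   *store_buffer_top++ = address;
  //   if ((store_buffer_top & kStoreBufferOverflowBit) == 0) {
  //     // Buffer not full yet: fall through or return.
  //   } else {
  //     // Buffer full: StoreBufferOverflowStub drains it.
  //   }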
335 if (and_then == kReturnAtEnd) {
341 // -----------------------------------------------------------------------------
342 // Allocation support.
345 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
350 ASSERT(!holder_reg.is(scratch));
351 ASSERT(!holder_reg.is(at));
352 ASSERT(!scratch.is(at));
354 // Load current lexical context from the stack frame.
355 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
356 // In debug mode, make sure the lexical context is set.
358 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
359 scratch, Operand(zero_reg));
362 // Load the native context of the current context.
364 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
365 lw(scratch, FieldMemOperand(scratch, offset));
366 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
368 // Check the context is a native context.
369 if (emit_debug_code()) {
370 push(holder_reg); // Temporarily save holder on the stack.
371 // Read the first word and compare to the native_context_map.
372 lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
373 LoadRoot(at, Heap::kNativeContextMapRootIndex);
374 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
375 holder_reg, Operand(at));
376 pop(holder_reg); // Restore holder.
379 // Check if both contexts are the same.
380 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
381 Branch(&same_contexts, eq, scratch, Operand(at));
383 // Check the context is a native context.
384 if (emit_debug_code()) {
385 push(holder_reg); // Temporarily save holder on the stack.
386 mov(holder_reg, at); // Move at to its holding place.
387 LoadRoot(at, Heap::kNullValueRootIndex);
388 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
389 holder_reg, Operand(at));
391 lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
392 LoadRoot(at, Heap::kNativeContextMapRootIndex);
393 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
394 holder_reg, Operand(at));
395 // Restoring at is not needed; at is reloaded below.
396 pop(holder_reg); // Restore holder.
397 // Restore at to holder's context.
398 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
401 // Check that the security token in the calling global object is
402 // compatible with the security token in the receiving global object.
404 int token_offset = Context::kHeaderSize +
405 Context::SECURITY_TOKEN_INDEX * kPointerSize;
407 lw(scratch, FieldMemOperand(scratch, token_offset));
408 lw(at, FieldMemOperand(at, token_offset));
409 Branch(miss, ne, scratch, Operand(at));
411 bind(&same_contexts);
415 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
416 // First of all we assign the hash seed to scratch.
417 LoadRoot(scratch, Heap::kHashSeedRootIndex);
420 // Xor original key with a seed.
421 xor_(reg0, reg0, scratch);
423 // Compute the hash code from the untagged key. This must be kept in sync
424 // with ComputeIntegerHash in utils.h.
426 // hash = ~hash + (hash << 15);
427 nor(scratch, reg0, zero_reg);
429 addu(reg0, scratch, at);
431 // hash = hash ^ (hash >> 12);
433 xor_(reg0, reg0, at);
435 // hash = hash + (hash << 2);
437 addu(reg0, reg0, at);
439 // hash = hash ^ (hash >> 4);
441 xor_(reg0, reg0, at);
443 // hash = hash * 2057;
444 sll(scratch, reg0, 11);
446 addu(reg0, reg0, at);
447 addu(reg0, reg0, scratch);
449 // hash = hash ^ (hash >> 16);
451 xor_(reg0, reg0, at);
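  // A minimal C sketch of the computation above, mirroring the steps listed
  // in the comments (the helper name is illustrative, not part of this file):
  //   uint32_t ComputeHash(uint32_t key, uint32_t seed) {
  //     uint32_t hash = key ^ seed;
  //     hash = ~hash + (hash << 15);
  //     hash = hash ^ (hash >> 12);
  //     hash = hash + (hash << 2);
  //     hash = hash ^ (hash >> 4);
  //     hash = hash * 2057;  // Same as hash + (hash << 3) + (hash << 11).
  //     hash = hash ^ (hash >> 16);
  //     return hash;
  //   }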
455 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
464 // elements - holds the slow-case elements of the receiver on entry.
465 // Unchanged unless 'result' is the same register.
467 // key - holds the smi key on entry.
468 // Unchanged unless 'result' is the same register.
471 // result - holds the result on exit if the load succeeded.
472 // Allowed to be the same as 'key' or 'result'.
473 // Unchanged on bailout so 'key' or 'result' can be used
474 // in further computation.
476 // Scratch registers:
478 // reg0 - holds the untagged key on entry and holds the hash once computed.
480 // reg1 - Used to hold the capacity mask of the dictionary.
482 // reg2 - Used for the index into the dictionary.
483 // at - Temporary (avoid MacroAssembler instructions also using 'at').
486 GetNumberHash(reg0, reg1);
488 // Compute the capacity mask.
489 lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
490 sra(reg1, reg1, kSmiTagSize);
491 Subu(reg1, reg1, Operand(1));
493 // Generate an unrolled loop that performs a few probes before giving up.
494 static const int kProbes = 4;
495 for (int i = 0; i < kProbes; i++) {
496 // Use reg2 for index calculations and keep the hash intact in reg0.
498 // Compute the masked index: (hash + i + i * i) & mask.
500 Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
502 and_(reg2, reg2, reg1);
504 // Scale the index by multiplying by the element size.
505 ASSERT(SeededNumberDictionary::kEntrySize == 3);
506 sll(at, reg2, 1); // 2x.
507 addu(reg2, reg2, at); // reg2 = reg2 * 3.
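    // Sketch of the addressing math for one probe, assuming each entry is
    // kEntrySize == 3 pointers (key, value, details):
    //   index  = (hash + i + i*i) & mask;
    //   offset = index * 3 * kPointerSize;  // index*3 == (index << 1) + index.
    //   entry  = elements + kElementsStartOffset + offset;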
509 // Check if the key is identical to the name.
510 sll(at, reg2, kPointerSizeLog2);
511 addu(reg2, elements, at);
513 lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
514 if (i != kProbes - 1) {
515 Branch(&done, eq, key, Operand(at));
517 Branch(miss, ne, key, Operand(at));
522 // Check that the value is a normal property.
523 // reg2: elements + (index * kPointerSize).
524 const int kDetailsOffset =
525 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
526 lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
527 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
528 Branch(miss, ne, at, Operand(zero_reg));
530 // Get the value at the masked, scaled index and return.
531 const int kValueOffset =
532 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
533 lw(result, FieldMemOperand(reg2, kValueOffset));
537 // ---------------------------------------------------------------------------
538 // Instruction macros.
540 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
542 addu(rd, rs, rt.rm());
544 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
545 addiu(rd, rs, rt.imm32_);
547 // li handles the relocation.
556 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
558 subu(rd, rs, rt.rm());
560 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
561 addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
563 // li handles the relocation.
572 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
574 if (kArchVariant == kLoongson) {
578 mul(rd, rs, rt.rm());
581 // li handles the relocation.
584 if (kArchVariant == kLoongson) {
594 void MacroAssembler::Mult(Register rs, const Operand& rt) {
598 // li handles the relocation.
606 void MacroAssembler::Multu(Register rs, const Operand& rt) {
610 // li handles the relocation.
618 void MacroAssembler::Div(Register rs, const Operand& rt) {
622 // li handles the relocation.
630 void MacroAssembler::Divu(Register rs, const Operand& rt) {
634 // li handles the relocation.
642 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
644 and_(rd, rs, rt.rm());
646 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
647 andi(rd, rs, rt.imm32_);
649 // li handles the relocation.
658 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
660 or_(rd, rs, rt.rm());
662 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
663 ori(rd, rs, rt.imm32_);
665 // li handles the relocation.
674 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
676 xor_(rd, rs, rt.rm());
678 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
679 xori(rd, rs, rt.imm32_);
681 // li handles the relocation.
690 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
692 nor(rd, rs, rt.rm());
694 // li handles the relocation.
702 void MacroAssembler::Neg(Register rs, const Operand& rt) {
705 ASSERT(!at.is(rt.rm()));
707 xor_(rs, rt.rm(), at);
711 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
713 slt(rd, rs, rt.rm());
715 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
716 slti(rd, rs, rt.imm32_);
718 // li handles the relocation.
727 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
729 sltu(rd, rs, rt.rm());
731 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
732 sltiu(rd, rs, rt.imm32_);
734 // li handles the relocation.
743 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
744 if (kArchVariant == kMips32r2) {
746 rotrv(rd, rs, rt.rm());
748 rotr(rd, rs, rt.imm32_);
752 subu(at, zero_reg, rt.rm());
754 srlv(rd, rs, rt.rm());
757 if (rt.imm32_ == 0) {
760 srl(at, rs, rt.imm32_);
761 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
769 //------------Pseudo-instructions-------------
771 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
773 BlockTrampolinePoolScope block_trampoline_pool(this);
774 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
775 // Normal load of an immediate value which does not need Relocation Info.
776 if (is_int16(j.imm32_)) {
777 addiu(rd, zero_reg, j.imm32_);
778 } else if (!(j.imm32_ & kHiMask)) {
779 ori(rd, zero_reg, j.imm32_);
780 } else if (!(j.imm32_ & kImm16Mask)) {
781 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
783 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
784 ori(rd, rd, (j.imm32_ & kImm16Mask));
787 if (MustUseReg(j.rmode_)) {
788 RecordRelocInfo(j.rmode_, j.imm32_);
790 // We always need the same number of instructions as we may need to patch
791 // this code to load another value, which may need 2 instructions to load.
792 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
793 ori(rd, rd, (j.imm32_ & kImm16Mask));
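// Example (sketch): a constant that needs relocation info (or is loaded in
// CONSTANT_SIZE mode) always takes the fixed two-instruction form, e.g. for
// 0x12345678:
//   lui(rd, 0x1234);       // rd = 0x12340000
//   ori(rd, rd, 0x5678);   // rd = 0x12345678
// whereas with OPTIMIZE_SIZE a small constant such as 0x42 needs only
// addiu(rd, zero_reg, 0x42).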
798 void MacroAssembler::MultiPush(RegList regs) {
799 int16_t num_to_push = NumberOfBitsSet(regs);
800 int16_t stack_offset = num_to_push * kPointerSize;
802 Subu(sp, sp, Operand(stack_offset));
803 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
804 if ((regs & (1 << i)) != 0) {
805 stack_offset -= kPointerSize;
806 sw(ToRegister(i), MemOperand(sp, stack_offset));
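  // Usage sketch: MultiPush(a0.bit() | a1.bit()) reserves 2 * kPointerSize
  // bytes and stores a1 at [sp + kPointerSize] and a0 at [sp + 0], so the
  // lowest-numbered register always ends up closest to the stack pointer.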
812 void MacroAssembler::MultiPushReversed(RegList regs) {
813 int16_t num_to_push = NumberOfBitsSet(regs);
814 int16_t stack_offset = num_to_push * kPointerSize;
816 Subu(sp, sp, Operand(stack_offset));
817 for (int16_t i = 0; i < kNumRegisters; i++) {
818 if ((regs & (1 << i)) != 0) {
819 stack_offset -= kPointerSize;
820 sw(ToRegister(i), MemOperand(sp, stack_offset));
826 void MacroAssembler::MultiPop(RegList regs) {
827 int16_t stack_offset = 0;
829 for (int16_t i = 0; i < kNumRegisters; i++) {
830 if ((regs & (1 << i)) != 0) {
831 lw(ToRegister(i), MemOperand(sp, stack_offset));
832 stack_offset += kPointerSize;
835 addiu(sp, sp, stack_offset);
839 void MacroAssembler::MultiPopReversed(RegList regs) {
840 int16_t stack_offset = 0;
842 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
843 if ((regs & (1 << i)) != 0) {
844 lw(ToRegister(i), MemOperand(sp, stack_offset));
845 stack_offset += kPointerSize;
848 addiu(sp, sp, stack_offset);
852 void MacroAssembler::MultiPushFPU(RegList regs) {
853 int16_t num_to_push = NumberOfBitsSet(regs);
854 int16_t stack_offset = num_to_push * kDoubleSize;
856 Subu(sp, sp, Operand(stack_offset));
857 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
858 if ((regs & (1 << i)) != 0) {
859 stack_offset -= kDoubleSize;
860 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
866 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
867 int16_t num_to_push = NumberOfBitsSet(regs);
868 int16_t stack_offset = num_to_push * kDoubleSize;
870 Subu(sp, sp, Operand(stack_offset));
871 for (int16_t i = 0; i < kNumRegisters; i++) {
872 if ((regs & (1 << i)) != 0) {
873 stack_offset -= kDoubleSize;
874 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
880 void MacroAssembler::MultiPopFPU(RegList regs) {
881 int16_t stack_offset = 0;
883 for (int16_t i = 0; i < kNumRegisters; i++) {
884 if ((regs & (1 << i)) != 0) {
885 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
886 stack_offset += kDoubleSize;
889 addiu(sp, sp, stack_offset);
893 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
894 int16_t stack_offset = 0;
896 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
897 if ((regs & (1 << i)) != 0) {
898 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
899 stack_offset += kDoubleSize;
902 addiu(sp, sp, stack_offset);
906 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
907 RegList saved_regs = kJSCallerSaved | ra.bit();
908 MultiPush(saved_regs);
909 AllowExternalCallThatCantCauseGC scope(this);
911 // Save to a0 in case address == t0.
913 PrepareCallCFunction(2, t0);
915 li(a1, instructions * kInstrSize);
916 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
917 MultiPop(saved_regs);
921 void MacroAssembler::Ext(Register rt,
926 ASSERT(pos + size < 33);
928 if (kArchVariant == kMips32r2) {
929 ext_(rt, rs, pos, size);
931 // Move rs to rt and shift it left then right to get the
932 // desired bitfield on the right side and zeroes on the left.
933 int shift_left = 32 - (pos + size);
934 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
936 int shift_right = 32 - size;
937 if (shift_right > 0) {
938 srl(rt, rt, shift_right);
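    // Worked example (sketch): for pos == 4 and size == 8, shift_left == 20
    // and shift_right == 24, so bits [11:4] of rs land in bits [7:0] of rt
    // with all higher bits cleared.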
944 void MacroAssembler::Ins(Register rt,
949 ASSERT(pos + size <= 32);
952 if (kArchVariant == kMips32r2) {
953 ins_(rt, rs, pos, size);
955 ASSERT(!rt.is(t8) && !rs.is(t8));
956 Subu(at, zero_reg, Operand(1));
957 srl(at, at, 32 - size);
961 nor(at, at, zero_reg);
968 void MacroAssembler::Cvt_d_uw(FPURegister fd,
970 FPURegister scratch) {
971 // Move the data from fs to t8.
973 Cvt_d_uw(fd, t8, scratch);
977 void MacroAssembler::Cvt_d_uw(FPURegister fd,
979 FPURegister scratch) {
980 // Convert rs to a FP value in fd (and fd + 1).
981 // We do this by converting rs minus the MSB to avoid sign conversion,
982 // then adding 2^31 to the result (if needed).
984 ASSERT(!fd.is(scratch));
988 // Save rs's MSB to t9.
992 // Move the result to fd.
995 // Convert fd to a real FP value.
998 Label conversion_done;
1000 // If rs's MSB was 0, it's done.
1001 // Otherwise we need to add 2^31 to the FP register.
1002 Branch(&conversion_done, eq, t9, Operand(zero_reg));
1004 // Load 2^31 into scratch as its float representation.
1006 mtc1(at, FPURegister::from_code(scratch.code() + 1));
1007 mtc1(zero_reg, scratch);
1009 add_d(fd, fd, scratch);
1011 bind(&conversion_done);
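  // In effect (sketch): the result is
  //   (double)(rs & 0x7FFFFFFF) + ((rs >> 31) ? 2147483648.0 : 0.0),
  // where 2^31 is materialized as the double with high word 0x41E00000 and
  // low word 0.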
1015 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1017 FPURegister scratch) {
1018 Trunc_uw_d(fs, t8, scratch);
1023 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1024 if (kArchVariant == kLoongson && fd.is(fs)) {
1025 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1027 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1034 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1035 if (kArchVariant == kLoongson && fd.is(fs)) {
1036 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1038 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1045 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1046 if (kArchVariant == kLoongson && fd.is(fs)) {
1047 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1049 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1056 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1057 if (kArchVariant == kLoongson && fd.is(fs)) {
1058 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1060 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1067 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1069 FPURegister scratch) {
1070 ASSERT(!fd.is(scratch));
1073 // Load 2^31 into scratch as its float representation.
1075 mtc1(at, FPURegister::from_code(scratch.code() + 1));
1076 mtc1(zero_reg, scratch);
1077 // Test if scratch > fd.
1078 // If fd < 2^31 we can convert it normally.
1079 Label simple_convert;
1080 BranchF(&simple_convert, NULL, lt, fd, scratch);
1082 // First we subtract 2^31 from fd, then trunc it to rs
1083 // and add 2^31 to rs.
1084 sub_d(scratch, fd, scratch);
1085 trunc_w_d(scratch, scratch);
1087 Or(rs, rs, 1 << 31);
1091 // Simple conversion.
1092 bind(&simple_convert);
1093 trunc_w_d(scratch, fd);
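  // Equivalent computation (sketch):
  //   rs = (fd < 2^31) ? (uint32_t)trunc(fd)
  //                    : ((uint32_t)trunc(fd - 2^31) | 0x80000000);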
1100 void MacroAssembler::BranchF(Label* target,
1105 BranchDelaySlot bd) {
1106 BlockTrampolinePoolScope block_trampoline_pool(this);
1112 ASSERT(nan || target);
1113 // Check for unordered (NaN) cases.
1115 c(UN, D, cmp1, cmp2);
1120 // Here NaN cases were either handled by this function or are assumed to
1121 // have been handled by the caller.
1122 // Unsigned conditions are treated as their signed counterparts.
1125 c(OLT, D, cmp1, cmp2);
1129 c(ULE, D, cmp1, cmp2);
1133 c(ULT, D, cmp1, cmp2);
1137 c(OLE, D, cmp1, cmp2);
1141 c(EQ, D, cmp1, cmp2);
1145 c(UEQ, D, cmp1, cmp2);
1149 c(EQ, D, cmp1, cmp2);
1153 c(UEQ, D, cmp1, cmp2);
1161 if (bd == PROTECT) {
1167 void MacroAssembler::Move(FPURegister dst, double imm) {
1168 static const DoubleRepresentation minus_zero(-0.0);
1169 static const DoubleRepresentation zero(0.0);
1170 DoubleRepresentation value(imm);
1171 // Handle special values first.
1172 bool force_load = dst.is(kDoubleRegZero);
1173 if (value.bits == zero.bits && !force_load) {
1174 mov_d(dst, kDoubleRegZero);
1175 } else if (value.bits == minus_zero.bits && !force_load) {
1176 neg_d(dst, kDoubleRegZero);
1179 DoubleAsTwoUInt32(imm, &lo, &hi);
1180 // Move the low part of the double into the lower FPU register of the
1181 // register pair.
1183 li(at, Operand(lo));
1186 mtc1(zero_reg, dst);
1188 // Move the high part of the double into the higher FPU register of the
1189 // register pair.
1191 li(at, Operand(hi));
1192 mtc1(at, dst.high());
1194 mtc1(zero_reg, dst.high());
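  // Example (sketch): for imm == 1.0, DoubleAsTwoUInt32 yields lo == 0 and
  // hi == 0x3FF00000, so the low word is simply zeroed with mtc1(zero_reg, ...)
  // and only the high word needs li + mtc1.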
1200 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1201 if (kArchVariant == kLoongson) {
1203 Branch(&done, ne, rt, Operand(zero_reg));
1212 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1213 if (kArchVariant == kLoongson) {
1215 Branch(&done, eq, rt, Operand(zero_reg));
1224 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1225 if (kArchVariant == kLoongson) {
1226 // Tests an FP condition code and then conditionally moves rs to rd.
1227 // We do not currently use any FPU cc bit other than bit 0.
1229 ASSERT(!(rs.is(t8) || rd.is(t8)));
1231 Register scratch = t8;
1232 // For testing purposes we need to fetch the contents of the FCSR register
1233 // and then test its cc (floating point condition code) bit (for cc = 0, it
1234 // is the 24th bit of the FCSR).
1235 cfc1(scratch, FCSR);
1236 // For the MIPS I, II and III architectures, the contents of scratch are
1237 // UNPREDICTABLE for the instruction immediately following CFC1.
1239 srl(scratch, scratch, 16);
1240 andi(scratch, scratch, 0x0080);
1241 Branch(&done, eq, scratch, Operand(zero_reg));
1250 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1251 if (kArchVariant == kLoongson) {
1252 // Tests an FP condition code and then conditionally moves rs to rd.
1253 // We do not currently use any FPU cc bit other than bit 0.
1255 ASSERT(!(rs.is(t8) || rd.is(t8)));
1257 Register scratch = t8;
1258 // For testing purposes we need to fetch the contents of the FCSR register
1259 // and then test its cc (floating point condition code) bit (for cc = 0, it
1260 // is the 24th bit of the FCSR).
1261 cfc1(scratch, FCSR);
1262 // For the MIPS I, II and III architectures, the contents of scratch are
1263 // UNPREDICTABLE for the instruction immediately following CFC1.
1265 srl(scratch, scratch, 16);
1266 andi(scratch, scratch, 0x0080);
1267 Branch(&done, ne, scratch, Operand(zero_reg));
1276 void MacroAssembler::Clz(Register rd, Register rs) {
1277 if (kArchVariant == kLoongson) {
1278 ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1280 Register scratch = t9;
1286 and_(scratch, at, mask);
1287 Branch(&end, ne, scratch, Operand(zero_reg));
1289 Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1298 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1300 DoubleRegister double_input,
1302 DoubleRegister double_scratch,
1303 Register except_flag,
1304 CheckForInexactConversion check_inexact) {
1305 ASSERT(!result.is(scratch));
1306 ASSERT(!double_input.is(double_scratch));
1307 ASSERT(!except_flag.is(scratch));
1311 // Clear the except flag (0 = no exception)
1312 mov(except_flag, zero_reg);
1314 // Test for values that can be exactly represented as a signed 32-bit integer.
1315 cvt_w_d(double_scratch, double_input);
1316 mfc1(result, double_scratch);
1317 cvt_d_w(double_scratch, double_scratch);
1318 BranchF(&done, NULL, eq, double_input, double_scratch);
1320 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1322 if (check_inexact == kDontCheckForInexactConversion) {
1323 // Ignore inexact exceptions.
1324 except_mask &= ~kFCSRInexactFlagMask;
1328 cfc1(scratch, FCSR);
1329 // Disable FPU exceptions.
1330 ctc1(zero_reg, FCSR);
1332 // Do operation based on rounding mode.
1333 switch (rounding_mode) {
1334 case kRoundToNearest:
1335 Round_w_d(double_scratch, double_input);
1338 Trunc_w_d(double_scratch, double_input);
1340 case kRoundToPlusInf:
1341 Ceil_w_d(double_scratch, double_input);
1343 case kRoundToMinusInf:
1344 Floor_w_d(double_scratch, double_input);
1346 } // End of switch-statement.
1349 cfc1(except_flag, FCSR);
1351 ctc1(scratch, FCSR);
1352 // Move the converted value into the result register.
1353 mfc1(result, double_scratch);
1355 // Check for fpu exceptions.
1356 And(except_flag, except_flag, Operand(except_mask));
1362 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1363 DoubleRegister double_input,
1365 DoubleRegister single_scratch = kLithiumScratchDouble.low();
1366 Register scratch = at;
1367 Register scratch2 = t9;
1369 // Clear cumulative exception flags and save the FCSR.
1370 cfc1(scratch2, FCSR);
1371 ctc1(zero_reg, FCSR);
1372 // Try a conversion to a signed integer.
1373 trunc_w_d(single_scratch, double_input);
1374 mfc1(result, single_scratch);
1375 // Retrieve and restore the FCSR.
1376 cfc1(scratch, FCSR);
1377 ctc1(scratch2, FCSR);
1378 // Check for overflow and NaNs.
1381 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1382 // If we had no exceptions we are done.
1383 Branch(done, eq, scratch, Operand(zero_reg));
1387 void MacroAssembler::TruncateDoubleToI(Register result,
1388 DoubleRegister double_input) {
1391 TryInlineTruncateDoubleToI(result, double_input, &done);
1393 // If we fell through then the inline version didn't succeed; call the stub instead.
1395 Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
1396 sdc1(double_input, MemOperand(sp, 0));
1398 DoubleToIStub stub(sp, result, 0, true, true);
1401 Addu(sp, sp, Operand(kDoubleSize));
1408 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1410 DoubleRegister double_scratch = f12;
1411 ASSERT(!result.is(object));
1413 ldc1(double_scratch,
1414 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1415 TryInlineTruncateDoubleToI(result, double_scratch, &done);
1417 // If we fell through then the inline version didn't succeed; call the stub instead.
1419 DoubleToIStub stub(object,
1421 HeapNumber::kValueOffset - kHeapObjectTag,
1431 void MacroAssembler::TruncateNumberToI(Register object,
1433 Register heap_number_map,
1435 Label* not_number) {
1437 ASSERT(!result.is(object));
1439 UntagAndJumpIfSmi(result, object, &done);
1440 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1441 TruncateHeapNumberToI(result, object);
1447 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1449 int num_least_bits) {
1450 Ext(dst, src, kSmiTagSize, num_least_bits);
1454 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1456 int num_least_bits) {
1457 And(dst, src, Operand((1 << num_least_bits) - 1));
1461 // Emulated conditional branches do not emit a nop in the branch delay slot.
1463 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1464 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
1465 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1466 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1469 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1470 BranchShort(offset, bdslot);
1474 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1476 BranchDelaySlot bdslot) {
1477 BranchShort(offset, cond, rs, rt, bdslot);
1481 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1482 if (L->is_bound()) {
1484 BranchShort(L, bdslot);
1489 if (is_trampoline_emitted()) {
1492 BranchShort(L, bdslot);
1498 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1500 BranchDelaySlot bdslot) {
1501 if (L->is_bound()) {
1503 BranchShort(L, cond, rs, rt, bdslot);
1506 Condition neg_cond = NegateCondition(cond);
1507 BranchShort(&skip, neg_cond, rs, rt);
1512 if (is_trampoline_emitted()) {
1514 Condition neg_cond = NegateCondition(cond);
1515 BranchShort(&skip, neg_cond, rs, rt);
1519 BranchShort(L, cond, rs, rt, bdslot);
1525 void MacroAssembler::Branch(Label* L,
1528 Heap::RootListIndex index,
1529 BranchDelaySlot bdslot) {
1530 LoadRoot(at, index);
1531 Branch(L, cond, rs, Operand(at), bdslot);
1535 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1538 // Emit a nop in the branch delay slot if required.
1539 if (bdslot == PROTECT)
1544 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1546 BranchDelaySlot bdslot) {
1547 BRANCH_ARGS_CHECK(cond, rs, rt);
1548 ASSERT(!rs.is(zero_reg));
1549 Register r2 = no_reg;
1550 Register scratch = at;
1553 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or rt.
1555 BlockTrampolinePoolScope block_trampoline_pool(this);
1562 beq(rs, r2, offset);
1565 bne(rs, r2, offset);
1567 // Signed comparison.
1569 if (r2.is(zero_reg)) {
1572 slt(scratch, r2, rs);
1573 bne(scratch, zero_reg, offset);
1577 if (r2.is(zero_reg)) {
1580 slt(scratch, rs, r2);
1581 beq(scratch, zero_reg, offset);
1585 if (r2.is(zero_reg)) {
1588 slt(scratch, rs, r2);
1589 bne(scratch, zero_reg, offset);
1593 if (r2.is(zero_reg)) {
1596 slt(scratch, r2, rs);
1597 beq(scratch, zero_reg, offset);
1600 // Unsigned comparison.
1602 if (r2.is(zero_reg)) {
1605 sltu(scratch, r2, rs);
1606 bne(scratch, zero_reg, offset);
1609 case Ugreater_equal:
1610 if (r2.is(zero_reg)) {
1613 sltu(scratch, rs, r2);
1614 beq(scratch, zero_reg, offset);
1618 if (r2.is(zero_reg)) {
1619 // No code needs to be emitted.
1622 sltu(scratch, rs, r2);
1623 bne(scratch, zero_reg, offset);
1627 if (r2.is(zero_reg)) {
1630 sltu(scratch, r2, rs);
1631 beq(scratch, zero_reg, offset);
1638 // Be careful to always use shifted_branch_offset only just before the
1639 // branch instruction, as the location will be remembered for patching.
1641 BlockTrampolinePoolScope block_trampoline_pool(this);
1647 // We don't want any other register but scratch clobbered.
1648 ASSERT(!scratch.is(rs));
1651 beq(rs, r2, offset);
1654 // We don't want any other register but scratch clobbered.
1655 ASSERT(!scratch.is(rs));
1658 bne(rs, r2, offset);
1660 // Signed comparison.
1662 if (rt.imm32_ == 0) {
1667 slt(scratch, r2, rs);
1668 bne(scratch, zero_reg, offset);
1672 if (rt.imm32_ == 0) {
1674 } else if (is_int16(rt.imm32_)) {
1675 slti(scratch, rs, rt.imm32_);
1676 beq(scratch, zero_reg, offset);
1680 slt(scratch, rs, r2);
1681 beq(scratch, zero_reg, offset);
1685 if (rt.imm32_ == 0) {
1687 } else if (is_int16(rt.imm32_)) {
1688 slti(scratch, rs, rt.imm32_);
1689 bne(scratch, zero_reg, offset);
1693 slt(scratch, rs, r2);
1694 bne(scratch, zero_reg, offset);
1698 if (rt.imm32_ == 0) {
1703 slt(scratch, r2, rs);
1704 beq(scratch, zero_reg, offset);
1707 // Unsigned comparison.
1709 if (rt.imm32_ == 0) {
1714 sltu(scratch, r2, rs);
1715 bne(scratch, zero_reg, offset);
1718 case Ugreater_equal:
1719 if (rt.imm32_ == 0) {
1721 } else if (is_int16(rt.imm32_)) {
1722 sltiu(scratch, rs, rt.imm32_);
1723 beq(scratch, zero_reg, offset);
1727 sltu(scratch, rs, r2);
1728 beq(scratch, zero_reg, offset);
1732 if (rt.imm32_ == 0) {
1733 // No code needs to be emitted.
1735 } else if (is_int16(rt.imm32_)) {
1736 sltiu(scratch, rs, rt.imm32_);
1737 bne(scratch, zero_reg, offset);
1741 sltu(scratch, rs, r2);
1742 bne(scratch, zero_reg, offset);
1746 if (rt.imm32_ == 0) {
1751 sltu(scratch, r2, rs);
1752 beq(scratch, zero_reg, offset);
1759 // Emit a nop in the branch delay slot if required.
1760 if (bdslot == PROTECT)
1765 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
1766 // We use branch_offset as an argument for the branch instructions to be sure
1767 // it is called just before generating the branch instruction, as needed.
1769 b(shifted_branch_offset(L, false));
1771 // Emit a nop in the branch delay slot if required.
1772 if (bdslot == PROTECT)
1777 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1779 BranchDelaySlot bdslot) {
1780 BRANCH_ARGS_CHECK(cond, rs, rt);
1783 Register r2 = no_reg;
1784 Register scratch = at;
1786 BlockTrampolinePoolScope block_trampoline_pool(this);
1788 // Be careful to always use shifted_branch_offset only just before the
1789 // branch instruction, as the location will be remembered for patching.
1793 offset = shifted_branch_offset(L, false);
1797 offset = shifted_branch_offset(L, false);
1798 beq(rs, r2, offset);
1801 offset = shifted_branch_offset(L, false);
1802 bne(rs, r2, offset);
1804 // Signed comparison.
1806 if (r2.is(zero_reg)) {
1807 offset = shifted_branch_offset(L, false);
1810 slt(scratch, r2, rs);
1811 offset = shifted_branch_offset(L, false);
1812 bne(scratch, zero_reg, offset);
1816 if (r2.is(zero_reg)) {
1817 offset = shifted_branch_offset(L, false);
1820 slt(scratch, rs, r2);
1821 offset = shifted_branch_offset(L, false);
1822 beq(scratch, zero_reg, offset);
1826 if (r2.is(zero_reg)) {
1827 offset = shifted_branch_offset(L, false);
1830 slt(scratch, rs, r2);
1831 offset = shifted_branch_offset(L, false);
1832 bne(scratch, zero_reg, offset);
1836 if (r2.is(zero_reg)) {
1837 offset = shifted_branch_offset(L, false);
1840 slt(scratch, r2, rs);
1841 offset = shifted_branch_offset(L, false);
1842 beq(scratch, zero_reg, offset);
1845 // Unsigned comparison.
1847 if (r2.is(zero_reg)) {
1848 offset = shifted_branch_offset(L, false);
1851 sltu(scratch, r2, rs);
1852 offset = shifted_branch_offset(L, false);
1853 bne(scratch, zero_reg, offset);
1856 case Ugreater_equal:
1857 if (r2.is(zero_reg)) {
1858 offset = shifted_branch_offset(L, false);
1861 sltu(scratch, rs, r2);
1862 offset = shifted_branch_offset(L, false);
1863 beq(scratch, zero_reg, offset);
1867 if (r2.is(zero_reg)) {
1868 // No code needs to be emitted.
1871 sltu(scratch, rs, r2);
1872 offset = shifted_branch_offset(L, false);
1873 bne(scratch, zero_reg, offset);
1877 if (r2.is(zero_reg)) {
1878 offset = shifted_branch_offset(L, false);
1881 sltu(scratch, r2, rs);
1882 offset = shifted_branch_offset(L, false);
1883 beq(scratch, zero_reg, offset);
1890 // Be careful to always use shifted_branch_offset only just before the
1891 // branch instruction, as the location will be remembered for patching.
1893 BlockTrampolinePoolScope block_trampoline_pool(this);
1896 offset = shifted_branch_offset(L, false);
1900 ASSERT(!scratch.is(rs));
1903 offset = shifted_branch_offset(L, false);
1904 beq(rs, r2, offset);
1907 ASSERT(!scratch.is(rs));
1910 offset = shifted_branch_offset(L, false);
1911 bne(rs, r2, offset);
1913 // Signed comparison.
1915 if (rt.imm32_ == 0) {
1916 offset = shifted_branch_offset(L, false);
1919 ASSERT(!scratch.is(rs));
1922 slt(scratch, r2, rs);
1923 offset = shifted_branch_offset(L, false);
1924 bne(scratch, zero_reg, offset);
1928 if (rt.imm32_ == 0) {
1929 offset = shifted_branch_offset(L, false);
1931 } else if (is_int16(rt.imm32_)) {
1932 slti(scratch, rs, rt.imm32_);
1933 offset = shifted_branch_offset(L, false);
1934 beq(scratch, zero_reg, offset);
1936 ASSERT(!scratch.is(rs));
1939 slt(scratch, rs, r2);
1940 offset = shifted_branch_offset(L, false);
1941 beq(scratch, zero_reg, offset);
1945 if (rt.imm32_ == 0) {
1946 offset = shifted_branch_offset(L, false);
1948 } else if (is_int16(rt.imm32_)) {
1949 slti(scratch, rs, rt.imm32_);
1950 offset = shifted_branch_offset(L, false);
1951 bne(scratch, zero_reg, offset);
1953 ASSERT(!scratch.is(rs));
1956 slt(scratch, rs, r2);
1957 offset = shifted_branch_offset(L, false);
1958 bne(scratch, zero_reg, offset);
1962 if (rt.imm32_ == 0) {
1963 offset = shifted_branch_offset(L, false);
1966 ASSERT(!scratch.is(rs));
1969 slt(scratch, r2, rs);
1970 offset = shifted_branch_offset(L, false);
1971 beq(scratch, zero_reg, offset);
1974 // Unsigned comparison.
1976 if (rt.imm32_ == 0) {
1977 offset = shifted_branch_offset(L, false);
1980 ASSERT(!scratch.is(rs));
1983 sltu(scratch, r2, rs);
1984 offset = shifted_branch_offset(L, false);
1985 bne(scratch, zero_reg, offset);
1988 case Ugreater_equal:
1989 if (rt.imm32_ == 0) {
1990 offset = shifted_branch_offset(L, false);
1992 } else if (is_int16(rt.imm32_)) {
1993 sltiu(scratch, rs, rt.imm32_);
1994 offset = shifted_branch_offset(L, false);
1995 beq(scratch, zero_reg, offset);
1997 ASSERT(!scratch.is(rs));
2000 sltu(scratch, rs, r2);
2001 offset = shifted_branch_offset(L, false);
2002 beq(scratch, zero_reg, offset);
2006 if (rt.imm32_ == 0) {
2007 // No code needs to be emitted.
2009 } else if (is_int16(rt.imm32_)) {
2010 sltiu(scratch, rs, rt.imm32_);
2011 offset = shifted_branch_offset(L, false);
2012 bne(scratch, zero_reg, offset);
2014 ASSERT(!scratch.is(rs));
2017 sltu(scratch, rs, r2);
2018 offset = shifted_branch_offset(L, false);
2019 bne(scratch, zero_reg, offset);
2023 if (rt.imm32_ == 0) {
2024 offset = shifted_branch_offset(L, false);
2027 ASSERT(!scratch.is(rs));
2030 sltu(scratch, r2, rs);
2031 offset = shifted_branch_offset(L, false);
2032 beq(scratch, zero_reg, offset);
2039 // Check that the offset actually fits in an int16_t.
2040 ASSERT(is_int16(offset));
2041 // Emit a nop in the branch delay slot if required.
2042 if (bdslot == PROTECT)
2047 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2048 BranchAndLinkShort(offset, bdslot);
2052 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2054 BranchDelaySlot bdslot) {
2055 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2059 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2060 if (L->is_bound()) {
2062 BranchAndLinkShort(L, bdslot);
2067 if (is_trampoline_emitted()) {
2070 BranchAndLinkShort(L, bdslot);
2076 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2078 BranchDelaySlot bdslot) {
2079 if (L->is_bound()) {
2081 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2084 Condition neg_cond = NegateCondition(cond);
2085 BranchShort(&skip, neg_cond, rs, rt);
2090 if (is_trampoline_emitted()) {
2092 Condition neg_cond = NegateCondition(cond);
2093 BranchShort(&skip, neg_cond, rs, rt);
2097 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2103 // We need to use a bgezal or bltzal, but they can't be used directly with the
2104 // slt instructions. We could use sub or add instead but we would miss overflow
2105 // cases, so we keep slt and add an intermediate third instruction.
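// Sketch of the resulting three-instruction pattern, e.g. for 'greater':
//   slt(scratch, r2, rs);          // scratch = (rs > r2) ? 1 : 0.
//   addiu(scratch, scratch, -1);   // scratch = 0 if true, -1 if false.
//   bgezal(scratch, offset);       // Branch-and-link iff scratch >= 0,
//                                  // i.e. iff the comparison held.
// bltzal is used analogously for the negated conditions.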
2106 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2107 BranchDelaySlot bdslot) {
2110 // Emit a nop in the branch delay slot if required.
2111 if (bdslot == PROTECT)
2116 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2117 Register rs, const Operand& rt,
2118 BranchDelaySlot bdslot) {
2119 BRANCH_ARGS_CHECK(cond, rs, rt);
2120 Register r2 = no_reg;
2121 Register scratch = at;
2125 } else if (cond != cc_always) {
2131 BlockTrampolinePoolScope block_trampoline_pool(this);
2147 // Signed comparison.
2149 slt(scratch, r2, rs);
2150 addiu(scratch, scratch, -1);
2151 bgezal(scratch, offset);
2154 slt(scratch, rs, r2);
2155 addiu(scratch, scratch, -1);
2156 bltzal(scratch, offset);
2159 slt(scratch, rs, r2);
2160 addiu(scratch, scratch, -1);
2161 bgezal(scratch, offset);
2164 slt(scratch, r2, rs);
2165 addiu(scratch, scratch, -1);
2166 bltzal(scratch, offset);
2169 // Unsigned comparison.
2171 sltu(scratch, r2, rs);
2172 addiu(scratch, scratch, -1);
2173 bgezal(scratch, offset);
2175 case Ugreater_equal:
2176 sltu(scratch, rs, r2);
2177 addiu(scratch, scratch, -1);
2178 bltzal(scratch, offset);
2181 sltu(scratch, rs, r2);
2182 addiu(scratch, scratch, -1);
2183 bgezal(scratch, offset);
2186 sltu(scratch, r2, rs);
2187 addiu(scratch, scratch, -1);
2188 bltzal(scratch, offset);
2195 // Emit a nop in the branch delay slot if required.
2196 if (bdslot == PROTECT)
2201 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2202 bal(shifted_branch_offset(L, false));
2204 // Emit a nop in the branch delay slot if required.
2205 if (bdslot == PROTECT)
2210 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2212 BranchDelaySlot bdslot) {
2213 BRANCH_ARGS_CHECK(cond, rs, rt);
2216 Register r2 = no_reg;
2217 Register scratch = at;
2220 } else if (cond != cc_always) {
2226 BlockTrampolinePoolScope block_trampoline_pool(this);
2229 offset = shifted_branch_offset(L, false);
2235 offset = shifted_branch_offset(L, false);
2241 offset = shifted_branch_offset(L, false);
2245 // Signed comparison.
2247 slt(scratch, r2, rs);
2248 addiu(scratch, scratch, -1);
2249 offset = shifted_branch_offset(L, false);
2250 bgezal(scratch, offset);
2253 slt(scratch, rs, r2);
2254 addiu(scratch, scratch, -1);
2255 offset = shifted_branch_offset(L, false);
2256 bltzal(scratch, offset);
2259 slt(scratch, rs, r2);
2260 addiu(scratch, scratch, -1);
2261 offset = shifted_branch_offset(L, false);
2262 bgezal(scratch, offset);
2265 slt(scratch, r2, rs);
2266 addiu(scratch, scratch, -1);
2267 offset = shifted_branch_offset(L, false);
2268 bltzal(scratch, offset);
2271 // Unsigned comparison.
2273 sltu(scratch, r2, rs);
2274 addiu(scratch, scratch, -1);
2275 offset = shifted_branch_offset(L, false);
2276 bgezal(scratch, offset);
2278 case Ugreater_equal:
2279 sltu(scratch, rs, r2);
2280 addiu(scratch, scratch, -1);
2281 offset = shifted_branch_offset(L, false);
2282 bltzal(scratch, offset);
2285 sltu(scratch, rs, r2);
2286 addiu(scratch, scratch, -1);
2287 offset = shifted_branch_offset(L, false);
2288 bgezal(scratch, offset);
2291 sltu(scratch, r2, rs);
2292 addiu(scratch, scratch, -1);
2293 offset = shifted_branch_offset(L, false);
2294 bltzal(scratch, offset);
2301 // Check that the offset actually fits in an int16_t.
2302 ASSERT(is_int16(offset));
2304 // Emit a nop in the branch delay slot if required.
2305 if (bdslot == PROTECT)
2310 void MacroAssembler::Jump(Register target,
2314 BranchDelaySlot bd) {
2315 BlockTrampolinePoolScope block_trampoline_pool(this);
2316 if (cond == cc_always) {
2319 BRANCH_ARGS_CHECK(cond, rs, rt);
2320 Branch(2, NegateCondition(cond), rs, rt);
2323 // Emit a nop in the branch delay slot if required.
2329 void MacroAssembler::Jump(intptr_t target,
2330 RelocInfo::Mode rmode,
2334 BranchDelaySlot bd) {
2336 if (cond != cc_always) {
2337 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2339 // The first instruction of 'li' may be placed in the delay slot.
2340 // This is not an issue; t9 is expected to be clobbered anyway.
2341 li(t9, Operand(target, rmode));
2342 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2347 void MacroAssembler::Jump(Address target,
2348 RelocInfo::Mode rmode,
2352 BranchDelaySlot bd) {
2353 ASSERT(!RelocInfo::IsCodeTarget(rmode));
2354 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2358 void MacroAssembler::Jump(Handle<Code> code,
2359 RelocInfo::Mode rmode,
2363 BranchDelaySlot bd) {
2364 ASSERT(RelocInfo::IsCodeTarget(rmode));
2365 AllowDeferredHandleDereference embedding_raw_address;
2366 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2370 int MacroAssembler::CallSize(Register target,
2374 BranchDelaySlot bd) {
2377 if (cond == cc_always) {
2386 return size * kInstrSize;
2390 // Note: To call gcc-compiled C code on mips, you must call through t9.
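// (Sketch of why: position-independent MIPS code conventionally receives the
// callee's own address in t9/$25, which the callee's prologue uses to compute
// its $gp, e.g. lui gp, %hi(_gp_disp); addiu gp, gp, %lo(_gp_disp);
// addu gp, gp, t9.)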
2391 void MacroAssembler::Call(Register target,
2395 BranchDelaySlot bd) {
2396 BlockTrampolinePoolScope block_trampoline_pool(this);
2399 if (cond == cc_always) {
2402 BRANCH_ARGS_CHECK(cond, rs, rt);
2403 Branch(2, NegateCondition(cond), rs, rt);
2406 // Emit a nop in the branch delay slot if required.
2410 ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2411 SizeOfCodeGeneratedSince(&start));
2415 int MacroAssembler::CallSize(Address target,
2416 RelocInfo::Mode rmode,
2420 BranchDelaySlot bd) {
2421 int size = CallSize(t9, cond, rs, rt, bd);
2422 return size + 2 * kInstrSize;
2426 void MacroAssembler::Call(Address target,
2427 RelocInfo::Mode rmode,
2431 BranchDelaySlot bd) {
2432 BlockTrampolinePoolScope block_trampoline_pool(this);
2435 int32_t target_int = reinterpret_cast<int32_t>(target);
2436 // Must record previous source positions before the
2437 // li() generates a new code target.
2438 positions_recorder()->WriteRecordedPositions();
2439 li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
2440 Call(t9, cond, rs, rt, bd);
2441 ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2442 SizeOfCodeGeneratedSince(&start));
2446 int MacroAssembler::CallSize(Handle<Code> code,
2447 RelocInfo::Mode rmode,
2448 TypeFeedbackId ast_id,
2452 BranchDelaySlot bd) {
2453 AllowDeferredHandleDereference using_raw_address;
2454 return CallSize(reinterpret_cast<Address>(code.location()),
2455 rmode, cond, rs, rt, bd);
2459 void MacroAssembler::Call(Handle<Code> code,
2460 RelocInfo::Mode rmode,
2461 TypeFeedbackId ast_id,
2465 BranchDelaySlot bd) {
2466 BlockTrampolinePoolScope block_trampoline_pool(this);
2469 ASSERT(RelocInfo::IsCodeTarget(rmode));
2470 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
2471 SetRecordedAstId(ast_id);
2472 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2474 AllowDeferredHandleDereference embedding_raw_address;
2475 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2476 ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2477 SizeOfCodeGeneratedSince(&start));
2481 void MacroAssembler::Ret(Condition cond,
2484 BranchDelaySlot bd) {
2485 Jump(ra, cond, rs, rt, bd);
2489 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2490 BlockTrampolinePoolScope block_trampoline_pool(this);
2493 imm28 = jump_address(L);
2494 imm28 &= kImm28Mask;
2495 { BlockGrowBufferScope block_buf_growth(this);
2496 // Buffer growth (and relocation) must be blocked for internal references
2497 // until associated instructions are emitted and available to be patched.
2498 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2501 // Emit a nop in the branch delay slot if required.
2502 if (bdslot == PROTECT)
2507 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2508 BlockTrampolinePoolScope block_trampoline_pool(this);
2511 imm32 = jump_address(L);
2512 { BlockGrowBufferScope block_buf_growth(this);
2513 // Buffer growth (and relocation) must be blocked for internal references
2514 // until associated instructions are emitted and available to be patched.
2515 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2516 lui(at, (imm32 & kHiMask) >> kLuiShift);
2517 ori(at, at, (imm32 & kImm16Mask));
2521 // Emit a nop in the branch delay slot if required.
2522 if (bdslot == PROTECT)
2527 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2528 BlockTrampolinePoolScope block_trampoline_pool(this);
2531 imm32 = jump_address(L);
2532 { BlockGrowBufferScope block_buf_growth(this);
2533 // Buffer growth (and relocation) must be blocked for internal references
2534 // until associated instructions are emitted and available to be patched.
2535 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2536 lui(at, (imm32 & kHiMask) >> kLuiShift);
2537 ori(at, at, (imm32 & kImm16Mask));
2541 // Emit a nop in the branch delay slot if required.
2542 if (bdslot == PROTECT)
2547 void MacroAssembler::DropAndRet(int drop) {
2548 Ret(USE_DELAY_SLOT);
2549 addiu(sp, sp, drop * kPointerSize);
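  // Resulting sequence (sketch): 'jr ra' followed by the addiu in its delay
  // slot, so the stack adjustment and the return take only two instructions.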
2552 void MacroAssembler::DropAndRet(int drop,
2555 const Operand& r2) {
2556 // Both Drop and Ret need to be conditional.
2558 if (cond != cc_always) {
2559 Branch(&skip, NegateCondition(cond), r1, r2);
2565 if (cond != cc_always) {
2571 void MacroAssembler::Drop(int count,
2574 const Operand& op) {
2582 Branch(&skip, NegateCondition(cond), reg, op);
2585 addiu(sp, sp, count * kPointerSize);
2594 void MacroAssembler::Swap(Register reg1,
2597 if (scratch.is(no_reg)) {
2598 Xor(reg1, reg1, Operand(reg2));
2599 Xor(reg2, reg2, Operand(reg1));
2600 Xor(reg1, reg1, Operand(reg2));
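    // Worked example (sketch): with reg1 == A and reg2 == B initially:
    //   reg1 = A ^ B;
    //   reg2 = (A ^ B) ^ B;  // == A
    //   reg1 = (A ^ B) ^ A;  // == B
    // No scratch register is needed, at the cost of three dependent Xors.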
2609 void MacroAssembler::Call(Label* target) {
2610 BranchAndLink(target);
2614 void MacroAssembler::Push(Handle<Object> handle) {
2615 li(at, Operand(handle));
2620 #ifdef ENABLE_DEBUGGER_SUPPORT
2622 void MacroAssembler::DebugBreak() {
2623 PrepareCEntryArgs(0);
2624 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
2626 ASSERT(AllowThisStubCall(&ces));
2627 Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
2630 #endif // ENABLE_DEBUGGER_SUPPORT
2633 // ---------------------------------------------------------------------------
2634 // Exception handling.
2636 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2637 int handler_index) {
2638 // Adjust this code if not the case.
2639 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2640 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2641 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2642 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2643 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2644 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2646 // For the JSEntry handler, we must preserve a0-a3 and s0.
2647 // t1-t3 are available. We will build up the handler from the bottom by
2648 // pushing on the stack.
2649 // Set up the code object (t1) and the state (t2) for pushing.
2651 StackHandler::IndexField::encode(handler_index) |
2652 StackHandler::KindField::encode(kind);
2653 li(t1, Operand(CodeObject()), CONSTANT_SIZE);
2654 li(t2, Operand(state));
2656 // Push the frame pointer, context, state, and code object.
2657 if (kind == StackHandler::JS_ENTRY) {
2658 ASSERT_EQ(Smi::FromInt(0), 0);
2659 // The second zero_reg indicates no context.
2660 // The first zero_reg is the NULL frame pointer.
2661 // The operands are reversed to match the order of MultiPush/Pop.
2662 Push(zero_reg, zero_reg, t2, t1);
2664 MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
2667 // Link the current handler as the next handler.
2668 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2669 lw(t1, MemOperand(t2));
2671 // Set this new handler as the current one.
2672 sw(sp, MemOperand(t2));
2676 void MacroAssembler::PopTryHandler() {
2677 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2678 pop(a1);
2679 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2680 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2681 sw(a1, MemOperand(at));
2685 void MacroAssembler::JumpToHandlerEntry() {
2686 // Compute the handler entry address and jump to it. The handler table is
2687 // a fixed array of (smi-tagged) code offsets.
2688 // v0 = exception, a1 = code object, a2 = state.
2689 lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset)); // Handler table.
2690 Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2691 srl(a2, a2, StackHandler::kKindWidth); // Handler index.
2692 sll(a2, a2, kPointerSizeLog2);
2694 lw(a2, MemOperand(a2)); // Smi-tagged offset.
2695 Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
2696 sra(t9, a2, kSmiTagSize);
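// t9 now holds the untagged byte offset of the handler within the code
// object; added to the code start in a1 it yields the handler entry
// address that control is transferred to.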
2702 void MacroAssembler::Throw(Register value) {
2703 // Adjust this code if not the case.
2704 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2705 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2706 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2707 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2708 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2709 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2711 // The exception is expected in v0.
2714 // Drop the stack pointer to the top of the top handler.
2715 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
2717 lw(sp, MemOperand(a3));
2719 // Restore the next handler.
2721 sw(a2, MemOperand(a3));
2723 // Get the code object (a1) and state (a2). Restore the context and frame pointer.
2725 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2727 // If the handler is a JS frame, restore the context to the frame.
2728 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp or cp.
2731 Branch(&done, eq, cp, Operand(zero_reg));
2732 sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2735 JumpToHandlerEntry();
2739 void MacroAssembler::ThrowUncatchable(Register value) {
2740 // Adjust this code if not the case.
2741 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2742 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2743 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2744 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2745 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2746 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2748 // The exception is expected in v0.
2749 if (!value.is(v0)) {
2752 // Drop the stack pointer to the top of the top stack handler.
2753 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2754 lw(sp, MemOperand(a3));
2756 // Unwind the handlers until the ENTRY handler is found.
2757 Label fetch_next, check_kind;
2760 lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
2763 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2764 lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
2765 And(a2, a2, Operand(StackHandler::KindField::kMask));
2766 Branch(&fetch_next, ne, a2, Operand(zero_reg));
2768 // Set the top handler address to next handler past the top ENTRY handler.
2770 sw(a2, MemOperand(a3));
2772 // Get the code object (a1) and state (a2). Clear the context and frame
2773 // pointer (0 was saved in the handler).
2774 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2776 JumpToHandlerEntry();
2780 void MacroAssembler::Allocate(int object_size,
2785 AllocationFlags flags) {
2786 ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
2787 if (!FLAG_inline_new) {
2788 if (emit_debug_code()) {
2789 // Trash the registers to simulate an allocation failure.
2791 li(scratch1, 0x7191);
2792 li(scratch2, 0x7291);
2798 ASSERT(!result.is(scratch1));
2799 ASSERT(!result.is(scratch2));
2800 ASSERT(!scratch1.is(scratch2));
2801 ASSERT(!scratch1.is(t9));
2802 ASSERT(!scratch2.is(t9));
2803 ASSERT(!result.is(t9));
2805 // Make object size into bytes.
2806 if ((flags & SIZE_IN_WORDS) != 0) {
2807 object_size *= kPointerSize;
2809 ASSERT_EQ(0, object_size & kObjectAlignmentMask);
2811 // Check relative positions of allocation top and limit addresses.
2812 // ARM adds additional checks to make sure the ldm instruction can be
2813 // used. On MIPS there is no ldm, so no such extra checks are needed.
2814 ExternalReference allocation_top =
2815 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2816 ExternalReference allocation_limit =
2817 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2820 reinterpret_cast<intptr_t>(allocation_top.address());
2822 reinterpret_cast<intptr_t>(allocation_limit.address());
2823 ASSERT((limit - top) == kPointerSize);
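// Top and limit live in adjacent words, so both can be loaded from the
// single base register topaddr with a kPointerSize displacement below.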
2825 // Set up allocation top address and object size registers.
2826 Register topaddr = scratch1;
2827 li(topaddr, Operand(allocation_top));
2829 // This code stores a temporary value in t9.
2830 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2831 // Load allocation top into result and allocation limit into t9.
2832 lw(result, MemOperand(topaddr));
2833 lw(t9, MemOperand(topaddr, kPointerSize));
2835 if (emit_debug_code()) {
2836 // Assert that result actually contains top on entry. t9 is used
2837 // immediately below, so this use of t9 does not cause a difference in
2838 // register content between debug and release mode.
2839 lw(t9, MemOperand(topaddr));
2840 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2842 // Load allocation limit into t9. Result already contains allocation top.
2843 lw(t9, MemOperand(topaddr, limit - top));
2846 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2847 // Align the next allocation. Storing the filler map without checking top is
2848 // safe in new-space because the limit of the heap is aligned there.
2849 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
2850 ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2851 And(scratch2, result, Operand(kDoubleAlignmentMask));
2853 Branch(&aligned, eq, scratch2, Operand(zero_reg));
2854 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
2855 Branch(gc_required, Ugreater_equal, result, Operand(t9));
2857 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
2858 sw(scratch2, MemOperand(result));
2859 Addu(result, result, Operand(kDoubleSize / 2));
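// kDoubleSize / 2 == kPointerSize here, so the one-word filler map stored
// above plus this one-word bump leaves result double-aligned.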
2863 // Calculate new top and bail out if new space is exhausted. Use result
2864 // to calculate the new top.
2865 Addu(scratch2, result, Operand(object_size));
2866 Branch(gc_required, Ugreater, scratch2, Operand(t9));
2867 sw(scratch2, MemOperand(topaddr));
2869 // Tag object if requested.
2870 if ((flags & TAG_OBJECT) != 0) {
2871 Addu(result, result, Operand(kHeapObjectTag));
2876 void MacroAssembler::Allocate(Register object_size,
2881 AllocationFlags flags) {
2882 if (!FLAG_inline_new) {
2883 if (emit_debug_code()) {
2884 // Trash the registers to simulate an allocation failure.
2886 li(scratch1, 0x7191);
2887 li(scratch2, 0x7291);
2893 ASSERT(!result.is(scratch1));
2894 ASSERT(!result.is(scratch2));
2895 ASSERT(!scratch1.is(scratch2));
2896 ASSERT(!object_size.is(t9));
2897 ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
2899 // Check relative positions of allocation top and limit addresses.
2900 // ARM adds additional checks to make sure the ldm instruction can be
2901 // used. On MIPS there is no ldm, so no such extra checks are needed.
2902 ExternalReference allocation_top =
2903 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2904 ExternalReference allocation_limit =
2905 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2907 reinterpret_cast<intptr_t>(allocation_top.address());
2909 reinterpret_cast<intptr_t>(allocation_limit.address());
2910 ASSERT((limit - top) == kPointerSize);
2912 // Set up allocation top address and object size registers.
2913 Register topaddr = scratch1;
2914 li(topaddr, Operand(allocation_top));
2916 // This code stores a temporary value in t9.
2917 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2918 // Load allocation top into result and allocation limit into t9.
2919 lw(result, MemOperand(topaddr));
2920 lw(t9, MemOperand(topaddr, kPointerSize));
2922 if (emit_debug_code()) {
2923 // Assert that result actually contains top on entry. t9 is used
2924 // immediately below, so this use of t9 does not cause a difference in
2925 // register content between debug and release mode.
2926 lw(t9, MemOperand(topaddr));
2927 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2929 // Load allocation limit into t9. Result already contains allocation top.
2930 lw(t9, MemOperand(topaddr, limit - top));
2933 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2934 // Align the next allocation. Storing the filler map without checking top is
2935 // safe in new-space because the limit of the heap is aligned there.
2936 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
2937 ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2938 And(scratch2, result, Operand(kDoubleAlignmentMask));
2940 Branch(&aligned, eq, scratch2, Operand(zero_reg));
2941 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
2942 Branch(gc_required, Ugreater_equal, result, Operand(t9));
2944 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
2945 sw(scratch2, MemOperand(result));
2946 Addu(result, result, Operand(kDoubleSize / 2));
2950 // Calculate new top and bail out if new space is exhausted. Use result
2951 // to calculate the new top. Object size may be in words so a shift is
2952 // required to get the number of bytes.
2953 if ((flags & SIZE_IN_WORDS) != 0) {
2954 sll(scratch2, object_size, kPointerSizeLog2);
2955 Addu(scratch2, result, scratch2);
2957 Addu(scratch2, result, Operand(object_size));
2959 Branch(gc_required, Ugreater, scratch2, Operand(t9));
2961 // Update allocation top. result temporarily holds the new top.
2962 if (emit_debug_code()) {
2963 And(t9, scratch2, Operand(kObjectAlignmentMask));
2964 Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
2966 sw(scratch2, MemOperand(topaddr));
2968 // Tag object if requested.
2969 if ((flags & TAG_OBJECT) != 0) {
2970 Addu(result, result, Operand(kHeapObjectTag));
2975 void MacroAssembler::UndoAllocationInNewSpace(Register object,
2977 ExternalReference new_space_allocation_top =
2978 ExternalReference::new_space_allocation_top_address(isolate());
2980 // Make sure the object has no tag before resetting top.
2981 And(object, object, Operand(~kHeapObjectTagMask));
2983 // Check that the object un-allocated is below the current top.
2984 li(scratch, Operand(new_space_allocation_top));
2985 lw(scratch, MemOperand(scratch));
2986 Check(less, kUndoAllocationOfNonAllocatedMemory,
2987 object, Operand(scratch));
2989 // Write the address of the object to un-allocate as the current top.
2990 li(scratch, Operand(new_space_allocation_top));
2991 sw(object, MemOperand(scratch));
2995 void MacroAssembler::AllocateTwoByteString(Register result,
3000 Label* gc_required) {
3001 // Calculate the number of bytes needed for the characters in the string while
3002 // observing object alignment.
3003 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3004 sll(scratch1, length, 1); // Length in bytes, not chars.
3005 addiu(scratch1, scratch1,
3006 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3007 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
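// The three instructions above compute
//   scratch1 = (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
//              & ~kObjectAlignmentMask;
// i.e. the character data plus header, rounded up to object alignment.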
3009 // Allocate two-byte string in new space.
3017 // Set the map, length and hash field.
3018 InitializeNewString(result,
3020 Heap::kStringMapRootIndex,
3026 void MacroAssembler::AllocateAsciiString(Register result,
3031 Label* gc_required) {
3032 // Calculate the number of bytes needed for the characters in the string
3033 // while observing object alignment.
3034 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3035 ASSERT(kCharSize == 1);
3036 addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3037 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3039 // Allocate ASCII string in new space.
3047 // Set the map, length and hash field.
3048 InitializeNewString(result,
3050 Heap::kAsciiStringMapRootIndex,
3056 void MacroAssembler::AllocateTwoByteConsString(Register result,
3060 Label* gc_required) {
3061 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3063 InitializeNewString(result,
3065 Heap::kConsStringMapRootIndex,
3071 void MacroAssembler::AllocateAsciiConsString(Register result,
3075 Label* gc_required) {
3076 Label allocate_new_space, install_map;
3077 AllocationFlags flags = TAG_OBJECT;
3079 ExternalReference high_promotion_mode = ExternalReference::
3080 new_space_high_promotion_mode_active_address(isolate());
3081 li(scratch1, Operand(high_promotion_mode));
3082 lw(scratch1, MemOperand(scratch1, 0));
3083 Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg));
3085 Allocate(ConsString::kSize,
3090 static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
3094 bind(&allocate_new_space);
3095 Allocate(ConsString::kSize,
3104 InitializeNewString(result,
3106 Heap::kConsAsciiStringMapRootIndex,
3112 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3116 Label* gc_required) {
3117 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3120 InitializeNewString(result,
3122 Heap::kSlicedStringMapRootIndex,
3128 void MacroAssembler::AllocateAsciiSlicedString(Register result,
3132 Label* gc_required) {
3133 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3136 InitializeNewString(result,
3138 Heap::kSlicedAsciiStringMapRootIndex,
3144 void MacroAssembler::JumpIfNotUniqueName(Register reg,
3145 Label* not_unique_name) {
3146 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3148 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3149 Branch(&succeed, eq, at, Operand(zero_reg));
3150 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
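// A unique name is either an internalized string (both the string and
// internalized tag bits are clear, handled by the first branch) or a
// symbol (instance type SYMBOL_TYPE); anything else goes to not_unique_name.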
3156 // Allocates a heap number or jumps to the label if the young space is full and
3157 // a scavenge is needed.
3158 void MacroAssembler::AllocateHeapNumber(Register result,
3161 Register heap_number_map,
3163 TaggingMode tagging_mode) {
3164 // Allocate an object in the heap for the heap number and tag it as a heap number.
3166 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3167 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3169 // Store heap number map in the allocated object.
3170 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3171 if (tagging_mode == TAG_RESULT) {
3172 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3174 sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3179 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3183 Label* gc_required) {
3184 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3185 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3186 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3190 // Copies a fixed number of fields of heap objects from src to dst.
3191 void MacroAssembler::CopyFields(Register dst,
3195 ASSERT((temps & dst.bit()) == 0);
3196 ASSERT((temps & src.bit()) == 0);
3197 // Primitive implementation using only one temporary register.
3199 Register tmp = no_reg;
3200 // Find a temp register in temps list.
3201 for (int i = 0; i < kNumRegisters; i++) {
3202 if ((temps & (1 << i)) != 0) {
3207 ASSERT(!tmp.is(no_reg));
3209 for (int i = 0; i < field_count; i++) {
3210 lw(tmp, FieldMemOperand(src, i * kPointerSize));
3211 sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3216 void MacroAssembler::CopyBytes(Register src,
3220 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3222 // Align src before copying in word size chunks.
3223 Branch(&byte_loop, le, length, Operand(kPointerSize));
3224 bind(&align_loop_1);
3225 And(scratch, src, kPointerSize - 1);
3226 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3227 lbu(scratch, MemOperand(src));
3229 sb(scratch, MemOperand(dst));
3231 Subu(length, length, Operand(1));
3232 Branch(&align_loop_1, ne, length, Operand(zero_reg));
3234 // Copy bytes in word size chunks.
3236 if (emit_debug_code()) {
3237 And(scratch, src, kPointerSize - 1);
3238 Assert(eq, kExpectingAlignmentForCopyBytes,
3239 scratch, Operand(zero_reg));
3241 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3242 lw(scratch, MemOperand(src));
3243 Addu(src, src, kPointerSize);
3245 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3246 // Can't use unaligned access - copy byte by byte.
3247 sb(scratch, MemOperand(dst, 0));
3248 srl(scratch, scratch, 8);
3249 sb(scratch, MemOperand(dst, 1));
3250 srl(scratch, scratch, 8);
3251 sb(scratch, MemOperand(dst, 2));
3252 srl(scratch, scratch, 8);
3253 sb(scratch, MemOperand(dst, 3));
3256 Subu(length, length, Operand(kPointerSize));
3259 // Copy the last bytes if any left.
3261 Branch(&done, eq, length, Operand(zero_reg));
3263 lbu(scratch, MemOperand(src));
3265 sb(scratch, MemOperand(dst));
3267 Subu(length, length, Operand(1));
3268 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3273 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3274 Register end_offset,
3279 sw(filler, MemOperand(start_offset));
3280 Addu(start_offset, start_offset, kPointerSize);
3282 Branch(&loop, lt, start_offset, Operand(end_offset));
3286 void MacroAssembler::CheckFastElements(Register map,
3289 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3290 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3291 STATIC_ASSERT(FAST_ELEMENTS == 2);
3292 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3293 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3294 Branch(fail, hi, scratch,
3295 Operand(Map::kMaximumBitField2FastHoleyElementValue));
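// The fast element kinds are 0..3 (asserted above) and stored in the low
// bits of bit field 2, so a single unsigned comparison against the largest
// fast holey-element value rejects every non-fast kind.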
3299 void MacroAssembler::CheckFastObjectElements(Register map,
3302 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3303 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3304 STATIC_ASSERT(FAST_ELEMENTS == 2);
3305 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3306 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3307 Branch(fail, ls, scratch,
3308 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3309 Branch(fail, hi, scratch,
3310 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3314 void MacroAssembler::CheckFastSmiElements(Register map,
3317 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3318 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3319 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3320 Branch(fail, hi, scratch,
3321 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3325 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3327 Register elements_reg,
3332 int elements_offset) {
3333 Label smi_value, maybe_nan, have_double_value, is_nan, done;
3334 Register mantissa_reg = scratch2;
3335 Register exponent_reg = scratch3;
3337 // Handle smi values specially.
3338 JumpIfSmi(value_reg, &smi_value);
3340 // Ensure that the object is a heap number
3343 Heap::kHeapNumberMapRootIndex,
3347 // Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000 in the exponent.
3349 li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3350 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3351 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3353 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3355 bind(&have_double_value);
3356 sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3357 Addu(scratch1, scratch1, elements_reg);
3358 sw(mantissa_reg, FieldMemOperand(
3359 scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
3360 uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
3361 sizeof(kHoleNanLower32);
3362 sw(exponent_reg, FieldMemOperand(scratch1, offset));
3366 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3367 // it's an Infinity, and the non-NaN code path applies.
3368 Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3369 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3370 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3372 // Load canonical NaN for storing into the double array.
3373 uint64_t nan_int64 = BitCast<uint64_t>(
3374 FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3375 li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
3376 li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
3377 jmp(&have_double_value);
3380 Addu(scratch1, elements_reg,
3381 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3383 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3384 Addu(scratch1, scratch1, scratch2);
3385 // scratch1 is now effective address of the double element
3387 Register untagged_value = elements_reg;
3388 SmiUntag(untagged_value, value_reg);
3389 mtc1(untagged_value, f2);
3391 sdc1(f0, MemOperand(scratch1, 0));
3396 void MacroAssembler::CompareMapAndBranch(Register obj,
3399 Label* early_success,
3402 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3403 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3407 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3409 Label* early_success,
3412 Branch(branch_to, cond, obj_map, Operand(map));
3416 void MacroAssembler::CheckMap(Register obj,
3420 SmiCheckType smi_check_type) {
3421 if (smi_check_type == DO_SMI_CHECK) {
3422 JumpIfSmi(obj, fail);
3425 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3430 void MacroAssembler::DispatchMap(Register obj,
3433 Handle<Code> success,
3434 SmiCheckType smi_check_type) {
3436 if (smi_check_type == DO_SMI_CHECK) {
3437 JumpIfSmi(obj, &fail);
3439 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3440 Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3445 void MacroAssembler::CheckMap(Register obj,
3447 Heap::RootListIndex index,
3449 SmiCheckType smi_check_type) {
3450 if (smi_check_type == DO_SMI_CHECK) {
3451 JumpIfSmi(obj, fail);
3453 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3454 LoadRoot(at, index);
3455 Branch(fail, ne, scratch, Operand(at));
3459 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
3460 if (IsMipsSoftFloatABI) {
3463 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3468 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
3469 if (!IsMipsSoftFloatABI) {
3477 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
3478 DoubleRegister dreg2) {
3479 if (!IsMipsSoftFloatABI) {
3480 if (dreg2.is(f12)) {
3481 ASSERT(!dreg1.is(f14));
3489 Move(a0, a1, dreg1);
3490 Move(a2, a3, dreg2);
3495 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
3497 if (!IsMipsSoftFloatABI) {
3507 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
3508 // This macro takes the dst register to make the code more readable
3509 // at the call sites. However, the dst register has to be t1 to
3510 // follow the calling convention, which requires the call type to be placed in t1.
3513 if (call_kind == CALL_AS_FUNCTION) {
3514 li(dst, Operand(Smi::FromInt(1)));
3516 li(dst, Operand(Smi::FromInt(0)));
3521 // -----------------------------------------------------------------------------
3522 // JavaScript invokes.
3524 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3525 const ParameterCount& actual,
3526 Handle<Code> code_constant,
3529 bool* definitely_mismatches,
3531 const CallWrapper& call_wrapper,
3532 CallKind call_kind) {
3533 bool definitely_matches = false;
3534 *definitely_mismatches = false;
3535 Label regular_invoke;
3537 // Check whether the expected and actual arguments count match. If not,
3538 // setup registers according to contract with ArgumentsAdaptorTrampoline:
3539 // a0: actual arguments count
3540 // a1: function (passed through to callee)
3541 // a2: expected arguments count
3542 // a3: callee code entry
3544 // The code below is made a lot easier because the calling code already sets
3545 // up actual and expected registers according to the contract if values are
3546 // passed in registers.
3547 ASSERT(actual.is_immediate() || actual.reg().is(a0));
3548 ASSERT(expected.is_immediate() || expected.reg().is(a2));
3549 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3551 if (expected.is_immediate()) {
3552 ASSERT(actual.is_immediate());
3553 if (expected.immediate() == actual.immediate()) {
3554 definitely_matches = true;
3556 li(a0, Operand(actual.immediate()));
3557 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3558 if (expected.immediate() == sentinel) {
3559 // Don't worry about adapting arguments for builtins that
3560 // don't want that done. Skip the adaptation code by making it look
3561 // like we have a match between the expected and actual number of arguments.
3563 definitely_matches = true;
3565 *definitely_mismatches = true;
3566 li(a2, Operand(expected.immediate()));
3569 } else if (actual.is_immediate()) {
3570 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3571 li(a0, Operand(actual.immediate()));
3573 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
3576 if (!definitely_matches) {
3577 if (!code_constant.is_null()) {
3578 li(a3, Operand(code_constant));
3579 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3582 Handle<Code> adaptor =
3583 isolate()->builtins()->ArgumentsAdaptorTrampoline();
3584 if (flag == CALL_FUNCTION) {
3585 call_wrapper.BeforeCall(CallSize(adaptor));
3586 SetCallKind(t1, call_kind);
3588 call_wrapper.AfterCall();
3589 if (!*definitely_mismatches) {
3593 SetCallKind(t1, call_kind);
3594 Jump(adaptor, RelocInfo::CODE_TARGET);
3596 bind(&regular_invoke);
3601 void MacroAssembler::InvokeCode(Register code,
3602 const ParameterCount& expected,
3603 const ParameterCount& actual,
3605 const CallWrapper& call_wrapper,
3606 CallKind call_kind) {
3607 // You can't call a function without a valid frame.
3608 ASSERT(flag == JUMP_FUNCTION || has_frame());
3612 bool definitely_mismatches = false;
3613 InvokePrologue(expected, actual, Handle<Code>::null(), code,
3614 &done, &definitely_mismatches, flag,
3615 call_wrapper, call_kind);
3616 if (!definitely_mismatches) {
3617 if (flag == CALL_FUNCTION) {
3618 call_wrapper.BeforeCall(CallSize(code));
3619 SetCallKind(t1, call_kind);
3621 call_wrapper.AfterCall();
3623 ASSERT(flag == JUMP_FUNCTION);
3624 SetCallKind(t1, call_kind);
3627 // Continue here if InvokePrologue does handle the invocation due to
3628 // mismatched parameter counts.
3634 void MacroAssembler::InvokeCode(Handle<Code> code,
3635 const ParameterCount& expected,
3636 const ParameterCount& actual,
3637 RelocInfo::Mode rmode,
3639 CallKind call_kind) {
3640 // You can't call a function without a valid frame.
3641 ASSERT(flag == JUMP_FUNCTION || has_frame());
3645 bool definitely_mismatches = false;
3646 InvokePrologue(expected, actual, code, no_reg,
3647 &done, &definitely_mismatches, flag,
3648 NullCallWrapper(), call_kind);
3649 if (!definitely_mismatches) {
3650 if (flag == CALL_FUNCTION) {
3651 SetCallKind(t1, call_kind);
3654 SetCallKind(t1, call_kind);
3657 // Continue here if InvokePrologue does handle the invocation due to
3658 // mismatched parameter counts.
3664 void MacroAssembler::InvokeFunction(Register function,
3665 const ParameterCount& actual,
3667 const CallWrapper& call_wrapper,
3668 CallKind call_kind) {
3669 // You can't call a function without a valid frame.
3670 ASSERT(flag == JUMP_FUNCTION || has_frame());
3672 // Contract with called JS functions requires that function is passed in a1.
3673 ASSERT(function.is(a1));
3674 Register expected_reg = a2;
3675 Register code_reg = a3;
3677 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3678 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3680 FieldMemOperand(code_reg,
3681 SharedFunctionInfo::kFormalParameterCountOffset));
3682 sra(expected_reg, expected_reg, kSmiTagSize);
3683 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3685 ParameterCount expected(expected_reg);
3686 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
3690 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3691 const ParameterCount& expected,
3692 const ParameterCount& actual,
3694 const CallWrapper& call_wrapper,
3695 CallKind call_kind) {
3696 // You can't call a function without a valid frame.
3697 ASSERT(flag == JUMP_FUNCTION || has_frame());
3699 // Get the function and setup the context.
3700 LoadHeapObject(a1, function);
3701 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3703 // We call indirectly through the code field in the function to
3704 // allow recompilation to take effect without changing any of the call sites.
3706 lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3707 InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
3711 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3715 lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3716 IsInstanceJSObjectType(map, scratch, fail);
3720 void MacroAssembler::IsInstanceJSObjectType(Register map,
3723 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
3724 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3725 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
3729 void MacroAssembler::IsObjectJSStringType(Register object,
3732 ASSERT(kNotStringTag != 0);
3734 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3735 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3736 And(scratch, scratch, Operand(kIsNotStringMask));
3737 Branch(fail, ne, scratch, Operand(zero_reg));
3741 void MacroAssembler::IsObjectNameType(Register object,
3744 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3745 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3746 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
3750 // ---------------------------------------------------------------------------
3751 // Support functions.
3754 void MacroAssembler::TryGetFunctionPrototype(Register function,
3758 bool miss_on_bound_function) {
3759 // Check that the receiver isn't a smi.
3760 JumpIfSmi(function, miss);
3762 // Check that the function really is a function. Load map into result reg.
3763 GetObjectType(function, result, scratch);
3764 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3766 if (miss_on_bound_function) {
3768 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3770 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3771 And(scratch, scratch,
3772 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
3773 Branch(miss, ne, scratch, Operand(zero_reg));
3776 // Make sure that the function has an instance prototype.
3778 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3779 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3780 Branch(&non_instance, ne, scratch, Operand(zero_reg));
3782 // Get the prototype or initial map from the function.
3784 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3786 // If the prototype or initial map is the hole, don't return it and
3787 // simply miss the cache instead. This will allow us to allocate a
3788 // prototype object on-demand in the runtime system.
3789 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3790 Branch(miss, eq, result, Operand(t8));
3792 // If the function does not have an initial map, we're done.
3794 GetObjectType(result, scratch, scratch);
3795 Branch(&done, ne, scratch, Operand(MAP_TYPE));
3797 // Get the prototype from the initial map.
3798 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3801 // Non-instance prototype: Fetch prototype from the constructor field in the initial map.
3803 bind(&non_instance);
3804 lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3811 void MacroAssembler::GetObjectType(Register object,
3813 Register type_reg) {
3814 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3815 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3819 // -----------------------------------------------------------------------------
3822 void MacroAssembler::CallStub(CodeStub* stub,
3823 TypeFeedbackId ast_id,
3827 BranchDelaySlot bd) {
3828 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
3829 Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id,
3834 void MacroAssembler::TailCallStub(CodeStub* stub) {
3835 ASSERT(allow_stub_calls_ ||
3836 stub->CompilingCallsToThisStubIsGCSafe(isolate()));
3837 Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
3841 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3842 return ref0.address() - ref1.address();
3846 void MacroAssembler::CallApiFunctionAndReturn(
3847 ExternalReference function,
3848 Address function_address,
3849 ExternalReference thunk_ref,
3850 Register thunk_last_arg,
3852 MemOperand return_value_operand,
3853 MemOperand* context_restore_operand) {
3854 ExternalReference next_address =
3855 ExternalReference::handle_scope_next_address(isolate());
3856 const int kNextOffset = 0;
3857 const int kLimitOffset = AddressOffset(
3858 ExternalReference::handle_scope_limit_address(isolate()),
3860 const int kLevelOffset = AddressOffset(
3861 ExternalReference::handle_scope_level_address(isolate()),
3864 // Allocate HandleScope in callee-save registers.
3865 li(s3, Operand(next_address));
3866 lw(s0, MemOperand(s3, kNextOffset));
3867 lw(s1, MemOperand(s3, kLimitOffset));
3868 lw(s2, MemOperand(s3, kLevelOffset));
3869 Addu(s2, s2, Operand(1));
3870 sw(s2, MemOperand(s3, kLevelOffset));
3872 if (FLAG_log_timer_events) {
3873 FrameScope frame(this, StackFrame::MANUAL);
3874 PushSafepointRegisters();
3875 PrepareCallCFunction(1, a0);
3876 li(a0, Operand(ExternalReference::isolate_address(isolate())));
3877 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
3878 PopSafepointRegisters();
3881 Label profiler_disabled;
3882 Label end_profiler_check;
3883 bool* is_profiling_flag =
3884 isolate()->cpu_profiler()->is_profiling_address();
3885 STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
3886 li(t9, reinterpret_cast<int32_t>(is_profiling_flag));
3887 lb(t9, MemOperand(t9, 0));
3888 beq(t9, zero_reg, &profiler_disabled);
3890 // Third parameter is the address of the actual getter function.
3891 li(thunk_last_arg, reinterpret_cast<int32_t>(function_address));
3892 li(t9, Operand(thunk_ref));
3893 jmp(&end_profiler_check);
3895 bind(&profiler_disabled);
3896 li(t9, Operand(function));
3898 bind(&end_profiler_check);
3900 // Native call returns to the DirectCEntry stub which redirects to the
3901 // return address pushed on stack (could have moved after GC).
3902 // DirectCEntry stub itself is generated early and never moves.
3903 DirectCEntryStub stub;
3904 stub.GenerateCall(this, t9);
3906 if (FLAG_log_timer_events) {
3907 FrameScope frame(this, StackFrame::MANUAL);
3908 PushSafepointRegisters();
3909 PrepareCallCFunction(1, a0);
3910 li(a0, Operand(ExternalReference::isolate_address(isolate())));
3911 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
3912 PopSafepointRegisters();
3915 Label promote_scheduled_exception;
3916 Label exception_handled;
3917 Label delete_allocated_handles;
3918 Label leave_exit_frame;
3919 Label return_value_loaded;
3921 // Load value from ReturnValue.
3922 lw(v0, return_value_operand);
3923 bind(&return_value_loaded);
3925 // No more valid handles (the result handle was the last one). Restore
3926 // previous handle scope.
3927 sw(s0, MemOperand(s3, kNextOffset));
3928 if (emit_debug_code()) {
3929 lw(a1, MemOperand(s3, kLevelOffset));
3930 Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
3932 Subu(s2, s2, Operand(1));
3933 sw(s2, MemOperand(s3, kLevelOffset));
3934 lw(at, MemOperand(s3, kLimitOffset));
3935 Branch(&delete_allocated_handles, ne, s1, Operand(at));
3937 // Check if the function scheduled an exception.
3938 bind(&leave_exit_frame);
3939 LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3940 li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3941 lw(t1, MemOperand(at));
3942 Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3943 bind(&exception_handled);
3945 bool restore_context = context_restore_operand != NULL;
3946 if (restore_context) {
3947 lw(cp, *context_restore_operand);
3949 li(s0, Operand(stack_space));
3950 LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
3952 bind(&promote_scheduled_exception);
3954 FrameScope frame(this, StackFrame::INTERNAL);
3955 CallExternalReference(
3956 ExternalReference(Runtime::kPromoteScheduledException, isolate()),
3959 jmp(&exception_handled);
3961 // HandleScope limit has changed. Delete allocated extensions.
3962 bind(&delete_allocated_handles);
3963 sw(s1, MemOperand(s3, kLimitOffset));
3966 PrepareCallCFunction(1, s1);
3967 li(a0, Operand(ExternalReference::isolate_address(isolate())));
3968 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
3971 jmp(&leave_exit_frame);
3975 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3976 if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
3977 return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
3981 void MacroAssembler::IllegalOperation(int num_arguments) {
3982 if (num_arguments > 0) {
3983 addiu(sp, sp, num_arguments * kPointerSize);
3985 LoadRoot(v0, Heap::kUndefinedValueRootIndex);
3989 void MacroAssembler::IndexFromHash(Register hash,
3991 // If the hash field contains an array index pick it out. The assert checks
3992 // that the constants for the maximum number of digits for an array index
3993 // cached in the hash field and the number of bits reserved for it do not conflict.
3995 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
3996 (1 << String::kArrayIndexValueBits));
3997 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
3998 // the low kHashShift bits.
3999 STATIC_ASSERT(kSmiTag == 0);
4000 Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
4001 sll(index, hash, kSmiTagSize);
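// Ext() extracts the array-index bits from the hash; the left shift by
// kSmiTagSize then produces the smi-tagged index expected by callers.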
4005 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4009 Register heap_number_map,
4011 ObjectToDoubleFlags flags) {
4013 if ((flags & OBJECT_NOT_SMI) == 0) {
4015 JumpIfNotSmi(object, ¬_smi);
4016 // Remove smi tag and convert to double.
4017 sra(scratch1, object, kSmiTagSize);
4018 mtc1(scratch1, result);
4019 cvt_d_w(result, result);
4023 // Check for heap number and load double value from it.
4024 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4025 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4027 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4028 // If exponent is all ones the number is either a NaN or +/-Infinity.
4029 Register exponent = scratch1;
4030 Register mask_reg = scratch2;
4031 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4032 li(mask_reg, HeapNumber::kExponentMask);
4034 And(exponent, exponent, mask_reg);
4035 Branch(not_number, eq, exponent, Operand(mask_reg));
4037 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4042 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4044 Register scratch1) {
4045 sra(scratch1, smi, kSmiTagSize);
4046 mtc1(scratch1, value);
4047 cvt_d_w(value, value);
4051 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4054 Register overflow_dst,
4056 ASSERT(!dst.is(overflow_dst));
4057 ASSERT(!dst.is(scratch));
4058 ASSERT(!overflow_dst.is(scratch));
4059 ASSERT(!overflow_dst.is(left));
4060 ASSERT(!overflow_dst.is(right));
4062 if (left.is(right) && dst.is(left)) {
4063 ASSERT(!dst.is(t9));
4064 ASSERT(!scratch.is(t9));
4065 ASSERT(!left.is(t9));
4066 ASSERT(!right.is(t9));
4067 ASSERT(!overflow_dst.is(t9));
4073 mov(scratch, left); // Preserve left.
4074 addu(dst, left, right); // Left is overwritten.
4075 xor_(scratch, dst, scratch); // Original left.
4076 xor_(overflow_dst, dst, right);
4077 and_(overflow_dst, overflow_dst, scratch);
4078 } else if (dst.is(right)) {
4079 mov(scratch, right); // Preserve right.
4080 addu(dst, left, right); // Right is overwritten.
4081 xor_(scratch, dst, scratch); // Original right.
4082 xor_(overflow_dst, dst, left);
4083 and_(overflow_dst, overflow_dst, scratch);
4085 addu(dst, left, right);
4086 xor_(overflow_dst, dst, left);
4087 xor_(scratch, dst, right);
4088 and_(overflow_dst, scratch, overflow_dst);
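// All three paths above compute the same overflow mask: signed addition
// overflows exactly when both operands have the same sign but the result's
// sign differs, i.e. when (left ^ dst) & (right ^ dst) has its sign bit
// set. overflow_dst is therefore negative iff the addition overflowed.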
4093 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4096 Register overflow_dst,
4098 ASSERT(!dst.is(overflow_dst));
4099 ASSERT(!dst.is(scratch));
4100 ASSERT(!overflow_dst.is(scratch));
4101 ASSERT(!overflow_dst.is(left));
4102 ASSERT(!overflow_dst.is(right));
4103 ASSERT(!scratch.is(left));
4104 ASSERT(!scratch.is(right));
4106 // This happens with some crankshaft code. Since Subu works fine if
4107 // left == right, let's not make that restriction here.
4108 if (left.is(right)) {
4110 mov(overflow_dst, zero_reg);
4115 mov(scratch, left); // Preserve left.
4116 subu(dst, left, right); // Left is overwritten.
4117 xor_(overflow_dst, dst, scratch); // scratch is original left.
4118 xor_(scratch, scratch, right); // scratch = original left XOR right.
4119 and_(overflow_dst, scratch, overflow_dst);
4120 } else if (dst.is(right)) {
4121 mov(scratch, right); // Preserve right.
4122 subu(dst, left, right); // Right is overwritten.
4123 xor_(overflow_dst, dst, left);
4124 xor_(scratch, left, scratch); // Original right.
4125 and_(overflow_dst, scratch, overflow_dst);
4127 subu(dst, left, right);
4128 xor_(overflow_dst, dst, left);
4129 xor_(scratch, left, right);
4130 and_(overflow_dst, scratch, overflow_dst);
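// Subtraction overflow mask: overflow occurs exactly when the operands have
// different signs and the result's sign differs from left's, i.e. when
// (left ^ right) & (left ^ dst) has its sign bit set, so overflow_dst is
// negative iff the subtraction overflowed.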
4135 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4137 SaveFPRegsMode save_doubles) {
4138 // All parameters are on the stack. v0 has the return value after call.
4140 // If the expected number of arguments of the runtime function is
4141 // constant, we check that the actual number of arguments match the
4143 if (f->nargs >= 0 && f->nargs != num_arguments) {
4144 IllegalOperation(num_arguments);
4148 // TODO(1236192): Most runtime routines don't need the number of
4149 // arguments passed in because it is constant. At some point we
4150 // should remove this need and make the runtime routine entry code smarter.
4152 PrepareCEntryArgs(num_arguments);
4153 PrepareCEntryFunction(ExternalReference(f, isolate()));
4154 CEntryStub stub(1, save_doubles);
4159 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4161 BranchDelaySlot bd) {
4162 PrepareCEntryArgs(num_arguments);
4163 PrepareCEntryFunction(ext);
4166 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4170 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4173 // TODO(1236192): Most runtime routines don't need the number of
4174 // arguments passed in because it is constant. At some point we
4175 // should remove this need and make the runtime routine entry code smarter.
4177 PrepareCEntryArgs(num_arguments);
4178 JumpToExternalReference(ext);
4182 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4185 TailCallExternalReference(ExternalReference(fid, isolate()),
4191 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4192 BranchDelaySlot bd) {
4193 PrepareCEntryFunction(builtin);
4195 Jump(stub.GetCode(isolate()),
4196 RelocInfo::CODE_TARGET,
4204 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4206 const CallWrapper& call_wrapper) {
4207 // You can't call a builtin without a valid frame.
4208 ASSERT(flag == JUMP_FUNCTION || has_frame());
4210 GetBuiltinEntry(t9, id);
4211 if (flag == CALL_FUNCTION) {
4212 call_wrapper.BeforeCall(CallSize(t9));
4213 SetCallKind(t1, CALL_AS_METHOD);
4215 call_wrapper.AfterCall();
4217 ASSERT(flag == JUMP_FUNCTION);
4218 SetCallKind(t1, CALL_AS_METHOD);
4224 void MacroAssembler::GetBuiltinFunction(Register target,
4225 Builtins::JavaScript id) {
4226 // Load the builtins object into target register.
4227 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4228 lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4229 // Load the JavaScript builtin function from the builtins object.
4230 lw(target, FieldMemOperand(target,
4231 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4235 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4236 ASSERT(!target.is(a1));
4237 GetBuiltinFunction(a1, id);
4238 // Load the code entry point from the builtins object.
4239 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4243 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4244 Register scratch1, Register scratch2) {
4245 if (FLAG_native_code_counters && counter->Enabled()) {
4246 li(scratch1, Operand(value));
4247 li(scratch2, Operand(ExternalReference(counter)));
4248 sw(scratch1, MemOperand(scratch2));
4253 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4254 Register scratch1, Register scratch2) {
4256 if (FLAG_native_code_counters && counter->Enabled()) {
4257 li(scratch2, Operand(ExternalReference(counter)));
4258 lw(scratch1, MemOperand(scratch2));
4259 Addu(scratch1, scratch1, Operand(value));
4260 sw(scratch1, MemOperand(scratch2));
4265 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4266 Register scratch1, Register scratch2) {
4268 if (FLAG_native_code_counters && counter->Enabled()) {
4269 li(scratch2, Operand(ExternalReference(counter)));
4270 lw(scratch1, MemOperand(scratch2));
4271 Subu(scratch1, scratch1, Operand(value));
4272 sw(scratch1, MemOperand(scratch2));
4277 // -----------------------------------------------------------------------------
4280 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4281 Register rs, Operand rt) {
4282 if (emit_debug_code())
4283 Check(cc, reason, rs, rt);
4287 void MacroAssembler::AssertFastElements(Register elements) {
4288 if (emit_debug_code()) {
4289 ASSERT(!elements.is(at));
4292 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4293 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4294 Branch(&ok, eq, elements, Operand(at));
4295 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4296 Branch(&ok, eq, elements, Operand(at));
4297 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4298 Branch(&ok, eq, elements, Operand(at));
4299 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4306 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4307 Register rs, Operand rt) {
4309 Branch(&L, cc, rs, rt);
4311 // Will not return here.
4316 void MacroAssembler::Abort(BailoutReason reason) {
4319 // We want to pass the msg string like a smi to avoid GC
4320 // problems; however, msg is not guaranteed to be aligned
4321 // properly. Instead, we pass an aligned pointer that is
4322 // a proper v8 smi, but also pass the alignment difference
4323 // from the real pointer as a smi.
4324 const char* msg = GetBailoutReason(reason);
4325 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
4326 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
4327 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
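// p0 is the message pointer rounded down so it is itself a valid smi, and
// the small difference (p1 - p0) is passed separately as a smi; these are
// the two arguments given to Runtime::kAbort below, which re-assembles the
// real, possibly unaligned, message pointer from them.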
4330 RecordComment("Abort message: ");
4334 if (FLAG_trap_on_abort) {
4340 li(a0, Operand(p0));
4342 li(a0, Operand(Smi::FromInt(p1 - p0)));
4344 // Disable stub call restrictions to always allow calls to abort.
4346 // We don't actually want to generate a pile of code for this, so just
4347 // claim there is a stack frame, without generating one.
4348 FrameScope scope(this, StackFrame::NONE);
4349 CallRuntime(Runtime::kAbort, 2);
4351 CallRuntime(Runtime::kAbort, 2);
4353 // Will not return here.
4354 if (is_trampoline_pool_blocked()) {
4355 // If the calling code cares about the exact number of
4356 // instructions generated, we insert padding here to keep the size
4357 // of the Abort macro constant.
4358 // Currently in debug mode with debug_code enabled the number of
4359 // generated instructions is 14, so we use this as a maximum value.
4360 static const int kExpectedAbortInstructions = 14;
4361 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4362 ASSERT(abort_instructions <= kExpectedAbortInstructions);
4363 while (abort_instructions++ < kExpectedAbortInstructions) {
4370 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4371 if (context_chain_length > 0) {
4372 // Move up the chain of contexts to the context containing the slot.
4373 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4374 for (int i = 1; i < context_chain_length; i++) {
4375 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4378 // Slot is in the current function context. Move it into the
4379 // destination register in case we store into it (the write barrier
4380 // cannot be allowed to destroy the context in cp).
4386 void MacroAssembler::LoadTransitionedArrayMapConditional(
4387 ElementsKind expected_kind,
4388 ElementsKind transitioned_kind,
4389 Register map_in_out,
4391 Label* no_map_match) {
4392 // Load the global or builtins object from the current context.
4394 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4395 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4397 // Check that the function's map is the same as the expected cached map.
4400 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4401 size_t offset = expected_kind * kPointerSize +
4402 FixedArrayBase::kHeaderSize;
4403 lw(at, FieldMemOperand(scratch, offset));
4404 Branch(no_map_match, ne, map_in_out, Operand(at));
4406 // Use the transitioned cached map.
4407 offset = transitioned_kind * kPointerSize +
4408 FixedArrayBase::kHeaderSize;
4409 lw(map_in_out, FieldMemOperand(scratch, offset));
4413 void MacroAssembler::LoadInitialArrayMap(
4414 Register function_in, Register scratch,
4415 Register map_out, bool can_have_holes) {
4416 ASSERT(!function_in.is(map_out));
4418 lw(map_out, FieldMemOperand(function_in,
4419 JSFunction::kPrototypeOrInitialMapOffset));
4420 if (!FLAG_smi_only_arrays) {
4421 ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4422 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4427 } else if (can_have_holes) {
4428 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4429 FAST_HOLEY_SMI_ELEMENTS,
4438 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4439 // Load the global or builtins object from the current context.
4441 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4442 // Load the native context from the global or builtins object.
4443 lw(function, FieldMemOperand(function,
4444 GlobalObject::kNativeContextOffset));
4445 // Load the function from the native context.
4446 lw(function, MemOperand(function, Context::SlotOffset(index)));
4450 void MacroAssembler::LoadArrayFunction(Register function) {
4451 // Load the global or builtins object from the current context.
4453 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4454 // Load the global context from the global or builtins object.
4456 FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
4457 // Load the array function from the native context.
4459 MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
4463 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4466 // Load the initial map. The global functions all have initial maps.
4467 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4468 if (emit_debug_code()) {
4470 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4473 Abort(kGlobalFunctionsMustHaveInitialMap);
4479 void MacroAssembler::LoadNumber(Register object,
4481 Register heap_number_map,
4483 Label* not_number) {
4486 UntagAndJumpIfSmi(scratch, object, &is_smi);
4487 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
4489 ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
4500 void MacroAssembler::LoadNumberAsInt32Double(Register object,
4501 DoubleRegister double_dst,
4502 Register heap_number_map,
4505 FPURegister double_scratch,
4507 ASSERT(!scratch1.is(object) && !scratch2.is(object));
4508 ASSERT(!scratch1.is(scratch2));
4509 ASSERT(!heap_number_map.is(object) &&
4510 !heap_number_map.is(scratch1) &&
4511 !heap_number_map.is(scratch2));
4513 Label done, obj_is_not_smi;
4515 UntagAndJumpIfNotSmi(scratch1, object, &obj_is_not_smi);
4516 mtc1(scratch1, double_scratch);
4517 cvt_d_w(double_dst, double_scratch);
4520 bind(&obj_is_not_smi);
4521 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
4524 // Load the double value.
4525 ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
4527 Register except_flag = scratch2;
4528 EmitFPUTruncate(kRoundToZero,
4534 kCheckForInexactConversion);
4536 // Jump to not_int32 if the operation did not succeed.
4537 Branch(not_int32, ne, except_flag, Operand(zero_reg));
4542 void MacroAssembler::LoadNumberAsInt32(Register object,
4544 Register heap_number_map,
4547 FPURegister double_scratch0,
4548 FPURegister double_scratch1,
4550 ASSERT(!dst.is(object));
4551 ASSERT(!scratch1.is(object) && !scratch2.is(object));
4552 ASSERT(!scratch1.is(scratch2));
4554 Label done, maybe_undefined;
4556 UntagAndJumpIfSmi(dst, object, &done);
4558 JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
4560 // Object is a heap number.
4561 // Convert the floating point value to a 32-bit integer.
4562 // Load the double value.
4563 ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
4565 Register except_flag = scratch2;
4566 EmitFPUTruncate(kRoundToZero,
4572 kCheckForInexactConversion);
4574 // Jump to not_int32 if the operation did not succeed.
4575 Branch(not_int32, ne, except_flag, Operand(zero_reg));
4578 bind(&maybe_undefined);
4579 LoadRoot(at, Heap::kUndefinedValueRootIndex);
4580 Branch(not_int32, ne, object, Operand(at));
4581 // |undefined| is truncated to 0.
4582 li(dst, Operand(Smi::FromInt(0)));
4589 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
4590 if (frame_mode == BUILD_STUB_FRAME) {
4592 Push(Smi::FromInt(StackFrame::STUB));
4593 // Adjust FP to point to saved FP.
4594 Addu(fp, sp, Operand(2 * kPointerSize));
4596 PredictableCodeSizeScope predictible_code_size_scope(
4597 this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
4598 // The following three instructions must remain together and unmodified
4599 // for code aging to work properly.
4600 if (isolate()->IsCodePreAgingActive()) {
4601 // Pre-age the code.
4602 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4603 nop(Assembler::CODE_AGE_MARKER_NOP);
4604 // Save the function's original return address
4605 // (it will be clobbered by Call(t9))
4607 // Load the stub address to t9 and call it
4609 Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
4611 // Record the stub address in the empty space for GetCodeAgeAndParity()
4612 dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
4614 Push(ra, fp, cp, a1);
4615 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4616 // Adjust fp to point to caller's fp.
4617 Addu(fp, sp, Operand(2 * kPointerSize));
4623 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4624 addiu(sp, sp, -5 * kPointerSize);
4625 li(t8, Operand(Smi::FromInt(type)));
4626 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4627 sw(ra, MemOperand(sp, 4 * kPointerSize));
4628 sw(fp, MemOperand(sp, 3 * kPointerSize));
4629 sw(cp, MemOperand(sp, 2 * kPointerSize));
4630 sw(t8, MemOperand(sp, 1 * kPointerSize));
4631 sw(t9, MemOperand(sp, 0 * kPointerSize));
4632 addiu(fp, sp, 3 * kPointerSize);
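// Resulting frame, from sp upwards: code object, frame type marker, cp,
// caller fp, ra. fp now points at the saved caller-fp slot, matching the
// standard frame layout.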
4636 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4638 lw(fp, MemOperand(sp, 0 * kPointerSize));
4639 lw(ra, MemOperand(sp, 1 * kPointerSize));
4640 addiu(sp, sp, 2 * kPointerSize);
4644 void MacroAssembler::EnterExitFrame(bool save_doubles,
4646 // Set up the frame structure on the stack.
4647 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4648 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4649 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4651 // This is how the stack will look:
4652 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4653 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4654 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4655 // [fp - 1 (==kSPOffset)] - sp of the called function
4656 // [fp - 2 (==kCodeOffset)] - CodeObject
4657 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4658 // new stack (will contain saved ra)
4661 addiu(sp, sp, -4 * kPointerSize);
4662 sw(ra, MemOperand(sp, 3 * kPointerSize));
4663 sw(fp, MemOperand(sp, 2 * kPointerSize));
4664 addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4666 if (emit_debug_code()) {
4667 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4670 // Accessed from ExitFrame::code_slot.
4671 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4672 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4674 // Save the frame pointer and the context in top.
4675 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4676 sw(fp, MemOperand(t8));
4677 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4678 sw(cp, MemOperand(t8));
4680 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4682 // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4683 ASSERT(kDoubleSize == frame_alignment);
4684 if (frame_alignment > 0) {
4685 ASSERT(IsPowerOf2(frame_alignment));
4686 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4688 int space = FPURegister::kMaxNumRegisters * kDoubleSize;
4689 Subu(sp, sp, Operand(space));
4690 // Remember: we only need to save every 2nd double FPU value.
4691 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4692 FPURegister reg = FPURegister::from_code(i);
4693 sdc1(reg, MemOperand(sp, i * kDoubleSize));
4697 // Reserve space for the return address, stack space and an optional slot
4698 // (used by the DirectCEntryStub to hold the return value if a struct is
4699 // returned) and align the frame preparing for calling the runtime function.
4700 ASSERT(stack_space >= 0);
4701 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4702 if (frame_alignment > 0) {
4703 ASSERT(IsPowerOf2(frame_alignment));
4704 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4707 // Set the exit frame sp value to point just before the return address slot.
4709 addiu(at, sp, kPointerSize);
4710 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4714 void MacroAssembler::LeaveExitFrame(bool save_doubles,
4715 Register argument_count,
4716 bool restore_context,
4718 // Optionally restore all double registers.
4720 // Remember: we only need to restore every 2nd double FPU value.
4721 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4722 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4723 FPURegister reg = FPURegister::from_code(i);
4724 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4729 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4730 sw(zero_reg, MemOperand(t8));
4732 // Restore current context from top and clear it in debug mode.
4733 if (restore_context) {
4734 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4735 lw(cp, MemOperand(t8));
4738 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4739 sw(a3, MemOperand(t8));
4742 // Pop the arguments, restore registers, and return.
4743 mov(sp, fp); // Respect ABI stack constraint.
4744 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4745 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4747 if (argument_count.is_valid()) {
4748 sll(t8, argument_count, kPointerSizeLog2);
4753 Ret(USE_DELAY_SLOT);
4754 // If returning, the instruction in the delay slot will be the addiu below.
4760 void MacroAssembler::InitializeNewString(Register string,
4762 Heap::RootListIndex map_index,
4764 Register scratch2) {
4765 sll(scratch1, length, kSmiTagSize);
4766 LoadRoot(scratch2, map_index);
4767 sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
4768 li(scratch1, Operand(String::kEmptyHashField));
4769 sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4770 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4774 int MacroAssembler::ActivationFrameAlignment() {
4775 #if V8_HOST_ARCH_MIPS
4776 // Running on the real platform. Use the alignment as mandated by the local environment.
4778 // Note: This will break if we ever start generating snapshots on one Mips
4779 // platform for another Mips platform with a different alignment.
4780 return OS::ActivationFrameAlignment();
4781 #else // V8_HOST_ARCH_MIPS
4782 // If we are using the simulator then we should always align to the expected
4783 // alignment. As the simulator is used to generate snapshots we do not know
4784 // if the target platform will need alignment, so this is controlled from a flag.
4786 return FLAG_sim_stack_alignment;
4787 #endif // V8_HOST_ARCH_MIPS
4791 void MacroAssembler::AssertStackIsAligned() {
4792 if (emit_debug_code()) {
4793 const int frame_alignment = ActivationFrameAlignment();
4794 const int frame_alignment_mask = frame_alignment - 1;
4796 if (frame_alignment > kPointerSize) {
4797 Label alignment_as_expected;
4798 ASSERT(IsPowerOf2(frame_alignment));
4799 andi(at, sp, frame_alignment_mask);
4800 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4801 // Don't use Check here, as it will call Runtime_Abort re-entering here.
4802 stop("Unexpected stack alignment");
4803 bind(&alignment_as_expected);
4809 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4812 Label* not_power_of_two_or_zero) {
4813 Subu(scratch, reg, Operand(1));
4814 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4815 scratch, Operand(zero_reg));
4816 and_(at, scratch, reg); // In the delay slot.
4817 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
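// The test above is the usual "x & (x - 1)" trick. A minimal host-side sketch
// (illustrative only, not part of the generated code; the helper name is
// hypothetical):
#if 0
static bool IsPowerOfTwoNonZero(int x) {
  if (x - 1 < 0) return false;   // Rejects zero and negative values,
                                 // mirroring the first branch above.
  return (x & (x - 1)) == 0;     // Clearing the lowest set bit must leave
                                 // no bits behind.
}
#endif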
4821 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4822 ASSERT(!reg.is(overflow));
4823 mov(overflow, reg); // Save original value.
4824 SmiTag(reg);
4825 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
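// Why the xor works: tagging a Smi on this 32-bit port doubles the value, so
// the tagged result has a different sign bit exactly when the value does not
// fit in 31 bits. Host-side sketch (illustrative only; the helper is
// hypothetical and int is assumed to be 32 bits wide):
#if 0
static bool SmiTagOverflows(int value) {
  int tagged = static_cast<int>(static_cast<unsigned>(value) << 1);
  return (value ^ tagged) < 0;  // Sign changed => the value was too large.
}
#endif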
4829 void MacroAssembler::SmiTagCheckOverflow(Register dst,
4831 Register overflow) {
4833 // Fall back to slower case.
4834 SmiTagCheckOverflow(dst, overflow);
4836 ASSERT(!dst.is(src));
4837 ASSERT(!dst.is(overflow));
4838 ASSERT(!src.is(overflow));
4840 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
4845 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
4848 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
4853 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
4855 Label* non_smi_case) {
4856 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
4860 void MacroAssembler::JumpIfSmi(Register value,
4863 BranchDelaySlot bd) {
4864 ASSERT_EQ(0, kSmiTag);
4865 andi(scratch, value, kSmiTagMask);
4866 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4869 void MacroAssembler::JumpIfNotSmi(Register value,
4870 Label* not_smi_label,
4872 BranchDelaySlot bd) {
4873 ASSERT_EQ(0, kSmiTag);
4874 andi(scratch, value, kSmiTagMask);
4875 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4879 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4881 Label* on_not_both_smi) {
4882 STATIC_ASSERT(kSmiTag == 0);
4883 ASSERT_EQ(1, kSmiTagMask);
4884 or_(at, reg1, reg2);
4885 JumpIfNotSmi(at, on_not_both_smi);
4889 void MacroAssembler::JumpIfEitherSmi(Register reg1,
4891 Label* on_either_smi) {
4892 STATIC_ASSERT(kSmiTag == 0);
4893 ASSERT_EQ(1, kSmiTagMask);
4894 // Both tag bits must be 1 (i.e. neither value is a Smi) for the AND below not to look like a Smi.
4895 and_(at, reg1, reg2);
4896 JumpIfSmi(at, on_either_smi);
4900 void MacroAssembler::AssertNotSmi(Register object) {
4901 if (emit_debug_code()) {
4902 STATIC_ASSERT(kSmiTag == 0);
4903 andi(at, object, kSmiTagMask);
4904 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
4909 void MacroAssembler::AssertSmi(Register object) {
4910 if (emit_debug_code()) {
4911 STATIC_ASSERT(kSmiTag == 0);
4912 andi(at, object, kSmiTagMask);
4913 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
4918 void MacroAssembler::AssertString(Register object) {
4919 if (emit_debug_code()) {
4920 STATIC_ASSERT(kSmiTag == 0);
4921 And(t0, object, Operand(kSmiTagMask));
4922 Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
4924 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4925 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4926 Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
4932 void MacroAssembler::AssertName(Register object) {
4933 if (emit_debug_code()) {
4934 STATIC_ASSERT(kSmiTag == 0);
4935 And(t0, object, Operand(kSmiTagMask));
4936 Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
4938 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4939 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4940 Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
4946 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
4947 if (emit_debug_code()) {
4948 ASSERT(!reg.is(at));
4949 LoadRoot(at, index);
4950 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
4955 void MacroAssembler::JumpIfNotHeapNumber(Register object,
4956 Register heap_number_map,
4958 Label* on_not_heap_number) {
4959 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4960 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4961 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4965 void MacroAssembler::LookupNumberStringCache(Register object,
4971 // Use of registers. Register result is used as a temporary.
4972 Register number_string_cache = result;
4973 Register mask = scratch3;
4975 // Load the number string cache.
4976 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
4978 // Make the hash mask from the length of the number string cache. It
4979 // contains two elements (number and string) for each cache entry.
4980 lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
4981 // Divide length by two (length is a smi).
4982 sra(mask, mask, kSmiTagSize + 1);
4983 Addu(mask, mask, -1); // Make mask.
4985 // Calculate the entry in the number string cache. The hash value in the
4986 // number string cache for smis is just the smi value, and the hash for
4987 // doubles is the xor of the upper and lower words. See
4988 // Heap::GetNumberStringCache.
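// A rough host-side sketch of the index computation described above
// (illustrative only; the helper is hypothetical, the real logic lives in
// Heap::GetNumberStringCache):
#if 0
static unsigned NumberStringCacheIndex(unsigned lo_word, unsigned hi_word,
                                       unsigned mask) {
  // Doubles hash to the xor of their two 32-bit halves; smis hash to the smi
  // value itself. The mask is (number of cache entries - 1).
  return (hi_word ^ lo_word) & mask;
}
#endif
// Each entry is a (number, string) pair, so the byte offset of an entry is
// index * 2 * kPointerSize, formed below with a shift by kPointerSizeLog2 + 1.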
4990 Label load_result_from_cache;
4991 JumpIfSmi(object, &is_smi);
4994 Heap::kHeapNumberMapRootIndex,
4998 STATIC_ASSERT(8 == kDoubleSize);
5001 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
5002 lw(scratch2, MemOperand(scratch1, kPointerSize));
5003 lw(scratch1, MemOperand(scratch1, 0));
5004 Xor(scratch1, scratch1, Operand(scratch2));
5005 And(scratch1, scratch1, Operand(mask));
5007 // Calculate address of entry in string cache: each entry consists
5008 // of two pointer sized fields.
5009 sll(scratch1, scratch1, kPointerSizeLog2 + 1);
5010 Addu(scratch1, number_string_cache, scratch1);
5012 Register probe = mask;
5013 lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
5014 JumpIfSmi(probe, not_found);
5015 ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
5016 ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
5017 BranchF(&load_result_from_cache, NULL, eq, f12, f14);
5021 Register scratch = scratch1;
5022 sra(scratch, object, 1); // Shift away the tag.
5023 And(scratch, mask, Operand(scratch));
5025 // Calculate address of entry in string cache: each entry consists
5026 // of two pointer sized fields.
5027 sll(scratch, scratch, kPointerSizeLog2 + 1);
5028 Addu(scratch, number_string_cache, scratch);
5030 // Check if the entry is the smi we are looking for.
5031 lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
5032 Branch(not_found, ne, object, Operand(probe));
5034 // Get the result from the cache.
5035 bind(&load_result_from_cache);
5036 lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
5038 IncrementCounter(isolate()->counters()->number_to_string_native(),
5045 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
5051 // Test that both first and second are sequential ASCII strings.
5052 // Assume that they are non-smis.
5053 lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
5054 lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
5055 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
5056 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
5058 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
5066 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
5071 // Check that neither is a smi.
5072 STATIC_ASSERT(kSmiTag == 0);
5073 And(scratch1, first, Operand(second));
5074 JumpIfSmi(scratch1, failure);
5075 JumpIfNonSmisNotBothSequentialAsciiStrings(first,
5083 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
5089 const int kFlatAsciiStringMask =
5090 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5091 const int kFlatAsciiStringTag =
5092 kStringTag | kOneByteStringTag | kSeqStringTag;
5093 ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
5094 andi(scratch1, first, kFlatAsciiStringMask);
5095 Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
5096 andi(scratch2, second, kFlatAsciiStringMask);
5097 Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
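// The check above keeps only the string, encoding and representation bits of
// the instance type and requires them to equal the sequential one-byte
// pattern. Sketch (illustrative only, hypothetical helper):
#if 0
static bool IsSequentialAsciiInstanceType(int instance_type, int mask,
                                          int tag) {
  return (instance_type & mask) == tag;  // Two-byte, cons, sliced and
                                         // external strings all fail this.
}
#endif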
5101 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
5104 const int kFlatAsciiStringMask =
5105 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5106 const int kFlatAsciiStringTag =
5107 kStringTag | kOneByteStringTag | kSeqStringTag;
5108 And(scratch, type, Operand(kFlatAsciiStringMask));
5109 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
5113 static const int kRegisterPassedArguments = 4;
5115 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5116 int num_double_arguments) {
5117 int stack_passed_words = 0;
5118 num_reg_arguments += 2 * num_double_arguments;
5120 // Up to four simple arguments are passed in registers a0..a3.
5121 if (num_reg_arguments > kRegisterPassedArguments) {
5122 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5124 stack_passed_words += kCArgSlotCount;
5125 return stack_passed_words;
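// Worked example (assuming the O32 convention, where kCArgSlotCount is 4):
// three integer arguments plus one double argument occupy 3 + 2 * 1 = 5
// register slots, so one word spills to the stack, and the four reserved
// argument slots are always added on top:
//   stack_passed_words = (5 - 4) + 4 = 5.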
5129 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5130 int num_double_arguments,
5132 int frame_alignment = ActivationFrameAlignment();
5134 // Up to four simple arguments are passed in registers a0..a3.
5135 // Those four arguments must have reserved argument slots on the stack for
5136 // mips, even though those argument slots are not normally used.
5137 // Remaining arguments are pushed on the stack, above (higher address than)
5138 // the argument slots.
5139 int stack_passed_arguments = CalculateStackPassedWords(
5140 num_reg_arguments, num_double_arguments);
5141 if (frame_alignment > kPointerSize) {
5142 // Make stack end at alignment and make room for num_arguments - 4 words
5143 // and the original value of sp.
5145 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5146 ASSERT(IsPowerOf2(frame_alignment));
5147 And(sp, sp, Operand(-frame_alignment));
5148 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5150 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5155 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5157 PrepareCallCFunction(num_reg_arguments, 0, scratch);
5161 void MacroAssembler::CallCFunction(ExternalReference function,
5162 int num_reg_arguments,
5163 int num_double_arguments) {
5164 li(t8, Operand(function));
5165 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5169 void MacroAssembler::CallCFunction(Register function,
5170 int num_reg_arguments,
5171 int num_double_arguments) {
5172 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5176 void MacroAssembler::CallCFunction(ExternalReference function,
5177 int num_arguments) {
5178 CallCFunction(function, num_arguments, 0);
5182 void MacroAssembler::CallCFunction(Register function,
5183 int num_arguments) {
5184 CallCFunction(function, num_arguments, 0);
5188 void MacroAssembler::CallCFunctionHelper(Register function,
5189 int num_reg_arguments,
5190 int num_double_arguments) {
5191 ASSERT(has_frame());
5192 // Make sure that the stack is aligned before calling a C function unless
5193 // running in the simulator. The simulator has its own alignment check which
5194 // provides more information.
5195 // The argument slots are presumed to have been set up by
5196 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5198 #if V8_HOST_ARCH_MIPS
5199 if (emit_debug_code()) {
5200 int frame_alignment = OS::ActivationFrameAlignment();
5201 int frame_alignment_mask = frame_alignment - 1;
5202 if (frame_alignment > kPointerSize) {
5203 ASSERT(IsPowerOf2(frame_alignment));
5204 Label alignment_as_expected;
5205 And(at, sp, Operand(frame_alignment_mask));
5206 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5207 // Don't use Check here, as it will call Runtime_Abort possibly
5208 // re-entering here.
5209 stop("Unexpected alignment in CallCFunction");
5210 bind(&alignment_as_expected);
5213 #endif // V8_HOST_ARCH_MIPS
5215 // Just call directly. The function called cannot cause a GC, or
5216 // allow preemption, so the return address in the link register stays correct.
5219 if (!function.is(t9)) {
5226 int stack_passed_arguments = CalculateStackPassedWords(
5227 num_reg_arguments, num_double_arguments);
5229 if (OS::ActivationFrameAlignment() > kPointerSize) {
5230 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5232 Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
5237 #undef BRANCH_ARGS_CHECK
5240 void MacroAssembler::PatchRelocatedValue(Register li_location,
5242 Register new_value) {
5243 lw(scratch, MemOperand(li_location));
5244 // At this point scratch is a lui(at, ...) instruction.
5245 if (emit_debug_code()) {
5246 And(scratch, scratch, kOpcodeMask);
5247 Check(eq, kTheInstructionToPatchShouldBeALui,
5248 scratch, Operand(LUI));
5249 lw(scratch, MemOperand(li_location));
5251 srl(t9, new_value, kImm16Bits);
5252 Ins(scratch, t9, 0, kImm16Bits);
5253 sw(scratch, MemOperand(li_location));
5255 lw(scratch, MemOperand(li_location, kInstrSize));
5256 // scratch is now ori(at, ...).
5257 if (emit_debug_code()) {
5258 And(scratch, scratch, kOpcodeMask);
5259 Check(eq, kTheInstructionToPatchShouldBeAnOri,
5260 scratch, Operand(ORI));
5261 lw(scratch, MemOperand(li_location, kInstrSize));
5263 Ins(scratch, new_value, 0, kImm16Bits);
5264 sw(scratch, MemOperand(li_location, kInstrSize));
5266 // Update the I-cache so the new lui and ori can be executed.
5267 FlushICache(li_location, 2);
5270 void MacroAssembler::GetRelocatedValue(Register li_location,
5273 lw(value, MemOperand(li_location));
5274 if (emit_debug_code()) {
5275 And(value, value, kOpcodeMask);
5276 Check(eq, kTheInstructionShouldBeALui,
5277 value, Operand(LUI));
5278 lw(value, MemOperand(li_location));
5281 // value now holds a lui instruction. Extract the immediate.
5282 sll(value, value, kImm16Bits);
5284 lw(scratch, MemOperand(li_location, kInstrSize));
5285 if (emit_debug_code()) {
5286 And(scratch, scratch, kOpcodeMask);
5287 Check(eq, kTheInstructionShouldBeAnOri,
5288 scratch, Operand(ORI));
5289 lw(scratch, MemOperand(li_location, kInstrSize));
5291 // "scratch" now holds an ori instruction. Extract the immediate.
5292 andi(scratch, scratch, kImm16Mask);
5294 // Merge the results.
5295 or_(value, value, scratch);
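// li() materializes a 32-bit constant as a lui (upper 16 bits) followed by an
// ori (lower 16 bits); the code above reads those two immediates back and
// recombines them. Host-side sketch (illustrative only, hypothetical helper):
#if 0
static unsigned RecombineLuiOri(unsigned lui_imm16, unsigned ori_imm16) {
  return (lui_imm16 << 16) | (ori_imm16 & 0xffff);
}
#endif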
5299 void MacroAssembler::CheckPageFlag(
5304 Label* condition_met) {
5305 And(scratch, object, Operand(~Page::kPageAlignmentMask));
5306 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5307 And(scratch, scratch, Operand(mask));
5308 Branch(condition_met, cc, scratch, Operand(zero_reg));
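// Pages are power-of-two aligned, so masking off the low bits of any address
// inside an object yields its MemoryChunk header, whose flags word is then
// tested against |mask|. Sketch (illustrative only, hypothetical helpers):
#if 0
static unsigned ChunkStart(unsigned addr, unsigned page_alignment_mask) {
  return addr & ~page_alignment_mask;    // Start of the page holding |addr|.
}
static bool FlagSet(unsigned flags_word, unsigned flag_mask) {
  return (flags_word & flag_mask) != 0;  // Condition tested by the branch.
}
#endif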
5312 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5314 Label* if_deprecated) {
5315 if (map->CanBeDeprecated()) {
5316 li(scratch, Operand(map));
5317 lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
5318 And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
5319 Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5324 void MacroAssembler::JumpIfBlack(Register object,
5328 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5329 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5333 void MacroAssembler::HasColor(Register object,
5334 Register bitmap_scratch,
5335 Register mask_scratch,
5339 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5340 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5342 GetMarkBits(object, bitmap_scratch, mask_scratch);
5344 Label other_color, word_boundary;
5345 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5346 And(t8, t9, Operand(mask_scratch));
5347 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5348 // Shift left 1 by adding.
5349 Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5350 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5351 And(t8, t9, Operand(mask_scratch));
5352 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5355 bind(&word_boundary);
5356 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5357 And(t9, t9, Operand(1));
5358 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5363 // Detect some, but not all, common pointer-free objects. This is used by the
5364 // incremental write barrier which doesn't care about oddballs (they are always
5365 // marked black immediately so this code is not hit).
5366 void MacroAssembler::JumpIfDataObject(Register value,
5368 Label* not_data_object) {
5369 ASSERT(!AreAliased(value, scratch, t8, no_reg));
5370 Label is_data_object;
5371 lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5372 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5373 Branch(&is_data_object, eq, t8, Operand(scratch));
5374 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5375 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5376 // If it's a string and it's not a cons string then it's an object containing no GC pointers.
5378 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5379 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5380 Branch(not_data_object, ne, t8, Operand(zero_reg));
5381 bind(&is_data_object);
5385 void MacroAssembler::GetMarkBits(Register addr_reg,
5386 Register bitmap_reg,
5387 Register mask_reg) {
5388 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5389 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5390 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5391 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5392 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5393 sll(t8, t8, kPointerSizeLog2);
5394 Addu(bitmap_reg, bitmap_reg, t8);
5395 li(t8, Operand(1));
5396 sllv(mask_reg, t8, mask_reg);
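// Host-side sketch of the mark-bit addressing computed above (illustrative
// only, hypothetical helper): the low bits of an address's word index select
// a bit inside a bitmap cell, the remaining bits select the cell itself.
#if 0
static void MarkBitPosition(unsigned addr, unsigned page_alignment_mask,
                            int pointer_size_log2, int bits_per_cell_log2,
                            unsigned* cell_offset, unsigned* bit_mask) {
  unsigned word_index = (addr & page_alignment_mask) >> pointer_size_log2;
  unsigned bit_index = word_index & ((1u << bits_per_cell_log2) - 1);
  unsigned cell_index = word_index >> bits_per_cell_log2;
  *cell_offset = cell_index << pointer_size_log2;  // Byte offset of the cell.
  *bit_mask = 1u << bit_index;                     // Bit within that cell.
}
#endif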
5400 void MacroAssembler::EnsureNotWhite(
5402 Register bitmap_scratch,
5403 Register mask_scratch,
5404 Register load_scratch,
5405 Label* value_is_white_and_not_data) {
5406 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5407 GetMarkBits(value, bitmap_scratch, mask_scratch);
5409 // If the value is black or grey we don't need to do anything.
5410 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5411 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5412 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5413 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5417 // Since both black and grey have a 1 in the first position and white does
5418 // not have a 1 there we only need to check one bit.
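// With the patterns asserted above (white == "00", black == "10",
// grey == "11") an object is known to be non-white as soon as the first of
// its two mark bits is set, which is all the load-and-test below checks.
// Sketch (illustrative only, hypothetical helper):
#if 0
static bool IsBlackOrGrey(unsigned mark_bit_cell, unsigned first_bit_mask) {
  return (mark_bit_cell & first_bit_mask) != 0;  // White iff the bit is 0.
}
#endif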
5419 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5420 And(t8, mask_scratch, load_scratch);
5421 Branch(&done, ne, t8, Operand(zero_reg));
5423 if (emit_debug_code()) {
5424 // Check for impossible bit pattern.
5426 // sll may overflow, making the check conservative.
5427 sll(t8, mask_scratch, 1);
5428 And(t8, load_scratch, t8);
5429 Branch(&ok, eq, t8, Operand(zero_reg));
5430 stop("Impossible marking bit pattern");
5434 // Value is white. We check whether it is data that doesn't need scanning.
5435 // Currently only checks for HeapNumber and non-cons strings.
5436 Register map = load_scratch; // Holds map while checking type.
5437 Register length = load_scratch; // Holds length of object after testing type.
5438 Label is_data_object;
5440 // Check for heap-number
5441 lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5442 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5445 Branch(&skip, ne, t8, Operand(map));
5446 li(length, HeapNumber::kSize);
5447 Branch(&is_data_object);
5451 // Check for strings.
5452 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5453 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5454 // If it's a string and it's not a cons string then it's an object containing no GC pointers.
5456 Register instance_type = load_scratch;
5457 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5458 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5459 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5460 // It's a non-indirect (non-cons and non-slice) string.
5461 // If it's external, the length is just ExternalString::kSize.
5462 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5463 // External strings are the only ones with the kExternalStringTag bit set.
5465 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
5466 ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
5467 And(t8, instance_type, Operand(kExternalStringTag));
5470 Branch(&skip, eq, t8, Operand(zero_reg));
5471 li(length, ExternalString::kSize);
5472 Branch(&is_data_object);
5476 // Sequential string, either ASCII or UC16.
5477 // For ASCII (char-size of 1) we shift the smi tag away to get the length.
5478 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5479 // getting the length multiplied by 2.
5480 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5481 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
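// Worked example: a string of length 7 is stored as the smi 14 (tag bit 0).
// One-byte strings shift the tag away (14 >> 1 == 7 payload bytes); two-byte
// strings keep the smi as-is (14 == 7 * 2 payload bytes). The header size and
// alignment rounding below then complete the object size. Sketch
// (illustrative only, hypothetical helper):
#if 0
static int SeqStringSize(int length_as_smi, bool one_byte, int header_size,
                         int alignment_mask) {
  int payload = one_byte ? (length_as_smi >> 1) : length_as_smi;
  return (payload + header_size + alignment_mask) & ~alignment_mask;
}
#endif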
5482 lw(t9, FieldMemOperand(value, String::kLengthOffset));
5483 And(t8, instance_type, Operand(kStringEncodingMask));
5486 Branch(&skip, eq, t8, Operand(zero_reg));
5490 Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5491 And(length, length, Operand(~kObjectAlignmentMask));
5493 bind(&is_data_object);
5494 // Value is a data object, and it is white. Mark it black. Since we know
5495 // that the object is white we can make it black by flipping one bit.
5496 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5497 Or(t8, t8, Operand(mask_scratch));
5498 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5500 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5501 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5502 Addu(t8, t8, Operand(length));
5503 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5509 void MacroAssembler::LoadInstanceDescriptors(Register map,
5510 Register descriptors) {
5511 lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5515 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5516 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5517 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5521 void MacroAssembler::EnumLength(Register dst, Register map) {
5522 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5523 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5524 And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
5528 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5529 Register empty_fixed_array_value = t2;
5530 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5534 // Check if the enum length field is properly initialized, indicating that
5535 // there is an enum cache.
5536 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5539 Branch(call_runtime, eq, a3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
5544 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5546 // For all objects but the receiver, check that the cache is empty.
5548 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5552 // Check that there are no elements. Register a2 contains the current JS
5553 // object we've reached through the prototype chain.
5554 lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5555 Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
5557 lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5558 Branch(&next, ne, a2, Operand(null_value));
5562 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5563 ASSERT(!output_reg.is(input_reg));
5565 li(output_reg, Operand(255));
5566 // Normal branch: nop in delay slot.
5567 Branch(&done, gt, input_reg, Operand(output_reg));
5568 // Use delay slot in this branch.
5569 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5570 mov(output_reg, zero_reg); // In delay slot.
5571 mov(output_reg, input_reg); // Value is in range 0..255.
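// Scalar equivalent of the clamp implemented above (illustrative only,
// hypothetical helper):
#if 0
static int ClampToUint8(int value) {
  if (value > 255) return 255;  // First branch keeps the preloaded 255.
  if (value < 0) return 0;      // Second branch; zero is set in the delay slot.
  return value;                 // In range: copy the input through.
}
#endif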
5576 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5577 DoubleRegister input_reg,
5578 DoubleRegister temp_double_reg) {
5583 Move(temp_double_reg, 0.0);
5584 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5586 // Double value is less than zero, NaN or Inf, return 0.
5587 mov(result_reg, zero_reg);
5590 // Double value is >= 255, return 255.
5592 Move(temp_double_reg, 255.0);
5593 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5594 li(result_reg, Operand(255));
5597 // In 0-255 range, round and truncate.
5599 cvt_w_d(temp_double_reg, input_reg);
5600 mfc1(result_reg, temp_double_reg);
5605 void MacroAssembler::TestJSArrayForAllocationMemento(
5606 Register receiver_reg,
5607 Register scratch_reg,
5608 Label* no_memento_found,
5610 Label* allocation_memento_present) {
5611 ExternalReference new_space_start =
5612 ExternalReference::new_space_start(isolate());
5613 ExternalReference new_space_allocation_top =
5614 ExternalReference::new_space_allocation_top_address(isolate());
5615 Addu(scratch_reg, receiver_reg,
5616 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5617 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5618 li(at, Operand(new_space_allocation_top));
5619 lw(at, MemOperand(at));
5620 Branch(no_memento_found, gt, scratch_reg, Operand(at));
5621 lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5622 if (allocation_memento_present) {
5623 Branch(allocation_memento_present, cond, scratch_reg,
5624 Operand(isolate()->factory()->allocation_memento_map()));
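// The candidate memento sits immediately after the JSArray, so it can only be
// valid if that address still lies inside new space, i.e. between the space
// start and the current allocation top. Sketch of the range check performed
// above (illustrative only, hypothetical helper):
#if 0
static bool MayHaveMemento(unsigned memento_end, unsigned new_space_start,
                           unsigned new_space_top) {
  return memento_end >= new_space_start && memento_end <= new_space_top;
}
#endif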
5629 Register GetRegisterThatIsNotOneOf(Register reg1,
5636 if (reg1.is_valid()) regs |= reg1.bit();
5637 if (reg2.is_valid()) regs |= reg2.bit();
5638 if (reg3.is_valid()) regs |= reg3.bit();
5639 if (reg4.is_valid()) regs |= reg4.bit();
5640 if (reg5.is_valid()) regs |= reg5.bit();
5641 if (reg6.is_valid()) regs |= reg6.bit();
5643 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
5644 Register candidate = Register::FromAllocationIndex(i);
5645 if (regs & candidate.bit()) continue;
5653 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5654 if (r1.is(r2)) return true;
5655 if (r1.is(r3)) return true;
5656 if (r1.is(r4)) return true;
5657 if (r2.is(r3)) return true;
5658 if (r2.is(r4)) return true;
5659 if (r3.is(r4)) return true;
5664 CodePatcher::CodePatcher(byte* address, int instructions)
5665 : address_(address),
5666 size_(instructions * Assembler::kInstrSize),
5667 masm_(NULL, address, size_ + Assembler::kGap) {
5668 // Create a new macro assembler pointing to the address of the code to patch.
5669 // The size is adjusted with kGap in order for the assembler to generate size
5670 // bytes of instructions without failing with buffer size constraints.
5671 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5675 CodePatcher::~CodePatcher() {
5676 // Indicate that code has changed.
5677 CPU::FlushICache(address_, size_);
5679 // Check that the code was patched as expected.
5680 ASSERT(masm_.pc_ == address_ + size_);
5681 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5685 void CodePatcher::Emit(Instr instr) {
5686 masm()->emit(instr);
5690 void CodePatcher::Emit(Address addr) {
5691 masm()->emit(reinterpret_cast<Instr>(addr));
5695 void CodePatcher::ChangeBranchCondition(Condition cond) {
5696 Instr instr = Assembler::instr_at(masm_.pc_);
5697 ASSERT(Assembler::IsBranch(instr));
5698 uint32_t opcode = Assembler::GetOpcodeField(instr);
5699 // Currently only the 'eq' and 'ne' cond values are supported and the simple
5700 // branch instructions (with opcode being the branch type).
5701 // There are some special cases (see Assembler::IsBranch()) so extending this would be tricky.
5703 ASSERT(opcode == BEQ ||
5711 opcode = (cond == eq) ? BEQ : BNE;
5712 instr = (instr & ~kOpcodeMask) | opcode;
5717 } } // namespace v8::internal
5719 #endif // V8_TARGET_ARCH_MIPS