1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
9 #if V8_TARGET_ARCH_MIPS
11 #include "bootstrapper.h"
13 #include "cpu-profiler.h"
15 #include "isolate-inl.h"
21 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
22 : Assembler(arg_isolate, buffer, size),
23 generating_stub_(false),
25 if (isolate() != NULL) {
26 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
32 void MacroAssembler::Load(Register dst,
33 const MemOperand& src,
35 ASSERT(!r.IsDouble());
38 } else if (r.IsUInteger8()) {
40 } else if (r.IsInteger16()) {
42 } else if (r.IsUInteger16()) {
50 void MacroAssembler::Store(Register src,
51 const MemOperand& dst,
53 ASSERT(!r.IsDouble());
54 if (r.IsInteger8() || r.IsUInteger8()) {
56 } else if (r.IsInteger16() || r.IsUInteger16()) {
64 void MacroAssembler::LoadRoot(Register destination,
65 Heap::RootListIndex index) {
66 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
70 void MacroAssembler::LoadRoot(Register destination,
71 Heap::RootListIndex index,
73 Register src1, const Operand& src2) {
74 Branch(2, NegateCondition(cond), src1, src2);
75 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
79 void MacroAssembler::StoreRoot(Register source,
80 Heap::RootListIndex index) {
81 sw(source, MemOperand(s6, index << kPointerSizeLog2));
85 void MacroAssembler::StoreRoot(Register source,
86 Heap::RootListIndex index,
88 Register src1, const Operand& src2) {
89 Branch(2, NegateCondition(cond), src1, src2);
90 sw(source, MemOperand(s6, index << kPointerSizeLog2));
94 // Push and pop all registers that can hold pointers.
95 void MacroAssembler::PushSafepointRegisters() {
96 // Safepoints expect a block of kNumSafepointRegisters values on the
97 // stack, so adjust the stack for unsaved registers.
98 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
99 ASSERT(num_unsaved >= 0);
100 if (num_unsaved > 0) {
101 Subu(sp, sp, Operand(num_unsaved * kPointerSize));
103 MultiPush(kSafepointSavedRegisters);
107 void MacroAssembler::PopSafepointRegisters() {
108 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
109 MultiPop(kSafepointSavedRegisters);
110 if (num_unsaved > 0) {
111 Addu(sp, sp, Operand(num_unsaved * kPointerSize));
116 void MacroAssembler::PushSafepointRegistersAndDoubles() {
117 PushSafepointRegisters();
118 Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
119 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
120 FPURegister reg = FPURegister::FromAllocationIndex(i);
121 sdc1(reg, MemOperand(sp, i * kDoubleSize));
126 void MacroAssembler::PopSafepointRegistersAndDoubles() {
127 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
128 FPURegister reg = FPURegister::FromAllocationIndex(i);
129 ldc1(reg, MemOperand(sp, i * kDoubleSize));
131 Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
132 PopSafepointRegisters();
136 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
138 sw(src, SafepointRegistersAndDoublesSlot(dst));
142 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
143 sw(src, SafepointRegisterSlot(dst));
147 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
148 lw(dst, SafepointRegisterSlot(src));
152 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
153 // The registers are pushed starting with the highest encoding,
154 // which means that the lowest encodings are closest to the stack pointer.
155 return kSafepointRegisterStackIndexMap[reg_code];
159 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
160 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
164 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
165 UNIMPLEMENTED_MIPS();
166 // General purpose registers are pushed last on the stack.
167 int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
168 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
169 return MemOperand(sp, doubles_size + register_offset);
173 void MacroAssembler::InNewSpace(Register object,
177 ASSERT(cc == eq || cc == ne);
178 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
179 Branch(branch, cc, scratch,
180 Operand(ExternalReference::new_space_start(isolate())));
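// In C++ terms the check above is (sketch; mask/start mirror the
// ExternalReferences used above): the new space is a power-of-two aligned
// region, so masking the address and comparing it against the base
// suffices, with 'cc' selecting eq (inside) or ne (outside):
//   bool InNewSpace(uintptr_t addr) {
//     return (addr & new_space_mask) == new_space_start;
//   }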
184 void MacroAssembler::RecordWriteField(
190 SaveFPRegsMode save_fp,
191 RememberedSetAction remembered_set_action,
192 SmiCheck smi_check) {
193 ASSERT(!AreAliased(value, dst, t8, object));
194 // First, check if a write barrier is even needed. The tests below
195 // catch stores of Smis.
198 // Skip barrier if writing a smi.
199 if (smi_check == INLINE_SMI_CHECK) {
200 JumpIfSmi(value, &done);
203 // Although the object register is tagged, the offset is relative to the start
204 // of the object, so the offset must be a multiple of kPointerSize.
205 ASSERT(IsAligned(offset, kPointerSize));
207 Addu(dst, object, Operand(offset - kHeapObjectTag));
208 if (emit_debug_code()) {
210 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
211 Branch(&ok, eq, t8, Operand(zero_reg));
212 stop("Unaligned cell in write barrier");
221 remembered_set_action,
226 // Clobber clobbered input registers when running with the debug-code flag
227 // turned on to provoke errors.
228 if (emit_debug_code()) {
229 li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
230 li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
235 // Will clobber 4 registers: object, address, scratch, ip. The
236 // register 'object' contains a heap object pointer. The heap object
237 // tag is shifted away.
238 void MacroAssembler::RecordWrite(Register object,
242 SaveFPRegsMode fp_mode,
243 RememberedSetAction remembered_set_action,
244 SmiCheck smi_check) {
245 ASSERT(!AreAliased(object, address, value, t8));
246 ASSERT(!AreAliased(object, address, value, t9));
248 if (emit_debug_code()) {
249 lw(at, MemOperand(address));
251 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
254 // Count number of write barriers in generated code.
255 isolate()->counters()->write_barriers_static()->Increment();
256 // TODO(mstarzinger): Dynamic counter missing.
258 // First, check if a write barrier is even needed. The tests below
259 // catch stores of smis and stores into the young generation.
262 if (smi_check == INLINE_SMI_CHECK) {
263 ASSERT_EQ(0, kSmiTag);
264 JumpIfSmi(value, &done);
268 value, // Used as scratch.
269 MemoryChunk::kPointersToHereAreInterestingMask,
272 CheckPageFlag(object,
273 value, // Used as scratch.
274 MemoryChunk::kPointersFromHereAreInterestingMask,
278 // Record the actual write.
279 if (ra_status == kRAHasNotBeenSaved) {
282 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
285 if (ra_status == kRAHasNotBeenSaved) {
291 // Clobber clobbered registers when running with the debug-code flag
292 // turned on to provoke errors.
293 if (emit_debug_code()) {
294 li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
295 li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
300 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
303 SaveFPRegsMode fp_mode,
304 RememberedSetFinalAction and_then) {
306 if (emit_debug_code()) {
308 JumpIfNotInNewSpace(object, scratch, &ok);
309 stop("Remembered set pointer is in new space");
312 // Load store buffer top.
313 ExternalReference store_buffer =
314 ExternalReference::store_buffer_top(isolate());
315 li(t8, Operand(store_buffer));
316 lw(scratch, MemOperand(t8));
317 // Store pointer to buffer and increment buffer top.
318 sw(address, MemOperand(scratch));
319 Addu(scratch, scratch, kPointerSize);
320 // Write back new top of buffer.
321 sw(scratch, MemOperand(t8));
322 // Check for end of buffer, and call the overflow stub when it is reached.
324 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
325 if (and_then == kFallThroughAtEnd) {
326 Branch(&done, eq, t8, Operand(zero_reg));
328 ASSERT(and_then == kReturnAtEnd);
329 Ret(eq, t8, Operand(zero_reg));
332 StoreBufferOverflowStub store_buffer_overflow =
333 StoreBufferOverflowStub(isolate(), fp_mode);
334 CallStub(&store_buffer_overflow);
337 if (and_then == kReturnAtEnd) {
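// The fast path above, as a C++ sketch (names mirror the code above):
//   *store_buffer_top++ = address;  // Record the slot address.
//   if (uintptr_t(store_buffer_top) & kStoreBufferOverflowBit) {
//     // Buffer full: fall through to the StoreBufferOverflowStub call.
//   }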
343 // -----------------------------------------------------------------------------
344 // Allocation support.
347 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
352 ASSERT(!holder_reg.is(scratch));
353 ASSERT(!holder_reg.is(at));
354 ASSERT(!scratch.is(at));
356 // Load current lexical context from the stack frame.
357 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
358 // In debug mode, make sure the lexical context is set.
360 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
361 scratch, Operand(zero_reg));
364 // Load the native context of the current context.
366 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
367 lw(scratch, FieldMemOperand(scratch, offset));
368 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
370 // Check the context is a native context.
371 if (emit_debug_code()) {
372 push(holder_reg); // Temporarily save holder on the stack.
373 // Read the first word and compare to the native_context_map.
374 lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
375 LoadRoot(at, Heap::kNativeContextMapRootIndex);
376 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
377 holder_reg, Operand(at));
378 pop(holder_reg); // Restore holder.
381 // Check if both contexts are the same.
382 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
383 Branch(&same_contexts, eq, scratch, Operand(at));
385 // Check the context is a native context.
386 if (emit_debug_code()) {
387 push(holder_reg); // Temporarily save holder on the stack.
388 mov(holder_reg, at); // Move at to its holding place.
389 LoadRoot(at, Heap::kNullValueRootIndex);
390 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
391 holder_reg, Operand(at));
393 lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
394 LoadRoot(at, Heap::kNativeContextMapRootIndex);
395 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
396 holder_reg, Operand(at));
397 // Restoring 'at' is not needed; it is reloaded below.
398 pop(holder_reg); // Restore holder.
399 // Restore at to holder's context.
400 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
403 // Check that the security token in the calling global object is
404 // compatible with the security token in the receiving global object.
406 int token_offset = Context::kHeaderSize +
407 Context::SECURITY_TOKEN_INDEX * kPointerSize;
409 lw(scratch, FieldMemOperand(scratch, token_offset));
410 lw(at, FieldMemOperand(at, token_offset));
411 Branch(miss, ne, scratch, Operand(at));
413 bind(&same_contexts);
417 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
418 // First of all we assign the hash seed to scratch.
419 LoadRoot(scratch, Heap::kHashSeedRootIndex);
422 // Xor original key with a seed.
423 xor_(reg0, reg0, scratch);
425 // Compute the hash code from the untagged key. This must be kept in sync
426 // with ComputeIntegerHash in utils.h.
428 // hash = ~hash + (hash << 15);
429 nor(scratch, reg0, zero_reg);
431 addu(reg0, scratch, at);
433 // hash = hash ^ (hash >> 12);
435 xor_(reg0, reg0, at);
437 // hash = hash + (hash << 2);
439 addu(reg0, reg0, at);
441 // hash = hash ^ (hash >> 4);
443 xor_(reg0, reg0, at);
445 // hash = hash * 2057;
446 sll(scratch, reg0, 11);
448 addu(reg0, reg0, at);
449 addu(reg0, reg0, scratch);
451 // hash = hash ^ (hash >> 16);
453 xor_(reg0, reg0, at);
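// Equivalent C++, which must stay in sync with ComputeIntegerHash in
// utils.h; a sketch of exactly the steps emitted above:
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;  // I.e. hash + (hash << 3) + (hash << 11).
//   hash = hash ^ (hash >> 16);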
457 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
466 // elements - holds the slow-case elements of the receiver on entry.
467 // Unchanged unless 'result' is the same register.
469 // key - holds the smi key on entry.
470 // Unchanged unless 'result' is the same register.
473 // result - holds the result on exit if the load succeeded.
474 // Allowed to be the same as 'elements' or 'key'.
475 // Unchanged on bailout so 'elements' and 'key' can be used
476 // in further computation.
478 // Scratch registers:
480 // reg0 - holds the untagged key on entry and holds the hash once computed.
482 // reg1 - Used to hold the capacity mask of the dictionary.
484 // reg2 - Used for the index into the dictionary.
485 // at - Temporary (avoid MacroAssembler instructions also using 'at').
488 GetNumberHash(reg0, reg1);
490 // Compute the capacity mask.
491 lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
492 sra(reg1, reg1, kSmiTagSize);
493 Subu(reg1, reg1, Operand(1));
495 // Generate an unrolled loop that performs a few probes before giving up.
496 for (int i = 0; i < kNumberDictionaryProbes; i++) {
497 // Use reg2 for index calculations and keep the hash intact in reg0.
499 // Compute the masked index: (hash + i + i * i) & mask.
501 Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
503 and_(reg2, reg2, reg1);
505 // Scale the index by multiplying by the element size.
506 ASSERT(SeededNumberDictionary::kEntrySize == 3);
507 sll(at, reg2, 1); // 2x.
508 addu(reg2, reg2, at); // reg2 = reg2 * 3.
510 // Check if the key is identical to the name.
511 sll(at, reg2, kPointerSizeLog2);
512 addu(reg2, elements, at);
514 lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
515 if (i != kNumberDictionaryProbes - 1) {
516 Branch(&done, eq, key, Operand(at));
518 Branch(miss, ne, key, Operand(at));
523 // Check that the value is a normal property.
524 // reg2: elements + (index * kPointerSize).
525 const int kDetailsOffset =
526 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
527 lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
528 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
529 Branch(miss, ne, at, Operand(zero_reg));
531 // Get the value at the masked, scaled index and return.
532 const int kValueOffset =
533 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
534 lw(result, FieldMemOperand(reg2, kValueOffset));
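// The probe sequence above, as a C++ sketch (kEntrySize == 3 words per
// entry: key, value, details):
//   for (int i = 0; i < kNumberDictionaryProbes; i++) {
//     int index = (hash + i + i * i) & capacity_mask;
//     if (entries[index].key == key) break;  // Then check details, load value.
//   }
// The last probe branches to 'miss' on mismatch instead of falling through.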
538 // ---------------------------------------------------------------------------
539 // Instruction macros.
541 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
543 addu(rd, rs, rt.rm());
545 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
546 addiu(rd, rs, rt.imm32_);
548 // li handles the relocation.
557 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
559 subu(rd, rs, rt.rm());
561 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
562 addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
564 // li handles the relocation.
573 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
575 if (kArchVariant == kLoongson) {
579 mul(rd, rs, rt.rm());
582 // li handles the relocation.
585 if (kArchVariant == kLoongson) {
595 void MacroAssembler::Mult(Register rs, const Operand& rt) {
599 // li handles the relocation.
607 void MacroAssembler::Multu(Register rs, const Operand& rt) {
611 // li handles the relocation.
619 void MacroAssembler::Div(Register rs, const Operand& rt) {
623 // li handles the relocation.
631 void MacroAssembler::Divu(Register rs, const Operand& rt) {
635 // li handles the relocation.
643 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
645 and_(rd, rs, rt.rm());
647 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
648 andi(rd, rs, rt.imm32_);
650 // li handles the relocation.
659 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
661 or_(rd, rs, rt.rm());
663 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
664 ori(rd, rs, rt.imm32_);
666 // li handles the relocation.
675 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
677 xor_(rd, rs, rt.rm());
679 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
680 xori(rd, rs, rt.imm32_);
682 // li handles the relocation.
691 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
693 nor(rd, rs, rt.rm());
695 // li handles the relocation.
703 void MacroAssembler::Neg(Register rs, const Operand& rt) {
706 ASSERT(!at.is(rt.rm()));
708 xor_(rs, rt.rm(), at);
712 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
714 slt(rd, rs, rt.rm());
716 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
717 slti(rd, rs, rt.imm32_);
719 // li handles the relocation.
728 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
730 sltu(rd, rs, rt.rm());
732 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
733 sltiu(rd, rs, rt.imm32_);
735 // li handles the relocation.
744 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
745 if (kArchVariant == kMips32r2) {
747 rotrv(rd, rs, rt.rm());
749 rotr(rd, rs, rt.imm32_);
753 subu(at, zero_reg, rt.rm());
755 srlv(rd, rs, rt.rm());
758 if (rt.imm32_ == 0) {
761 srl(at, rs, rt.imm32_);
762 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
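// On pre-R2 cores the rotate is synthesized from two shifts and an OR,
// i.e. in C++ terms (sketch, s in [0, 31]):
//   uint32_t RotateRight(uint32_t x, uint32_t s) {
//     return (x >> s) | (x << ((32 - s) & 31));
//   }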
770 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
771 if (kArchVariant == kLoongson) {
779 //------------Pseudo-instructions-------------
781 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
783 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
787 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
789 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
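// Ulw/Usw synthesize unaligned word accesses from the lwl/lwr (swl/swr)
// pairs: each instruction of a pair transfers only the bytes of the word
// that lie inside its own aligned container, so together they cover all
// four bytes for any alignment. On little-endian MIPS the "left" half
// addresses offset + 3, as above, and the matching lwr/swr addresses the
// base offset.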
793 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
794 AllowDeferredHandleDereference smi_check;
795 if (value->IsSmi()) {
796 li(dst, Operand(value), mode);
798 ASSERT(value->IsHeapObject());
799 if (isolate()->heap()->InNewSpace(*value)) {
800 Handle<Cell> cell = isolate()->factory()->NewCell(value);
801 li(dst, Operand(cell));
802 lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
804 li(dst, Operand(value));
810 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
812 BlockTrampolinePoolScope block_trampoline_pool(this);
813 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
814 // Normal load of an immediate value which does not need Relocation Info.
815 if (is_int16(j.imm32_)) {
816 addiu(rd, zero_reg, j.imm32_);
817 } else if (!(j.imm32_ & kHiMask)) {
818 ori(rd, zero_reg, j.imm32_);
819 } else if (!(j.imm32_ & kImm16Mask)) {
820 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
822 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
823 ori(rd, rd, (j.imm32_ & kImm16Mask));
826 if (MustUseReg(j.rmode_)) {
827 RecordRelocInfo(j.rmode_, j.imm32_);
829 // Always emit the same number of instructions: this code may later be
830 // patched to load a different value, which may need 2 instructions.
831 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
832 ori(rd, rd, (j.imm32_ & kImm16Mask));
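// Immediate selection above, in C++ terms (sketch):
//   if (is_int16(imm))             addiu(rd, zero_reg, imm);  // 1 instr.
//   else if (!(imm & 0xffff0000))  ori(rd, zero_reg, imm);    // 1 instr.
//   else if (!(imm & 0x0000ffff))  lui(rd, imm >> 16);        // 1 instr.
//   else { lui(rd, imm >> 16); ori(rd, rd, imm & 0xffff); }   // 2 instrs.
// When relocation is required (or CONSTANT_SIZE is requested), the
// two-instruction lui/ori form is always emitted so the code can later be
// patched to any other 32-bit value.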
837 void MacroAssembler::MultiPush(RegList regs) {
838 int16_t num_to_push = NumberOfBitsSet(regs);
839 int16_t stack_offset = num_to_push * kPointerSize;
841 Subu(sp, sp, Operand(stack_offset));
842 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
843 if ((regs & (1 << i)) != 0) {
844 stack_offset -= kPointerSize;
845 sw(ToRegister(i), MemOperand(sp, stack_offset));
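// Note the iteration order: walking the registers from the highest
// encoding down while pre-decrementing the offset leaves the
// lowest-numbered register closest to sp. For example (sketch):
//   MultiPush(a0.bit() | a1.bit());  // a0 at sp+0, a1 at sp+4.
// MultiPop below walks upwards and expects exactly this layout.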
851 void MacroAssembler::MultiPushReversed(RegList regs) {
852 int16_t num_to_push = NumberOfBitsSet(regs);
853 int16_t stack_offset = num_to_push * kPointerSize;
855 Subu(sp, sp, Operand(stack_offset));
856 for (int16_t i = 0; i < kNumRegisters; i++) {
857 if ((regs & (1 << i)) != 0) {
858 stack_offset -= kPointerSize;
859 sw(ToRegister(i), MemOperand(sp, stack_offset));
865 void MacroAssembler::MultiPop(RegList regs) {
866 int16_t stack_offset = 0;
868 for (int16_t i = 0; i < kNumRegisters; i++) {
869 if ((regs & (1 << i)) != 0) {
870 lw(ToRegister(i), MemOperand(sp, stack_offset));
871 stack_offset += kPointerSize;
874 addiu(sp, sp, stack_offset);
878 void MacroAssembler::MultiPopReversed(RegList regs) {
879 int16_t stack_offset = 0;
881 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
882 if ((regs & (1 << i)) != 0) {
883 lw(ToRegister(i), MemOperand(sp, stack_offset));
884 stack_offset += kPointerSize;
887 addiu(sp, sp, stack_offset);
891 void MacroAssembler::MultiPushFPU(RegList regs) {
892 int16_t num_to_push = NumberOfBitsSet(regs);
893 int16_t stack_offset = num_to_push * kDoubleSize;
895 Subu(sp, sp, Operand(stack_offset));
896 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
897 if ((regs & (1 << i)) != 0) {
898 stack_offset -= kDoubleSize;
899 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
905 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
906 int16_t num_to_push = NumberOfBitsSet(regs);
907 int16_t stack_offset = num_to_push * kDoubleSize;
909 Subu(sp, sp, Operand(stack_offset));
910 for (int16_t i = 0; i < kNumRegisters; i++) {
911 if ((regs & (1 << i)) != 0) {
912 stack_offset -= kDoubleSize;
913 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
919 void MacroAssembler::MultiPopFPU(RegList regs) {
920 int16_t stack_offset = 0;
922 for (int16_t i = 0; i < kNumRegisters; i++) {
923 if ((regs & (1 << i)) != 0) {
924 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
925 stack_offset += kDoubleSize;
928 addiu(sp, sp, stack_offset);
932 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
933 int16_t stack_offset = 0;
935 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
936 if ((regs & (1 << i)) != 0) {
937 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
938 stack_offset += kDoubleSize;
941 addiu(sp, sp, stack_offset);
945 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
946 RegList saved_regs = kJSCallerSaved | ra.bit();
947 MultiPush(saved_regs);
948 AllowExternalCallThatCantCauseGC scope(this);
950 // Save to a0 in case address == t0.
952 PrepareCallCFunction(2, t0);
954 li(a1, instructions * kInstrSize);
955 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
956 MultiPop(saved_regs);
960 void MacroAssembler::Ext(Register rt,
965 ASSERT(pos + size < 33);
967 if (kArchVariant == kMips32r2) {
968 ext_(rt, rs, pos, size);
970 // Move rs to rt and shift it left then right to get the
971 // desired bitfield on the right side and zeroes on the left.
972 int shift_left = 32 - (pos + size);
973 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
975 int shift_right = 32 - size;
976 if (shift_right > 0) {
977 srl(rt, rt, shift_right);
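// The pre-R2 fallback in C++ terms (sketch): shift the field up to the
// top of the word, then let a logical right shift zero-fill it back down:
//   uint32_t Ext(uint32_t x, int pos, int size) {
//     x <<= 32 - pos - size;    // Skipped when the amount is 0.
//     return x >> (32 - size);  // Likewise; a C++ shift by 32 is undefined.
//   }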
983 void MacroAssembler::Ins(Register rt,
988 ASSERT(pos + size <= 32);
991 if (kArchVariant == kMips32r2) {
992 ins_(rt, rs, pos, size);
994 ASSERT(!rt.is(t8) && !rs.is(t8));
995 Subu(at, zero_reg, Operand(1));
996 srl(at, at, 32 - size);
1000 nor(at, at, zero_reg);
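// The mask-based insert being built here, in C++ terms (sketch):
//   uint32_t Ins(uint32_t dst, uint32_t src, int pos, int size) {
//     uint32_t mask = 0xffffffffu >> (32 - size);  // Low 'size' bits set.
//     return (dst & ~(mask << pos)) | ((src & mask) << pos);
//   }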
1007 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1009 FPURegister scratch) {
1010 // Move the data from fs to t8.
1012 Cvt_d_uw(fd, t8, scratch);
1016 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1018 FPURegister scratch) {
1019 // Convert rs to a FP value in fd (and fd + 1).
1020 // We do this by converting rs minus the MSB to avoid sign conversion,
1021 // then adding 2^31 to the result (if needed).
1023 ASSERT(!fd.is(scratch));
1027 // Save rs's MSB to t9.
1031 // Move the result to fd.
1034 // Convert fd to a real FP value.
1037 Label conversion_done;
1039 // If rs's MSB was 0, it's done.
1040 // Otherwise we need to add that to the FP register.
1041 Branch(&conversion_done, eq, t9, Operand(zero_reg));
1043 // Load 2^31 into scratch as its float representation.
1045 mtc1(at, FPURegister::from_code(scratch.code() + 1));
1046 mtc1(zero_reg, scratch);
1048 add_d(fd, fd, scratch);
1050 bind(&conversion_done);
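// In C++ terms the conversion above is (sketch): convert the low 31 bits
// as a non-negative signed value, then add back the weight of the MSB:
//   double Cvt_d_uw(uint32_t x) {
//     double d = static_cast<double>(static_cast<int32_t>(x & 0x7fffffff));
//     if (x >> 31) d += 2147483648.0;  // 2^31.
//     return d;
//   }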
1054 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1056 FPURegister scratch) {
1057 Trunc_uw_d(fs, t8, scratch);
1062 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1063 if (kArchVariant == kLoongson && fd.is(fs)) {
1064 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1066 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1073 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1074 if (kArchVariant == kLoongson && fd.is(fs)) {
1075 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1077 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1084 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1085 if (kArchVariant == kLoongson && fd.is(fs)) {
1086 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1088 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1095 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1096 if (kArchVariant == kLoongson && fd.is(fs)) {
1097 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1099 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1106 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1108 FPURegister scratch) {
1109 ASSERT(!fd.is(scratch));
1112 // Load 2^31 into scratch as its float representation.
1114 mtc1(at, FPURegister::from_code(scratch.code() + 1));
1115 mtc1(zero_reg, scratch);
1116 // Test if scratch > fd.
1117 // If fd < 2^31 we can convert it normally.
1118 Label simple_convert;
1119 BranchF(&simple_convert, NULL, lt, fd, scratch);
1121 // First we subtract 2^31 from fd, then trunc it to rs
1122 // and add 2^31 to rs.
1123 sub_d(scratch, fd, scratch);
1124 trunc_w_d(scratch, scratch);
1126 Or(rs, rs, 1 << 31);
1130 // Simple conversion.
1131 bind(&simple_convert);
1132 trunc_w_d(scratch, fd);
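// In C++ terms (sketch): values below 2^31 truncate directly; larger ones
// are biased down by 2^31 first and the MSB is OR'ed back in afterwards:
//   uint32_t Trunc_uw_d(double d) {
//     if (d < 2147483648.0) return static_cast<int32_t>(d);
//     return static_cast<uint32_t>(static_cast<int32_t>(d - 2147483648.0))
//            | 0x80000000u;
//   }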
1139 void MacroAssembler::BranchF(Label* target,
1144 BranchDelaySlot bd) {
1145 BlockTrampolinePoolScope block_trampoline_pool(this);
1151 ASSERT(nan || target);
1152 // Check for unordered (NaN) cases.
1154 c(UN, D, cmp1, cmp2);
1159 // Here NaN cases were either handled by this function or are assumed to
1160 // have been handled by the caller.
1161 // Unsigned conditions are treated as their signed counterparts.
1164 c(OLT, D, cmp1, cmp2);
1168 c(ULE, D, cmp1, cmp2);
1172 c(ULT, D, cmp1, cmp2);
1176 c(OLE, D, cmp1, cmp2);
1180 c(EQ, D, cmp1, cmp2);
1184 c(UEQ, D, cmp1, cmp2);
1188 c(EQ, D, cmp1, cmp2);
1192 c(UEQ, D, cmp1, cmp2);
1200 if (bd == PROTECT) {
1206 void MacroAssembler::Move(FPURegister dst, double imm) {
1207 static const DoubleRepresentation minus_zero(-0.0);
1208 static const DoubleRepresentation zero(0.0);
1209 DoubleRepresentation value_rep(imm);
1210 // Handle special values first.
1211 bool force_load = dst.is(kDoubleRegZero);
1212 if (value_rep == zero && !force_load) {
1213 mov_d(dst, kDoubleRegZero);
1214 } else if (value_rep == minus_zero && !force_load) {
1215 neg_d(dst, kDoubleRegZero);
1218 DoubleAsTwoUInt32(imm, &lo, &hi);
1219 // Move the low part of the double into the lower of the corresponding FPU
1220 // register of the FPU register pair.
1222 li(at, Operand(lo));
1225 mtc1(zero_reg, dst);
1227 // Move the high part of the double into the higher of the corresponding FPU
1228 // register of the FPU register pair.
1230 li(at, Operand(hi));
1231 mtc1(at, dst.high());
1233 mtc1(zero_reg, dst.high());
1239 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1240 if (kArchVariant == kLoongson) {
1242 Branch(&done, ne, rt, Operand(zero_reg));
1251 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1252 if (kArchVariant == kLoongson) {
1254 Branch(&done, eq, rt, Operand(zero_reg));
1263 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1264 if (kArchVariant == kLoongson) {
1265 // Tests an FP condition code and then conditionally moves rs to rd.
1266 // We do not currently use any FPU cc bit other than bit 0.
1268 ASSERT(!(rs.is(t8) || rd.is(t8)));
1270 Register scratch = t8;
1271 // For testing purposes we need to fetch the content of the FCSR register
1272 // and then test its cc (floating point condition code) bit (for cc = 0,
1273 // it is the 24th bit of the FCSR).
1274 cfc1(scratch, FCSR);
1275 // For the MIPS I, II and III architectures, the contents of scratch are
1276 // UNPREDICTABLE for the instruction immediately following CFC1.
1278 srl(scratch, scratch, 16);
1279 andi(scratch, scratch, 0x0080);
1280 Branch(&done, eq, scratch, Operand(zero_reg));
1289 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1290 if (kArchVariant == kLoongson) {
1291 // Tests an FP condition code and then conditionally moves rs to rd.
1292 // We do not currently use any FPU cc bit other than bit 0.
1294 ASSERT(!(rs.is(t8) || rd.is(t8)));
1296 Register scratch = t8;
1297 // For testing purposes we need to fetch the content of the FCSR register
1298 // and then test its cc (floating point condition code) bit (for cc = 0,
1299 // it is the 24th bit of the FCSR).
1300 cfc1(scratch, FCSR);
1301 // For the MIPS I, II and III architectures, the contents of scratch are
1302 // UNPREDICTABLE for the instruction immediately following CFC1.
1304 srl(scratch, scratch, 16);
1305 andi(scratch, scratch, 0x0080);
1306 Branch(&done, ne, scratch, Operand(zero_reg));
1315 void MacroAssembler::Clz(Register rd, Register rs) {
1316 if (kArchVariant == kLoongson) {
1317 ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1319 Register scratch = t9;
1325 and_(scratch, at, mask);
1326 Branch(&end, ne, scratch, Operand(zero_reg));
1328 Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1337 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1339 DoubleRegister double_input,
1341 DoubleRegister double_scratch,
1342 Register except_flag,
1343 CheckForInexactConversion check_inexact) {
1344 ASSERT(!result.is(scratch));
1345 ASSERT(!double_input.is(double_scratch));
1346 ASSERT(!except_flag.is(scratch));
1350 // Clear the except flag (0 = no exception)
1351 mov(except_flag, zero_reg);
1353 // Test for values that can be exactly represented as a signed 32-bit integer.
1354 cvt_w_d(double_scratch, double_input);
1355 mfc1(result, double_scratch);
1356 cvt_d_w(double_scratch, double_scratch);
1357 BranchF(&done, NULL, eq, double_input, double_scratch);
1359 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1361 if (check_inexact == kDontCheckForInexactConversion) {
1362 // Ignore inexact exceptions.
1363 except_mask &= ~kFCSRInexactFlagMask;
1367 cfc1(scratch, FCSR);
1368 // Disable FPU exceptions.
1369 ctc1(zero_reg, FCSR);
1371 // Do operation based on rounding mode.
1372 switch (rounding_mode) {
1373 case kRoundToNearest:
1374 Round_w_d(double_scratch, double_input);
1377 Trunc_w_d(double_scratch, double_input);
1379 case kRoundToPlusInf:
1380 Ceil_w_d(double_scratch, double_input);
1382 case kRoundToMinusInf:
1383 Floor_w_d(double_scratch, double_input);
1385 } // End of switch-statement.
1388 cfc1(except_flag, FCSR);
1390 ctc1(scratch, FCSR);
1391 // Move the converted value into the result register.
1392 mfc1(result, double_scratch);
1394 // Check for fpu exceptions.
1395 And(except_flag, except_flag, Operand(except_mask));
1401 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1402 DoubleRegister double_input,
1404 DoubleRegister single_scratch = kLithiumScratchDouble.low();
1405 Register scratch = at;
1406 Register scratch2 = t9;
1408 // Clear cumulative exception flags and save the FCSR.
1409 cfc1(scratch2, FCSR);
1410 ctc1(zero_reg, FCSR);
1411 // Try a conversion to a signed integer.
1412 trunc_w_d(single_scratch, double_input);
1413 mfc1(result, single_scratch);
1414 // Retrieve and restore the FCSR.
1415 cfc1(scratch, FCSR);
1416 ctc1(scratch2, FCSR);
1417 // Check for overflow and NaNs.
1420 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1421 // If we had no exceptions we are done.
1422 Branch(done, eq, scratch, Operand(zero_reg));
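// The FCSR pattern above, summarized: save the caller's FCSR and clear it
// so stale cumulative flags cannot masquerade as a failure, truncate, then
// read back only the flags this conversion raised before restoring FCSR.
// Overflow, underflow or invalid-operation means the input was NaN or out
// of int32 range, so the caller falls through to the stub path.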
1426 void MacroAssembler::TruncateDoubleToI(Register result,
1427 DoubleRegister double_input) {
1430 TryInlineTruncateDoubleToI(result, double_input, &done);
1432 // If we fell through, the inline version didn't succeed - call the stub instead.
1434 Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
1435 sdc1(double_input, MemOperand(sp, 0));
1437 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
1440 Addu(sp, sp, Operand(kDoubleSize));
1447 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1449 DoubleRegister double_scratch = f12;
1450 ASSERT(!result.is(object));
1452 ldc1(double_scratch,
1453 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1454 TryInlineTruncateDoubleToI(result, double_scratch, &done);
1456 // If we fell through, the inline version didn't succeed - call the stub instead.
1458 DoubleToIStub stub(isolate(),
1461 HeapNumber::kValueOffset - kHeapObjectTag,
1471 void MacroAssembler::TruncateNumberToI(Register object,
1473 Register heap_number_map,
1475 Label* not_number) {
1477 ASSERT(!result.is(object));
1479 UntagAndJumpIfSmi(result, object, &done);
1480 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1481 TruncateHeapNumberToI(result, object);
1487 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1489 int num_least_bits) {
1490 Ext(dst, src, kSmiTagSize, num_least_bits);
1494 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1496 int num_least_bits) {
1497 And(dst, src, Operand((1 << num_least_bits) - 1));
1501 // Emulated conditional branches do not emit a nop in the branch delay slot.
1503 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1504 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
1505 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1506 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
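// MIPS has only beq/bne as native compare-and-branch instructions, so the
// helpers below synthesize the remaining conditions with slt/sltu into the
// scratch register, e.g. (sketch):
//   greater:    slt(scratch, r2, rs);  bne(scratch, zero_reg, offset);
//   less_equal: slt(scratch, r2, rs);  beq(scratch, zero_reg, offset);
// with sltu used in exactly the same way for the unsigned conditions.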
1509 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1510 BranchShort(offset, bdslot);
1514 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1516 BranchDelaySlot bdslot) {
1517 BranchShort(offset, cond, rs, rt, bdslot);
1521 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1522 if (L->is_bound()) {
1524 BranchShort(L, bdslot);
1529 if (is_trampoline_emitted()) {
1532 BranchShort(L, bdslot);
1538 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1540 BranchDelaySlot bdslot) {
1541 if (L->is_bound()) {
1543 BranchShort(L, cond, rs, rt, bdslot);
1545 if (cond != cc_always) {
1547 Condition neg_cond = NegateCondition(cond);
1548 BranchShort(&skip, neg_cond, rs, rt);
1556 if (is_trampoline_emitted()) {
1557 if (cond != cc_always) {
1559 Condition neg_cond = NegateCondition(cond);
1560 BranchShort(&skip, neg_cond, rs, rt);
1567 BranchShort(L, cond, rs, rt, bdslot);
1573 void MacroAssembler::Branch(Label* L,
1576 Heap::RootListIndex index,
1577 BranchDelaySlot bdslot) {
1578 LoadRoot(at, index);
1579 Branch(L, cond, rs, Operand(at), bdslot);
1583 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1586 // Emit a nop in the branch delay slot if required.
1587 if (bdslot == PROTECT)
1592 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1594 BranchDelaySlot bdslot) {
1595 BRANCH_ARGS_CHECK(cond, rs, rt);
1596 ASSERT(!rs.is(zero_reg));
1597 Register r2 = no_reg;
1598 Register scratch = at;
1601 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or rt.
1603 BlockTrampolinePoolScope block_trampoline_pool(this);
1610 beq(rs, r2, offset);
1613 bne(rs, r2, offset);
1615 // Signed comparison.
1617 if (r2.is(zero_reg)) {
1620 slt(scratch, r2, rs);
1621 bne(scratch, zero_reg, offset);
1625 if (r2.is(zero_reg)) {
1628 slt(scratch, rs, r2);
1629 beq(scratch, zero_reg, offset);
1633 if (r2.is(zero_reg)) {
1636 slt(scratch, rs, r2);
1637 bne(scratch, zero_reg, offset);
1641 if (r2.is(zero_reg)) {
1644 slt(scratch, r2, rs);
1645 beq(scratch, zero_reg, offset);
1648 // Unsigned comparison.
1650 if (r2.is(zero_reg)) {
1653 sltu(scratch, r2, rs);
1654 bne(scratch, zero_reg, offset);
1657 case Ugreater_equal:
1658 if (r2.is(zero_reg)) {
1661 sltu(scratch, rs, r2);
1662 beq(scratch, zero_reg, offset);
1666 if (r2.is(zero_reg)) {
1667 // No code needs to be emitted.
1670 sltu(scratch, rs, r2);
1671 bne(scratch, zero_reg, offset);
1675 if (r2.is(zero_reg)) {
1678 sltu(scratch, r2, rs);
1679 beq(scratch, zero_reg, offset);
1686 // Be careful to always use shifted_branch_offset only just before the
1687 // branch instruction, as the location will be remembered for patching the
1689 BlockTrampolinePoolScope block_trampoline_pool(this);
1695 // We don't want any other register but scratch clobbered.
1696 ASSERT(!scratch.is(rs));
1699 beq(rs, r2, offset);
1702 // We don't want any other register but scratch clobbered.
1703 ASSERT(!scratch.is(rs));
1706 bne(rs, r2, offset);
1708 // Signed comparison.
1710 if (rt.imm32_ == 0) {
1715 slt(scratch, r2, rs);
1716 bne(scratch, zero_reg, offset);
1720 if (rt.imm32_ == 0) {
1722 } else if (is_int16(rt.imm32_)) {
1723 slti(scratch, rs, rt.imm32_);
1724 beq(scratch, zero_reg, offset);
1728 slt(scratch, rs, r2);
1729 beq(scratch, zero_reg, offset);
1733 if (rt.imm32_ == 0) {
1735 } else if (is_int16(rt.imm32_)) {
1736 slti(scratch, rs, rt.imm32_);
1737 bne(scratch, zero_reg, offset);
1741 slt(scratch, rs, r2);
1742 bne(scratch, zero_reg, offset);
1746 if (rt.imm32_ == 0) {
1751 slt(scratch, r2, rs);
1752 beq(scratch, zero_reg, offset);
1755 // Unsigned comparison.
1757 if (rt.imm32_ == 0) {
1762 sltu(scratch, r2, rs);
1763 bne(scratch, zero_reg, offset);
1766 case Ugreater_equal:
1767 if (rt.imm32_ == 0) {
1769 } else if (is_int16(rt.imm32_)) {
1770 sltiu(scratch, rs, rt.imm32_);
1771 beq(scratch, zero_reg, offset);
1775 sltu(scratch, rs, r2);
1776 beq(scratch, zero_reg, offset);
1780 if (rt.imm32_ == 0) {
1781 // No code needs to be emitted.
1783 } else if (is_int16(rt.imm32_)) {
1784 sltiu(scratch, rs, rt.imm32_);
1785 bne(scratch, zero_reg, offset);
1789 sltu(scratch, rs, r2);
1790 bne(scratch, zero_reg, offset);
1794 if (rt.imm32_ == 0) {
1799 sltu(scratch, r2, rs);
1800 beq(scratch, zero_reg, offset);
1807 // Emit a nop in the branch delay slot if required.
1808 if (bdslot == PROTECT)
1813 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
1814 // We pass shifted_branch_offset directly as the branch argument so that it
1815 // is computed immediately before the branch instruction is generated.
1817 b(shifted_branch_offset(L, false));
1819 // Emit a nop in the branch delay slot if required.
1820 if (bdslot == PROTECT)
1825 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1827 BranchDelaySlot bdslot) {
1828 BRANCH_ARGS_CHECK(cond, rs, rt);
1831 Register r2 = no_reg;
1832 Register scratch = at;
1834 BlockTrampolinePoolScope block_trampoline_pool(this);
1836 // Be careful to always use shifted_branch_offset only just before the
1837 // branch instruction, as the location will be remembered for patching the
1841 offset = shifted_branch_offset(L, false);
1845 offset = shifted_branch_offset(L, false);
1846 beq(rs, r2, offset);
1849 offset = shifted_branch_offset(L, false);
1850 bne(rs, r2, offset);
1852 // Signed comparison.
1854 if (r2.is(zero_reg)) {
1855 offset = shifted_branch_offset(L, false);
1858 slt(scratch, r2, rs);
1859 offset = shifted_branch_offset(L, false);
1860 bne(scratch, zero_reg, offset);
1864 if (r2.is(zero_reg)) {
1865 offset = shifted_branch_offset(L, false);
1868 slt(scratch, rs, r2);
1869 offset = shifted_branch_offset(L, false);
1870 beq(scratch, zero_reg, offset);
1874 if (r2.is(zero_reg)) {
1875 offset = shifted_branch_offset(L, false);
1878 slt(scratch, rs, r2);
1879 offset = shifted_branch_offset(L, false);
1880 bne(scratch, zero_reg, offset);
1884 if (r2.is(zero_reg)) {
1885 offset = shifted_branch_offset(L, false);
1888 slt(scratch, r2, rs);
1889 offset = shifted_branch_offset(L, false);
1890 beq(scratch, zero_reg, offset);
1893 // Unsigned comparison.
1895 if (r2.is(zero_reg)) {
1896 offset = shifted_branch_offset(L, false);
1899 sltu(scratch, r2, rs);
1900 offset = shifted_branch_offset(L, false);
1901 bne(scratch, zero_reg, offset);
1904 case Ugreater_equal:
1905 if (r2.is(zero_reg)) {
1906 offset = shifted_branch_offset(L, false);
1909 sltu(scratch, rs, r2);
1910 offset = shifted_branch_offset(L, false);
1911 beq(scratch, zero_reg, offset);
1915 if (r2.is(zero_reg)) {
1916 // No code needs to be emitted.
1919 sltu(scratch, rs, r2);
1920 offset = shifted_branch_offset(L, false);
1921 bne(scratch, zero_reg, offset);
1925 if (r2.is(zero_reg)) {
1926 offset = shifted_branch_offset(L, false);
1929 sltu(scratch, r2, rs);
1930 offset = shifted_branch_offset(L, false);
1931 beq(scratch, zero_reg, offset);
1938 // Be careful to always use shifted_branch_offset only just before the
1939 // branch instruction, as the location will be remembered for patching the
1941 BlockTrampolinePoolScope block_trampoline_pool(this);
1944 offset = shifted_branch_offset(L, false);
1948 ASSERT(!scratch.is(rs));
1951 offset = shifted_branch_offset(L, false);
1952 beq(rs, r2, offset);
1955 ASSERT(!scratch.is(rs));
1958 offset = shifted_branch_offset(L, false);
1959 bne(rs, r2, offset);
1961 // Signed comparison.
1963 if (rt.imm32_ == 0) {
1964 offset = shifted_branch_offset(L, false);
1967 ASSERT(!scratch.is(rs));
1970 slt(scratch, r2, rs);
1971 offset = shifted_branch_offset(L, false);
1972 bne(scratch, zero_reg, offset);
1976 if (rt.imm32_ == 0) {
1977 offset = shifted_branch_offset(L, false);
1979 } else if (is_int16(rt.imm32_)) {
1980 slti(scratch, rs, rt.imm32_);
1981 offset = shifted_branch_offset(L, false);
1982 beq(scratch, zero_reg, offset);
1984 ASSERT(!scratch.is(rs));
1987 slt(scratch, rs, r2);
1988 offset = shifted_branch_offset(L, false);
1989 beq(scratch, zero_reg, offset);
1993 if (rt.imm32_ == 0) {
1994 offset = shifted_branch_offset(L, false);
1996 } else if (is_int16(rt.imm32_)) {
1997 slti(scratch, rs, rt.imm32_);
1998 offset = shifted_branch_offset(L, false);
1999 bne(scratch, zero_reg, offset);
2001 ASSERT(!scratch.is(rs));
2004 slt(scratch, rs, r2);
2005 offset = shifted_branch_offset(L, false);
2006 bne(scratch, zero_reg, offset);
2010 if (rt.imm32_ == 0) {
2011 offset = shifted_branch_offset(L, false);
2014 ASSERT(!scratch.is(rs));
2017 slt(scratch, r2, rs);
2018 offset = shifted_branch_offset(L, false);
2019 beq(scratch, zero_reg, offset);
2022 // Unsigned comparison.
2024 if (rt.imm32_ == 0) {
2025 offset = shifted_branch_offset(L, false);
2028 ASSERT(!scratch.is(rs));
2031 sltu(scratch, r2, rs);
2032 offset = shifted_branch_offset(L, false);
2033 bne(scratch, zero_reg, offset);
2036 case Ugreater_equal:
2037 if (rt.imm32_ == 0) {
2038 offset = shifted_branch_offset(L, false);
2040 } else if (is_int16(rt.imm32_)) {
2041 sltiu(scratch, rs, rt.imm32_);
2042 offset = shifted_branch_offset(L, false);
2043 beq(scratch, zero_reg, offset);
2045 ASSERT(!scratch.is(rs));
2048 sltu(scratch, rs, r2);
2049 offset = shifted_branch_offset(L, false);
2050 beq(scratch, zero_reg, offset);
2054 if (rt.imm32_ == 0) {
2055 // No code needs to be emitted.
2057 } else if (is_int16(rt.imm32_)) {
2058 sltiu(scratch, rs, rt.imm32_);
2059 offset = shifted_branch_offset(L, false);
2060 bne(scratch, zero_reg, offset);
2062 ASSERT(!scratch.is(rs));
2065 sltu(scratch, rs, r2);
2066 offset = shifted_branch_offset(L, false);
2067 bne(scratch, zero_reg, offset);
2071 if (rt.imm32_ == 0) {
2072 offset = shifted_branch_offset(L, false);
2073 beq(rs, zero_reg, offset);
2075 ASSERT(!scratch.is(rs));
2078 sltu(scratch, r2, rs);
2079 offset = shifted_branch_offset(L, false);
2080 beq(scratch, zero_reg, offset);
2087 // Check that the offset actually fits in an int16_t.
2088 ASSERT(is_int16(offset));
2089 // Emit a nop in the branch delay slot if required.
2090 if (bdslot == PROTECT)
2095 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2096 BranchAndLinkShort(offset, bdslot);
2100 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2102 BranchDelaySlot bdslot) {
2103 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2107 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2108 if (L->is_bound()) {
2110 BranchAndLinkShort(L, bdslot);
2115 if (is_trampoline_emitted()) {
2118 BranchAndLinkShort(L, bdslot);
2124 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2126 BranchDelaySlot bdslot) {
2127 if (L->is_bound()) {
2129 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2132 Condition neg_cond = NegateCondition(cond);
2133 BranchShort(&skip, neg_cond, rs, rt);
2138 if (is_trampoline_emitted()) {
2140 Condition neg_cond = NegateCondition(cond);
2141 BranchShort(&skip, neg_cond, rs, rt);
2145 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2151 // We need to use a bgezal or bltzal, but they can't be used directly with the
2152 // slt instructions. We could use sub or add instead but we would miss overflow
2153 // cases, so we keep slt and add an intermediate third instruction.
2154 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2155 BranchDelaySlot bdslot) {
2158 // Emit a nop in the branch delay slot if required.
2159 if (bdslot == PROTECT)
2164 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2165 Register rs, const Operand& rt,
2166 BranchDelaySlot bdslot) {
2167 BRANCH_ARGS_CHECK(cond, rs, rt);
2168 Register r2 = no_reg;
2169 Register scratch = at;
2173 } else if (cond != cc_always) {
2179 BlockTrampolinePoolScope block_trampoline_pool(this);
2195 // Signed comparison.
2197 slt(scratch, r2, rs);
2198 addiu(scratch, scratch, -1);
2199 bgezal(scratch, offset);
2202 slt(scratch, rs, r2);
2203 addiu(scratch, scratch, -1);
2204 bltzal(scratch, offset);
2207 slt(scratch, rs, r2);
2208 addiu(scratch, scratch, -1);
2209 bgezal(scratch, offset);
2212 slt(scratch, r2, rs);
2213 addiu(scratch, scratch, -1);
2214 bltzal(scratch, offset);
2217 // Unsigned comparison.
2219 sltu(scratch, r2, rs);
2220 addiu(scratch, scratch, -1);
2221 bgezal(scratch, offset);
2223 case Ugreater_equal:
2224 sltu(scratch, rs, r2);
2225 addiu(scratch, scratch, -1);
2226 bltzal(scratch, offset);
2229 sltu(scratch, rs, r2);
2230 addiu(scratch, scratch, -1);
2231 bgezal(scratch, offset);
2234 sltu(scratch, r2, rs);
2235 addiu(scratch, scratch, -1);
2236 bltzal(scratch, offset);
2243 // Emit a nop in the branch delay slot if required.
2244 if (bdslot == PROTECT)
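// Why the slt/addiu/b*zal triple works: slt leaves 0 or 1 in scratch, and
// addiu(scratch, scratch, -1) maps that to -1 or 0, so bgezal (branch on
// >= 0 and link) fires exactly when slt produced 1 and bltzal fires when
// it produced 0; each condition picks the slt operand order so that the
// linking branch is taken exactly when the requested comparison holds.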
2249 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2250 bal(shifted_branch_offset(L, false));
2252 // Emit a nop in the branch delay slot if required.
2253 if (bdslot == PROTECT)
2258 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2260 BranchDelaySlot bdslot) {
2261 BRANCH_ARGS_CHECK(cond, rs, rt);
2264 Register r2 = no_reg;
2265 Register scratch = at;
2268 } else if (cond != cc_always) {
2274 BlockTrampolinePoolScope block_trampoline_pool(this);
2277 offset = shifted_branch_offset(L, false);
2283 offset = shifted_branch_offset(L, false);
2289 offset = shifted_branch_offset(L, false);
2293 // Signed comparison.
2295 slt(scratch, r2, rs);
2296 addiu(scratch, scratch, -1);
2297 offset = shifted_branch_offset(L, false);
2298 bgezal(scratch, offset);
2301 slt(scratch, rs, r2);
2302 addiu(scratch, scratch, -1);
2303 offset = shifted_branch_offset(L, false);
2304 bltzal(scratch, offset);
2307 slt(scratch, rs, r2);
2308 addiu(scratch, scratch, -1);
2309 offset = shifted_branch_offset(L, false);
2310 bgezal(scratch, offset);
2313 slt(scratch, r2, rs);
2314 addiu(scratch, scratch, -1);
2315 offset = shifted_branch_offset(L, false);
2316 bltzal(scratch, offset);
2319 // Unsigned comparison.
2321 sltu(scratch, r2, rs);
2322 addiu(scratch, scratch, -1);
2323 offset = shifted_branch_offset(L, false);
2324 bgezal(scratch, offset);
2326 case Ugreater_equal:
2327 sltu(scratch, rs, r2);
2328 addiu(scratch, scratch, -1);
2329 offset = shifted_branch_offset(L, false);
2330 bltzal(scratch, offset);
2333 sltu(scratch, rs, r2);
2334 addiu(scratch, scratch, -1);
2335 offset = shifted_branch_offset(L, false);
2336 bgezal(scratch, offset);
2339 sltu(scratch, r2, rs);
2340 addiu(scratch, scratch, -1);
2341 offset = shifted_branch_offset(L, false);
2342 bltzal(scratch, offset);
2349 // Check that the offset actually fits in an int16_t.
2350 ASSERT(is_int16(offset));
2352 // Emit a nop in the branch delay slot if required.
2353 if (bdslot == PROTECT)
2358 void MacroAssembler::Jump(Register target,
2362 BranchDelaySlot bd) {
2363 BlockTrampolinePoolScope block_trampoline_pool(this);
2364 if (cond == cc_always) {
2367 BRANCH_ARGS_CHECK(cond, rs, rt);
2368 Branch(2, NegateCondition(cond), rs, rt);
2371 // Emit a nop in the branch delay slot if required.
2377 void MacroAssembler::Jump(intptr_t target,
2378 RelocInfo::Mode rmode,
2382 BranchDelaySlot bd) {
2384 if (cond != cc_always) {
2385 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2387 // The first instruction of 'li' may be placed in the delay slot.
2388 // This is not an issue; t9 is expected to be clobbered anyway.
2389 li(t9, Operand(target, rmode));
2390 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2395 void MacroAssembler::Jump(Address target,
2396 RelocInfo::Mode rmode,
2400 BranchDelaySlot bd) {
2401 ASSERT(!RelocInfo::IsCodeTarget(rmode));
2402 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2406 void MacroAssembler::Jump(Handle<Code> code,
2407 RelocInfo::Mode rmode,
2411 BranchDelaySlot bd) {
2412 ASSERT(RelocInfo::IsCodeTarget(rmode));
2413 AllowDeferredHandleDereference embedding_raw_address;
2414 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2418 int MacroAssembler::CallSize(Register target,
2422 BranchDelaySlot bd) {
2425 if (cond == cc_always) {
2434 return size * kInstrSize;
2438 // Note: To call gcc-compiled C code on mips, you must call through t9.
2439 void MacroAssembler::Call(Register target,
2443 BranchDelaySlot bd) {
2444 BlockTrampolinePoolScope block_trampoline_pool(this);
2447 if (cond == cc_always) {
2450 BRANCH_ARGS_CHECK(cond, rs, rt);
2451 Branch(2, NegateCondition(cond), rs, rt);
2454 // Emit a nop in the branch delay slot if required.
2458 ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2459 SizeOfCodeGeneratedSince(&start));
2463 int MacroAssembler::CallSize(Address target,
2464 RelocInfo::Mode rmode,
2468 BranchDelaySlot bd) {
2469 int size = CallSize(t9, cond, rs, rt, bd);
2470 return size + 2 * kInstrSize;
2474 void MacroAssembler::Call(Address target,
2475 RelocInfo::Mode rmode,
2479 BranchDelaySlot bd) {
2480 BlockTrampolinePoolScope block_trampoline_pool(this);
2483 int32_t target_int = reinterpret_cast<int32_t>(target);
2484 // Must record previous source positions before the
2485 // li() generates a new code target.
2486 positions_recorder()->WriteRecordedPositions();
2487 li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
2488 Call(t9, cond, rs, rt, bd);
2489 ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2490 SizeOfCodeGeneratedSince(&start));
2494 int MacroAssembler::CallSize(Handle<Code> code,
2495 RelocInfo::Mode rmode,
2496 TypeFeedbackId ast_id,
2500 BranchDelaySlot bd) {
2501 AllowDeferredHandleDereference using_raw_address;
2502 return CallSize(reinterpret_cast<Address>(code.location()),
2503 rmode, cond, rs, rt, bd);
2507 void MacroAssembler::Call(Handle<Code> code,
2508 RelocInfo::Mode rmode,
2509 TypeFeedbackId ast_id,
2513 BranchDelaySlot bd) {
2514 BlockTrampolinePoolScope block_trampoline_pool(this);
2517 ASSERT(RelocInfo::IsCodeTarget(rmode));
2518 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
2519 SetRecordedAstId(ast_id);
2520 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2522 AllowDeferredHandleDereference embedding_raw_address;
2523 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2524 ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2525 SizeOfCodeGeneratedSince(&start));
2529 void MacroAssembler::Ret(Condition cond,
2532 BranchDelaySlot bd) {
2533 Jump(ra, cond, rs, rt, bd);
2537 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2538 BlockTrampolinePoolScope block_trampoline_pool(this);
2541 imm28 = jump_address(L);
2542 imm28 &= kImm28Mask;
2543 { BlockGrowBufferScope block_buf_growth(this);
2544 // Buffer growth (and relocation) must be blocked for internal references
2545 // until associated instructions are emitted and available to be patched.
2546 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2549 // Emit a nop in the branch delay slot if required.
2550 if (bdslot == PROTECT)
2555 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2556 BlockTrampolinePoolScope block_trampoline_pool(this);
2559 imm32 = jump_address(L);
2560 { BlockGrowBufferScope block_buf_growth(this);
2561 // Buffer growth (and relocation) must be blocked for internal references
2562 // until associated instructions are emitted and available to be patched.
2563 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2564 lui(at, (imm32 & kHiMask) >> kLuiShift);
2565 ori(at, at, (imm32 & kImm16Mask));
2569 // Emit a nop in the branch delay slot if required.
2570 if (bdslot == PROTECT)
2575 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2576 BlockTrampolinePoolScope block_trampoline_pool(this);
2579 imm32 = jump_address(L);
2580 { BlockGrowBufferScope block_buf_growth(this);
2581 // Buffer growth (and relocation) must be blocked for internal references
2582 // until associated instructions are emitted and available to be patched.
2583 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2584 lui(at, (imm32 & kHiMask) >> kLuiShift);
2585 ori(at, at, (imm32 & kImm16Mask));
2589 // Emit a nop in the branch delay slot if required.
2590 if (bdslot == PROTECT)
2595 void MacroAssembler::DropAndRet(int drop) {
2596 Ret(USE_DELAY_SLOT);
2597 addiu(sp, sp, drop * kPointerSize);
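// The addiu above sits in the return's branch delay slot (USE_DELAY_SLOT),
// so the stack adjustment executes "for free" while the jump to ra
// completes.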
2600 void MacroAssembler::DropAndRet(int drop,
2603 const Operand& r2) {
2604 // Both Drop and Ret need to be conditional.
2606 if (cond != cc_always) {
2607 Branch(&skip, NegateCondition(cond), r1, r2);
2613 if (cond != cc_always) {
2619 void MacroAssembler::Drop(int count,
2622 const Operand& op) {
2630 Branch(&skip, NegateCondition(cond), reg, op);
2633 addiu(sp, sp, count * kPointerSize);
2642 void MacroAssembler::Swap(Register reg1,
2645 if (scratch.is(no_reg)) {
2646 Xor(reg1, reg1, Operand(reg2));
2647 Xor(reg2, reg2, Operand(reg1));
2648 Xor(reg1, reg1, Operand(reg2));
2657 void MacroAssembler::Call(Label* target) {
2658 BranchAndLink(target);
2662 void MacroAssembler::Push(Handle<Object> handle) {
2663 li(at, Operand(handle));
2668 void MacroAssembler::DebugBreak() {
2669 PrepareCEntryArgs(0);
2670 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
2671 CEntryStub ces(isolate(), 1);
2672 ASSERT(AllowThisStubCall(&ces));
2673 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
2677 // ---------------------------------------------------------------------------
2678 // Exception handling.
2680 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2681 int handler_index) {
2682 // Adjust this code if not the case.
2683 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2684 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2685 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2686 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2687 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2688 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2690 // For the JSEntry handler, we must preserve a0-a3 and s0.
2691 // t1-t3 are available. We will build up the handler from the bottom by
2692 // pushing on the stack.
2693 // Set up the code object (t1) and the state (t2) for pushing.
2695 StackHandler::IndexField::encode(handler_index) |
2696 StackHandler::KindField::encode(kind);
2697 li(t1, Operand(CodeObject()), CONSTANT_SIZE);
2698 li(t2, Operand(state));
2700 // Push the frame pointer, context, state, and code object.
2701 if (kind == StackHandler::JS_ENTRY) {
2702 ASSERT_EQ(Smi::FromInt(0), 0);
2703 // The second zero_reg indicates no context.
2704 // The first zero_reg is the NULL frame pointer.
2705 // The operands are reversed to match the order of MultiPush/Pop.
2706 Push(zero_reg, zero_reg, t2, t1);
2708 MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
2711 // Link the current handler as the next handler.
2712 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2713 lw(t1, MemOperand(t2));
2715 // Set this new handler as the current one.
2716 sw(sp, MemOperand(t2));
2720 void MacroAssembler::PopTryHandler() {
2721 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2723 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2724 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2725 sw(a1, MemOperand(at));
void MacroAssembler::JumpToHandlerEntry() {
  // Compute the handler entry address and jump to it. The handler table is
  // a fixed array of (smi-tagged) code offsets.
  // v0 = exception, a1 = code object, a2 = state.
  lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));  // Handler table.
  Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  srl(a2, a2, StackHandler::kKindWidth);  // Handler index.
  sll(a2, a2, kPointerSizeLog2);
  Addu(a2, a3, a2);
  lw(a2, MemOperand(a2));  // Smi-tagged offset.
  Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
  sra(t9, a2, kSmiTagSize);
  Addu(t9, t9, a1);
  Jump(t9);  // Jump.
}
2746 void MacroAssembler::Throw(Register value) {
2747 // Adjust this code if not the case.
2748 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2749 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2750 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2751 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2752 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2753 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
  // The exception is expected in v0.
  Move(v0, value);

  // Drop the stack pointer to the top of the top handler.
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
                                   isolate())));
  lw(sp, MemOperand(a3));

  // Restore the next handler.
  pop(a2);
  sw(a2, MemOperand(a3));
  // Get the code object (a1) and state (a2). Restore the context and frame
  // pointer.
  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());

  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
  // or cp.
  Label done;
  Branch(&done, eq, cp, Operand(zero_reg));
  sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  bind(&done);

  JumpToHandlerEntry();
}
2783 void MacroAssembler::ThrowUncatchable(Register value) {
2784 // Adjust this code if not the case.
2785 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2786 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2787 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2788 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2789 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2790 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
  // The exception is expected in v0.
  if (!value.is(v0)) {
    mov(v0, value);
  }
2796 // Drop the stack pointer to the top of the top stack handler.
2797 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2798 lw(sp, MemOperand(a3));
  // Unwind the handlers until the ENTRY handler is found.
  Label fetch_next, check_kind;
  jmp(&check_kind);
  bind(&fetch_next);
  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));

  bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
  And(a2, a2, Operand(StackHandler::KindField::kMask));
  Branch(&fetch_next, ne, a2, Operand(zero_reg));
  // Set the top handler address to next handler past the top ENTRY handler.
  pop(a2);
  sw(a2, MemOperand(a3));

  // Get the code object (a1) and state (a2). Clear the context and frame
  // pointer (0 was saved in the handler).
  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());

  JumpToHandlerEntry();
}
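// New-space allocation is a bump-pointer scheme: the current allocation top
// and the space limit live in two adjacent words of isolate data (asserted
// below), so allocating means advancing top by the object size and bailing
// out to gc_required if it would move past the limit.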
2824 void MacroAssembler::Allocate(int object_size,
2829 AllocationFlags flags) {
2830 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      li(result, 0x7091);
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
    }
    jmp(gc_required);
    return;
  }
2842 ASSERT(!result.is(scratch1));
2843 ASSERT(!result.is(scratch2));
2844 ASSERT(!scratch1.is(scratch2));
2845 ASSERT(!scratch1.is(t9));
2846 ASSERT(!scratch2.is(t9));
2847 ASSERT(!result.is(t9));
  // Make object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
2855 // Check relative positions of allocation top and limit addresses.
2856 // ARM adds additional checks to make sure the ldm instruction can be
2857 // used. On MIPS we don't have ldm so we don't need additional checks either.
2858 ExternalReference allocation_top =
2859 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2860 ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  intptr_t top =
      reinterpret_cast<intptr_t>(allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);
2869 // Set up allocation top address and object size registers.
2870 Register topaddr = scratch1;
2871 li(topaddr, Operand(allocation_top));
2873 // This code stores a temporary value in t9.
2874 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2875 // Load allocation top into result and allocation limit into t9.
2876 lw(result, MemOperand(topaddr));
2877 lw(t9, MemOperand(topaddr, kPointerSize));
2879 if (emit_debug_code()) {
2880 // Assert that result actually contains top on entry. t9 is used
2881 // immediately below so this use of t9 does not cause difference with
2882 // respect to register content between debug and release mode.
2883 lw(t9, MemOperand(topaddr));
2884 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2886 // Load allocation limit into t9. Result already contains allocation top.
2887 lw(t9, MemOperand(topaddr, limit - top));
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    // Align the next allocation. Storing the filler map without checking top
    // is safe in new-space because the limit of the heap is aligned there.
    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    And(scratch2, result, Operand(kDoubleAlignmentMask));
    Label aligned;
    Branch(&aligned, eq, scratch2, Operand(zero_reg));
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      Branch(gc_required, Ugreater_equal, result, Operand(t9));
    }
    li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    sw(scratch2, MemOperand(result));
    Addu(result, result, Operand(kDoubleSize / 2));
    bind(&aligned);
  }
2907 // Calculate new top and bail out if new space is exhausted. Use result
2908 // to calculate the new top.
2909 Addu(scratch2, result, Operand(object_size));
2910 Branch(gc_required, Ugreater, scratch2, Operand(t9));
2911 sw(scratch2, MemOperand(topaddr));
  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    Addu(result, result, Operand(kHeapObjectTag));
  }
}
2920 void MacroAssembler::Allocate(Register object_size,
2925 AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      li(result, 0x7091);
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
    }
    jmp(gc_required);
    return;
  }
2937 ASSERT(!result.is(scratch1));
2938 ASSERT(!result.is(scratch2));
2939 ASSERT(!scratch1.is(scratch2));
2940 ASSERT(!object_size.is(t9));
2941 ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
2943 // Check relative positions of allocation top and limit addresses.
2944 // ARM adds additional checks to make sure the ldm instruction can be
2945 // used. On MIPS we don't have ldm so we don't need additional checks either.
2946 ExternalReference allocation_top =
2947 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2948 ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  intptr_t top =
      reinterpret_cast<intptr_t>(allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);
2956 // Set up allocation top address and object size registers.
2957 Register topaddr = scratch1;
2958 li(topaddr, Operand(allocation_top));
2960 // This code stores a temporary value in t9.
2961 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2962 // Load allocation top into result and allocation limit into t9.
2963 lw(result, MemOperand(topaddr));
2964 lw(t9, MemOperand(topaddr, kPointerSize));
2966 if (emit_debug_code()) {
2967 // Assert that result actually contains top on entry. t9 is used
2968 // immediately below so this use of t9 does not cause difference with
2969 // respect to register content between debug and release mode.
2970 lw(t9, MemOperand(topaddr));
2971 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2973 // Load allocation limit into t9. Result already contains allocation top.
2974 lw(t9, MemOperand(topaddr, limit - top));
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    // Align the next allocation. Storing the filler map without checking top
    // is safe in new-space because the limit of the heap is aligned there.
    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    And(scratch2, result, Operand(kDoubleAlignmentMask));
    Label aligned;
    Branch(&aligned, eq, scratch2, Operand(zero_reg));
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      Branch(gc_required, Ugreater_equal, result, Operand(t9));
    }
    li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    sw(scratch2, MemOperand(result));
    Addu(result, result, Operand(kDoubleSize / 2));
    bind(&aligned);
  }
2994 // Calculate new top and bail out if new space is exhausted. Use result
2995 // to calculate the new top. Object size may be in words so a shift is
2996 // required to get the number of bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    sll(scratch2, object_size, kPointerSizeLog2);
    Addu(scratch2, result, scratch2);
  } else {
    Addu(scratch2, result, Operand(object_size));
  }
3003 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3005 // Update allocation top. result temporarily holds the new top.
3006 if (emit_debug_code()) {
3007 And(t9, scratch2, Operand(kObjectAlignmentMask));
3008 Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3010 sw(scratch2, MemOperand(topaddr));
  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    Addu(result, result, Operand(kHeapObjectTag));
  }
}
3019 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3021 ExternalReference new_space_allocation_top =
3022 ExternalReference::new_space_allocation_top_address(isolate());
3024 // Make sure the object has no tag before resetting top.
3025 And(object, object, Operand(~kHeapObjectTagMask));
3027 // Check that the object un-allocated is below the current top.
3028 li(scratch, Operand(new_space_allocation_top));
3029 lw(scratch, MemOperand(scratch));
3030 Check(less, kUndoAllocationOfNonAllocatedMemory,
3031 object, Operand(scratch));
3033 // Write the address of the object to un-allocate as the current top.
3034 li(scratch, Operand(new_space_allocation_top));
3035 sw(object, MemOperand(scratch));
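// AllocateTwoByteString below computes the size in bytes as
// SeqTwoByteString::kHeaderSize + 2 * length, rounded up to the object
// alignment by adding kObjectAlignmentMask and masking off the low bits.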
3039 void MacroAssembler::AllocateTwoByteString(Register result,
3044 Label* gc_required) {
3045 // Calculate the number of bytes needed for the characters in the string while
3046 // observing object alignment.
3047 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3048 sll(scratch1, length, 1); // Length in bytes, not chars.
3049 addiu(scratch1, scratch1,
3050 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3051 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
  // Allocate two-byte string in new space.
  Allocate(scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kStringMapRootIndex,
                      scratch1,
                      scratch2);
}
3070 void MacroAssembler::AllocateAsciiString(Register result,
3075 Label* gc_required) {
3076 // Calculate the number of bytes needed for the characters in the string
3077 // while observing object alignment.
3078 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3079 ASSERT(kCharSize == 1);
3080 addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3081 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
  // Allocate ASCII string in new space.
  Allocate(scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}
3100 void MacroAssembler::AllocateTwoByteConsString(Register result,
3104 Label* gc_required) {
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kConsStringMapRootIndex,
                      scratch1,
                      scratch2);
}
3115 void MacroAssembler::AllocateAsciiConsString(Register result,
3119 Label* gc_required) {
3120 Label allocate_new_space, install_map;
3121 AllocationFlags flags = TAG_OBJECT;
3123 ExternalReference high_promotion_mode = ExternalReference::
3124 new_space_high_promotion_mode_active_address(isolate());
3125 li(scratch1, Operand(high_promotion_mode));
3126 lw(scratch1, MemOperand(scratch1, 0));
3127 Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg));
  Allocate(ConsString::kSize,
           result,
           scratch1,
           scratch2,
           gc_required,
           static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
  jmp(&install_map);

  bind(&allocate_new_space);
  Allocate(ConsString::kSize,
           result,
           scratch1,
           scratch2,
           gc_required,
           flags);

  bind(&install_map);

  InitializeNewString(result,
                      length,
                      Heap::kConsAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}
3156 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3160 Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kSlicedStringMapRootIndex,
                      scratch1,
                      scratch2);
}
3172 void MacroAssembler::AllocateAsciiSlicedString(Register result,
3176 Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kSlicedAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}
void MacroAssembler::JumpIfNotUniqueName(Register reg,
                                         Label* not_unique_name) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  Branch(&succeed, eq, at, Operand(zero_reg));
  Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));

  bind(&succeed);
}
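// A unique name is either an internalized string or a symbol: the mask check
// above accepts internalized strings directly (both kInternalizedTag and
// kStringTag are zero), and anything else must have instance type SYMBOL_TYPE
// to pass.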
3200 // Allocates a heap number or jumps to the label if the young space is full and
3201 // a scavenge is needed.
3202 void MacroAssembler::AllocateHeapNumber(Register result,
3205 Register heap_number_map,
3207 TaggingMode tagging_mode) {
3208 // Allocate an object in the heap for the heap number and tag it as a heap
3210 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3211 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3213 // Store heap number map in the allocated object.
3214 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  if (tagging_mode == TAG_RESULT) {
    sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
  } else {
    sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
  }
}
3223 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3227 Label* gc_required) {
3228 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3229 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3230 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3234 // Copies a fixed number of fields of heap objects from src to dst.
3235 void MacroAssembler::CopyFields(Register dst,
3239 ASSERT((temps & dst.bit()) == 0);
3240 ASSERT((temps & src.bit()) == 0);
3241 // Primitive implementation using only one temporary register.
3243 Register tmp = no_reg;
3244 // Find a temp register in temps list.
  for (int i = 0; i < kNumRegisters; i++) {
    if ((temps & (1 << i)) != 0) {
      tmp.code_ = i;
      break;
    }
  }
  ASSERT(!tmp.is(no_reg));

  for (int i = 0; i < field_count; i++) {
    lw(tmp, FieldMemOperand(src, i * kPointerSize));
    sw(tmp, FieldMemOperand(dst, i * kPointerSize));
  }
}
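// CopyBytes below copies in three phases: a byte loop until src is
// word-aligned, a word loop (stored byte by byte because dst may still be
// unaligned), and a final byte loop for the tail.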
3260 void MacroAssembler::CopyBytes(Register src,
3264 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3266 // Align src before copying in word size chunks.
3267 Branch(&byte_loop, le, length, Operand(kPointerSize));
3268 bind(&align_loop_1);
3269 And(scratch, src, kPointerSize - 1);
3270 Branch(&word_loop, eq, scratch, Operand(zero_reg));
  lbu(scratch, MemOperand(src));
  Addu(src, src, 1);
  sb(scratch, MemOperand(dst));
  Addu(dst, dst, 1);
  Subu(length, length, Operand(1));
  Branch(&align_loop_1, ne, length, Operand(zero_reg));

  // Copy bytes in word size chunks.
  bind(&word_loop);
  if (emit_debug_code()) {
3281 And(scratch, src, kPointerSize - 1);
3282 Assert(eq, kExpectingAlignmentForCopyBytes,
           scratch, Operand(zero_reg));
  }
  Branch(&byte_loop, lt, length, Operand(kPointerSize));
3286 lw(scratch, MemOperand(src));
3287 Addu(src, src, kPointerSize);
3289 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3290 // Can't use unaligned access - copy byte by byte.
    if (kArchEndian == kLittle) {
      sb(scratch, MemOperand(dst, 0));
      srl(scratch, scratch, 8);
      sb(scratch, MemOperand(dst, 1));
      srl(scratch, scratch, 8);
      sb(scratch, MemOperand(dst, 2));
      srl(scratch, scratch, 8);
      sb(scratch, MemOperand(dst, 3));
    } else {
      sb(scratch, MemOperand(dst, 3));
      srl(scratch, scratch, 8);
      sb(scratch, MemOperand(dst, 2));
      srl(scratch, scratch, 8);
      sb(scratch, MemOperand(dst, 1));
      srl(scratch, scratch, 8);
      sb(scratch, MemOperand(dst, 0));
    }
    Addu(dst, dst, 4);

    Subu(length, length, Operand(kPointerSize));
    Branch(&word_loop);

  // Copy the last bytes if any left.
  bind(&byte_loop);
  Branch(&done, eq, length, Operand(zero_reg));
  bind(&byte_loop_1);
  lbu(scratch, MemOperand(src));
  Addu(src, src, 1);
  sb(scratch, MemOperand(dst));
  Addu(dst, dst, 1);
  Subu(length, length, Operand(1));
  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
  bind(&done);
}
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  Branch(&entry);
  bind(&loop);
  sw(filler, MemOperand(start_offset));
  Addu(start_offset, start_offset, kPointerSize);
  bind(&entry);
  Branch(&loop, lt, start_offset, Operand(end_offset));
}
3341 void MacroAssembler::CheckFastElements(Register map,
3344 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3345 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3346 STATIC_ASSERT(FAST_ELEMENTS == 2);
3347 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3348 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3349 Branch(fail, hi, scratch,
3350 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3354 void MacroAssembler::CheckFastObjectElements(Register map,
3357 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3358 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3359 STATIC_ASSERT(FAST_ELEMENTS == 2);
3360 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3361 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3362 Branch(fail, ls, scratch,
3363 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3364 Branch(fail, hi, scratch,
3365 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3369 void MacroAssembler::CheckFastSmiElements(Register map,
3372 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3373 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3374 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3375 Branch(fail, hi, scratch,
3376 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3380 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3382 Register elements_reg,
3387 int elements_offset) {
3388 Label smi_value, maybe_nan, have_double_value, is_nan, done;
3389 Register mantissa_reg = scratch2;
3390 Register exponent_reg = scratch3;
3392 // Handle smi values specially.
3393 JumpIfSmi(value_reg, &smi_value);
  // Ensure that the object is a heap number.
  CheckMap(value_reg,
           scratch1,
           Heap::kHeapNumberMapRootIndex,
           fail,
           DONT_DO_SMI_CHECK);

  // Check for nan: all NaN values have a value greater (signed) than
  // 0x7ff00000 in the exponent word.
3404 li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3405 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3406 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3408 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3410 bind(&have_double_value);
3411 sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3412 Addu(scratch1, scratch1, elements_reg);
  sw(mantissa_reg,
     FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
                     + kHoleNanLower32Offset));
  sw(exponent_reg,
     FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
                     + kHoleNanUpper32Offset));
  jmp(&done);

  bind(&maybe_nan);
3422 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3423 // it's an Infinity, and the non-NaN code path applies.
3424 Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3425 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3426 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
  bind(&is_nan);
  // Load canonical NaN for storing into the double array.
  LoadRoot(at, Heap::kNanValueRootIndex);
  lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
  lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
  jmp(&have_double_value);
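  // Storing only the canonical NaN bit pattern keeps every NaN written to
  // the array distinguishable from the hole, which is itself a reserved NaN
  // pattern (kHoleNanUpper32/kHoleNanLower32).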
  bind(&smi_value);
  Addu(scratch1, elements_reg,
       Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
               elements_offset));
  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
  Addu(scratch1, scratch1, scratch2);
  // scratch1 is now effective address of the double element.
  Register untagged_value = elements_reg;
  SmiUntag(untagged_value, value_reg);
  mtc1(untagged_value, f2);
  cvt_d_w(f0, f2);
  sdc1(f0, MemOperand(scratch1, 0));
  bind(&done);
}
3451 void MacroAssembler::CompareMapAndBranch(Register obj,
3454 Label* early_success,
3457 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3458 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3462 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3464 Label* early_success,
3467 Branch(branch_to, cond, obj_map, Operand(map));
3471 void MacroAssembler::CheckMap(Register obj,
3475 SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  Label success;
  CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
  bind(&success);
}
3485 void MacroAssembler::DispatchMap(Register obj,
3488 Handle<Code> success,
3489 SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
  bind(&fail);
}
3500 void MacroAssembler::CheckMap(Register obj,
3502 Heap::RootListIndex index,
3504 SmiCheckType smi_check_type) {
3505 if (smi_check_type == DO_SMI_CHECK) {
3506 JumpIfSmi(obj, fail);
3508 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3509 LoadRoot(at, index);
3510 Branch(fail, ne, scratch, Operand(at));
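// The Mov*Float* helpers below abstract over the two o32 calling conventions:
// with the hard-float ABI, doubles travel in FPU registers (f12/f14 for
// arguments, f0 for results); with the soft-float ABI they travel in GPR
// pairs (a0/a1 and a2/a3 for arguments, v0/v1 for results), with the word
// order depending on endianness.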
void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
  if (IsMipsSoftFloatABI) {
    if (kArchEndian == kLittle) {
      Move(dst, v0, v1);
    } else {
      Move(dst, v1, v0);
    }
  } else {
    Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
  }
}
void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
  if (IsMipsSoftFloatABI) {
    if (kArchEndian == kLittle) {
      Move(dst, a0, a1);
    } else {
      Move(dst, a1, a0);
    }
  } else {
    Move(dst, f12);  // Reg f12 is o32 ABI FP first argument value.
  }
}
void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
  if (!IsMipsSoftFloatABI) {
    Move(f12, src);
  } else {
    if (kArchEndian == kLittle) {
      Move(a0, a1, src);
    } else {
      Move(a1, a0, src);
    }
  }
}
void MacroAssembler::MovToFloatResult(DoubleRegister src) {
  if (!IsMipsSoftFloatABI) {
    Move(f0, src);
  } else {
    if (kArchEndian == kLittle) {
      Move(v0, v1, src);
    } else {
      Move(v1, v0, src);
    }
  }
}
void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
                                          DoubleRegister src2) {
  if (!IsMipsSoftFloatABI) {
    if (src2.is(f12)) {
      ASSERT(!src1.is(f14));
      Move(f14, src2);
      Move(f12, src1);
    } else {
      Move(f12, src1);
      Move(f14, src2);
    }
  } else {
    if (kArchEndian == kLittle) {
      Move(a0, a1, src1);
      Move(a2, a3, src2);
    } else {
      Move(a1, a0, src1);
      Move(a3, a2, src2);
    }
  }
}
3589 // -----------------------------------------------------------------------------
3590 // JavaScript invokes.
3592 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3593 const ParameterCount& actual,
3594 Handle<Code> code_constant,
3597 bool* definitely_mismatches,
3599 const CallWrapper& call_wrapper) {
3600 bool definitely_matches = false;
3601 *definitely_mismatches = false;
3602 Label regular_invoke;
3604 // Check whether the expected and actual arguments count match. If not,
3605 // setup registers according to contract with ArgumentsAdaptorTrampoline:
3606 // a0: actual arguments count
3607 // a1: function (passed through to callee)
3608 // a2: expected arguments count
3610 // The code below is made a lot easier because the calling code already sets
3611 // up actual and expected registers according to the contract if values are
3612 // passed in registers.
3613 ASSERT(actual.is_immediate() || actual.reg().is(a0));
3614 ASSERT(expected.is_immediate() || expected.reg().is(a2));
3615 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3617 if (expected.is_immediate()) {
3618 ASSERT(actual.is_immediate());
3619 if (expected.immediate() == actual.immediate()) {
3620 definitely_matches = true;
3622 li(a0, Operand(actual.immediate()));
3623 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3624 if (expected.immediate() == sentinel) {
3625 // Don't worry about adapting arguments for builtins that
3626 // don't want that done. Skip adaption code by making it look
3627 // like we have a match between expected and actual number of
3629 definitely_matches = true;
3631 *definitely_mismatches = true;
3632 li(a2, Operand(expected.immediate()));
  } else if (actual.is_immediate()) {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
    li(a0, Operand(actual.immediate()));
  } else {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
  }
3642 if (!definitely_matches) {
3643 if (!code_constant.is_null()) {
3644 li(a3, Operand(code_constant));
3645 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3648 Handle<Code> adaptor =
3649 isolate()->builtins()->ArgumentsAdaptorTrampoline();
3650 if (flag == CALL_FUNCTION) {
3651 call_wrapper.BeforeCall(CallSize(adaptor));
3653 call_wrapper.AfterCall();
    if (!*definitely_mismatches) {
      Branch(done);
    }
  } else {
    Jump(adaptor, RelocInfo::CODE_TARGET);
  }
  bind(&regular_invoke);
}
3665 void MacroAssembler::InvokeCode(Register code,
3666 const ParameterCount& expected,
3667 const ParameterCount& actual,
3669 const CallWrapper& call_wrapper) {
3670 // You can't call a function without a valid frame.
3671 ASSERT(flag == JUMP_FUNCTION || has_frame());
  Label done;

  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag,
                 call_wrapper);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      Call(code);
      call_wrapper.AfterCall();
    } else {
      ASSERT(flag == JUMP_FUNCTION);
      Jump(code);
    }

    // Continue here if InvokePrologue does handle the invocation due to
    // mismatched parameter counts.
    bind(&done);
  }
}
3695 void MacroAssembler::InvokeFunction(Register function,
3696 const ParameterCount& actual,
3698 const CallWrapper& call_wrapper) {
3699 // You can't call a function without a valid frame.
3700 ASSERT(flag == JUMP_FUNCTION || has_frame());
3702 // Contract with called JS functions requires that function is passed in a1.
3703 ASSERT(function.is(a1));
3704 Register expected_reg = a2;
3705 Register code_reg = a3;
3707 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3708 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
  lw(expected_reg,
     FieldMemOperand(code_reg,
                     SharedFunctionInfo::kFormalParameterCountOffset));
3712 sra(expected_reg, expected_reg, kSmiTagSize);
3713 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3715 ParameterCount expected(expected_reg);
3716 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
3720 void MacroAssembler::InvokeFunction(Register function,
3721 const ParameterCount& expected,
3722 const ParameterCount& actual,
3724 const CallWrapper& call_wrapper) {
3725 // You can't call a function without a valid frame.
3726 ASSERT(flag == JUMP_FUNCTION || has_frame());
3728 // Contract with called JS functions requires that function is passed in a1.
3729 ASSERT(function.is(a1));
3731 // Get the function and setup the context.
3732 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3734 // We call indirectly through the code field in the function to
3735 // allow recompilation to take effect without changing any of the
3737 lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3738 InvokeCode(a3, expected, actual, flag, call_wrapper);
3742 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3743 const ParameterCount& expected,
3744 const ParameterCount& actual,
3746 const CallWrapper& call_wrapper) {
  li(a1, function);
  InvokeFunction(a1, expected, actual, flag, call_wrapper);
}
3752 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3756 lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3757 IsInstanceJSObjectType(map, scratch, fail);
3761 void MacroAssembler::IsInstanceJSObjectType(Register map,
3764 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
3765 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3766 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
3770 void MacroAssembler::IsObjectJSStringType(Register object,
3773 ASSERT(kNotStringTag != 0);
3775 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3776 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3777 And(scratch, scratch, Operand(kIsNotStringMask));
3778 Branch(fail, ne, scratch, Operand(zero_reg));
3782 void MacroAssembler::IsObjectNameType(Register object,
3785 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3786 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3787 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
3791 // ---------------------------------------------------------------------------
3792 // Support functions.
3795 void MacroAssembler::TryGetFunctionPrototype(Register function,
3799 bool miss_on_bound_function) {
3800 // Check that the receiver isn't a smi.
3801 JumpIfSmi(function, miss);
3803 // Check that the function really is a function. Load map into result reg.
3804 GetObjectType(function, result, scratch);
3805 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
  if (miss_on_bound_function) {
    lw(scratch,
       FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
    lw(scratch,
       FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
    And(scratch, scratch,
        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
    Branch(miss, ne, scratch, Operand(zero_reg));
  }
  // Make sure that the function has an instance prototype.
  Label non_instance;
  lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
  Branch(&non_instance, ne, scratch, Operand(zero_reg));

  // Get the prototype or initial map from the function.
  lw(result,
     FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3827 // If the prototype or initial map is the hole, don't return it and
3828 // simply miss the cache instead. This will allow us to allocate a
3829 // prototype object on-demand in the runtime system.
3830 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3831 Branch(miss, eq, result, Operand(t8));
  // If the function does not have an initial map, we're done.
  Label done;
  GetObjectType(result, scratch, scratch);
  Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
  jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  bind(&non_instance);
  lw(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  bind(&done);
}
3852 void MacroAssembler::GetObjectType(Register object,
3854 Register type_reg) {
3855 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3856 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3860 // -----------------------------------------------------------------------------
3863 void MacroAssembler::CallStub(CodeStub* stub,
3864 TypeFeedbackId ast_id,
3868 BranchDelaySlot bd) {
3869 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
3870 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
3875 void MacroAssembler::TailCallStub(CodeStub* stub,
3879 BranchDelaySlot bd) {
3880 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
3884 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3885 return ref0.address() - ref1.address();
void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
    int stack_space,
    MemOperand return_value_operand,
    MemOperand* context_restore_operand) {
3895 ExternalReference next_address =
3896 ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate()),
      next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate()),
      next_address);
3905 ASSERT(function_address.is(a1) || function_address.is(a2));
3907 Label profiler_disabled;
3908 Label end_profiler_check;
3909 li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
3910 lb(t9, MemOperand(t9, 0));
3911 Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
3913 // Additional parameter is the address of the actual callback.
3914 li(t9, Operand(thunk_ref));
3915 jmp(&end_profiler_check);
3917 bind(&profiler_disabled);
3918 mov(t9, function_address);
3919 bind(&end_profiler_check);
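  // At this point t9 holds either the real callback (profiler disabled) or
  // the profiler thunk, which receives the callback's address as an extra
  // parameter and forwards to it after logging.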
3921 // Allocate HandleScope in callee-save registers.
3922 li(s3, Operand(next_address));
3923 lw(s0, MemOperand(s3, kNextOffset));
3924 lw(s1, MemOperand(s3, kLimitOffset));
3925 lw(s2, MemOperand(s3, kLevelOffset));
3926 Addu(s2, s2, Operand(1));
3927 sw(s2, MemOperand(s3, kLevelOffset));
3929 if (FLAG_log_timer_events) {
3930 FrameScope frame(this, StackFrame::MANUAL);
3931 PushSafepointRegisters();
3932 PrepareCallCFunction(1, a0);
3933 li(a0, Operand(ExternalReference::isolate_address(isolate())));
3934 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
3935 PopSafepointRegisters();
3938 // Native call returns to the DirectCEntry stub which redirects to the
3939 // return address pushed on stack (could have moved after GC).
3940 // DirectCEntry stub itself is generated early and never moves.
3941 DirectCEntryStub stub(isolate());
3942 stub.GenerateCall(this, t9);
3944 if (FLAG_log_timer_events) {
3945 FrameScope frame(this, StackFrame::MANUAL);
3946 PushSafepointRegisters();
3947 PrepareCallCFunction(1, a0);
3948 li(a0, Operand(ExternalReference::isolate_address(isolate())));
3949 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
3950 PopSafepointRegisters();
3953 Label promote_scheduled_exception;
3954 Label exception_handled;
3955 Label delete_allocated_handles;
3956 Label leave_exit_frame;
3957 Label return_value_loaded;
3959 // Load value from ReturnValue.
3960 lw(v0, return_value_operand);
3961 bind(&return_value_loaded);
3963 // No more valid handles (the result handle was the last one). Restore
3964 // previous handle scope.
3965 sw(s0, MemOperand(s3, kNextOffset));
3966 if (emit_debug_code()) {
3967 lw(a1, MemOperand(s3, kLevelOffset));
3968 Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
3970 Subu(s2, s2, Operand(1));
3971 sw(s2, MemOperand(s3, kLevelOffset));
3972 lw(at, MemOperand(s3, kLimitOffset));
3973 Branch(&delete_allocated_handles, ne, s1, Operand(at));
3975 // Check if the function scheduled an exception.
3976 bind(&leave_exit_frame);
3977 LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3978 li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3979 lw(t1, MemOperand(at));
3980 Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3981 bind(&exception_handled);
3983 bool restore_context = context_restore_operand != NULL;
3984 if (restore_context) {
3985 lw(cp, *context_restore_operand);
3987 li(s0, Operand(stack_space));
3988 LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
  bind(&promote_scheduled_exception);
  {
    FrameScope frame(this, StackFrame::INTERNAL);
    CallExternalReference(
        ExternalReference(Runtime::kHiddenPromoteScheduledException,
                          isolate()),
        0);
  }
  jmp(&exception_handled);
3999 // HandleScope limit has changed. Delete allocated extensions.
  bind(&delete_allocated_handles);
  sw(s1, MemOperand(s3, kLimitOffset));
  mov(s0, v0);
  mov(a0, v0);
  PrepareCallCFunction(1, s1);
  li(a0, Operand(ExternalReference::isolate_address(isolate())));
  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
                1);
  mov(v0, s0);
  jmp(&leave_exit_frame);
}
4013 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4014 return has_frame_ || !stub->SometimesSetsUpAFrame();
4018 void MacroAssembler::IllegalOperation(int num_arguments) {
4019 if (num_arguments > 0) {
4020 addiu(sp, sp, num_arguments * kPointerSize);
4022 LoadRoot(v0, Heap::kUndefinedValueRootIndex);
4026 void MacroAssembler::IndexFromHash(Register hash,
4028 // If the hash field contains an array index pick it out. The assert checks
4029 // that the constants for the maximum number of digits for an array index
  // cached in the hash field and the number of bits reserved for it does not
  // conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  // We want the smi-tagged index in the index register. kArrayIndexValueMask
  // has zeros in the low kHashShift bits.
4036 STATIC_ASSERT(kSmiTag == 0);
4037 Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
4038 sll(index, hash, kSmiTagSize);
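// In IndexFromHash above, Ext extracts the array-index bit field starting at
// kHashShift, and the sll by kSmiTagSize (one bit) smi-tags the extracted
// value, so no separate mask with kArrayIndexValueMask is needed.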
4042 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4046 Register heap_number_map,
4048 ObjectToDoubleFlags flags) {
  Label done;
  if ((flags & OBJECT_NOT_SMI) == 0) {
    Label not_smi;
    JumpIfNotSmi(object, &not_smi);
    // Remove smi tag and convert to double.
    sra(scratch1, object, kSmiTagSize);
    mtc1(scratch1, result);
    cvt_d_w(result, result);
    Branch(&done);
    bind(&not_smi);
  }
4060 // Check for heap number and load double value from it.
4061 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4062 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4064 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4065 // If exponent is all ones the number is either a NaN or +/-Infinity.
4066 Register exponent = scratch1;
4067 Register mask_reg = scratch2;
4068 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4069 li(mask_reg, HeapNumber::kExponentMask);
4071 And(exponent, exponent, mask_reg);
4072 Branch(not_number, eq, exponent, Operand(mask_reg));
  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
  bind(&done);
}
4079 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4081 Register scratch1) {
4082 sra(scratch1, smi, kSmiTagSize);
4083 mtc1(scratch1, value);
4084 cvt_d_w(value, value);
4088 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4091 Register overflow_dst,
4093 ASSERT(!dst.is(overflow_dst));
4094 ASSERT(!dst.is(scratch));
4095 ASSERT(!overflow_dst.is(scratch));
4096 ASSERT(!overflow_dst.is(left));
4097 ASSERT(!overflow_dst.is(right));
4099 if (left.is(right) && dst.is(left)) {
4100 ASSERT(!dst.is(t9));
4101 ASSERT(!scratch.is(t9));
4102 ASSERT(!left.is(t9));
4103 ASSERT(!right.is(t9));
    ASSERT(!overflow_dst.is(t9));
    mov(t9, right);
    right = t9;
  }

  if (dst.is(left)) {
    mov(scratch, left);  // Preserve left.
4111 addu(dst, left, right); // Left is overwritten.
4112 xor_(scratch, dst, scratch); // Original left.
4113 xor_(overflow_dst, dst, right);
4114 and_(overflow_dst, overflow_dst, scratch);
4115 } else if (dst.is(right)) {
4116 mov(scratch, right); // Preserve right.
4117 addu(dst, left, right); // Right is overwritten.
4118 xor_(scratch, dst, scratch); // Original right.
4119 xor_(overflow_dst, dst, left);
4120 and_(overflow_dst, overflow_dst, scratch);
  } else {
    addu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, dst, right);
    and_(overflow_dst, scratch, overflow_dst);
  }
}
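// Overflow detection above is the standard sign-bit identity: a signed
// addition overflows exactly when both operands have the same sign and the
// result's sign differs, i.e. when (dst ^ left) & (dst ^ right) has its sign
// bit set. The register shuffling beforehand only preserves operands that
// dst would otherwise clobber.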
4130 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4133 Register overflow_dst,
4135 ASSERT(!dst.is(overflow_dst));
4136 ASSERT(!dst.is(scratch));
4137 ASSERT(!overflow_dst.is(scratch));
4138 ASSERT(!overflow_dst.is(left));
4139 ASSERT(!overflow_dst.is(right));
4140 ASSERT(!scratch.is(left));
4141 ASSERT(!scratch.is(right));
4143 // This happens with some crankshaft code. Since Subu works fine if
4144 // left == right, let's not make that restriction here.
  if (left.is(right)) {
    mov(dst, zero_reg);
    mov(overflow_dst, zero_reg);
    return;
  }

  if (dst.is(left)) {
    mov(scratch, left);  // Preserve left.
    subu(dst, left, right);  // Left is overwritten.
    xor_(overflow_dst, dst, scratch);  // scratch is original left.
    xor_(scratch, scratch, right);  // Original left XOR right.
    and_(overflow_dst, scratch, overflow_dst);
4157 } else if (dst.is(right)) {
4158 mov(scratch, right); // Preserve right.
4159 subu(dst, left, right); // Right is overwritten.
4160 xor_(overflow_dst, dst, left);
4161 xor_(scratch, left, scratch); // Original right.
4162 and_(overflow_dst, scratch, overflow_dst);
  } else {
    subu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, right);
    and_(overflow_dst, scratch, overflow_dst);
  }
}
4172 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4174 SaveFPRegsMode save_doubles) {
4175 // All parameters are on the stack. v0 has the return value after call.
4177 // If the expected number of arguments of the runtime function is
4178 // constant, we check that the actual number of arguments match the
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  PrepareCEntryArgs(num_arguments);
  PrepareCEntryFunction(ExternalReference(f, isolate()));
  CEntryStub stub(isolate(), 1, save_doubles);
  CallStub(&stub);
}
4196 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4198 BranchDelaySlot bd) {
4199 PrepareCEntryArgs(num_arguments);
4200 PrepareCEntryFunction(ext);
4202 CEntryStub stub(isolate(), 1);
4203 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  PrepareCEntryArgs(num_arguments);
  JumpToExternalReference(ext);
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}
4228 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4229 BranchDelaySlot bd) {
4230 PrepareCEntryFunction(builtin);
  CEntryStub stub(isolate(), 1);
  Jump(stub.GetCode(),
       RelocInfo::CODE_TARGET,
       al,
       zero_reg,
       Operand(zero_reg),
       bd);
}
4241 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4243 const CallWrapper& call_wrapper) {
4244 // You can't call a builtin without a valid frame.
4245 ASSERT(flag == JUMP_FUNCTION || has_frame());
  GetBuiltinEntry(t9, id);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(t9));
    Call(t9);
    call_wrapper.AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    Jump(t9);
  }
}
4259 void MacroAssembler::GetBuiltinFunction(Register target,
4260 Builtins::JavaScript id) {
4261 // Load the builtins object into target register.
4262 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4263 lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4264 // Load the JavaScript builtin function from the builtins object.
4265 lw(target, FieldMemOperand(target,
4266 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4270 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4271 ASSERT(!target.is(a1));
4272 GetBuiltinFunction(a1, id);
4273 // Load the code entry point from the builtins object.
4274 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4278 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4279 Register scratch1, Register scratch2) {
4280 if (FLAG_native_code_counters && counter->Enabled()) {
4281 li(scratch1, Operand(value));
4282 li(scratch2, Operand(ExternalReference(counter)));
4283 sw(scratch1, MemOperand(scratch2));
4288 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4289 Register scratch1, Register scratch2) {
4291 if (FLAG_native_code_counters && counter->Enabled()) {
4292 li(scratch2, Operand(ExternalReference(counter)));
4293 lw(scratch1, MemOperand(scratch2));
4294 Addu(scratch1, scratch1, Operand(value));
4295 sw(scratch1, MemOperand(scratch2));
4300 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4301 Register scratch1, Register scratch2) {
4303 if (FLAG_native_code_counters && counter->Enabled()) {
4304 li(scratch2, Operand(ExternalReference(counter)));
4305 lw(scratch1, MemOperand(scratch2));
4306 Subu(scratch1, scratch1, Operand(value));
4307 sw(scratch1, MemOperand(scratch2));
4312 // -----------------------------------------------------------------------------
4315 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4316 Register rs, Operand rt) {
4317 if (emit_debug_code())
4318 Check(cc, reason, rs, rt);
4322 void MacroAssembler::AssertFastElements(Register elements) {
4323 if (emit_debug_code()) {
    ASSERT(!elements.is(at));
    Label ok;
    push(elements);
    lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4328 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4329 Branch(&ok, eq, elements, Operand(at));
4330 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4331 Branch(&ok, eq, elements, Operand(at));
4332 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4333 Branch(&ok, eq, elements, Operand(at));
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
    pop(elements);
  }
}
void MacroAssembler::Check(Condition cc, BailoutReason reason,
                           Register rs, Operand rt) {
  Label L;
  Branch(&L, cc, rs, rt);
  Abort(reason);
  // Will not return here.
  bind(&L);
}
void MacroAssembler::Abort(BailoutReason reason) {
  Label abort_start;
  bind(&abort_start);
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    stop(msg);
    return;
  }
#endif

  li(a0, Operand(Smi::FromInt(reason)));
  push(a0);
  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 1);
  } else {
    CallRuntime(Runtime::kAbort, 1);
  }
  // Will not return here.
4379 if (is_trampoline_pool_blocked()) {
4380 // If the calling code cares about the exact number of
4381 // instructions generated, we insert padding here to keep the size
4382 // of the Abort macro constant.
4383 // Currently in debug mode with debug_code enabled the number of
4384 // generated instructions is 10, so we use this as a maximum value.
4385 static const int kExpectedAbortInstructions = 10;
4386 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4387 ASSERT(abort_instructions <= kExpectedAbortInstructions);
    while (abort_instructions++ < kExpectedAbortInstructions) {
      nop();
    }
  }
}
4395 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4396 if (context_chain_length > 0) {
4397 // Move up the chain of contexts to the context containing the slot.
4398 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in cp).
    mov(dst, cp);
  }
}
4411 void MacroAssembler::LoadTransitionedArrayMapConditional(
4412 ElementsKind expected_kind,
4413 ElementsKind transitioned_kind,
4414 Register map_in_out,
4416 Label* no_map_match) {
  // Load the global or builtins object from the current context.
  lw(scratch,
     MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  lw(scratch,
     MemOperand(scratch,
                Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4426 size_t offset = expected_kind * kPointerSize +
4427 FixedArrayBase::kHeaderSize;
4428 lw(at, FieldMemOperand(scratch, offset));
4429 Branch(no_map_match, ne, map_in_out, Operand(at));
4431 // Use the transitioned cached map.
4432 offset = transitioned_kind * kPointerSize +
4433 FixedArrayBase::kHeaderSize;
4434 lw(map_in_out, FieldMemOperand(scratch, offset));
4438 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  lw(function,
     MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4442 // Load the native context from the global or builtins object.
4443 lw(function, FieldMemOperand(function,
4444 GlobalObject::kNativeContextOffset));
4445 // Load the function from the native context.
4446 lw(function, MemOperand(function, Context::SlotOffset(index)));
4450 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4453 // Load the initial map. The global functions all have initial maps.
4454 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    Branch(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}
4466 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
  if (frame_mode == BUILD_STUB_FRAME) {
    Push(ra, fp, cp);
    Push(Smi::FromInt(StackFrame::STUB));
    // Adjust FP to point to saved FP.
    Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
  } else {
    PredictableCodeSizeScope predictable_code_size_scope(
4474 this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
4475 // The following three instructions must remain together and unmodified
4476 // for code aging to work properly.
4477 if (isolate()->IsCodePreAgingActive()) {
4478 // Pre-age the code.
4479 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4480 nop(Assembler::CODE_AGE_MARKER_NOP);
4481 // Load the stub address to t9 and call it,
4482 // GetCodeAgeAndParity() extracts the stub address from this instruction.
      li(t9,
         Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
         CONSTANT_SIZE);
      nop();  // Prevent jalr to jal optimization.
      jalr(t9, a0);
      nop();  // Branch delay slot nop.
      nop();  // Pad the empty space.
    } else {
      Push(ra, fp, cp, a1);
      nop(Assembler::CODE_AGE_SEQUENCE_NOP);
      // Adjust fp to point to caller's fp.
      Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }
  }
}
4500 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4501 addiu(sp, sp, -5 * kPointerSize);
4502 li(t8, Operand(Smi::FromInt(type)));
4503 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4504 sw(ra, MemOperand(sp, 4 * kPointerSize));
4505 sw(fp, MemOperand(sp, 3 * kPointerSize));
4506 sw(cp, MemOperand(sp, 2 * kPointerSize));
4507 sw(t8, MemOperand(sp, 1 * kPointerSize));
4508 sw(t9, MemOperand(sp, 0 * kPointerSize));
  // Adjust FP to point to saved FP.
  Addu(fp, sp,
       Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  mov(sp, fp);
  lw(fp, MemOperand(sp, 0 * kPointerSize));
  lw(ra, MemOperand(sp, 1 * kPointerSize));
  addiu(sp, sp, 2 * kPointerSize);
}
4523 void MacroAssembler::EnterExitFrame(bool save_doubles,
4525 // Set up the frame structure on the stack.
4526 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4527 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4528 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4530 // This is how the stack will look:
4531 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4532 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4533 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4534 // [fp - 1 (==kSPOffset)] - sp of the called function
4535 // [fp - 2 (==kCodeOffset)] - CodeObject
4536 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4537 // new stack (will contain saved ra)
4540 addiu(sp, sp, -4 * kPointerSize);
4541 sw(ra, MemOperand(sp, 3 * kPointerSize));
4542 sw(fp, MemOperand(sp, 2 * kPointerSize));
4543 addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4545 if (emit_debug_code()) {
4546 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4549 // Accessed from ExitFrame::code_slot.
4550 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4551 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4553 // Save the frame pointer and the context in top.
4554 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4555 sw(fp, MemOperand(t8));
4556 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4557 sw(cp, MemOperand(t8));
4559 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  if (save_doubles) {
    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
    ASSERT(kDoubleSize == frame_alignment);
    if (frame_alignment > 0) {
      ASSERT(IsPowerOf2(frame_alignment));
      And(sp, sp, Operand(-frame_alignment));  // Align stack.
    }
    int space = FPURegister::kMaxNumRegisters * kDoubleSize;
    Subu(sp, sp, Operand(space));
    // Remember: we only need to save every 2nd double FPU value.
    for (int i = 0; i < FPURegister::kMaxNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      sdc1(reg, MemOperand(sp, i * kDoubleSize));
    }
  }
4576 // Reserve place for the return address, stack space and an optional slot
4577 // (used by the DirectCEntryStub to hold the return value if a struct is
4578 // returned) and align the frame preparing for calling the runtime function.
4579 ASSERT(stack_space >= 0);
4580 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4581 if (frame_alignment > 0) {
4582 ASSERT(IsPowerOf2(frame_alignment));
4583 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4586 // Set the exit frame sp value to point just before the return address
4588 addiu(at, sp, kPointerSize);
4589 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                    Register argument_count,
                                    bool restore_context,
                                    bool do_return) {
  // Optionally restore all double registers.
  if (save_doubles) {
    // Remember: we only need to restore every 2nd double FPU value.
    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
    for (int i = 0; i < FPURegister::kMaxNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
    }
  }

  // Clear top frame.
4608 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4609 sw(zero_reg, MemOperand(t8));
4611 // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    lw(cp, MemOperand(t8));
  }
#ifdef DEBUG
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  sw(a3, MemOperand(t8));
#endif
4621 // Pop the arguments, restore registers, and return.
4622 mov(sp, fp); // Respect ABI stack constraint.
4623 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4624 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
  if (argument_count.is_valid()) {
    sll(t8, argument_count, kPointerSizeLog2);
    addu(sp, sp, t8);
  }

  if (do_return) {
    Ret(USE_DELAY_SLOT);
    // If returning, the instruction in the delay slot will be the addiu
    // below.
  }
  addiu(sp, sp, 8);
}
4639 void MacroAssembler::InitializeNewString(Register string,
4641 Heap::RootListIndex map_index,
4643 Register scratch2) {
4644 sll(scratch1, length, kSmiTagSize);
4645 LoadRoot(scratch2, map_index);
4646 sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
4647 li(scratch1, Operand(String::kEmptyHashField));
4648 sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4649 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4653 int MacroAssembler::ActivationFrameAlignment() {
4654 #if V8_HOST_ARCH_MIPS
4655 // Running on the real platform. Use the alignment as mandated by the local
4657 // Note: This will break if we ever start generating snapshots on one Mips
4658 // platform for another Mips platform with a different alignment.
4659 return OS::ActivationFrameAlignment();
4660 #else // V8_HOST_ARCH_MIPS
4661 // If we are using the simulator then we should always align to the expected
4662 // alignment. As the simulator is used to generate snapshots we do not know
4663 // if the target platform will need alignment, so this is controlled from a
4665 return FLAG_sim_stack_alignment;
4666 #endif // V8_HOST_ARCH_MIPS
4670 void MacroAssembler::AssertStackIsAligned() {
4671 if (emit_debug_code()) {
4672 const int frame_alignment = ActivationFrameAlignment();
4673 const int frame_alignment_mask = frame_alignment - 1;
4675 if (frame_alignment > kPointerSize) {
4676 Label alignment_as_expected;
4677 ASSERT(IsPowerOf2(frame_alignment));
4678 andi(at, sp, frame_alignment_mask);
4679 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4680 // Don't use Check here, as it will call Runtime_Abort re-entering here.
4681 stop("Unexpected stack alignment");
4682 bind(&alignment_as_expected);
4688 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4689 Register reg,
4690 Register scratch,
4691 Label* not_power_of_two_or_zero) {
4692 Subu(scratch, reg, Operand(1));
4693 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4694 scratch, Operand(zero_reg));
4695 and_(at, scratch, reg); // In the delay slot.
4696 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4697 }
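// Illustrative arithmetic for the check above: a power of two has a single
// set bit, so reg & (reg - 1) clears it to zero, e.g. 8 & 7 == 0b1000 &
// 0b0111 == 0, while a non-power such as 6 gives 6 & 5 == 0b0110 & 0b0101
// == 4 != 0. The preceding lt-branch catches reg == 0, whose reg - 1 wraps
// to -1 and would otherwise pass the and-test.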
4700 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4701 ASSERT(!reg.is(overflow));
4702 mov(overflow, reg); // Save original value.
4703 SmiTag(reg);
4704 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
4705 }
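// Worked example of the overflow test above (illustrative only): SmiTag
// computes value << 1, so tagging overflows exactly when bits 31 and 30 of
// the input differ. For value = 0x40000000 the tagged result is 0x80000000
// and 0x40000000 ^ 0x80000000 == 0xC0000000, which is negative, signalling
// overflow; for value = 1 the xor is 1 ^ 2 == 3, non-negative, so the smi
// is valid.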
4708 void MacroAssembler::SmiTagCheckOverflow(Register dst,
4709 Register src,
4710 Register overflow) {
4711 if (dst.is(src)) {
4712 // Fall back to slower case.
4713 SmiTagCheckOverflow(dst, overflow);
4714 } else {
4715 ASSERT(!dst.is(src));
4716 ASSERT(!dst.is(overflow));
4717 ASSERT(!src.is(overflow));
4718 SmiTag(dst, src);
4719 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
4720 }
4721 }
4724 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
4725 Register src,
4726 Label* smi_case) {
4727 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
4728 SmiUntag(dst, src);
4729 }
4732 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
4733 Register src,
4734 Label* non_smi_case) {
4735 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
4736 SmiUntag(dst, src);
4737 }
4739 void MacroAssembler::JumpIfSmi(Register value,
4740 Label* smi_label,
4741 Register scratch,
4742 BranchDelaySlot bd) {
4743 ASSERT_EQ(0, kSmiTag);
4744 andi(scratch, value, kSmiTagMask);
4745 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4746 }
4748 void MacroAssembler::JumpIfNotSmi(Register value,
4749 Label* not_smi_label,
4750 Register scratch,
4751 BranchDelaySlot bd) {
4752 ASSERT_EQ(0, kSmiTag);
4753 andi(scratch, value, kSmiTagMask);
4754 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4755 }
4758 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4759 Register reg2,
4760 Label* on_not_both_smi) {
4761 STATIC_ASSERT(kSmiTag == 0);
4762 ASSERT_EQ(1, kSmiTagMask);
4763 or_(at, reg1, reg2);
4764 JumpIfNotSmi(at, on_not_both_smi);
4765 }
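// Why the or_ above suffices (illustrative): with kSmiTag == 0 a smi has
// tag bit 0 clear, so (reg1 | reg2) has a clear tag bit only when both
// inputs do. E.g. reg1 = 0b010 (smi 1) and reg2 = 0b100 (smi 2) give an or
// of 0b110, still tagged as a smi; if either operand carried tag bit 1, so
// would the or, and JumpIfNotSmi would fire.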
4768 void MacroAssembler::JumpIfEitherSmi(Register reg1,
4769 Register reg2,
4770 Label* on_either_smi) {
4771 STATIC_ASSERT(kSmiTag == 0);
4772 ASSERT_EQ(1, kSmiTagMask);
4773 // Both Smi tags must be 1 (not Smi).
4774 and_(at, reg1, reg2);
4775 JumpIfSmi(at, on_either_smi);
4776 }
4779 void MacroAssembler::AssertNotSmi(Register object) {
4780 if (emit_debug_code()) {
4781 STATIC_ASSERT(kSmiTag == 0);
4782 andi(at, object, kSmiTagMask);
4783 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
4784 }
4785 }
4788 void MacroAssembler::AssertSmi(Register object) {
4789 if (emit_debug_code()) {
4790 STATIC_ASSERT(kSmiTag == 0);
4791 andi(at, object, kSmiTagMask);
4792 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
4793 }
4794 }
4797 void MacroAssembler::AssertString(Register object) {
4798 if (emit_debug_code()) {
4799 STATIC_ASSERT(kSmiTag == 0);
4800 SmiTst(object, t0);
4801 Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
4802 push(object);
4803 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4804 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4805 Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
4806 pop(object);
4807 }
4808 }
4811 void MacroAssembler::AssertName(Register object) {
4812 if (emit_debug_code()) {
4813 STATIC_ASSERT(kSmiTag == 0);
4814 SmiTst(object, t0);
4815 Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
4816 push(object);
4817 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4818 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4819 Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
4820 pop(object);
4821 }
4822 }
4825 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
4826 Register scratch) {
4827 if (emit_debug_code()) {
4828 Label done_checking;
4829 AssertNotSmi(object);
4830 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4831 Branch(&done_checking, eq, object, Operand(scratch));
4832 push(object);
4833 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4834 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
4835 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
4836 pop(object);
4837 bind(&done_checking);
4838 }
4839 }
4842 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
4843 if (emit_debug_code()) {
4844 ASSERT(!reg.is(at));
4845 LoadRoot(at, index);
4846 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
4847 }
4848 }
4851 void MacroAssembler::JumpIfNotHeapNumber(Register object,
4852 Register heap_number_map,
4853 Register scratch,
4854 Label* on_not_heap_number) {
4855 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4856 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4857 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4858 }
4861 void MacroAssembler::LookupNumberStringCache(Register object,
4862 Register result,
4863 Register scratch1,
4864 Register scratch2,
4865 Register scratch3,
4866 Label* not_found) {
4867 // Use of registers. Register result is used as a temporary.
4868 Register number_string_cache = result;
4869 Register mask = scratch3;
4871 // Load the number string cache.
4872 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
4874 // Make the hash mask from the length of the number string cache. It
4875 // contains two elements (number and string) for each cache entry.
4876 lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
4877 // Divide length by two (length is a smi).
4878 sra(mask, mask, kSmiTagSize + 1);
4879 Addu(mask, mask, -1); // Make mask.
4881 // Calculate the entry in the number string cache. The hash value in the
4882 // number string cache for smis is just the smi value, and the hash for
4883 // doubles is the xor of the upper and lower words. See
4884 // Heap::GetNumberStringCache.
4885 Label is_smi;
4886 Label load_result_from_cache;
4887 JumpIfSmi(object, &is_smi);
4888 CheckMap(object,
4889 scratch1,
4890 Heap::kHeapNumberMapRootIndex,
4891 not_found,
4892 DONT_DO_SMI_CHECK);
4894 STATIC_ASSERT(8 == kDoubleSize);
4895 Addu(scratch1,
4896 object,
4897 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
4898 lw(scratch2, MemOperand(scratch1, kPointerSize));
4899 lw(scratch1, MemOperand(scratch1, 0));
4900 Xor(scratch1, scratch1, Operand(scratch2));
4901 And(scratch1, scratch1, Operand(mask));
4903 // Calculate address of entry in string cache: each entry consists
4904 // of two pointer sized fields.
4905 sll(scratch1, scratch1, kPointerSizeLog2 + 1);
4906 Addu(scratch1, number_string_cache, scratch1);
4908 Register probe = mask;
4909 lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
4910 JumpIfSmi(probe, not_found);
4911 ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
4912 ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
4913 BranchF(&load_result_from_cache, NULL, eq, f12, f14);
4914 Branch(not_found);
4916 bind(&is_smi);
4917 Register scratch = scratch1;
4918 sra(scratch, object, 1); // Shift away the tag.
4919 And(scratch, mask, Operand(scratch));
4921 // Calculate address of entry in string cache: each entry consists
4922 // of two pointer sized fields.
4923 sll(scratch, scratch, kPointerSizeLog2 + 1);
4924 Addu(scratch, number_string_cache, scratch);
4926 // Check if the entry is the smi we are looking for.
4927 lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
4928 Branch(not_found, ne, object, Operand(probe));
4930 // Get the result from the cache.
4931 bind(&load_result_from_cache);
4932 lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
4934 IncrementCounter(isolate()->counters()->number_to_string_native(),
4935 1,
4936 scratch1,
4937 scratch2);
4938 }
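// Cache probe arithmetic, for illustration: with a 128-element FixedArray
// backing store the smi-encoded length is 128 << 1; sra by
// (kSmiTagSize + 1) yields 64 entries and mask becomes 63. A smi key 42
// then probes entry 42 & 63 == 42, and the sll by (kPointerSizeLog2 + 1)
// converts the entry index into a byte offset of two pointers (number and
// cached string) per entry.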
4941 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
4942 Register first,
4943 Register second,
4944 Register scratch1,
4945 Register scratch2,
4946 Label* failure) {
4947 // Test that both first and second are sequential ASCII strings.
4948 // Assume that they are non-smis.
4949 lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
4950 lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
4951 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
4952 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
4954 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
4955 scratch2,
4956 scratch1,
4957 scratch2,
4958 failure);
4959 }
4962 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
4963 Register second,
4964 Register scratch1,
4965 Register scratch2,
4966 Label* failure) {
4967 // Check that neither is a smi.
4968 STATIC_ASSERT(kSmiTag == 0);
4969 And(scratch1, first, Operand(second));
4970 JumpIfSmi(scratch1, failure);
4971 JumpIfNonSmisNotBothSequentialAsciiStrings(first,
4972 second,
4973 scratch1,
4974 scratch2,
4975 failure);
4976 }
4979 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
4980 Register first,
4981 Register second,
4982 Register scratch1,
4983 Register scratch2,
4984 Label* failure) {
4985 const int kFlatAsciiStringMask =
4986 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4987 const int kFlatAsciiStringTag =
4988 kStringTag | kOneByteStringTag | kSeqStringTag;
4989 ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
4990 andi(scratch1, first, kFlatAsciiStringMask);
4991 Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
4992 andi(scratch2, second, kFlatAsciiStringMask);
4993 Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
4994 }
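// Mask/tag arithmetic above, for illustration: with kIsNotStringMask ==
// 0x80 and kStringEncodingMask == 4 (both pinned down by ASSERTs elsewhere
// in this file) and a two-bit representation mask, the combined mask is
// 0x87 and kFlatAsciiStringTag is 4 (string, one-byte, sequential). A
// sequential one-byte string's instance type therefore satisfies
// (type & 0x87) == 4, while e.g. a cons or two-byte string fails the compare.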
4997 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
4998 Register scratch,
4999 Label* failure) {
5000 const int kFlatAsciiStringMask =
5001 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5002 const int kFlatAsciiStringTag =
5003 kStringTag | kOneByteStringTag | kSeqStringTag;
5004 And(scratch, type, Operand(kFlatAsciiStringMask));
5005 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
5006 }
5009 static const int kRegisterPassedArguments = 4;
5011 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5012 int num_double_arguments) {
5013 int stack_passed_words = 0;
5014 num_reg_arguments += 2 * num_double_arguments;
5016 // Up to four simple arguments are passed in registers a0..a3.
5017 if (num_reg_arguments > kRegisterPassedArguments) {
5018 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5019 }
5020 stack_passed_words += kCArgSlotCount;
5021 return stack_passed_words;
5022 }
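// Example count (illustrative): for two integer arguments and two doubles,
// num_reg_arguments becomes 2 + 2 * 2 = 6, of which 6 - 4 = 2 words spill
// to the stack; adding the O32 ABI's reserved argument slots
// (kCArgSlotCount words) gives the total passed back to the caller.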
5025 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5026 Register index,
5027 Register value,
5028 Register scratch,
5029 uint32_t encoding_mask) {
5030 Label is_object;
5031 SmiTst(string, at);
5032 Check(ne, kNonObject, at, Operand(zero_reg));
5034 lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
5035 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5037 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5038 li(scratch, Operand(encoding_mask));
5039 Check(eq, kUnexpectedStringType, at, Operand(scratch));
5041 // The index is assumed to come in untagged; tag it to compare with the
5042 // string length without using a temp register. It is restored at the end
5043 // of this function.
5044 Label index_tag_ok, index_tag_bad;
5045 TrySmiTag(index, scratch, &index_tag_bad);
5046 Branch(&index_tag_ok);
5047 bind(&index_tag_bad);
5048 Abort(kIndexIsTooLarge);
5049 bind(&index_tag_ok);
5051 lw(at, FieldMemOperand(string, String::kLengthOffset));
5052 Check(lt, kIndexIsTooLarge, index, Operand(at));
5054 ASSERT(Smi::FromInt(0) == 0);
5055 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5057 SmiUntag(index, index);
5058 }
5061 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5062 int num_double_arguments,
5063 Register scratch) {
5064 int frame_alignment = ActivationFrameAlignment();
5066 // Up to four simple arguments are passed in registers a0..a3.
5067 // Those four arguments must have reserved argument slots on the stack for
5068 // mips, even though those argument slots are not normally used.
5069 // Remaining arguments are pushed on the stack, above (higher address than)
5070 // the argument slots.
5071 int stack_passed_arguments = CalculateStackPassedWords(
5072 num_reg_arguments, num_double_arguments);
5073 if (frame_alignment > kPointerSize) {
5074 // Make stack end at alignment and make room for num_arguments - 4 words
5075 // and the original value of sp.
5076 mov(scratch, sp);
5077 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5078 ASSERT(IsPowerOf2(frame_alignment));
5079 And(sp, sp, Operand(-frame_alignment));
5080 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5081 } else {
5082 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5083 }
5084 }
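// Frame layout produced above, for illustration: with 6 argument words and
// 8-byte alignment, sp first drops by 7 words, is then rounded down to the
// alignment boundary, and the caller's original sp is stored in the slot
// just above the argument area so CallCFunctionHelper can restore it with a
// single lw.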
5087 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5088 Register scratch) {
5089 PrepareCallCFunction(num_reg_arguments, 0, scratch);
5090 }
5093 void MacroAssembler::CallCFunction(ExternalReference function,
5094 int num_reg_arguments,
5095 int num_double_arguments) {
5096 li(t8, Operand(function));
5097 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5098 }
5101 void MacroAssembler::CallCFunction(Register function,
5102 int num_reg_arguments,
5103 int num_double_arguments) {
5104 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5105 }
5108 void MacroAssembler::CallCFunction(ExternalReference function,
5109 int num_arguments) {
5110 CallCFunction(function, num_arguments, 0);
5111 }
5114 void MacroAssembler::CallCFunction(Register function,
5115 int num_arguments) {
5116 CallCFunction(function, num_arguments, 0);
5117 }
5120 void MacroAssembler::CallCFunctionHelper(Register function,
5121 int num_reg_arguments,
5122 int num_double_arguments) {
5123 ASSERT(has_frame());
5124 // Make sure that the stack is aligned before calling a C function unless
5125 // running in the simulator. The simulator has its own alignment check which
5126 // provides more information.
5127 // The argument slots are presumed to have been set up by
5128 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5130 #if V8_HOST_ARCH_MIPS
5131 if (emit_debug_code()) {
5132 int frame_alignment = OS::ActivationFrameAlignment();
5133 int frame_alignment_mask = frame_alignment - 1;
5134 if (frame_alignment > kPointerSize) {
5135 ASSERT(IsPowerOf2(frame_alignment));
5136 Label alignment_as_expected;
5137 And(at, sp, Operand(frame_alignment_mask));
5138 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5139 // Don't use Check here, as it will call Runtime_Abort possibly
5140 // re-entering here.
5141 stop("Unexpected alignment in CallCFunction");
5142 bind(&alignment_as_expected);
5143 }
5144 }
5145 #endif // V8_HOST_ARCH_MIPS
5147 // Just call directly. The function called cannot cause a GC, or
5148 // allow preemption, so the return address in the link register
5149 // stays correct.
5151 if (!function.is(t9)) {
5152 mov(t9, function);
5153 function = t9;
5154 }
5156 Call(function);
5158 int stack_passed_arguments = CalculateStackPassedWords(
5159 num_reg_arguments, num_double_arguments);
5161 if (OS::ActivationFrameAlignment() > kPointerSize) {
5162 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5163 } else {
5164 Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
5165 }
5166 }
5169 #undef BRANCH_ARGS_CHECK
5172 void MacroAssembler::PatchRelocatedValue(Register li_location,
5173 Register scratch,
5174 Register new_value) {
5175 lw(scratch, MemOperand(li_location));
5176 // At this point scratch is a lui(at, ...) instruction.
5177 if (emit_debug_code()) {
5178 And(scratch, scratch, kOpcodeMask);
5179 Check(eq, kTheInstructionToPatchShouldBeALui,
5180 scratch, Operand(LUI));
5181 lw(scratch, MemOperand(li_location));
5182 }
5183 srl(t9, new_value, kImm16Bits);
5184 Ins(scratch, t9, 0, kImm16Bits);
5185 sw(scratch, MemOperand(li_location));
5187 lw(scratch, MemOperand(li_location, kInstrSize));
5188 // scratch is now ori(at, ...).
5189 if (emit_debug_code()) {
5190 And(scratch, scratch, kOpcodeMask);
5191 Check(eq, kTheInstructionToPatchShouldBeAnOri,
5192 scratch, Operand(ORI));
5193 lw(scratch, MemOperand(li_location, kInstrSize));
5194 }
5195 Ins(scratch, new_value, 0, kImm16Bits);
5196 sw(scratch, MemOperand(li_location, kInstrSize));
5198 // Update the I-cache so the new lui and ori can be executed.
5199 FlushICache(li_location, 2);
5200 }
5202 void MacroAssembler::GetRelocatedValue(Register li_location,
5203 Register value,
5204 Register scratch) {
5205 lw(value, MemOperand(li_location));
5206 if (emit_debug_code()) {
5207 And(value, value, kOpcodeMask);
5208 Check(eq, kTheInstructionShouldBeALui,
5209 value, Operand(LUI));
5210 lw(value, MemOperand(li_location));
5211 }
5213 // value now holds a lui instruction. Extract the immediate.
5214 sll(value, value, kImm16Bits);
5216 lw(scratch, MemOperand(li_location, kInstrSize));
5217 if (emit_debug_code()) {
5218 And(scratch, scratch, kOpcodeMask);
5219 Check(eq, kTheInstructionShouldBeAnOri,
5220 scratch, Operand(ORI));
5221 lw(scratch, MemOperand(li_location, kInstrSize));
5222 }
5223 // "scratch" now holds an ori instruction. Extract the immediate.
5224 andi(scratch, scratch, kImm16Mask);
5226 // Merge the results.
5227 or_(value, value, scratch);
5228 }
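// Encoding recap (illustrative): li(at, 0x12345678) materializes as
// lui(at, 0x1234) followed by ori(at, at, 0x5678). The two reads above
// recover the halves, so (lui_imm << kImm16Bits) | ori_imm rebuilds the
// original 32-bit value that PatchRelocatedValue may later overwrite.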
5231 void MacroAssembler::CheckPageFlag(
5232 Register object,
5233 Register scratch,
5234 int mask,
5235 Condition cc,
5236 Label* condition_met) {
5237 And(scratch, object, Operand(~Page::kPageAlignmentMask));
5238 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5239 And(scratch, scratch, Operand(mask));
5240 Branch(condition_met, cc, scratch, Operand(zero_reg));
5241 }
5244 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5245 Register scratch,
5246 Label* if_deprecated) {
5247 if (map->CanBeDeprecated()) {
5248 li(scratch, Operand(map));
5249 lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
5250 And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
5251 Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5252 }
5253 }
5256 void MacroAssembler::JumpIfBlack(Register object,
5257 Register scratch0,
5258 Register scratch1,
5259 Label* on_black) {
5260 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5261 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5262 }
5265 void MacroAssembler::HasColor(Register object,
5266 Register bitmap_scratch,
5267 Register mask_scratch,
5268 Label* has_color,
5269 int first_bit,
5270 int second_bit) {
5271 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5272 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5274 GetMarkBits(object, bitmap_scratch, mask_scratch);
5276 Label other_color, word_boundary;
5277 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5278 And(t8, t9, Operand(mask_scratch));
5279 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5280 // Shift left 1 by adding.
5281 Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5282 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5283 And(t8, t9, Operand(mask_scratch));
5284 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5285 jmp(&other_color);
5287 bind(&word_boundary);
5288 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5289 And(t9, t9, Operand(1));
5290 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5291 bind(&other_color);
5292 }
5295 // Detect some, but not all, common pointer-free objects. This is used by the
5296 // incremental write barrier which doesn't care about oddballs (they are always
5297 // marked black immediately so this code is not hit).
5298 void MacroAssembler::JumpIfDataObject(Register value,
5299 Register scratch,
5300 Label* not_data_object) {
5301 ASSERT(!AreAliased(value, scratch, t8, no_reg));
5302 Label is_data_object;
5303 lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5304 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5305 Branch(&is_data_object, eq, t8, Operand(scratch));
5306 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5307 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5308 // If it's a string and it's not a cons string then it's an object containing
5309 // no GC pointers.
5310 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5311 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5312 Branch(not_data_object, ne, t8, Operand(zero_reg));
5313 bind(&is_data_object);
5314 }
5317 void MacroAssembler::GetMarkBits(Register addr_reg,
5318 Register bitmap_reg,
5319 Register mask_reg) {
5320 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5321 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5322 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5323 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5324 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5325 sll(t8, t8, kPointerSizeLog2);
5326 Addu(bitmap_reg, bitmap_reg, t8);
5327 li(t8, Operand(1));
5328 sllv(mask_reg, t8, mask_reg);
5329 }
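// Index arithmetic above, for illustration: a 32-bit address splits as
// | page bits | cell index | bit-in-cell | low 2 |. With kPointerSizeLog2
// == 2 and Bitmap::kBitsPerCellLog2 == 5, bits 2..6 pick the bit within a
// 32-bit bitmap cell and the next (kPageSizeBits - 7) bits pick the cell
// word, so one bitmap bit marks one pointer-sized slot of the page.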
5332 void MacroAssembler::EnsureNotWhite(
5333 Register value,
5334 Register bitmap_scratch,
5335 Register mask_scratch,
5336 Register load_scratch,
5337 Label* value_is_white_and_not_data) {
5338 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5339 GetMarkBits(value, bitmap_scratch, mask_scratch);
5341 // If the value is black or grey we don't need to do anything.
5342 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5343 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5344 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5345 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5347 Label done;
5349 // Since both black and grey have a 1 in the first position and white does
5350 // not have a 1 there we only need to check one bit.
5351 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5352 And(t8, mask_scratch, load_scratch);
5353 Branch(&done, ne, t8, Operand(zero_reg));
5355 if (emit_debug_code()) {
5356 // Check for impossible bit pattern.
5357 Label ok;
5358 // sll may overflow, making the check conservative.
5359 sll(t8, mask_scratch, 1);
5360 And(t8, load_scratch, t8);
5361 Branch(&ok, eq, t8, Operand(zero_reg));
5362 stop("Impossible marking bit pattern");
5366 // Value is white. We check whether it is data that doesn't need scanning.
5367 // Currently only checks for HeapNumber and non-cons strings.
5368 Register map = load_scratch; // Holds map while checking type.
5369 Register length = load_scratch; // Holds length of object after testing type.
5370 Label is_data_object;
5372 // Check for heap-number
5373 lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5374 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5375 {
5376 Label skip;
5377 Branch(&skip, ne, t8, Operand(map));
5378 li(length, HeapNumber::kSize);
5379 Branch(&is_data_object);
5380 bind(&skip);
5381 }
5383 // Check for strings.
5384 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5385 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5386 // If it's a string and it's not a cons string then it's an object containing
5387 // no GC pointers.
5388 Register instance_type = load_scratch;
5389 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5390 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5391 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5392 // It's a non-indirect (non-cons and non-slice) string.
5393 // If it's external, the length is just ExternalString::kSize.
5394 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5395 // External strings are the only ones with the kExternalStringTag bit
5396 // set.
5397 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
5398 ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
5399 And(t8, instance_type, Operand(kExternalStringTag));
5400 {
5401 Label skip;
5402 Branch(&skip, eq, t8, Operand(zero_reg));
5403 li(length, ExternalString::kSize);
5404 Branch(&is_data_object);
5405 bind(&skip);
5406 }
5408 // Sequential string, either ASCII or UC16.
5409 // For ASCII (char-size of 1) we shift the smi tag away to get the length.
5410 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5411 // getting the length multiplied by 2.
5412 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5413 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5414 lw(t9, FieldMemOperand(value, String::kLengthOffset));
5415 And(t8, instance_type, Operand(kStringEncodingMask));
5416 {
5417 Label skip;
5418 Branch(&skip, eq, t8, Operand(zero_reg));
5419 srl(t9, t9, 1);
5420 bind(&skip);
5421 }
5422 Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5423 And(length, length, Operand(~kObjectAlignmentMask));
5425 bind(&is_data_object);
5426 // Value is a data object, and it is white. Mark it black. Since we know
5427 // that the object is white we can make it black by flipping one bit.
5428 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5429 Or(t8, t8, Operand(mask_scratch));
5430 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5432 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5433 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5434 Addu(t8, t8, Operand(length));
5435 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5436 bind(&done);
5437 }
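// Size computation above, for illustration: a sequential one-byte string of
// 5 characters has a smi length of 10; the srl on the one-byte path halves
// it back to 5, and 5 + SeqString::kHeaderSize rounded up with
// kObjectAlignmentMask yields the allocated size counted as live bytes. On
// the two-byte path the smi length, left as-is, already equals 2 * length.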
5441 void MacroAssembler::Throw(BailoutReason reason) {
5442 Label throw_start;
5443 bind(&throw_start);
5444 #ifdef DEBUG
5445 const char* msg = GetBailoutReason(reason);
5446 if (msg != NULL) {
5447 RecordComment("Throw message: ");
5448 RecordComment(msg);
5449 }
5450 #endif
5452 li(a0, Operand(Smi::FromInt(reason)));
5453 push(a0);
5454 // Disable stub call restrictions to always allow calls to throw.
5455 if (!has_frame_) {
5456 // We don't actually want to generate a pile of code for this, so just
5457 // claim there is a stack frame, without generating one.
5458 FrameScope scope(this, StackFrame::NONE);
5459 CallRuntime(Runtime::kHiddenThrowMessage, 1);
5460 } else {
5461 CallRuntime(Runtime::kHiddenThrowMessage, 1);
5462 }
5463 // will not return here
5464 if (is_trampoline_pool_blocked()) {
5465 // If the calling code cares about the exact number of
5466 // instructions generated, we insert padding here to keep the size
5467 // of the ThrowMessage macro constant.
5468 // Currently in debug mode with debug_code enabled the number of
5469 // generated instructions is 14, so we use this as a maximum value.
5470 static const int kExpectedThrowMessageInstructions = 14;
5471 int throw_instructions = InstructionsGeneratedSince(&throw_start);
5472 ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
5473 while (throw_instructions++ < kExpectedThrowMessageInstructions) {
5474 nop();
5475 }
5476 }
5477 }
5480 void MacroAssembler::ThrowIf(Condition cc,
5481 BailoutReason reason,
5482 Register rs,
5483 Operand rt) {
5484 Label L;
5485 Branch(&L, NegateCondition(cc), rs, rt);
5486 Throw(reason);
5487 // will not return here
5488 bind(&L);
5489 }
5492 void MacroAssembler::LoadInstanceDescriptors(Register map,
5493 Register descriptors) {
5494 lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5495 }
5498 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5499 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5500 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5501 }
5504 void MacroAssembler::EnumLength(Register dst, Register map) {
5505 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5506 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5507 And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
5508 }
5511 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5512 Register empty_fixed_array_value = t2;
5513 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5514 Label next, start;
5515 mov(a2, a0);
5517 // Check if the enum length field is properly initialized, indicating that
5518 // there is an enum cache.
5519 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5521 EnumLength(a3, a1);
5522 Branch(
5523 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5525 jmp(&start);
5527 bind(&next);
5528 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5530 // For all objects but the receiver, check that the cache is empty.
5531 EnumLength(a3, a1);
5532 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5534 bind(&start);
5536 // Check that there are no elements. Register a2 contains the current JS
5537 // object we've reached through the prototype chain.
5538 Label no_elements;
5539 lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5540 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5542 // Second chance, the object may be using the empty slow element dictionary.
5543 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5544 Branch(call_runtime, ne, a2, Operand(at));
5546 bind(&no_elements);
5547 lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5548 Branch(&next, ne, a2, Operand(null_value));
5549 }
5552 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5553 ASSERT(!output_reg.is(input_reg));
5554 Label done;
5555 li(output_reg, Operand(255));
5556 // Normal branch: nop in delay slot.
5557 Branch(&done, gt, input_reg, Operand(output_reg));
5558 // Use delay slot in this branch.
5559 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5560 mov(output_reg, zero_reg); // In delay slot.
5561 mov(output_reg, input_reg); // Value is in range 0..255.
5562 bind(&done);
5563 }
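// Delay-slot subtlety above (illustrative): the mov(output_reg, zero_reg)
// in the delay slot executes whether or not the lt-branch is taken, so a
// negative input returns 0 directly, while an in-range input falls through
// and the zero is immediately overwritten by mov(output_reg, input_reg).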
5566 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5567 DoubleRegister input_reg,
5568 DoubleRegister temp_double_reg) {
5569 Label above_zero;
5570 Label done;
5571 Label in_bounds;
5573 Move(temp_double_reg, 0.0);
5574 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5576 // Double value is less than zero, NaN or Inf, return 0.
5577 mov(result_reg, zero_reg);
5578 Branch(&done);
5580 // Double value is >= 255, return 255.
5581 bind(&above_zero);
5582 Move(temp_double_reg, 255.0);
5583 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5584 li(result_reg, Operand(255));
5585 Branch(&done);
5587 // In 0-255 range, round and truncate.
5588 bind(&in_bounds);
5589 cvt_w_d(temp_double_reg, input_reg);
5590 mfc1(result_reg, temp_double_reg);
5591 bind(&done);
5592 }
5595 void MacroAssembler::TestJSArrayForAllocationMemento(
5596 Register receiver_reg,
5597 Register scratch_reg,
5598 Label* no_memento_found,
5599 Condition cond,
5600 Label* allocation_memento_present) {
5601 ExternalReference new_space_start =
5602 ExternalReference::new_space_start(isolate());
5603 ExternalReference new_space_allocation_top =
5604 ExternalReference::new_space_allocation_top_address(isolate());
5605 Addu(scratch_reg, receiver_reg,
5606 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5607 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5608 li(at, Operand(new_space_allocation_top));
5609 lw(at, MemOperand(at));
5610 Branch(no_memento_found, gt, scratch_reg, Operand(at));
5611 lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5612 if (allocation_memento_present) {
5613 Branch(allocation_memento_present, cond, scratch_reg,
5614 Operand(isolate()->factory()->allocation_memento_map()));
5615 }
5616 }
5619 Register GetRegisterThatIsNotOneOf(Register reg1,
5620 Register reg2,
5621 Register reg3,
5622 Register reg4,
5623 Register reg5,
5624 Register reg6) {
5625 RegList regs = 0;
5626 if (reg1.is_valid()) regs |= reg1.bit();
5627 if (reg2.is_valid()) regs |= reg2.bit();
5628 if (reg3.is_valid()) regs |= reg3.bit();
5629 if (reg4.is_valid()) regs |= reg4.bit();
5630 if (reg5.is_valid()) regs |= reg5.bit();
5631 if (reg6.is_valid()) regs |= reg6.bit();
5633 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
5634 Register candidate = Register::FromAllocationIndex(i);
5635 if (regs & candidate.bit()) continue;
5636 return candidate;
5637 }
5638 UNREACHABLE();
5639 return no_reg;
5640 }
5643 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5644 Register object,
5645 Register scratch0,
5646 Register scratch1,
5647 Label* found) {
5648 ASSERT(!scratch1.is(scratch0));
5649 Factory* factory = isolate()->factory();
5650 Register current = scratch0;
5651 Label loop_again;
5653 // Scratch contained elements pointer.
5654 Move(current, object);
5656 // Loop based on the map going up the prototype chain.
5657 bind(&loop_again);
5658 lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
5659 lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
5660 Ext(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
5661 Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
5662 lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
5663 Branch(&loop_again, ne, current, Operand(factory->null_value()));
5664 }
5667 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5668 if (r1.is(r2)) return true;
5669 if (r1.is(r3)) return true;
5670 if (r1.is(r4)) return true;
5671 if (r2.is(r3)) return true;
5672 if (r2.is(r4)) return true;
5673 if (r3.is(r4)) return true;
5674 return false;
5675 }
5678 CodePatcher::CodePatcher(byte* address, int instructions)
5679 : address_(address),
5680 size_(instructions * Assembler::kInstrSize),
5681 masm_(NULL, address, size_ + Assembler::kGap) {
5682 // Create a new macro assembler pointing to the address of the code to patch.
5683 // The size is adjusted with kGap in order for the assembler to generate
5684 // size bytes of instructions without failing with buffer size constraints.
5685 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5686 }
5689 CodePatcher::~CodePatcher() {
5690 // Indicate that code has changed.
5691 CPU::FlushICache(address_, size_);
5693 // Check that the code was patched as expected.
5694 ASSERT(masm_.pc_ == address_ + size_);
5695 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5696 }
5699 void CodePatcher::Emit(Instr instr) {
5700 masm()->emit(instr);
5701 }
5704 void CodePatcher::Emit(Address addr) {
5705 masm()->emit(reinterpret_cast<Instr>(addr));
5706 }
5709 void CodePatcher::ChangeBranchCondition(Condition cond) {
5710 Instr instr = Assembler::instr_at(masm_.pc_);
5711 ASSERT(Assembler::IsBranch(instr));
5712 uint32_t opcode = Assembler::GetOpcodeField(instr);
5713 // Currently only the 'eq' and 'ne' cond values are supported and the simple
5714 // branch instructions (with opcode being the branch type).
5715 // There are some special cases (see Assembler::IsBranch()) so extending this
5716 // would be tricky.
5717 ASSERT(opcode == BEQ ||
5718 opcode == BNE ||
5719 opcode == BLEZ ||
5720 opcode == BGTZ ||
5721 opcode == BEQL ||
5722 opcode == BNEL ||
5723 opcode == BLEZL ||
5724 opcode == BGTZL);
5725 opcode = (cond == eq) ? BEQ : BNE;
5726 instr = (instr & ~kOpcodeMask) | opcode;
5727 masm_.emit(instr);
5728 }
5731 void MacroAssembler::TruncatingDiv(Register result,
5732 Register dividend,
5733 int32_t divisor) {
5734 ASSERT(!dividend.is(result));
5735 ASSERT(!dividend.is(at));
5736 ASSERT(!result.is(at));
5737 MultiplierAndShift ms(divisor);
5738 li(at, Operand(ms.multiplier()));
5739 Mult(dividend, Operand(at));
5740 mfhi(result);
5741 if (divisor > 0 && ms.multiplier() < 0) {
5742 Addu(result, result, Operand(dividend));
5743 }
5744 if (divisor < 0 && ms.multiplier() > 0) {
5745 Subu(result, result, Operand(dividend));
5746 }
5747 if (ms.shift() > 0) sra(result, result, ms.shift());
5748 srl(at, dividend, 31);
5749 Addu(result, result, Operand(at));
5750 }
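// Worked instance of the multiply-shift division above (illustrative): for
// divisor 7 the classic Hacker's Delight constants are multiplier
// 0x92492493 (negative as a signed word) and shift 2. Since divisor > 0 and
// the multiplier < 0, the dividend is added to the high product word taken
// from hi, then the sra by 2 and the final sign-bit add produce
// trunc(dividend / 7) for any int32 dividend.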
5753 } } // namespace v8::internal
5755 #endif // V8_TARGET_ARCH_MIPS