// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false),
      has_double_zero_reg_set_(false) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


void MacroAssembler::Load(Register dst,
                          const MemOperand& src,
                          Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    lb(dst, src);
  } else if (r.IsUInteger8()) {
    lbu(dst, src);
  } else if (r.IsInteger16()) {
    lh(dst, src);
  } else if (r.IsUInteger16()) {
    lhu(dst, src);
  } else {
    lw(dst, src);
  }
}


void MacroAssembler::Store(Register src,
                           const MemOperand& dst,
                           Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    sb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    sh(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    sw(src, dst);
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  Branch(2, NegateCondition(cond), src1, src2);
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}

// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  if (num_unsaved > 0) {
    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sw(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  lw(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  return kSafepointRegisterStackIndexMap[reg_code];
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  UNIMPLEMENTED_MIPS();
  // General purpose registers are pushed last on the stack.
  int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}

void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  DCHECK(cc == eq || cc == ne);
  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  Branch(branch, cc, scratch,
         Operand(ExternalReference::new_space_start(isolate())));
}

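// For intuition: assuming the usual power-of-two-aligned semispace layout,
// the masked compare above is equivalent to the C expression (illustrative
// only):
//
//   bool in_new_space = (object & new_space_mask) == new_space_start;
//
// i.e. masking the address selects its space, which is then compared against
// the new-space base.
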
void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    RAStatus ra_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!AreAliased(value, dst, t8, object));
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Addu(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              ra_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
    li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
  }
}

// Will clobber 4 registers: object, map, dst, ip.  The
// register 'object' contains a heap object pointer.
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       RAStatus ra_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    DCHECK(!dst.is(at));
    lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    Check(eq,
          kWrongAddressOrValuePassedToRecordWrite,
          dst,
          Operand(isolate()->factory()->meta_map()));
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
    Check(eq,
          kWrongAddressOrValuePassedToRecordWrite,
          map,
          Operand(at));
  }

  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);

  Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, at, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
    li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}

// Will clobber 4 registers: object, address, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    RAStatus ra_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!AreAliased(object, address, value, t8));
  DCHECK(!AreAliased(object, address, value, t9));

  if (emit_debug_code()) {
    lw(at, MemOperand(address));
    Assert(
        eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  eq,
                  &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
    li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}

void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  li(t8, Operand(store_buffer));
  lw(scratch, MemOperand(t8));
  // Store pointer to buffer and increment buffer top.
  sw(address, MemOperand(scratch));
  Addu(scratch, scratch, kPointerSize);
  // Write back new top of buffer.
  sw(scratch, MemOperand(t8));
  // Call stub on end of buffer.
  // Check for end of buffer.
  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    Branch(&done, eq, t8, Operand(zero_reg));
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Ret(eq, t8, Operand(zero_reg));
  }
  push(ra);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(ra);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}

// -----------------------------------------------------------------------------
// Allocation support.

void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  DCHECK(!holder_reg.is(scratch));
  DCHECK(!holder_reg.is(at));
  DCHECK(!scratch.is(at));

  // Load current lexical context from the stack frame.
  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
      scratch, Operand(zero_reg));
#endif

  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  lw(scratch, FieldMemOperand(scratch, offset));
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the native_context_map.
    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
          holder_reg, Operand(at));
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  Branch(&same_contexts, eq, scratch, Operand(at));

  // Check the context is a native context.
  if (emit_debug_code()) {
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, at);  // Move at to its holding place.
    LoadRoot(at, Heap::kNullValueRootIndex);
    Check(ne, kJSGlobalProxyContextShouldNotBeNull,
          holder_reg, Operand(at));

    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
          holder_reg, Operand(at));
    // Restoring 'at' here is not needed; 'at' is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore 'at' to holder's context.
    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  lw(scratch, FieldMemOperand(scratch, token_offset));
  lw(at, FieldMemOperand(at, token_offset));
  Branch(miss, ne, scratch, Operand(at));

  bind(&same_contexts);
}

// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);

  // Xor original key with a seed.
  xor_(reg0, reg0, scratch);

  // Compute the hash code from the untagged key.  This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  nor(scratch, reg0, zero_reg);
  sll(at, reg0, 15);
  addu(reg0, scratch, at);

  // hash = hash ^ (hash >> 12);
  srl(at, reg0, 12);
  xor_(reg0, reg0, at);

  // hash = hash + (hash << 2);
  sll(at, reg0, 2);
  addu(reg0, reg0, at);

  // hash = hash ^ (hash >> 4);
  srl(at, reg0, 4);
  xor_(reg0, reg0, at);

  // hash = hash * 2057;
  sll(scratch, reg0, 11);
  sll(at, reg0, 3);
  addu(reg0, reg0, at);
  addu(reg0, reg0, scratch);

  // hash = hash ^ (hash >> 16);
  srl(at, reg0, 16);
  xor_(reg0, reg0, at);
}

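// For reference, the emitted sequence computes the following C function
// (illustrative sketch only; the authoritative definition is
// ComputeIntegerHash in utils.h):
//
//   uint32_t Hash(uint32_t key, uint32_t seed) {
//     uint32_t hash = key ^ seed;
//     hash = ~hash + (hash << 15);
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;  // == hash + (hash << 3) + (hash << 11)
//     hash = hash ^ (hash >> 16);
//     return hash;
//   }
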
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register reg0,
                                              Register reg1,
                                              Register reg2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'elements' or 'key'.
  //            Unchanged on bailout so 'elements' and 'key' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // reg0 - holds the untagged key on entry and holds the hash once computed.
  //
  // reg1 - Used to hold the capacity mask of the dictionary.
  //
  // reg2 - Used for the index into the dictionary.
  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
  Label done;

  GetNumberHash(reg0, reg1);

  // Compute the capacity mask.
  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  sra(reg1, reg1, kSmiTagSize);
  Subu(reg1, reg1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Use reg2 for index calculations and keep the hash intact in reg0.
    mov(reg2, reg0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(reg2, reg2, reg1);

    // Scale the index by multiplying by the element size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    sll(at, reg2, 1);  // 2x.
    addu(reg2, reg2, at);  // reg2 = reg2 * 3.

    // Check if the key is identical to the name.
    sll(at, reg2, kPointerSizeLog2);
    addu(reg2, elements, at);

    lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
    if (i != kNumberDictionaryProbes - 1) {
      Branch(&done, eq, key, Operand(at));
    } else {
      Branch(miss, ne, key, Operand(at));
    }
  }

  bind(&done);
  // Check that the value is a field property.
  // reg2: elements + (index * kPointerSize).
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  lw(result, FieldMemOperand(reg2, kValueOffset));
}

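// Illustrative sketch of the probe sequence generated above (names are
// descriptive only): each probe i computes
//
//   entry       = (hash + GetProbeOffset(i)) & capacity_mask;  // i == 0 adds 0
//   byte_offset = (entry * kEntrySize) << kPointerSizeLog2;    // kEntrySize == 3
//
// where the multiply by 3 is strength-reduced to (entry << 1) + entry, as in
// the sll/addu pair above.
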
// ---------------------------------------------------------------------------
// Instruction macros.

void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}


void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      subu(rd, rs, at);
    }
  }
}


void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (IsMipsArchVariant(kLoongson)) {
      mult(rs, rt.rm());
      mflo(rd);
    } else {
      mul(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (IsMipsArchVariant(kLoongson)) {
      mult(rs, at);
      mflo(rd);
    } else {
      mul(rd, rs, at);
    }
  }
}

void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
    Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      mult(rs, rt.rm());
      mflo(rd_lo);
      mfhi(rd_hi);
    } else {
      if (rd_lo.is(rs)) {
        DCHECK(!rd_hi.is(rs));
        DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
        muh(rd_hi, rs, rt.rm());
        mul(rd_lo, rs, rt.rm());
      } else {
        DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
        mul(rd_lo, rs, rt.rm());
        muh(rd_hi, rs, rt.rm());
      }
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      mult(rs, at);
      mflo(rd_lo);
      mfhi(rd_hi);
    } else {
      if (rd_lo.is(rs)) {
        DCHECK(!rd_hi.is(rs));
        DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
        muh(rd_hi, rs, at);
        mul(rd_lo, rs, at);
      } else {
        DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
        mul(rd_lo, rs, at);
        muh(rd_hi, rs, at);
      }
    }
  }
}

void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      mult(rs, rt.rm());
      mfhi(rd);
    } else {
      muh(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      mult(rs, at);
      mfhi(rd);
    } else {
      muh(rd, rs, at);
    }
  }
}


void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    mult(rs, at);
  }
}


void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      multu(rs, rt.rm());
      mfhi(rd);
    } else {
      muhu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      multu(rs, at);
      mfhi(rd);
    } else {
      muhu(rd, rs, at);
    }
  }
}


void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    multu(rs, at);
  }
}


void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    div(rs, at);
  }
}


void MacroAssembler::Div(Register rem, Register res,
    Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, rt.rm());
      mflo(res);
      mfhi(rem);
    } else {
      div(res, rs, rt.rm());
      mod(rem, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, at);
      mflo(res);
      mfhi(rem);
    } else {
      div(res, rs, at);
      mod(rem, rs, at);
    }
  }
}


void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, rt.rm());
      mflo(res);
    } else {
      div(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, at);
      mflo(res);
    } else {
      div(res, rs, at);
    }
  }
}


void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, rt.rm());
      mfhi(rd);
    } else {
      mod(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, at);
      mfhi(rd);
    } else {
      mod(rd, rs, at);
    }
  }
}


void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      divu(rs, rt.rm());
      mfhi(rd);
    } else {
      modu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      divu(rs, at);
      mfhi(rd);
    } else {
      modu(rd, rs, at);
    }
  }
}


void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    divu(rs, at);
  }
}


void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      divu(rs, rt.rm());
      mflo(res);
    } else {
      divu(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      divu(rs, at);
      mflo(res);
    } else {
      divu(res, rs, at);
    }
  }
}

void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}


void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}


void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}


void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}


void MacroAssembler::Neg(Register rs, const Operand& rt) {
  DCHECK(rt.is_reg());
  DCHECK(!at.is(rs));
  DCHECK(!at.is(rt.rm()));
  li(at, -1);
  xor_(rs, rt.rm(), at);
}


void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}


void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      sltiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}

void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    if (rt.is_reg()) {
      rotrv(rd, rs, rt.rm());
    } else {
      rotr(rd, rs, rt.imm32_);
    }
  } else {
    if (rt.is_reg()) {
      subu(at, zero_reg, rt.rm());
      sllv(at, rs, at);
      srlv(rd, rs, rt.rm());
      or_(rd, rd, at);
    } else {
      if (rt.imm32_ == 0) {
        srl(rd, rs, 0);
      } else {
        srl(at, rs, rt.imm32_);
        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
        or_(rd, rd, at);
      }
    }
  }
}

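// The pre-r2 fallback above relies on the usual 32-bit rotate identity
// (illustrative):
//
//   ror(x, n) == (x >> n) | (x << ((32 - n) & 31))
//
// e.g. ror(0x80000001, 1) == 0xC0000000.
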
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
  if (IsMipsArchVariant(kLoongson)) {
    lw(zero_reg, rs);
  } else {
    pref(hint, rs);
  }
}


// ------------Pseudo-instructions-------------

void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
  lwr(rd, rs);
  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
}


void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
  swr(rd, rs);
  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
}

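// On little-endian MIPS (assumed here), the lwr/lwl and swr/swl pairs above
// access the four bytes at [rs.offset() .. rs.offset() + 3] regardless of
// alignment: the "right" instruction handles the low-address bytes of the
// word and the "left" instruction the high-address bytes.
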
void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
  AllowDeferredHandleDereference smi_check;
  if (value->IsSmi()) {
    li(dst, Operand(value), mode);
  } else {
    DCHECK(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
      li(dst, Operand(cell));
      lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
    } else {
      li(dst, Operand(value));
    }
  }
}


void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
  DCHECK(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
    // Normal load of an immediate value which does not need Relocation Info.
    if (is_int16(j.imm32_)) {
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kHiMask)) {
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kImm16Mask)) {
      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
    } else {
      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
      ori(rd, rd, (j.imm32_ & kImm16Mask));
    }
  } else {
    if (MustUseReg(j.rmode_)) {
      RecordRelocInfo(j.rmode_, j.imm32_);
    }
    // We always need the same number of instructions as we may need to patch
    // this code to load another value which may need 2 instructions to load.
    lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
    ori(rd, rd, (j.imm32_ & kImm16Mask));
  }
}

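// Illustrative encodings chosen by li() above when no relocation info is
// required and mode == OPTIMIZE_SIZE:
//
//   li(t0, Operand(-2))          ->  addiu t0, zero_reg, -2      // is_int16
//   li(t0, Operand(0x00009000))  ->  ori   t0, zero_reg, 0x9000  // high half 0
//   li(t0, Operand(0x00120000))  ->  lui   t0, 0x12              // low half 0
//   li(t0, Operand(0x00123456))  ->  lui   t0, 0x12
//                                    ori   t0, t0, 0x3456
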
void MacroAssembler::MultiPush(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}

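// Registers are stored in descending code order, so the register with the
// lowest code lands closest to sp. E.g. (illustrative):
//
//   MultiPush(a0.bit() | a1.bit() | ra.bit());
//
// leaves a0 at [sp + 0], a1 at [sp + 4] and ra at [sp + 8].
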
void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPushFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPushReversedFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPopFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversedFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}

void MacroAssembler::FlushICache(Register address, unsigned instructions) {
  RegList saved_regs = kJSCallerSaved | ra.bit();
  MultiPush(saved_regs);
  AllowExternalCallThatCantCauseGC scope(this);

  // Save to a0 in case address == t0.
  Move(a0, address);
  PrepareCallCFunction(2, t0);

  li(a1, instructions * kInstrSize);
  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
  MultiPop(saved_regs);
}

void MacroAssembler::Ext(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size < 33);

  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    ext_(rt, rs, pos, size);
  } else {
    // Move rs to rt and shift it left then right to get the
    // desired bitfield on the right side and zeroes on the left.
    int shift_left = 32 - (pos + size);
    sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.

    int shift_right = 32 - size;
    if (shift_right > 0) {
      srl(rt, rt, shift_right);
    }
  }
}

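// Worked example for the fallback path (illustrative): extracting bits
// [11:4] (pos == 4, size == 8) uses shift_left == 20 and shift_right == 24:
//
//   sll rt, rs, 20   // bits [11:4] now occupy [31:24]
//   srl rt, rt, 24   // ... and end up in [7:0], zero-extended
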
void MacroAssembler::Ins(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size <= 32);
  DCHECK(size != 0);

  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    ins_(rt, rs, pos, size);
  } else {
    DCHECK(!rt.is(t8) && !rs.is(t8));
    Subu(at, zero_reg, Operand(1));
    srl(at, at, 32 - size);
    and_(t8, rs, at);
    sll(t8, t8, pos);
    sll(at, at, pos);
    nor(at, at, zero_reg);
    and_(rt, rt, at);
    or_(rt, rt, t8);
  }
}

void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              FPURegister fs,
                              FPURegister scratch) {
  // Move the data from fs to t8.
  mfc1(t8, fs);
  Cvt_d_uw(fd, t8, scratch);
}


void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              Register rs,
                              FPURegister scratch) {
  // Convert rs to a FP value in fd (and fd + 1).
  // We do this by converting rs minus the MSB to avoid sign conversion,
  // then adding 2^31 to the result (if needed).

  DCHECK(!fd.is(scratch));
  DCHECK(!rs.is(t9));
  DCHECK(!rs.is(at));

  // Save rs's MSB to t9.
  Ext(t9, rs, 31, 1);
  // Remove rs's MSB.
  Ext(at, rs, 0, 31);
  // Move the result to fd.
  mtc1(at, fd);

  // Convert fd to a real FP value.
  cvt_d_w(fd, fd);

  Label conversion_done;

  // If rs's MSB was 0, it's done.
  // Otherwise we need to add that to the FP register.
  Branch(&conversion_done, eq, t9, Operand(zero_reg));

  // Load 2^31 into scratch as its double representation.
  li(at, 0x41E00000);
  mtc1(zero_reg, scratch);
  Mthc1(at, scratch);
  // Add it to fd.
  add_d(fd, fd, scratch);

  bind(&conversion_done);
}

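// Worked example (illustrative): for rs == 0x80000003 the MSB is set, so the
// code first converts 0x00000003 to 3.0 and then adds 2^31, yielding
// 2147483651.0 == (double)0x80000003 treated as unsigned. The constant
// 0x41E00000 is the high word of the IEEE 754 double 2^31.
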
void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister fs,
                                FPURegister scratch) {
  Trunc_uw_d(fs, t8, scratch);
  mtc1(t8, fd);
}


void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
  if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
    Mfhc1(t8, fs);
    trunc_w_d(fd, fs);
    Mthc1(t8, fs);
  } else {
    trunc_w_d(fd, fs);
  }
}


void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
  if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
    Mfhc1(t8, fs);
    round_w_d(fd, fs);
    Mthc1(t8, fs);
  } else {
    round_w_d(fd, fs);
  }
}


void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
  if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
    Mfhc1(t8, fs);
    floor_w_d(fd, fs);
    Mthc1(t8, fs);
  } else {
    floor_w_d(fd, fs);
  }
}


void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
  if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
    Mfhc1(t8, fs);
    ceil_w_d(fd, fs);
    Mthc1(t8, fs);
  } else {
    ceil_w_d(fd, fs);
  }
}

void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                Register rs,
                                FPURegister scratch) {
  DCHECK(!fd.is(scratch));
  DCHECK(!rs.is(at));

  // Load 2^31 into scratch as its double representation.
  li(at, 0x41E00000);
  mtc1(zero_reg, scratch);
  Mthc1(at, scratch);
  // Test if scratch > fd.
  // If fd < 2^31 we can convert it normally.
  Label simple_convert;
  BranchF(&simple_convert, NULL, lt, fd, scratch);

  // First we subtract 2^31 from fd, then trunc it to rs
  // and add 2^31 to rs.
  sub_d(scratch, fd, scratch);
  trunc_w_d(scratch, scratch);
  mfc1(rs, scratch);
  Or(rs, rs, 1 << 31);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_d(scratch, fd);
  mfc1(rs, scratch);

  bind(&done);
}

void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
  if (IsFp64Mode()) {
    mthc1(rt, fs);
  } else {
    mtc1(rt, fs.high());
  }
}


void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
  if (IsFp64Mode()) {
    mfhc1(rt, fs);
  } else {
    mfc1(rt, fs.high());
  }
}

void MacroAssembler::BranchF(Label* target,
                             Label* nan,
                             Condition cc,
                             FPURegister cmp1,
                             FPURegister cmp2,
                             BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (cc == al) {
    Branch(bd, target);
    return;
  }

  DCHECK(nan || target);
  // Check for unordered (NaN) cases.
  if (nan) {
    if (!IsMipsArchVariant(kMips32r6)) {
      c(UN, D, cmp1, cmp2);
      bc1t(nan);
    } else {
      // Use kDoubleCompareReg for comparison result. It has to be unavailable
      // to lithium register allocator.
      DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
      cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
      bc1nez(nan, kDoubleCompareReg);
    }
  }

  if (!IsMipsArchVariant(kMips32r6)) {
    if (target) {
      // Here NaN cases were either handled by this function or are assumed to
      // have been handled by the caller.
      switch (cc) {
        case lt:
          c(OLT, D, cmp1, cmp2);
          bc1t(target);
          break;
        case gt:
          c(ULE, D, cmp1, cmp2);
          bc1f(target);
          break;
        case ge:
          c(ULT, D, cmp1, cmp2);
          bc1f(target);
          break;
        case le:
          c(OLE, D, cmp1, cmp2);
          bc1t(target);
          break;
        case eq:
          c(EQ, D, cmp1, cmp2);
          bc1t(target);
          break;
        case ueq:
          c(UEQ, D, cmp1, cmp2);
          bc1t(target);
          break;
        case ne:
          c(EQ, D, cmp1, cmp2);
          bc1f(target);
          break;
        case nue:
          c(UEQ, D, cmp1, cmp2);
          bc1f(target);
          break;
        default:
          CHECK(0);
      }
    }
  } else {
    if (target) {
      // Here NaN cases were either handled by this function or are assumed to
      // have been handled by the caller.
      // Unsigned conditions are treated as their signed counterpart.
      // Use kDoubleCompareReg for comparison result, it is
      // valid in fp64 (FR = 1) mode which is implied for mips32r6.
      DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
      switch (cc) {
        case lt:
          cmp(OLT, L, kDoubleCompareReg, cmp1, cmp2);
          bc1nez(target, kDoubleCompareReg);
          break;
        case gt:
          cmp(ULE, L, kDoubleCompareReg, cmp1, cmp2);
          bc1eqz(target, kDoubleCompareReg);
          break;
        case ge:
          cmp(ULT, L, kDoubleCompareReg, cmp1, cmp2);
          bc1eqz(target, kDoubleCompareReg);
          break;
        case le:
          cmp(OLE, L, kDoubleCompareReg, cmp1, cmp2);
          bc1nez(target, kDoubleCompareReg);
          break;
        case eq:
          cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
          bc1nez(target, kDoubleCompareReg);
          break;
        case ueq:
          cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
          bc1nez(target, kDoubleCompareReg);
          break;
        case ne:
          cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
          bc1eqz(target, kDoubleCompareReg);
          break;
        case nue:
          cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
          bc1eqz(target, kDoubleCompareReg);
          break;
        default:
          CHECK(0);
      }
    }
  }

  if (bd == PROTECT) {
    nop();
  }
}

void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
  if (IsFp64Mode()) {
    DCHECK(!src_low.is(at));
    mfhc1(at, dst);
    mtc1(src_low, dst);
    mthc1(at, dst);
  } else {
    mtc1(src_low, dst);
  }
}


void MacroAssembler::Move(FPURegister dst, float imm) {
  li(at, Operand(bit_cast<int32_t>(imm)));
  mtc1(at, dst);
}


void MacroAssembler::Move(FPURegister dst, double imm) {
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value_rep(imm);
  // Handle special values first.
  if (value_rep == zero && has_double_zero_reg_set_) {
    mov_d(dst, kDoubleRegZero);
  } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
    neg_d(dst, kDoubleRegZero);
  } else {
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);
    // Move the low part of the double into the lower of the corresponding FPU
    // register of FPU register pair.
    if (lo != 0) {
      li(at, Operand(lo));
      mtc1(at, dst);
    } else {
      mtc1(zero_reg, dst);
    }
    // Move the high part of the double into the higher of the corresponding FPU
    // register of FPU register pair.
    if (hi != 0) {
      li(at, Operand(hi));
      Mthc1(at, dst);
    } else {
      Mthc1(zero_reg, dst);
    }
    if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
  }
}

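// E.g. (illustrative) imm == 1.0 has lo == 0 and hi == 0x3FF00000, so only
// the high word needs a constant load; the low word is written directly from
// zero_reg, saving one li.
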
void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
  if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
    Label done;
    Branch(&done, ne, rt, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movz(rd, rs, rt);
  }
}


void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
  if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
    Label done;
    Branch(&done, eq, rt, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movn(rd, rs, rt);
  }
}

void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
  if (IsMipsArchVariant(kLoongson)) {
    // Tests an FP condition code and then conditionally moves rs to rd.
    // We do not currently use any FPU cc bit other than bit 0.
    DCHECK(cc == 0);
    DCHECK(!(rs.is(t8) || rd.is(t8)));
    Label done;
    Register scratch = t8;
    // For testing purposes we need to fetch the content of the FCSR register
    // and then test its cc (floating point condition code) bit (for cc = 0,
    // it is the 24th bit of the FCSR).
    cfc1(scratch, FCSR);
    // For the MIPS I, II and III architectures, the contents of scratch are
    // UNPREDICTABLE for the instruction immediately following CFC1.
    nop();
    srl(scratch, scratch, 16);
    andi(scratch, scratch, 0x0080);
    Branch(&done, eq, scratch, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movt(rd, rs, cc);
  }
}


void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
  if (IsMipsArchVariant(kLoongson)) {
    // Tests an FP condition code and then conditionally moves rs to rd.
    // We do not currently use any FPU cc bit other than bit 0.
    DCHECK(cc == 0);
    DCHECK(!(rs.is(t8) || rd.is(t8)));
    Label done;
    Register scratch = t8;
    // For testing purposes we need to fetch the content of the FCSR register
    // and then test its cc (floating point condition code) bit (for cc = 0,
    // it is the 24th bit of the FCSR).
    cfc1(scratch, FCSR);
    // For the MIPS I, II and III architectures, the contents of scratch are
    // UNPREDICTABLE for the instruction immediately following CFC1.
    nop();
    srl(scratch, scratch, 16);
    andi(scratch, scratch, 0x0080);
    Branch(&done, ne, scratch, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movf(rd, rs, cc);
  }
}

void MacroAssembler::Clz(Register rd, Register rs) {
  if (IsMipsArchVariant(kLoongson)) {
    DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
    Register mask = t8;
    Register scratch = t9;
    Label loop, end;
    mov(at, rs);
    mov(rd, zero_reg);
    lui(mask, 0x8000);
    bind(&loop);
    and_(scratch, at, mask);
    Branch(&end, ne, scratch, Operand(zero_reg));
    addiu(rd, rd, 1);
    Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
    srl(mask, mask, 1);
    bind(&end);
  } else {
    clz(rd, rs);
  }
}

void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
                                     Register result,
                                     DoubleRegister double_input,
                                     Register scratch,
                                     DoubleRegister double_scratch,
                                     Register except_flag,
                                     CheckForInexactConversion check_inexact) {
  DCHECK(!result.is(scratch));
  DCHECK(!double_input.is(double_scratch));
  DCHECK(!except_flag.is(scratch));

  Label done;

  // Clear the except flag (0 = no exception).
  mov(except_flag, zero_reg);

  // Test for values that can be exactly represented as a signed 32-bit integer.
  cvt_w_d(double_scratch, double_input);
  mfc1(result, double_scratch);
  cvt_d_w(double_scratch, double_scratch);
  BranchF(&done, NULL, eq, double_input, double_scratch);

  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.

  if (check_inexact == kDontCheckForInexactConversion) {
    // Ignore inexact exceptions.
    except_mask &= ~kFCSRInexactFlagMask;
  }

  // Save FCSR.
  cfc1(scratch, FCSR);
  // Disable FPU exceptions.
  ctc1(zero_reg, FCSR);

  // Do operation based on rounding mode.
  switch (rounding_mode) {
    case kRoundToNearest:
      Round_w_d(double_scratch, double_input);
      break;
    case kRoundToZero:
      Trunc_w_d(double_scratch, double_input);
      break;
    case kRoundToPlusInf:
      Ceil_w_d(double_scratch, double_input);
      break;
    case kRoundToMinusInf:
      Floor_w_d(double_scratch, double_input);
      break;
  }  // End of switch-statement.

  // Retrieve FCSR.
  cfc1(except_flag, FCSR);
  // Restore FCSR.
  ctc1(scratch, FCSR);
  // Move the converted value into the result register.
  mfc1(result, double_scratch);

  // Check for fpu exceptions.
  And(except_flag, except_flag, Operand(except_mask));

  bind(&done);
}

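// Typical use (illustrative sketch, not taken from a specific caller; the
// registers and labels are the caller's): perform the truncation and bail
// out if the FPU reported any of the requested exceptions.
//
//   Label bailout;
//   EmitFPUTruncate(kRoundToZero, result, input, scratch, double_scratch,
//                   except_flag, kCheckForInexactConversion);
//   Branch(&bailout, ne, except_flag, Operand(zero_reg));
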
void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
                                                DoubleRegister double_input,
                                                Label* done) {
  DoubleRegister single_scratch = kLithiumScratchDouble.low();
  Register scratch = at;
  Register scratch2 = t9;

  // Clear cumulative exception flags and save the FCSR.
  cfc1(scratch2, FCSR);
  ctc1(zero_reg, FCSR);
  // Try a conversion to a signed integer.
  trunc_w_d(single_scratch, double_input);
  mfc1(result, single_scratch);
  // Retrieve and restore the FCSR.
  cfc1(scratch, FCSR);
  ctc1(scratch2, FCSR);
  // Check for overflow and NaNs.
  And(scratch, scratch,
      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
  // If we had no exceptions we are done.
  Branch(done, eq, scratch, Operand(zero_reg));
}


void MacroAssembler::TruncateDoubleToI(Register result,
                                       DoubleRegister double_input) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  push(ra);
  Subu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
  sdc1(double_input, MemOperand(sp, 0));

  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
  CallStub(&stub);

  Addu(sp, sp, Operand(kDoubleSize));
  pop(ra);

  bind(&done);
}


void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
  Label done;
  DoubleRegister double_scratch = f12;
  DCHECK(!result.is(object));

  ldc1(double_scratch,
       MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
  TryInlineTruncateDoubleToI(result, double_scratch, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  push(ra);
  DoubleToIStub stub(isolate(),
                     object,
                     result,
                     HeapNumber::kValueOffset - kHeapObjectTag,
                     true,
                     true);
  CallStub(&stub);
  pop(ra);

  bind(&done);
}


void MacroAssembler::TruncateNumberToI(Register object,
                                       Register result,
                                       Register heap_number_map,
                                       Register scratch,
                                       Label* not_number) {
  Label done;
  DCHECK(!result.is(object));

  UntagAndJumpIfSmi(result, object, &done);
  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
  TruncateHeapNumberToI(result, object);

  bind(&done);
}

void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  Ext(dst, src, kSmiTagSize, num_least_bits);
}


void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  And(dst, src, Operand((1 << num_least_bits) - 1));
}


// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK(                                \
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))

void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
  BranchShort(offset, bdslot);
}


void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  BranchShort(offset, cond, rs, rt, bdslot);
}


void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchShort(L, bdslot);
    } else {
      Jr(L, bdslot);
    }
  } else {
    if (is_trampoline_emitted()) {
      Jr(L, bdslot);
    } else {
      BranchShort(L, bdslot);
    }
  }
}


void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchShort(L, cond, rs, rt, bdslot);
    } else {
      if (cond != cc_always) {
        Label skip;
        Condition neg_cond = NegateCondition(cond);
        BranchShort(&skip, neg_cond, rs, rt);
        Jr(L, bdslot);
        bind(&skip);
      } else {
        Jr(L, bdslot);
      }
    }
  } else {
    if (is_trampoline_emitted()) {
      if (cond != cc_always) {
        Label skip;
        Condition neg_cond = NegateCondition(cond);
        BranchShort(&skip, neg_cond, rs, rt);
        Jr(L, bdslot);
        bind(&skip);
      } else {
        Jr(L, bdslot);
      }
    } else {
      BranchShort(L, cond, rs, rt, bdslot);
    }
  }
}


void MacroAssembler::Branch(Label* L,
                            Condition cond,
                            Register rs,
                            Heap::RootListIndex index,
                            BranchDelaySlot bdslot) {
  LoadRoot(at, index);
  Branch(L, cond, rs, Operand(at), bdslot);
}

void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
  b(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}

void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  DCHECK(!rs.is(zero_reg));
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
    // rt.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    r2 = rt.rm_;
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        beq(rs, r2, offset);
        break;
      case ne:
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          bne(rs, zero_reg, offset);
        } else {
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          b(offset);
        } else {
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          beq(rs, zero_reg, offset);
        } else {
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        if (rt.imm32_ == 0) {
          beq(rs, zero_reg, offset);
        } else {
          // We don't want any other register but scratch clobbered.
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          beq(rs, r2, offset);
        }
        break;
      case ne:
        if (rt.imm32_ == 0) {
          bne(rs, zero_reg, offset);
        } else {
          // We don't want any other register but scratch clobbered.
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          bne(rs, r2, offset);
        }
        break;
      // Signed comparison.
      case greater:
        if (rt.imm32_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm32_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm32_ == 0) {
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm32_ == 0) {
          blez(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm32_ == 0) {
          bne(rs, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm32_ == 0) {
          b(offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm32_ == 0) {
          // No code needs to be emitted.
          return;
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm32_ == 0) {
          beq(rs, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}

void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
  // We use branch_offset as an argument for the branch instructions to be sure
  // it is called just before generating the branch instruction, as needed.

  b(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}

void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset = 0;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    r2 = rt.rm_;
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bne(rs, zero_reg, offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          beq(rs, zero_reg, offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          beq(rs, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          offset = shifted_branch_offset(L, false);
          beq(rs, r2, offset);
        }
        break;
      case ne:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bne(rs, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          offset = shifted_branch_offset(L, false);
          bne(rs, r2, offset);
        }
        break;
      // Signed comparison.
      case greater:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bne(rs, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm32_ == 0) {
          // No code needs to be emitted.
          return;
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          beq(rs, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
  // Check that offset actually fits in an int16_t.
  DCHECK(is_int16(offset));
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}

void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
  BranchAndLinkShort(offset, bdslot);
}


void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
                                   const Operand& rt,
                                   BranchDelaySlot bdslot) {
  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
}


void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchAndLinkShort(L, bdslot);
    } else {
      Jalr(L, bdslot);
    }
  } else {
    if (is_trampoline_emitted()) {
      Jalr(L, bdslot);
    } else {
      BranchAndLinkShort(L, bdslot);
    }
  }
}


void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
                                   const Operand& rt,
                                   BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchAndLinkShort(L, cond, rs, rt, bdslot);
    } else {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jalr(L, bdslot);
      bind(&skip);
    }
  } else {
    if (is_trampoline_emitted()) {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jalr(L, bdslot);
      bind(&skip);
    } else {
      BranchAndLinkShort(L, cond, rs, rt, bdslot);
    }
  }
}

// We need to use a bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead but we would miss overflow
// cases, so we keep slt and add an intermediate third instruction.
void MacroAssembler::BranchAndLinkShort(int16_t offset,
                                        BranchDelaySlot bdslot) {
  bal(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}

void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
                                        Register rs, const Operand& rt,
                                        BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  if (!IsMipsArchVariant(kMips32r6)) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    switch (cond) {
      case cc_always:
        bal(offset);
        break;
      case eq:
        bne(rs, r2, 2);
        nop();
        bal(offset);
        break;
      case ne:
        beq(rs, r2, 2);
        nop();
        bal(offset);
        break;

      // Signed comparison.
      case greater:
        slt(scratch, r2, rs);
        addiu(scratch, scratch, -1);
        bgezal(scratch, offset);
        break;
      case greater_equal:
        slt(scratch, rs, r2);
        addiu(scratch, scratch, -1);
        bltzal(scratch, offset);
        break;
      case less:
        slt(scratch, rs, r2);
        addiu(scratch, scratch, -1);
        bgezal(scratch, offset);
        break;
      case less_equal:
        slt(scratch, r2, rs);
        addiu(scratch, scratch, -1);
        bltzal(scratch, offset);
        break;

      // Unsigned comparison.
      case Ugreater:
        sltu(scratch, r2, rs);
        addiu(scratch, scratch, -1);
        bgezal(scratch, offset);
        break;
      case Ugreater_equal:
        sltu(scratch, rs, r2);
        addiu(scratch, scratch, -1);
        bltzal(scratch, offset);
        break;
      case Uless:
        sltu(scratch, rs, r2);
        addiu(scratch, scratch, -1);
        bgezal(scratch, offset);
        break;
      case Uless_equal:
        sltu(scratch, r2, rs);
        addiu(scratch, scratch, -1);
        bltzal(scratch, offset);
        break;

      default:
        UNREACHABLE();
    }
  } else {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    switch (cond) {
      case cc_always:
        bal(offset);
        break;
      case eq:
        bne(rs, r2, 2);
        nop();
        bal(offset);
        break;
      case ne:
        beq(rs, r2, 2);
        nop();
        bal(offset);
        break;

      // Signed comparison.
      case greater:
        // rs > rt
        slt(scratch, r2, rs);
        beq(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;
      case greater_equal:
        // rs >= rt
        slt(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;
      case less:
        // rs < rt
        slt(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;
      case less_equal:
        // rs <= rt
        slt(scratch, r2, rs);
        bne(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;

      // Unsigned comparison.
      case Ugreater:
        // rs > rt
        sltu(scratch, r2, rs);
        beq(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;
      case Ugreater_equal:
        // rs >= rt
        sltu(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;
      case Uless:
        // rs < rt
        sltu(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;
      case Uless_equal:
        // rs <= rt
        sltu(scratch, r2, rs);
        bne(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;
      default:
        UNREACHABLE();
    }
  }

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}

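// Worked example of the slt/addiu/bgezal idiom above (illustrative), for
// 'greater': slt leaves scratch == 1 exactly when rs > rt; addiu -1 maps
// that to 0 (taken) or -1 (not taken); bgezal then links-and-branches only
// when scratch >= 0, i.e. exactly when rs > rt held.
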
void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
  bal(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}

void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
                                        const Operand& rt,
                                        BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset = 0;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  if (!IsMipsArchVariant(kMips32r6)) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case eq:
        bne(rs, r2, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case ne:
        beq(rs, r2, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;

      // Signed comparison.
      case greater:
        slt(scratch, r2, rs);
        addiu(scratch, scratch, -1);
        offset = shifted_branch_offset(L, false);
        bgezal(scratch, offset);
        break;
      case greater_equal:
        slt(scratch, rs, r2);
        addiu(scratch, scratch, -1);
        offset = shifted_branch_offset(L, false);
        bltzal(scratch, offset);
        break;
      case less:
        slt(scratch, rs, r2);
        addiu(scratch, scratch, -1);
        offset = shifted_branch_offset(L, false);
        bgezal(scratch, offset);
        break;
      case less_equal:
        slt(scratch, r2, rs);
        addiu(scratch, scratch, -1);
        offset = shifted_branch_offset(L, false);
        bltzal(scratch, offset);
        break;

      // Unsigned comparison.
      case Ugreater:
        sltu(scratch, r2, rs);
        addiu(scratch, scratch, -1);
        offset = shifted_branch_offset(L, false);
        bgezal(scratch, offset);
        break;
      case Ugreater_equal:
        sltu(scratch, rs, r2);
        addiu(scratch, scratch, -1);
        offset = shifted_branch_offset(L, false);
        bltzal(scratch, offset);
        break;
      case Uless:
        sltu(scratch, rs, r2);
        addiu(scratch, scratch, -1);
        offset = shifted_branch_offset(L, false);
        bgezal(scratch, offset);
        break;
      case Uless_equal:
        sltu(scratch, r2, rs);
        addiu(scratch, scratch, -1);
        offset = shifted_branch_offset(L, false);
        bltzal(scratch, offset);
        break;

      default:
        UNREACHABLE();
    }
  } else {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case eq:
        bne(rs, r2, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case ne:
        beq(rs, r2, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;

      // Signed comparison.
      case greater:
        // rs > rt
        slt(scratch, r2, rs);
        beq(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case greater_equal:
        // rs >= rt
        slt(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case less:
        // rs < rt
        slt(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case less_equal:
        // rs <= rt
        slt(scratch, r2, rs);
        bne(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;

      // Unsigned comparison.
      case Ugreater:
        // rs > rt
        sltu(scratch, r2, rs);
        beq(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case Ugreater_equal:
        // rs >= rt
        sltu(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case Uless:
        // rs < rt
        sltu(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case Uless_equal:
        // rs <= rt
        sltu(scratch, r2, rs);
        bne(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;

      default:
        UNREACHABLE();
    }
  }

  // Check that offset actually fits in an int16_t.
  DCHECK(is_int16(offset));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}

2916 void MacroAssembler::Jump(Register target,
2920 BranchDelaySlot bd) {
2921 BlockTrampolinePoolScope block_trampoline_pool(this);
2922 if (cond == cc_always) {
2925 BRANCH_ARGS_CHECK(cond, rs, rt);
2926 Branch(2, NegateCondition(cond), rs, rt);
2929 // Emit a nop in the branch delay slot if required.
2935 void MacroAssembler::Jump(intptr_t target,
2936 RelocInfo::Mode rmode,
2940 BranchDelaySlot bd) {
2942 if (cond != cc_always) {
2943 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2945 // The first instruction of 'li' may be placed in the delay slot.
2946 // This is not an issue, t9 is expected to be clobbered anyway.
2947 li(t9, Operand(target, rmode));
2948 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2949 bind(&skip);
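// The absolute target is materialized into t9 with a lui/ori pair and then
// jumped through, roughly (illustrative encoding, not emitted verbatim):
//
//   lui  t9, target[31:16]
//   ori  t9, t9, target[15:0]
//   jr   t9
//   nop                    <-- delay slot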
2953 void MacroAssembler::Jump(Address target,
2954 RelocInfo::Mode rmode,
2958 BranchDelaySlot bd) {
2959 DCHECK(!RelocInfo::IsCodeTarget(rmode));
2960 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2964 void MacroAssembler::Jump(Handle<Code> code,
2965 RelocInfo::Mode rmode,
2969 BranchDelaySlot bd) {
2970 DCHECK(RelocInfo::IsCodeTarget(rmode));
2971 AllowDeferredHandleDereference embedding_raw_address;
2972 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2976 int MacroAssembler::CallSize(Register target,
2980 BranchDelaySlot bd) {
2983 if (cond == cc_always) {
2992 return size * kInstrSize;
2996 // Note: To call gcc-compiled C code on mips, you must call through t9.
2997 void MacroAssembler::Call(Register target,
3001 BranchDelaySlot bd) {
3002 BlockTrampolinePoolScope block_trampoline_pool(this);
3005 if (cond == cc_always) {
3008 BRANCH_ARGS_CHECK(cond, rs, rt);
3009 Branch(2, NegateCondition(cond), rs, rt);
3012 // Emit a nop in the branch delay slot if required.
3013 if (bd == PROTECT) nop();
3016 DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
3017 SizeOfCodeGeneratedSince(&start));
3021 int MacroAssembler::CallSize(Address target,
3022 RelocInfo::Mode rmode,
3026 BranchDelaySlot bd) {
3027 int size = CallSize(t9, cond, rs, rt, bd);
3028 return size + 2 * kInstrSize;
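// The extra 2 * kInstrSize accounts for the lui/ori pair that li() emits to
// materialize a full 32-bit address into t9 before the register call.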
3032 void MacroAssembler::Call(Address target,
3033 RelocInfo::Mode rmode,
3037 BranchDelaySlot bd) {
3038 BlockTrampolinePoolScope block_trampoline_pool(this);
3041 int32_t target_int = reinterpret_cast<int32_t>(target);
3042 // Must record previous source positions before the
3043 // li() generates a new code target.
3044 positions_recorder()->WriteRecordedPositions();
3045 li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
3046 Call(t9, cond, rs, rt, bd);
3047 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3048 SizeOfCodeGeneratedSince(&start));
3052 int MacroAssembler::CallSize(Handle<Code> code,
3053 RelocInfo::Mode rmode,
3054 TypeFeedbackId ast_id,
3058 BranchDelaySlot bd) {
3059 AllowDeferredHandleDereference using_raw_address;
3060 return CallSize(reinterpret_cast<Address>(code.location()),
3061 rmode, cond, rs, rt, bd);
3065 void MacroAssembler::Call(Handle<Code> code,
3066 RelocInfo::Mode rmode,
3067 TypeFeedbackId ast_id,
3071 BranchDelaySlot bd) {
3072 BlockTrampolinePoolScope block_trampoline_pool(this);
3075 DCHECK(RelocInfo::IsCodeTarget(rmode));
3076 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3077 SetRecordedAstId(ast_id);
3078 rmode = RelocInfo::CODE_TARGET_WITH_ID;
3080 AllowDeferredHandleDereference embedding_raw_address;
3081 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3082 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3083 SizeOfCodeGeneratedSince(&start));
3087 void MacroAssembler::Ret(Condition cond,
3090 BranchDelaySlot bd) {
3091 Jump(ra, cond, rs, rt, bd);
3095 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
3096 BlockTrampolinePoolScope block_trampoline_pool(this);
3099 uint32_t imm28 = jump_address(L);
3100 imm28 &= kImm28Mask;
3101 { BlockGrowBufferScope block_buf_growth(this);
3102 // Buffer growth (and relocation) must be blocked for internal references
3103 // until associated instructions are emitted and available to be patched.
3104 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3107 // Emit a nop in the branch delay slot if required.
3108 if (bdslot == PROTECT)
3109 nop();
3113 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
3114 BlockTrampolinePoolScope block_trampoline_pool(this);
3117 uint32_t imm32 = jump_address(L);
3118 { BlockGrowBufferScope block_buf_growth(this);
3119 // Buffer growth (and relocation) must be blocked for internal references
3120 // until associated instructions are emitted and available to be patched.
3121 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3122 lui(at, (imm32 & kHiMask) >> kLuiShift);
3123 ori(at, at, (imm32 & kImm16Mask));
3127 // Emit a nop in the branch delay slot if required.
3128 if (bdslot == PROTECT)
3129 nop();
3133 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
3134 BlockTrampolinePoolScope block_trampoline_pool(this);
3137 uint32_t imm32 = jump_address(L);
3138 { BlockGrowBufferScope block_buf_growth(this);
3139 // Buffer growth (and relocation) must be blocked for internal references
3140 // until associated instructions are emitted and available to be patched.
3141 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3142 lui(at, (imm32 & kHiMask) >> kLuiShift);
3143 ori(at, at, (imm32 & kImm16Mask));
3147 // Emit a nop in the branch delay slot if required.
3148 if (bdslot == PROTECT)
3149 nop();
3153 void MacroAssembler::DropAndRet(int drop) {
3154 Ret(USE_DELAY_SLOT);
3155 addiu(sp, sp, drop * kPointerSize);
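// The stack adjustment rides in the return's delay slot, so the drop costs no
// extra instruction. Roughly (illustrative; kPointerSize == 4 on mips32):
//
//   jr    ra
//   addiu sp, sp, drop * 4   <-- executed in the delay slot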
3158 void MacroAssembler::DropAndRet(int drop,
3161 const Operand& r2) {
3162 // Both Drop and Ret need to be conditional.
3163 Label skip;
3164 if (cond != cc_always) {
3165 Branch(&skip, NegateCondition(cond), r1, r2);
3171 if (cond != cc_always) {
3172 bind(&skip);
3177 void MacroAssembler::Drop(int count,
3180 const Operand& op) {
3188 Branch(&skip, NegateCondition(cond), reg, op);
3191 addiu(sp, sp, count * kPointerSize);
3200 void MacroAssembler::Swap(Register reg1,
3203 if (scratch.is(no_reg)) {
3204 Xor(reg1, reg1, Operand(reg2));
3205 Xor(reg2, reg2, Operand(reg1));
3206 Xor(reg1, reg1, Operand(reg2));
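// The scratch-free path is the classic XOR swap; it is only correct when the
// two registers are distinct. A host-side sketch (plain C++ for illustration):
//
//   void XorSwap(uint32_t& a, uint32_t& b) {
//     a ^= b;  // a == a0 ^ b0
//     b ^= a;  // b == a0
//     a ^= b;  // a == b0
//   }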
3215 void MacroAssembler::Call(Label* target) {
3216 BranchAndLink(target);
3220 void MacroAssembler::Push(Handle<Object> handle) {
3221 li(at, Operand(handle));
3222 push(at);
3226 void MacroAssembler::DebugBreak() {
3227 PrepareCEntryArgs(0);
3228 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
3229 CEntryStub ces(isolate(), 1);
3230 DCHECK(AllowThisStubCall(&ces));
3231 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3235 // ---------------------------------------------------------------------------
3236 // Exception handling.
3238 void MacroAssembler::PushStackHandler() {
3239 // Adjust this code if not the case.
3240 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3241 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3243 // Link the current handler as the next handler.
3244 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3245 lw(t1, MemOperand(t2));
3246 push(t1);
3248 // Set this new handler as the current one.
3249 sw(sp, MemOperand(t2));
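// Stack handlers form a singly linked list threaded through the stack, with
// the list head stored at Isolate::kHandlerAddress. In pseudo-C, the push
// above is roughly:
//
//   handler->next = *handler_address;  // push(t1) links in the old head.
//   *handler_address = sp;             // sp now points at the new handler.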
3253 void MacroAssembler::PopStackHandler() {
3254 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3255 pop(a1);
3256 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
3257 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3258 sw(a1, MemOperand(at));
3262 void MacroAssembler::Allocate(int object_size,
3267 AllocationFlags flags) {
3268 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3269 if (!FLAG_inline_new) {
3270 if (emit_debug_code()) {
3271 // Trash the registers to simulate an allocation failure.
3273 li(scratch1, 0x7191);
3274 li(scratch2, 0x7291);
3280 DCHECK(!result.is(scratch1));
3281 DCHECK(!result.is(scratch2));
3282 DCHECK(!scratch1.is(scratch2));
3283 DCHECK(!scratch1.is(t9));
3284 DCHECK(!scratch2.is(t9));
3285 DCHECK(!result.is(t9));
3287 // Make object size into bytes.
3288 if ((flags & SIZE_IN_WORDS) != 0) {
3289 object_size *= kPointerSize;
3291 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
3293 // Check relative positions of allocation top and limit addresses.
3294 // ARM adds additional checks to make sure the ldm instruction can be
3295 // used. On MIPS we don't have ldm, so we don't need those additional checks.
3296 ExternalReference allocation_top =
3297 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3298 ExternalReference allocation_limit =
3299 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3301 intptr_t top =
3302 reinterpret_cast<intptr_t>(allocation_top.address());
3303 intptr_t limit =
3304 reinterpret_cast<intptr_t>(allocation_limit.address());
3305 DCHECK((limit - top) == kPointerSize);
3307 // Set up allocation top address and object size registers.
3308 Register topaddr = scratch1;
3309 li(topaddr, Operand(allocation_top));
3311 // This code stores a temporary value in t9.
3312 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3313 // Load allocation top into result and allocation limit into t9.
3314 lw(result, MemOperand(topaddr));
3315 lw(t9, MemOperand(topaddr, kPointerSize));
3317 if (emit_debug_code()) {
3318 // Assert that result actually contains top on entry. t9 is used
3319 // immediately below, so this use of t9 does not cause a difference in
3320 // register content between debug and release modes.
3321 lw(t9, MemOperand(topaddr));
3322 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3324 // Load allocation limit into t9. Result already contains allocation top.
3325 lw(t9, MemOperand(topaddr, limit - top));
3328 if ((flags & DOUBLE_ALIGNMENT) != 0) {
3329 // Align the next allocation. Storing the filler map without checking top is
3330 // safe in new-space because the limit of the heap is aligned there.
3331 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
3332 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
3333 And(scratch2, result, Operand(kDoubleAlignmentMask));
3334 Label aligned;
3335 Branch(&aligned, eq, scratch2, Operand(zero_reg));
3336 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
3337 Branch(gc_required, Ugreater_equal, result, Operand(t9));
3339 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3340 sw(scratch2, MemOperand(result));
3341 Addu(result, result, Operand(kDoubleSize / 2));
3342 bind(&aligned);
3345 // Calculate new top and bail out if new space is exhausted. Use result
3346 // to calculate the new top.
3347 Addu(scratch2, result, Operand(object_size));
3348 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3349 sw(scratch2, MemOperand(topaddr));
3351 // Tag object if requested.
3352 if ((flags & TAG_OBJECT) != 0) {
3353 Addu(result, result, Operand(kHeapObjectTag));
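// The fast path above is a bump allocator. A host-side sketch of the logic
// (plain C++ for illustration; top and limit live behind external references):
//
//   char* result = *top;
//   char* new_top = result + object_size;
//   if (new_top > *limit) goto gc_required;  // Let the runtime collect.
//   *top = new_top;
//   if ((flags & TAG_OBJECT) != 0) result += kHeapObjectTag;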
3358 void MacroAssembler::Allocate(Register object_size,
3363 AllocationFlags flags) {
3364 if (!FLAG_inline_new) {
3365 if (emit_debug_code()) {
3366 // Trash the registers to simulate an allocation failure.
3368 li(scratch1, 0x7191);
3369 li(scratch2, 0x7291);
3375 DCHECK(!result.is(scratch1));
3376 DCHECK(!result.is(scratch2));
3377 DCHECK(!scratch1.is(scratch2));
3378 DCHECK(!object_size.is(t9));
3379 DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3381 // Check relative positions of allocation top and limit addresses.
3382 // ARM adds additional checks to make sure the ldm instruction can be
3383 // used. On MIPS we don't have ldm, so we don't need those additional checks.
3384 ExternalReference allocation_top =
3385 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3386 ExternalReference allocation_limit =
3387 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3388 intptr_t top =
3389 reinterpret_cast<intptr_t>(allocation_top.address());
3390 intptr_t limit =
3391 reinterpret_cast<intptr_t>(allocation_limit.address());
3392 DCHECK((limit - top) == kPointerSize);
3394 // Set up allocation top address and object size registers.
3395 Register topaddr = scratch1;
3396 li(topaddr, Operand(allocation_top));
3398 // This code stores a temporary value in t9.
3399 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3400 // Load allocation top into result and allocation limit into t9.
3401 lw(result, MemOperand(topaddr));
3402 lw(t9, MemOperand(topaddr, kPointerSize));
3404 if (emit_debug_code()) {
3405 // Assert that result actually contains top on entry. t9 is used
3406 // immediately below, so this use of t9 does not cause a difference in
3407 // register content between debug and release modes.
3408 lw(t9, MemOperand(topaddr));
3409 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3411 // Load allocation limit into t9. Result already contains allocation top.
3412 lw(t9, MemOperand(topaddr, limit - top));
3415 if ((flags & DOUBLE_ALIGNMENT) != 0) {
3416 // Align the next allocation. Storing the filler map without checking top is
3417 // safe in new-space because the limit of the heap is aligned there.
3418 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
3419 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
3420 And(scratch2, result, Operand(kDoubleAlignmentMask));
3421 Label aligned;
3422 Branch(&aligned, eq, scratch2, Operand(zero_reg));
3423 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
3424 Branch(gc_required, Ugreater_equal, result, Operand(t9));
3426 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3427 sw(scratch2, MemOperand(result));
3428 Addu(result, result, Operand(kDoubleSize / 2));
3429 bind(&aligned);
3432 // Calculate new top and bail out if new space is exhausted. Use result
3433 // to calculate the new top. Object size may be in words so a shift is
3434 // required to get the number of bytes.
3435 if ((flags & SIZE_IN_WORDS) != 0) {
3436 sll(scratch2, object_size, kPointerSizeLog2);
3437 Addu(scratch2, result, scratch2);
3439 Addu(scratch2, result, Operand(object_size));
3441 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3443 // Update allocation top. result temporarily holds the new top.
3444 if (emit_debug_code()) {
3445 And(t9, scratch2, Operand(kObjectAlignmentMask));
3446 Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3448 sw(scratch2, MemOperand(topaddr));
3450 // Tag object if requested.
3451 if ((flags & TAG_OBJECT) != 0) {
3452 Addu(result, result, Operand(kHeapObjectTag));
3457 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3459 ExternalReference new_space_allocation_top =
3460 ExternalReference::new_space_allocation_top_address(isolate());
3462 // Make sure the object has no tag before resetting top.
3463 And(object, object, Operand(~kHeapObjectTagMask));
3465 // Check that the object being un-allocated is below the current top.
3466 li(scratch, Operand(new_space_allocation_top));
3467 lw(scratch, MemOperand(scratch));
3468 Check(less, kUndoAllocationOfNonAllocatedMemory,
3469 object, Operand(scratch));
3471 // Write the address of the object to un-allocate as the current top.
3472 li(scratch, Operand(new_space_allocation_top));
3473 sw(object, MemOperand(scratch));
3477 void MacroAssembler::AllocateTwoByteString(Register result,
3482 Label* gc_required) {
3483 // Calculate the number of bytes needed for the characters in the string while
3484 // observing object alignment.
3485 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3486 sll(scratch1, length, 1); // Length in bytes, not chars.
3487 addiu(scratch1, scratch1,
3488 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3489 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
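// The three instructions above compute
//   size = (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
//          & ~kObjectAlignmentMask,
// i.e. the byte size rounded up to the object alignment. For example, with an
// 8-byte alignment a 13-character string needs 26 bytes of characters, and
// 26 + header is rounded up to the next multiple of 8.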
3491 // Allocate two-byte string in new space.
3499 // Set the map, length and hash field.
3500 InitializeNewString(result,
3502 Heap::kStringMapRootIndex,
3508 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3509 Register scratch1, Register scratch2,
3511 Label* gc_required) {
3512 // Calculate the number of bytes needed for the characters in the string
3513 // while observing object alignment.
3514 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3515 DCHECK(kCharSize == 1);
3516 addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3517 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3519 // Allocate one-byte string in new space.
3527 // Set the map, length and hash field.
3528 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3529 scratch1, scratch2);
3533 void MacroAssembler::AllocateTwoByteConsString(Register result,
3537 Label* gc_required) {
3538 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3540 InitializeNewString(result,
3542 Heap::kConsStringMapRootIndex,
3548 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3551 Label* gc_required) {
3552 Allocate(ConsString::kSize,
3559 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3560 scratch1, scratch2);
3564 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3568 Label* gc_required) {
3569 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3572 InitializeNewString(result,
3574 Heap::kSlicedStringMapRootIndex,
3580 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3584 Label* gc_required) {
3585 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3588 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3589 scratch1, scratch2);
3593 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3594 Label* not_unique_name) {
3595 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3596 Label succeed;
3597 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3598 Branch(&succeed, eq, at, Operand(zero_reg));
3599 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3600 bind(&succeed);
3605 // Allocates a heap number or jumps to the label if the young space is full and
3606 // a scavenge is needed.
3607 void MacroAssembler::AllocateHeapNumber(Register result,
3610 Register heap_number_map,
3612 TaggingMode tagging_mode,
3614 // Allocate an object in the heap for the heap number and tag it as a heap
3616 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3617 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3619 Heap::RootListIndex map_index = mode == MUTABLE
3620 ? Heap::kMutableHeapNumberMapRootIndex
3621 : Heap::kHeapNumberMapRootIndex;
3622 AssertIsRoot(heap_number_map, map_index);
3624 // Store heap number map in the allocated object.
3625 if (tagging_mode == TAG_RESULT) {
3626 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3628 sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3633 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3637 Label* gc_required) {
3638 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3639 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3640 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3644 // Copies a fixed number of fields of heap objects from src to dst.
3645 void MacroAssembler::CopyFields(Register dst,
3649 DCHECK((temps & dst.bit()) == 0);
3650 DCHECK((temps & src.bit()) == 0);
3651 // Primitive implementation using only one temporary register.
3653 Register tmp = no_reg;
3654 // Find a temp register in temps list.
3655 for (int i = 0; i < kNumRegisters; i++) {
3656 if ((temps & (1 << i)) != 0) {
3661 DCHECK(!tmp.is(no_reg));
3663 for (int i = 0; i < field_count; i++) {
3664 lw(tmp, FieldMemOperand(src, i * kPointerSize));
3665 sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3670 void MacroAssembler::CopyBytes(Register src,
3674 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3676 // Align src before copying in word size chunks.
3677 Branch(&byte_loop, le, length, Operand(kPointerSize));
3678 bind(&align_loop_1);
3679 And(scratch, src, kPointerSize - 1);
3680 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3681 lbu(scratch, MemOperand(src));
3683 sb(scratch, MemOperand(dst));
3685 Subu(length, length, Operand(1));
3686 Branch(&align_loop_1, ne, length, Operand(zero_reg));
3688 // Copy bytes in word size chunks.
3689 bind(&word_loop);
3690 if (emit_debug_code()) {
3691 And(scratch, src, kPointerSize - 1);
3692 Assert(eq, kExpectingAlignmentForCopyBytes,
3693 scratch, Operand(zero_reg));
3695 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3696 lw(scratch, MemOperand(src));
3697 Addu(src, src, kPointerSize);
3699 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3700 // Can't use unaligned access - copy byte by byte.
3701 if (kArchEndian == kLittle) {
3702 sb(scratch, MemOperand(dst, 0));
3703 srl(scratch, scratch, 8);
3704 sb(scratch, MemOperand(dst, 1));
3705 srl(scratch, scratch, 8);
3706 sb(scratch, MemOperand(dst, 2));
3707 srl(scratch, scratch, 8);
3708 sb(scratch, MemOperand(dst, 3));
3710 sb(scratch, MemOperand(dst, 3));
3711 srl(scratch, scratch, 8);
3712 sb(scratch, MemOperand(dst, 2));
3713 srl(scratch, scratch, 8);
3714 sb(scratch, MemOperand(dst, 1));
3715 srl(scratch, scratch, 8);
3716 sb(scratch, MemOperand(dst, 0));
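// The store order mirrors the word's memory layout. For the word 0x11223344
// (illustrative value):
//   little-endian: dst[0] = 0x44, dst[1] = 0x33, dst[2] = 0x22, dst[3] = 0x11
//   big-endian:    dst[0] = 0x11, dst[1] = 0x22, dst[2] = 0x33, dst[3] = 0x44
// Each srl by 8 exposes the next byte of the loaded word.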
3721 Subu(length, length, Operand(kPointerSize));
3724 // Copy the last bytes if any left.
3725 bind(&byte_loop);
3726 Branch(&done, eq, length, Operand(zero_reg));
3727 bind(&byte_loop_1);
3728 lbu(scratch, MemOperand(src));
3730 sb(scratch, MemOperand(dst));
3732 Subu(length, length, Operand(1));
3733 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3734 bind(&done);
3738 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3739 Register end_offset,
3744 sw(filler, MemOperand(start_offset));
3745 Addu(start_offset, start_offset, kPointerSize);
3747 Branch(&loop, lt, start_offset, Operand(end_offset));
3751 void MacroAssembler::CheckFastElements(Register map,
3754 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3755 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3756 STATIC_ASSERT(FAST_ELEMENTS == 2);
3757 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3758 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3759 Branch(fail, hi, scratch,
3760 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3764 void MacroAssembler::CheckFastObjectElements(Register map,
3767 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3768 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3769 STATIC_ASSERT(FAST_ELEMENTS == 2);
3770 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3771 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3772 Branch(fail, ls, scratch,
3773 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3774 Branch(fail, hi, scratch,
3775 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3779 void MacroAssembler::CheckFastSmiElements(Register map,
3782 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3783 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3784 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3785 Branch(fail, hi, scratch,
3786 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3790 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3792 Register elements_reg,
3797 int elements_offset) {
3798 Label smi_value, maybe_nan, have_double_value, is_nan, done;
3799 Register mantissa_reg = scratch2;
3800 Register exponent_reg = scratch3;
3802 // Handle smi values specially.
3803 JumpIfSmi(value_reg, &smi_value);
3805 // Ensure that the object is a heap number.
3808 Heap::kHeapNumberMapRootIndex,
3812 // Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000
3813 // in the exponent.
3814 li(scratch1, Operand(kHoleNanUpper32 & HeapNumber::kExponentMask));
3815 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3816 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3818 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3820 bind(&have_double_value);
3821 sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3822 Addu(scratch1, scratch1, elements_reg);
3824 FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3825 + kHoleNanLower32Offset));
3827 FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3828 + kHoleNanUpper32Offset));
3832 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3833 // it's an Infinity, and the non-NaN code path applies.
3834 Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3835 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3836 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3838 // Load canonical NaN for storing into the double array.
3839 LoadRoot(at, Heap::kNanValueRootIndex);
3840 lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
3841 lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
3842 jmp(&have_double_value);
3845 Addu(scratch1, elements_reg,
3846 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3848 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3849 Addu(scratch1, scratch1, scratch2);
3850 // scratch1 is now the effective address of the double element.
3852 Register untagged_value = elements_reg;
3853 SmiUntag(untagged_value, value_reg);
3854 mtc1(untagged_value, f2);
3855 cvt_d_w(f0, f2);
3856 sdc1(f0, MemOperand(scratch1, 0));
3857 bind(&done);
3861 void MacroAssembler::CompareMapAndBranch(Register obj,
3864 Label* early_success,
3867 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3868 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3872 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3874 Label* early_success,
3877 Branch(branch_to, cond, obj_map, Operand(map));
3881 void MacroAssembler::CheckMap(Register obj,
3885 SmiCheckType smi_check_type) {
3886 if (smi_check_type == DO_SMI_CHECK) {
3887 JumpIfSmi(obj, fail);
3889 Label success;
3890 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3891 bind(&success);
3895 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3896 Register scratch2, Handle<WeakCell> cell,
3897 Handle<Code> success,
3898 SmiCheckType smi_check_type) {
3899 Label fail;
3900 if (smi_check_type == DO_SMI_CHECK) {
3901 JumpIfSmi(obj, &fail);
3903 lw(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3904 GetWeakValue(scratch2, cell);
3905 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
3906 bind(&fail);
3910 void MacroAssembler::CheckMap(Register obj,
3912 Heap::RootListIndex index,
3914 SmiCheckType smi_check_type) {
3915 if (smi_check_type == DO_SMI_CHECK) {
3916 JumpIfSmi(obj, fail);
3918 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3919 LoadRoot(at, index);
3920 Branch(fail, ne, scratch, Operand(at));
3924 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3925 li(value, Operand(cell));
3926 lw(value, FieldMemOperand(value, WeakCell::kValueOffset));
3930 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3932 GetWeakValue(value, cell);
3933 JumpIfSmi(value, miss);
3937 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
3938 if (IsMipsSoftFloatABI) {
3939 if (kArchEndian == kLittle) {
3945 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3950 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
3951 if (IsMipsSoftFloatABI) {
3952 if (kArchEndian == kLittle) {
3958 Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
3963 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
3964 if (!IsMipsSoftFloatABI) {
3967 if (kArchEndian == kLittle) {
3976 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
3977 if (!IsMipsSoftFloatABI) {
3980 if (kArchEndian == kLittle) {
3989 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3990 DoubleRegister src2) {
3991 if (!IsMipsSoftFloatABI) {
3993 DCHECK(!src1.is(f14));
4001 if (kArchEndian == kLittle) {
4012 // -----------------------------------------------------------------------------
4013 // JavaScript invokes.
4015 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4016 const ParameterCount& actual,
4017 Handle<Code> code_constant,
4020 bool* definitely_mismatches,
4022 const CallWrapper& call_wrapper) {
4023 bool definitely_matches = false;
4024 *definitely_mismatches = false;
4025 Label regular_invoke;
4027 // Check whether the expected and actual arguments count match. If not,
4028 // setup registers according to contract with ArgumentsAdaptorTrampoline:
4029 // a0: actual arguments count
4030 // a1: function (passed through to callee)
4031 // a2: expected arguments count
4033 // The code below is made a lot easier because the calling code already sets
4034 // up actual and expected registers according to the contract if values are
4035 // passed in registers.
4036 DCHECK(actual.is_immediate() || actual.reg().is(a0));
4037 DCHECK(expected.is_immediate() || expected.reg().is(a2));
4038 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
4040 if (expected.is_immediate()) {
4041 DCHECK(actual.is_immediate());
4042 if (expected.immediate() == actual.immediate()) {
4043 definitely_matches = true;
4045 li(a0, Operand(actual.immediate()));
4046 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4047 if (expected.immediate() == sentinel) {
4048 // Don't worry about adapting arguments for builtins that
4049 // don't want that done. Skip adaptation code by making it look
4050 // like we have a match between expected and actual number of
4051 // arguments.
4052 definitely_matches = true;
4054 *definitely_mismatches = true;
4055 li(a2, Operand(expected.immediate()));
4058 } else if (actual.is_immediate()) {
4059 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
4060 li(a0, Operand(actual.immediate()));
4062 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
4065 if (!definitely_matches) {
4066 if (!code_constant.is_null()) {
4067 li(a3, Operand(code_constant));
4068 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
4071 Handle<Code> adaptor =
4072 isolate()->builtins()->ArgumentsAdaptorTrampoline();
4073 if (flag == CALL_FUNCTION) {
4074 call_wrapper.BeforeCall(CallSize(adaptor));
4076 call_wrapper.AfterCall();
4077 if (!*definitely_mismatches) {
4081 Jump(adaptor, RelocInfo::CODE_TARGET);
4083 bind(&regular_invoke);
4088 void MacroAssembler::InvokeCode(Register code,
4089 const ParameterCount& expected,
4090 const ParameterCount& actual,
4092 const CallWrapper& call_wrapper) {
4093 // You can't call a function without a valid frame.
4094 DCHECK(flag == JUMP_FUNCTION || has_frame());
4098 bool definitely_mismatches = false;
4099 InvokePrologue(expected, actual, Handle<Code>::null(), code,
4100 &done, &definitely_mismatches, flag,
4102 if (!definitely_mismatches) {
4103 if (flag == CALL_FUNCTION) {
4104 call_wrapper.BeforeCall(CallSize(code));
4106 call_wrapper.AfterCall();
4108 DCHECK(flag == JUMP_FUNCTION);
4111 // Continue here if InvokePrologue handled the invocation (via the
4112 // arguments adaptor) due to mismatched parameter counts.
4118 void MacroAssembler::InvokeFunction(Register function,
4119 const ParameterCount& actual,
4121 const CallWrapper& call_wrapper) {
4122 // You can't call a function without a valid frame.
4123 DCHECK(flag == JUMP_FUNCTION || has_frame());
4125 // Contract with called JS functions requires that function is passed in a1.
4126 DCHECK(function.is(a1));
4127 Register expected_reg = a2;
4128 Register code_reg = a3;
4130 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4131 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4133 FieldMemOperand(code_reg,
4134 SharedFunctionInfo::kFormalParameterCountOffset));
4135 sra(expected_reg, expected_reg, kSmiTagSize);
4136 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4138 ParameterCount expected(expected_reg);
4139 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
4143 void MacroAssembler::InvokeFunction(Register function,
4144 const ParameterCount& expected,
4145 const ParameterCount& actual,
4147 const CallWrapper& call_wrapper) {
4148 // You can't call a function without a valid frame.
4149 DCHECK(flag == JUMP_FUNCTION || has_frame());
4151 // Contract with called JS functions requires that function is passed in a1.
4152 DCHECK(function.is(a1));
4154 // Get the function and setup the context.
4155 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4157 // We call indirectly through the code field in the function to
4158 // allow recompilation to take effect without changing any of the
4159 // call sites.
4160 lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4161 InvokeCode(a3, expected, actual, flag, call_wrapper);
4165 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4166 const ParameterCount& expected,
4167 const ParameterCount& actual,
4169 const CallWrapper& call_wrapper) {
4171 InvokeFunction(a1, expected, actual, flag, call_wrapper);
4175 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
4179 lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
4180 IsInstanceJSObjectType(map, scratch, fail);
4184 void MacroAssembler::IsInstanceJSObjectType(Register map,
4187 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
4188 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
4189 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
4193 void MacroAssembler::IsObjectJSStringType(Register object,
4196 DCHECK(kNotStringTag != 0);
4198 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4199 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4200 And(scratch, scratch, Operand(kIsNotStringMask));
4201 Branch(fail, ne, scratch, Operand(zero_reg));
4205 void MacroAssembler::IsObjectNameType(Register object,
4208 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4209 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4210 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4214 // ---------------------------------------------------------------------------
4215 // Support functions.
4218 void MacroAssembler::GetMapConstructor(Register result, Register map,
4219 Register temp, Register temp2) {
4221 lw(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
4223 JumpIfSmi(result, &done);
4224 GetObjectType(result, temp, temp2);
4225 Branch(&done, ne, temp2, Operand(MAP_TYPE));
4226 lw(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
4232 void MacroAssembler::TryGetFunctionPrototype(Register function,
4236 bool miss_on_bound_function) {
4238 if (miss_on_bound_function) {
4239 // Check that the receiver isn't a smi.
4240 JumpIfSmi(function, miss);
4242 // Check that the function really is a function. Load map into result reg.
4243 GetObjectType(function, result, scratch);
4244 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
4247 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
4249 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
4250 And(scratch, scratch,
4251 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
4252 Branch(miss, ne, scratch, Operand(zero_reg));
4254 // Make sure that the function has an instance prototype.
4255 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
4256 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
4257 Branch(&non_instance, ne, scratch, Operand(zero_reg));
4260 // Get the prototype or initial map from the function.
4262 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4264 // If the prototype or initial map is the hole, don't return it and
4265 // simply miss the cache instead. This will allow us to allocate a
4266 // prototype object on-demand in the runtime system.
4267 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4268 Branch(miss, eq, result, Operand(t8));
4270 // If the function does not have an initial map, we're done.
4272 GetObjectType(result, scratch, scratch);
4273 Branch(&done, ne, scratch, Operand(MAP_TYPE));
4275 // Get the prototype from the initial map.
4276 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
4278 if (miss_on_bound_function) {
4281 // Non-instance prototype: Fetch prototype from constructor field
4283 bind(&non_instance);
4284 GetMapConstructor(result, result, scratch, scratch);
4292 void MacroAssembler::GetObjectType(Register object,
4294 Register type_reg) {
4295 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
4296 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4300 // -----------------------------------------------------------------------------
4301 // Runtime calls.
4303 void MacroAssembler::CallStub(CodeStub* stub,
4304 TypeFeedbackId ast_id,
4308 BranchDelaySlot bd) {
4309 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
4310 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4315 void MacroAssembler::TailCallStub(CodeStub* stub,
4319 BranchDelaySlot bd) {
4320 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4324 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4325 return has_frame_ || !stub->SometimesSetsUpAFrame();
4329 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4330 // If the hash field contains an array index pick it out. The assert checks
4331 // that the constants for the maximum number of digits for an array index
4332 // cached in the hash field and the number of bits reserved for it does not
4333 // conflict.
4334 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4335 (1 << String::kArrayIndexValueBits));
4336 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4340 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4344 Register heap_number_map,
4346 ObjectToDoubleFlags flags) {
4348 if ((flags & OBJECT_NOT_SMI) == 0) {
4350 JumpIfNotSmi(object, &not_smi);
4351 // Remove smi tag and convert to double.
4352 sra(scratch1, object, kSmiTagSize);
4353 mtc1(scratch1, result);
4354 cvt_d_w(result, result);
4358 // Check for heap number and load double value from it.
4359 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4360 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4362 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4363 // If exponent is all ones the number is either a NaN or +/-Infinity.
4364 Register exponent = scratch1;
4365 Register mask_reg = scratch2;
4366 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4367 li(mask_reg, HeapNumber::kExponentMask);
4369 And(exponent, exponent, mask_reg);
4370 Branch(not_number, eq, exponent, Operand(mask_reg));
4372 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4377 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4379 Register scratch1) {
4380 sra(scratch1, smi, kSmiTagSize);
4381 mtc1(scratch1, value);
4382 cvt_d_w(value, value);
4386 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4387 const Operand& right,
4388 Register overflow_dst,
4390 if (right.is_reg()) {
4391 AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4394 mov(scratch, left); // Preserve left.
4395 addiu(dst, left, right.immediate()); // Left is overwritten.
4396 xor_(scratch, dst, scratch); // Original left.
4397 // Load right since xori takes uint16 as immediate.
4398 addiu(t9, zero_reg, right.immediate());
4399 xor_(overflow_dst, dst, t9);
4400 and_(overflow_dst, overflow_dst, scratch);
4402 addiu(dst, left, right.immediate());
4403 xor_(overflow_dst, dst, left);
4404 // Load right since xori takes uint16 as immediate.
4405 addiu(t9, zero_reg, right.immediate());
4406 xor_(scratch, dst, t9);
4407 and_(overflow_dst, scratch, overflow_dst);
4413 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4415 Register overflow_dst,
4417 DCHECK(!dst.is(overflow_dst));
4418 DCHECK(!dst.is(scratch));
4419 DCHECK(!overflow_dst.is(scratch));
4420 DCHECK(!overflow_dst.is(left));
4421 DCHECK(!overflow_dst.is(right));
4423 if (left.is(right) && dst.is(left)) {
4424 DCHECK(!dst.is(t9));
4425 DCHECK(!scratch.is(t9));
4426 DCHECK(!left.is(t9));
4427 DCHECK(!right.is(t9));
4428 DCHECK(!overflow_dst.is(t9));
4434 mov(scratch, left); // Preserve left.
4435 addu(dst, left, right); // Left is overwritten.
4436 xor_(scratch, dst, scratch); // Original left.
4437 xor_(overflow_dst, dst, right);
4438 and_(overflow_dst, overflow_dst, scratch);
4439 } else if (dst.is(right)) {
4440 mov(scratch, right); // Preserve right.
4441 addu(dst, left, right); // Right is overwritten.
4442 xor_(scratch, dst, scratch); // Original right.
4443 xor_(overflow_dst, dst, left);
4444 and_(overflow_dst, overflow_dst, scratch);
4446 addu(dst, left, right);
4447 xor_(overflow_dst, dst, left);
4448 xor_(scratch, dst, right);
4449 and_(overflow_dst, scratch, overflow_dst);
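// All three register cases compute the standard bit trick for signed addition
// overflow: it occurred iff both operands have the same sign and the sum's
// sign differs. A host-side sketch (plain C++ for illustration):
//
//   bool AddOverflows(int32_t left, int32_t right) {
//     int32_t dst = static_cast<int32_t>(
//         static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
//     return ((dst ^ left) & (dst ^ right)) < 0;  // Sign bit set on overflow.
//   }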
4454 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4455 const Operand& right,
4456 Register overflow_dst,
4458 if (right.is_reg()) {
4459 SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4462 mov(scratch, left); // Preserve left.
4463 addiu(dst, left, -(right.immediate())); // Left is overwritten.
4464 xor_(overflow_dst, dst, scratch); // scratch is original left.
4465 // Load right since xori takes uint16 as immediate.
4466 addiu(t9, zero_reg, right.immediate());
4467 xor_(scratch, scratch, t9); // scratch is original left.
4468 and_(overflow_dst, scratch, overflow_dst);
4470 addiu(dst, left, -(right.immediate()));
4471 xor_(overflow_dst, dst, left);
4472 // Load right since xori takes uint16 as immediate.
4473 addiu(t9, zero_reg, right.immediate());
4474 xor_(scratch, left, t9);
4475 and_(overflow_dst, scratch, overflow_dst);
4481 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4483 Register overflow_dst,
4485 DCHECK(!dst.is(overflow_dst));
4486 DCHECK(!dst.is(scratch));
4487 DCHECK(!overflow_dst.is(scratch));
4488 DCHECK(!overflow_dst.is(left));
4489 DCHECK(!overflow_dst.is(right));
4490 DCHECK(!scratch.is(left));
4491 DCHECK(!scratch.is(right));
4493 // This happens with some crankshaft code. Since Subu works fine if
4494 // left == right, let's not make that restriction here.
4495 if (left.is(right)) {
4497 mov(overflow_dst, zero_reg);
4502 mov(scratch, left); // Preserve left.
4503 subu(dst, left, right); // Left is overwritten.
4504 xor_(overflow_dst, dst, scratch); // scratch is original left.
4505 xor_(scratch, scratch, right); // scratch is original left.
4506 and_(overflow_dst, scratch, overflow_dst);
4507 } else if (dst.is(right)) {
4508 mov(scratch, right); // Preserve right.
4509 subu(dst, left, right); // Right is overwritten.
4510 xor_(overflow_dst, dst, left);
4511 xor_(scratch, left, scratch); // Original right.
4512 and_(overflow_dst, scratch, overflow_dst);
4514 subu(dst, left, right);
4515 xor_(overflow_dst, dst, left);
4516 xor_(scratch, left, right);
4517 and_(overflow_dst, scratch, overflow_dst);
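// For subtraction the trick changes shape: overflow occurred iff the operands
// differ in sign and the result's sign differs from the left operand's.
// Host-side sketch (plain C++ for illustration):
//
//   bool SubOverflows(int32_t left, int32_t right) {
//     int32_t dst = static_cast<int32_t>(
//         static_cast<uint32_t>(left) - static_cast<uint32_t>(right));
//     return ((dst ^ left) & (left ^ right)) < 0;
//   }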
4522 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4524 SaveFPRegsMode save_doubles) {
4525 // All parameters are on the stack. v0 has the return value after call.
4527 // If the expected number of arguments of the runtime function is
4528 // constant, we check that the actual number of arguments matches the
4529 // expectation.
4530 CHECK(f->nargs < 0 || f->nargs == num_arguments);
4532 // TODO(1236192): Most runtime routines don't need the number of
4533 // arguments passed in because it is constant. At some point we
4534 // should remove this need and make the runtime routine entry code
4535 // smarter.
4536 PrepareCEntryArgs(num_arguments);
4537 PrepareCEntryFunction(ExternalReference(f, isolate()));
4538 CEntryStub stub(isolate(), 1, save_doubles);
4543 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4545 BranchDelaySlot bd) {
4546 PrepareCEntryArgs(num_arguments);
4547 PrepareCEntryFunction(ext);
4549 CEntryStub stub(isolate(), 1);
4550 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4554 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4557 // TODO(1236192): Most runtime routines don't need the number of
4558 // arguments passed in because it is constant. At some point we
4559 // should remove this need and make the runtime routine entry code
4560 // smarter.
4561 PrepareCEntryArgs(num_arguments);
4562 JumpToExternalReference(ext);
4566 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4569 TailCallExternalReference(ExternalReference(fid, isolate()),
4575 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4576 BranchDelaySlot bd) {
4577 PrepareCEntryFunction(builtin);
4578 CEntryStub stub(isolate(), 1);
4579 Jump(stub.GetCode(),
4580 RelocInfo::CODE_TARGET,
4588 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4590 const CallWrapper& call_wrapper) {
4591 // You can't call a builtin without a valid frame.
4592 DCHECK(flag == JUMP_FUNCTION || has_frame());
4594 GetBuiltinEntry(t9, id);
4595 if (flag == CALL_FUNCTION) {
4596 call_wrapper.BeforeCall(CallSize(t9));
4598 call_wrapper.AfterCall();
4600 DCHECK(flag == JUMP_FUNCTION);
4606 void MacroAssembler::GetBuiltinFunction(Register target,
4607 Builtins::JavaScript id) {
4608 // Load the builtins object into target register.
4609 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4610 lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4611 // Load the JavaScript builtin function from the builtins object.
4612 lw(target, FieldMemOperand(target,
4613 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4617 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4618 DCHECK(!target.is(a1));
4619 GetBuiltinFunction(a1, id);
4620 // Load the code entry point from the builtins object.
4621 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4625 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4626 Register scratch1, Register scratch2) {
4627 if (FLAG_native_code_counters && counter->Enabled()) {
4628 li(scratch1, Operand(value));
4629 li(scratch2, Operand(ExternalReference(counter)));
4630 sw(scratch1, MemOperand(scratch2));
4635 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4636 Register scratch1, Register scratch2) {
4638 if (FLAG_native_code_counters && counter->Enabled()) {
4639 li(scratch2, Operand(ExternalReference(counter)));
4640 lw(scratch1, MemOperand(scratch2));
4641 Addu(scratch1, scratch1, Operand(value));
4642 sw(scratch1, MemOperand(scratch2));
4647 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4648 Register scratch1, Register scratch2) {
4650 if (FLAG_native_code_counters && counter->Enabled()) {
4651 li(scratch2, Operand(ExternalReference(counter)));
4652 lw(scratch1, MemOperand(scratch2));
4653 Subu(scratch1, scratch1, Operand(value));
4654 sw(scratch1, MemOperand(scratch2));
4659 // -----------------------------------------------------------------------------
4660 // Debugging.
4662 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4663 Register rs, Operand rt) {
4664 if (emit_debug_code())
4665 Check(cc, reason, rs, rt);
4669 void MacroAssembler::AssertFastElements(Register elements) {
4670 if (emit_debug_code()) {
4671 DCHECK(!elements.is(at));
4674 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4675 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4676 Branch(&ok, eq, elements, Operand(at));
4677 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4678 Branch(&ok, eq, elements, Operand(at));
4679 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4680 Branch(&ok, eq, elements, Operand(at));
4681 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4688 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4689 Register rs, Operand rt) {
4691 Branch(&L, cc, rs, rt);
4693 // Will not return here.
4698 void MacroAssembler::Abort(BailoutReason reason) {
4702 const char* msg = GetBailoutReason(reason);
4704 RecordComment("Abort message: ");
4708 if (FLAG_trap_on_abort) {
4714 li(a0, Operand(Smi::FromInt(reason)));
4716 // Disable stub call restrictions to always allow calls to abort.
4718 // We don't actually want to generate a pile of code for this, so just
4719 // claim there is a stack frame, without generating one.
4720 FrameScope scope(this, StackFrame::NONE);
4721 CallRuntime(Runtime::kAbort, 1);
4723 CallRuntime(Runtime::kAbort, 1);
4725 // Will not return here.
4726 if (is_trampoline_pool_blocked()) {
4727 // If the calling code cares about the exact number of
4728 // instructions generated, we insert padding here to keep the size
4729 // of the Abort macro constant.
4730 // Currently in debug mode with debug_code enabled the number of
4731 // generated instructions is 10, so we use this as a maximum value.
4732 static const int kExpectedAbortInstructions = 10;
4733 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4734 DCHECK(abort_instructions <= kExpectedAbortInstructions);
4735 while (abort_instructions++ < kExpectedAbortInstructions) {
4742 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4743 if (context_chain_length > 0) {
4744 // Move up the chain of contexts to the context containing the slot.
4745 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4746 for (int i = 1; i < context_chain_length; i++) {
4747 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4750 // Slot is in the current function context. Move it into the
4751 // destination register in case we store into it (the write barrier
4752 // cannot be allowed to destroy the context in cp).
4753 mov(dst, cp);
4758 void MacroAssembler::LoadTransitionedArrayMapConditional(
4759 ElementsKind expected_kind,
4760 ElementsKind transitioned_kind,
4761 Register map_in_out,
4763 Label* no_map_match) {
4764 // Load the global or builtins object from the current context.
4766 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4767 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4769 // Check that the function's map is the same as the expected cached map.
4772 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4773 size_t offset = expected_kind * kPointerSize +
4774 FixedArrayBase::kHeaderSize;
4775 lw(at, FieldMemOperand(scratch, offset));
4776 Branch(no_map_match, ne, map_in_out, Operand(at));
4778 // Use the transitioned cached map.
4779 offset = transitioned_kind * kPointerSize +
4780 FixedArrayBase::kHeaderSize;
4781 lw(map_in_out, FieldMemOperand(scratch, offset));
4785 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4786 // Load the global or builtins object from the current context.
4788 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4789 // Load the native context from the global or builtins object.
4790 lw(function, FieldMemOperand(function,
4791 GlobalObject::kNativeContextOffset));
4792 // Load the function from the native context.
4793 lw(function, MemOperand(function, Context::SlotOffset(index)));
4797 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4800 // Load the initial map. The global functions all have initial maps.
4801 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4802 if (emit_debug_code()) {
4804 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4807 Abort(kGlobalFunctionsMustHaveInitialMap);
4813 void MacroAssembler::StubPrologue() {
4815 Push(Smi::FromInt(StackFrame::STUB));
4816 // Adjust FP to point to saved FP.
4817 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4821 void MacroAssembler::Prologue(bool code_pre_aging) {
4822 PredictableCodeSizeScope predictible_code_size_scope(
4823 this, kNoCodeAgeSequenceLength);
4824 // The following three instructions must remain together and unmodified
4825 // for code aging to work properly.
4826 if (code_pre_aging) {
4827 // Pre-age the code.
4828 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4829 nop(Assembler::CODE_AGE_MARKER_NOP);
4830 // Load the stub address to t9 and call it;
4831 // GetCodeAgeAndParity() extracts the stub address from this instruction.
4833 Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
4835 nop(); // Prevent jalr to jal optimization.
4837 nop(); // Branch delay slot nop.
4838 nop(); // Pad the empty space.
4840 Push(ra, fp, cp, a1);
4841 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4842 // Adjust fp to point to caller's fp.
4843 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4848 void MacroAssembler::EnterFrame(StackFrame::Type type,
4849 bool load_constant_pool_pointer_reg) {
4850 // Out-of-line constant pool not implemented on mips.
4855 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4856 addiu(sp, sp, -5 * kPointerSize);
4857 li(t8, Operand(Smi::FromInt(type)));
4858 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4859 sw(ra, MemOperand(sp, 4 * kPointerSize));
4860 sw(fp, MemOperand(sp, 3 * kPointerSize));
4861 sw(cp, MemOperand(sp, 2 * kPointerSize));
4862 sw(t8, MemOperand(sp, 1 * kPointerSize));
4863 sw(t9, MemOperand(sp, 0 * kPointerSize));
4864 // Adjust FP to point to saved FP.
4866 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4870 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4872 lw(fp, MemOperand(sp, 0 * kPointerSize));
4873 lw(ra, MemOperand(sp, 1 * kPointerSize));
4874 addiu(sp, sp, 2 * kPointerSize);
4878 void MacroAssembler::EnterExitFrame(bool save_doubles,
4880 // Set up the frame structure on the stack.
4881 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4882 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4883 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4885 // This is how the stack will look:
4886 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4887 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4888 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4889 // [fp - 1 (==kSPOffset)] - sp of the called function
4890 // [fp - 2 (==kCodeOffset)] - CodeObject
4891 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4892 // new stack (will contain saved ra)
4895 addiu(sp, sp, -4 * kPointerSize);
4896 sw(ra, MemOperand(sp, 3 * kPointerSize));
4897 sw(fp, MemOperand(sp, 2 * kPointerSize));
4898 addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4900 if (emit_debug_code()) {
4901 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4904 // Accessed from ExitFrame::code_slot.
4905 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4906 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4908 // Save the frame pointer and the context in top.
4909 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4910 sw(fp, MemOperand(t8));
4911 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4912 sw(cp, MemOperand(t8));
4914 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4916 // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4917 DCHECK(kDoubleSize == frame_alignment);
4918 if (frame_alignment > 0) {
4919 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4920 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4922 int space = FPURegister::kMaxNumRegisters * kDoubleSize;
4923 Subu(sp, sp, Operand(space));
4924 // Remember: we only need to save every 2nd double FPU value.
4925 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4926 FPURegister reg = FPURegister::from_code(i);
4927 sdc1(reg, MemOperand(sp, i * kDoubleSize));
4931 // Reserve place for the return address, stack space and an optional slot
4932 // (used by the DirectCEntryStub to hold the return value if a struct is
4933 // returned) and align the frame preparing for calling the runtime function.
4934 DCHECK(stack_space >= 0);
4935 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4936 if (frame_alignment > 0) {
4937 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4938 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4941 // Set the exit frame sp value to point just before the return address
4942 // location.
4943 addiu(at, sp, kPointerSize);
4944 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
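// And(sp, sp, -frame_alignment) rounds sp down to the required multiple:
// frame_alignment is a power of two, so -frame_alignment is a mask with the
// low bits clear, e.g. sp & -8 == sp & ~7 for 8-byte alignment.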
4948 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
4949 bool restore_context, bool do_return,
4950 bool argument_count_is_length) {
4951 // Optionally restore all double registers.
4953 // Remember: we only need to restore every 2nd double FPU value.
4954 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4955 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4956 FPURegister reg = FPURegister::from_code(i);
4957 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4962 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4963 sw(zero_reg, MemOperand(t8));
4965 // Restore current context from top and clear it in debug mode.
4966 if (restore_context) {
4967 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4968 lw(cp, MemOperand(t8));
4971 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4972 sw(a3, MemOperand(t8));
4975 // Pop the arguments, restore registers, and return.
4976 mov(sp, fp); // Respect ABI stack constraint.
4977 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4978 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4980 if (argument_count.is_valid()) {
4981 if (argument_count_is_length) {
4982 addu(sp, sp, argument_count);
4984 sll(t8, argument_count, kPointerSizeLog2);
4990 Ret(USE_DELAY_SLOT);
4991 // If returning, the instruction in the delay slot will be the addiu below.
4997 void MacroAssembler::InitializeNewString(Register string,
4999 Heap::RootListIndex map_index,
5001 Register scratch2) {
5002 sll(scratch1, length, kSmiTagSize);
5003 LoadRoot(scratch2, map_index);
5004 sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
5005 li(scratch1, Operand(String::kEmptyHashField));
5006 sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
5007 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
5011 int MacroAssembler::ActivationFrameAlignment() {
5012 #if V8_HOST_ARCH_MIPS
5013 // Running on the real platform. Use the alignment as mandated by the local
5014 // environment.
5015 // Note: This will break if we ever start generating snapshots on one Mips
5016 // platform for another Mips platform with a different alignment.
5017 return base::OS::ActivationFrameAlignment();
5018 #else // V8_HOST_ARCH_MIPS
5019 // If we are using the simulator then we should always align to the expected
5020 // alignment. As the simulator is used to generate snapshots we do not know
5021 // if the target platform will need alignment, so this is controlled from a
5022 // flag.
5023 return FLAG_sim_stack_alignment;
5024 #endif // V8_HOST_ARCH_MIPS
5028 void MacroAssembler::AssertStackIsAligned() {
5029 if (emit_debug_code()) {
5030 const int frame_alignment = ActivationFrameAlignment();
5031 const int frame_alignment_mask = frame_alignment - 1;
5033 if (frame_alignment > kPointerSize) {
5034 Label alignment_as_expected;
5035 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5036 andi(at, sp, frame_alignment_mask);
5037 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5038 // Don't use Check here, as it will call Runtime_Abort re-entering here.
5039 stop("Unexpected stack alignment");
5040 bind(&alignment_as_expected);
5046 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
5047 Register reg,
5048 Register scratch,
5049 Label* not_power_of_two_or_zero) {
5050 Subu(scratch, reg, Operand(1));
5051 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
5052 scratch, Operand(zero_reg));
5053 and_(at, scratch, reg); // In the delay slot.
5054 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
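// Worked example (editorial sketch): for reg == 16 (10000b), scratch ==
// 15 (01111b) and (reg & scratch) == 0, so neither branch is taken. For
// reg == 12 (1100b), scratch == 11 (1011b) and (reg & scratch) == 8 != 0,
// so the second branch fires. reg == 0 yields scratch == -1, which the lt
// test on the first branch catches.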
5058 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
5059 DCHECK(!reg.is(overflow));
5060 mov(overflow, reg); // Save original value.
5061 SmiTag(reg);
5062 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
5066 void MacroAssembler::SmiTagCheckOverflow(Register dst,
5067 Register src,
5068 Register overflow) {
5069 if (dst.is(src)) {
5070 // Fall back to slower case.
5071 SmiTagCheckOverflow(dst, overflow);
5072 } else {
5073 DCHECK(!dst.is(src));
5074 DCHECK(!dst.is(overflow));
5075 DCHECK(!src.is(overflow));
5076 SmiTag(dst, src);
5077 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
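// Worked example (editorial sketch): Smi-tagging shifts the value left by
// one, so overflow == value ^ (2 * value). For src == 0x3fffffff (the
// largest Smi) the xor is 0x40000001 with the sign bit clear: no overflow.
// For src == 0x40000000 the tagged value is 0x80000000 and the xor is
// 0xc0000000, whose sign bit is set, flagging the overflow.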
5082 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5083 Register src,
5084 Label* smi_case) {
5085 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5086 SmiUntag(dst, src); // Untag in the delay slot.
5090 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5091 Register src,
5092 Label* non_smi_case) {
5093 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
5094 SmiUntag(dst, src); // Untag in the delay slot.
5097 void MacroAssembler::JumpIfSmi(Register value,
5098 Label* smi_label,
5099 Register scratch,
5100 BranchDelaySlot bd) {
5101 DCHECK_EQ(0, kSmiTag);
5102 andi(scratch, value, kSmiTagMask);
5103 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5106 void MacroAssembler::JumpIfNotSmi(Register value,
5107 Label* not_smi_label,
5108 Register scratch,
5109 BranchDelaySlot bd) {
5110 DCHECK_EQ(0, kSmiTag);
5111 andi(scratch, value, kSmiTagMask);
5112 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5116 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5117 Register reg2,
5118 Label* on_not_both_smi) {
5119 STATIC_ASSERT(kSmiTag == 0);
5120 DCHECK_EQ(1, kSmiTagMask);
5121 or_(at, reg1, reg2);
5122 JumpIfNotSmi(at, on_not_both_smi);
5126 void MacroAssembler::JumpIfEitherSmi(Register reg1,
5127 Register reg2,
5128 Label* on_either_smi) {
5129 STATIC_ASSERT(kSmiTag == 0);
5130 DCHECK_EQ(1, kSmiTagMask);
5131 // The AND has the non-Smi tag bit set only if both tag bits are 1,
5132 // i.e. only if neither value is a Smi.
5132 and_(at, reg1, reg2);
5133 JumpIfSmi(at, on_either_smi);
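// Editorial note on the bit trick: with kSmiTag == 0 the low bit of a Smi
// is 0. In JumpIfNotBothSmi, (reg1 | reg2) has a 0 tag bit only when both
// values are Smis; here, (reg1 & reg2) has a 0 tag bit whenever at least
// one value is a Smi, so JumpIfSmi on the AND jumps in exactly that case.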
5137 void MacroAssembler::AssertNotSmi(Register object) {
5138 if (emit_debug_code()) {
5139 STATIC_ASSERT(kSmiTag == 0);
5140 andi(at, object, kSmiTagMask);
5141 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
5146 void MacroAssembler::AssertSmi(Register object) {
5147 if (emit_debug_code()) {
5148 STATIC_ASSERT(kSmiTag == 0);
5149 andi(at, object, kSmiTagMask);
5150 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
5155 void MacroAssembler::AssertString(Register object) {
5156 if (emit_debug_code()) {
5157 STATIC_ASSERT(kSmiTag == 0);
5158 SmiTst(object, t0);
5159 Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
5160 push(object);
5161 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5162 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5163 Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
5164 pop(object);
5169 void MacroAssembler::AssertName(Register object) {
5170 if (emit_debug_code()) {
5171 STATIC_ASSERT(kSmiTag == 0);
5172 SmiTst(object, t0);
5173 Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
5174 push(object);
5175 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5176 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5177 Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
5178 pop(object);
5183 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5184 Register scratch) {
5185 if (emit_debug_code()) {
5186 Label done_checking;
5187 AssertNotSmi(object);
5188 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5189 Branch(&done_checking, eq, object, Operand(scratch));
5190 push(object);
5191 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5192 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
5193 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
5194 pop(object);
5195 bind(&done_checking);
5200 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
5201 if (emit_debug_code()) {
5202 DCHECK(!reg.is(at));
5203 LoadRoot(at, index);
5204 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
5209 void MacroAssembler::JumpIfNotHeapNumber(Register object,
5210 Register heap_number_map,
5211 Register scratch,
5212 Label* on_not_heap_number) {
5213 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5214 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
5215 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
5219 void MacroAssembler::LookupNumberStringCache(Register object,
5220 Register result,
5221 Register scratch1,
5222 Register scratch2,
5223 Register scratch3,
5224 Label* not_found) {
5225 // Use of registers. Register result is used as a temporary.
5226 Register number_string_cache = result;
5227 Register mask = scratch3;
5229 // Load the number string cache.
5230 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
5232 // Make the hash mask from the length of the number string cache. It
5233 // contains two elements (number and string) for each cache entry.
5234 lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
5235 // Divide length by two (length is a smi).
5236 sra(mask, mask, kSmiTagSize + 1);
5237 Addu(mask, mask, -1); // Make mask.
5239 // Calculate the entry in the number string cache. The hash value in the
5240 // number string cache for smis is just the smi value, and the hash for
5241 // doubles is the xor of the upper and lower words. See
5242 // Heap::GetNumberStringCache.
5243 Label is_smi;
5244 Label load_result_from_cache;
5245 JumpIfSmi(object, &is_smi);
5246 CheckMap(object,
5247 scratch1,
5248 Heap::kHeapNumberMapRootIndex,
5249 not_found,
5250 DONT_DO_SMI_CHECK);
5252 STATIC_ASSERT(8 == kDoubleSize);
5253 Addu(scratch1,
5254 object,
5255 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
5256 lw(scratch2, MemOperand(scratch1, kPointerSize));
5257 lw(scratch1, MemOperand(scratch1, 0));
5258 Xor(scratch1, scratch1, Operand(scratch2));
5259 And(scratch1, scratch1, Operand(mask));
5261 // Calculate address of entry in string cache: each entry consists
5262 // of two pointer sized fields.
5263 sll(scratch1, scratch1, kPointerSizeLog2 + 1);
5264 Addu(scratch1, number_string_cache, scratch1);
5266 Register probe = mask;
5267 lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
5268 JumpIfSmi(probe, not_found);
5269 ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
5270 ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
5271 BranchF(&load_result_from_cache, NULL, eq, f12, f14);
5272 Branch(not_found);
5274 bind(&is_smi);
5275 Register scratch = scratch1;
5276 sra(scratch, object, 1); // Shift away the tag.
5277 And(scratch, mask, Operand(scratch));
5279 // Calculate address of entry in string cache: each entry consists
5280 // of two pointer sized fields.
5281 sll(scratch, scratch, kPointerSizeLog2 + 1);
5282 Addu(scratch, number_string_cache, scratch);
5284 // Check if the entry is the smi we are looking for.
5285 lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
5286 Branch(not_found, ne, object, Operand(probe));
5288 // Get the result from the cache.
5289 bind(&load_result_from_cache);
5290 lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
5292 IncrementCounter(isolate()->counters()->number_to_string_native(),
5293 1,
5294 scratch1,
5295 scratch2);
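// Worked example of the cache hash (editorial sketch): the double 2.0 has
// the bit pattern 0x4000000000000000, so the hash is the word xor
// 0x40000000 ^ 0x00000000 == 0x40000000. For a cache with 64 entries
// (FixedArray length 128) the mask is 63, selecting entry 0; each entry is
// two pointers wide, hence the shift by kPointerSizeLog2 + 1 when forming
// the entry address.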
5299 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
5300 Register first, Register second, Register scratch1, Register scratch2,
5301 Label* failure) {
5302 // Test that both first and second are sequential one-byte strings.
5303 // Assume that they are non-smis.
5304 lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
5305 lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
5306 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
5307 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
5309 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
5310 scratch2, failure);
5314 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
5315 Register second,
5316 Register scratch1,
5317 Register scratch2,
5318 Label* failure) {
5319 // Check that neither is a smi.
5320 STATIC_ASSERT(kSmiTag == 0);
5321 And(scratch1, first, Operand(second));
5322 JumpIfSmi(scratch1, failure);
5323 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
5324 scratch2, failure);
5328 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
5329 Register first, Register second, Register scratch1, Register scratch2,
5330 Label* failure) {
5331 const int kFlatOneByteStringMask =
5332 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5333 const int kFlatOneByteStringTag =
5334 kStringTag | kOneByteStringTag | kSeqStringTag;
5335 DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
5336 andi(scratch1, first, kFlatOneByteStringMask);
5337 Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
5338 andi(scratch2, second, kFlatOneByteStringMask);
5339 Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
5343 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
5344 Register scratch,
5345 Label* failure) {
5346 const int kFlatOneByteStringMask =
5347 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5348 const int kFlatOneByteStringTag =
5349 kStringTag | kOneByteStringTag | kSeqStringTag;
5350 And(scratch, type, Operand(kFlatOneByteStringMask));
5351 Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
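// Editorial note: for a SeqOneByteString the instance type has the string
// bit pattern (kIsNotStringMask bit clear), sequential representation
// (kSeqStringTag == 0) and one-byte encoding (kOneByteStringTag), so
// type & kFlatOneByteStringMask equals kFlatOneByteStringTag and the
// failure branch is not taken; any cons, sliced, external or two-byte
// string differs in at least one masked bit.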
5355 static const int kRegisterPassedArguments = 4;
5357 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5358 int num_double_arguments) {
5359 int stack_passed_words = 0;
5360 num_reg_arguments += 2 * num_double_arguments;
5362 // Up to four simple arguments are passed in registers a0..a3.
5363 if (num_reg_arguments > kRegisterPassedArguments) {
5364 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5366 stack_passed_words += kCArgSlotCount;
5367 return stack_passed_words;
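// Worked example (editorial sketch): 2 integer arguments plus 2 double
// arguments give num_reg_arguments == 2 + 2 * 2 == 6, so 6 - 4 == 2 words
// spill to the stack; adding the kCArgSlotCount (4 words on O32) reserved
// argument slots yields 6 stack words in total.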
5371 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5372 Register index,
5373 Register value,
5374 Register scratch,
5375 uint32_t encoding_mask) {
5377 SmiTst(string, at);
5378 Check(ne, kNonObject, at, Operand(zero_reg));
5380 lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
5381 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5383 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5384 li(scratch, Operand(encoding_mask));
5385 Check(eq, kUnexpectedStringType, at, Operand(scratch));
5387 // The index is assumed to be untagged coming in, tag it to compare with the
5388 // string length without using a temp register; it is restored at the end of
5389 // this function.
5390 Label index_tag_ok, index_tag_bad;
5391 TrySmiTag(index, scratch, &index_tag_bad);
5392 Branch(&index_tag_ok);
5393 bind(&index_tag_bad);
5394 Abort(kIndexIsTooLarge);
5395 bind(&index_tag_ok);
5397 lw(at, FieldMemOperand(string, String::kLengthOffset));
5398 Check(lt, kIndexIsTooLarge, index, Operand(at));
5400 DCHECK(Smi::FromInt(0) == 0);
5401 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5403 SmiUntag(index, index);
5407 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5408 int num_double_arguments,
5409 Register scratch) {
5410 int frame_alignment = ActivationFrameAlignment();
5412 // Up to four simple arguments are passed in registers a0..a3.
5413 // Those four arguments must have reserved argument slots on the stack for
5414 // mips, even though those argument slots are not normally used.
5415 // Remaining arguments are pushed on the stack, above (higher address than)
5416 // the argument slots.
5417 int stack_passed_arguments = CalculateStackPassedWords(
5418 num_reg_arguments, num_double_arguments);
5419 if (frame_alignment > kPointerSize) {
5420 // Make stack end at alignment and make room for num_arguments - 4 words
5421 // and the original value of sp.
5422 mov(scratch, sp);
5423 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5424 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5425 And(sp, sp, Operand(-frame_alignment));
5426 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5427 } else {
5428 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5433 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5434 Register scratch) {
5435 PrepareCallCFunction(num_reg_arguments, 0, scratch);
5439 void MacroAssembler::CallCFunction(ExternalReference function,
5440 int num_reg_arguments,
5441 int num_double_arguments) {
5442 li(t8, Operand(function));
5443 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5447 void MacroAssembler::CallCFunction(Register function,
5448 int num_reg_arguments,
5449 int num_double_arguments) {
5450 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5454 void MacroAssembler::CallCFunction(ExternalReference function,
5455 int num_arguments) {
5456 CallCFunction(function, num_arguments, 0);
5460 void MacroAssembler::CallCFunction(Register function,
5461 int num_arguments) {
5462 CallCFunction(function, num_arguments, 0);
5466 void MacroAssembler::CallCFunctionHelper(Register function,
5467 int num_reg_arguments,
5468 int num_double_arguments) {
5469 DCHECK(has_frame());
5470 // Make sure that the stack is aligned before calling a C function unless
5471 // running in the simulator. The simulator has its own alignment check which
5472 // provides more information.
5473 // The argument slots are presumed to have been set up by
5474 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5476 #if V8_HOST_ARCH_MIPS
5477 if (emit_debug_code()) {
5478 int frame_alignment = base::OS::ActivationFrameAlignment();
5479 int frame_alignment_mask = frame_alignment - 1;
5480 if (frame_alignment > kPointerSize) {
5481 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5482 Label alignment_as_expected;
5483 And(at, sp, Operand(frame_alignment_mask));
5484 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5485 // Don't use Check here, as it will call Runtime_Abort possibly
5486 // re-entering here.
5487 stop("Unexpected alignment in CallCFunction");
5488 bind(&alignment_as_expected);
5491 #endif // V8_HOST_ARCH_MIPS
5493 // Just call directly. The function called cannot cause a GC, or
5494 // allow preemption, so the return address in the link register
5495 // stays correct.
5497 if (!function.is(t9)) {
5498 mov(t9, function);
5499 function = t9;
5500 }
5502 Call(function);
5504 int stack_passed_arguments = CalculateStackPassedWords(
5505 num_reg_arguments, num_double_arguments);
5507 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
5508 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5509 } else {
5510 Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5515 #undef BRANCH_ARGS_CHECK
5518 void MacroAssembler::PatchRelocatedValue(Register li_location,
5519 Register scratch,
5520 Register new_value) {
5521 lw(scratch, MemOperand(li_location));
5522 // At this point scratch is a lui(at, ...) instruction.
5523 if (emit_debug_code()) {
5524 And(scratch, scratch, kOpcodeMask);
5525 Check(eq, kTheInstructionToPatchShouldBeALui,
5526 scratch, Operand(LUI));
5527 lw(scratch, MemOperand(li_location));
5529 srl(t9, new_value, kImm16Bits);
5530 Ins(scratch, t9, 0, kImm16Bits);
5531 sw(scratch, MemOperand(li_location));
5533 lw(scratch, MemOperand(li_location, kInstrSize));
5534 // scratch is now ori(at, ...).
5535 if (emit_debug_code()) {
5536 And(scratch, scratch, kOpcodeMask);
5537 Check(eq, kTheInstructionToPatchShouldBeAnOri,
5538 scratch, Operand(ORI));
5539 lw(scratch, MemOperand(li_location, kInstrSize));
5541 Ins(scratch, new_value, 0, kImm16Bits);
5542 sw(scratch, MemOperand(li_location, kInstrSize));
5544 // Update the I-cache so the new lui and ori can be executed.
5545 FlushICache(li_location, 2);
5548 void MacroAssembler::GetRelocatedValue(Register li_location,
5549 Register value,
5550 Register scratch) {
5551 lw(value, MemOperand(li_location));
5552 if (emit_debug_code()) {
5553 And(value, value, kOpcodeMask);
5554 Check(eq, kTheInstructionShouldBeALui,
5555 value, Operand(LUI));
5556 lw(value, MemOperand(li_location));
5559 // value now holds a lui instruction. Extract the immediate.
5560 sll(value, value, kImm16Bits);
5562 lw(scratch, MemOperand(li_location, kInstrSize));
5563 if (emit_debug_code()) {
5564 And(scratch, scratch, kOpcodeMask);
5565 Check(eq, kTheInstructionShouldBeAnOri,
5566 scratch, Operand(ORI));
5567 lw(scratch, MemOperand(li_location, kInstrSize));
5569 // "scratch" now holds an ori instruction. Extract the immediate.
5570 andi(scratch, scratch, kImm16Mask);
5572 // Merge the results.
5573 or_(value, value, scratch);
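// Editorial example: li(at, 0x12345678) is emitted as lui(at, 0x1234)
// followed by ori(at, at, 0x5678). The code above reverses that encoding:
// the lui immediate shifted left 16 gives 0x12340000, and or-ing in the
// 16-bit ori immediate (0x5678) reconstructs the original 0x12345678.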
5577 void MacroAssembler::CheckPageFlag(
5578 Register object,
5579 Register scratch,
5580 int mask,
5581 Condition cc,
5582 Label* condition_met) {
5583 And(scratch, object, Operand(~Page::kPageAlignmentMask));
5584 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5585 And(scratch, scratch, Operand(mask));
5586 Branch(condition_met, cc, scratch, Operand(zero_reg));
5590 void MacroAssembler::JumpIfBlack(Register object,
5591 Register scratch0,
5592 Register scratch1,
5593 Label* on_black) {
5594 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5595 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5599 void MacroAssembler::HasColor(Register object,
5600 Register bitmap_scratch,
5601 Register mask_scratch,
5602 Label* has_color,
5603 int first_bit,
5604 int second_bit) {
5605 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5606 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5608 GetMarkBits(object, bitmap_scratch, mask_scratch);
5610 Label other_color, word_boundary;
5611 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5612 And(t8, t9, Operand(mask_scratch));
5613 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5614 // Shift left 1 by adding.
5615 Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5616 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5617 And(t8, t9, Operand(mask_scratch));
5618 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5619 jmp(&other_color);
5621 bind(&word_boundary);
5622 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5623 And(t9, t9, Operand(1));
5624 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5625 bind(&other_color);
5629 // Detect some, but not all, common pointer-free objects. This is used by the
5630 // incremental write barrier which doesn't care about oddballs (they are always
5631 // marked black immediately so this code is not hit).
5632 void MacroAssembler::JumpIfDataObject(Register value,
5633 Register scratch,
5634 Label* not_data_object) {
5635 DCHECK(!AreAliased(value, scratch, t8, no_reg));
5636 Label is_data_object;
5637 lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5638 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5639 Branch(&is_data_object, eq, t8, Operand(scratch));
5640 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5641 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5642 // If it's a string and it's not a cons string then it's an object containing
5643 // no GC pointers.
5644 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5645 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5646 Branch(not_data_object, ne, t8, Operand(zero_reg));
5647 bind(&is_data_object);
5651 void MacroAssembler::GetMarkBits(Register addr_reg,
5652 Register bitmap_reg,
5653 Register mask_reg) {
5654 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5655 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5656 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5657 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5658 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5659 sll(t8, t8, kPointerSizeLog2);
5660 Addu(bitmap_reg, bitmap_reg, t8);
5661 li(t8, Operand(1));
5662 sllv(mask_reg, t8, mask_reg);
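// Worked example (editorial sketch, 32-bit bitmap cells): for an object at
// page_start + 0x1040 the mark-bit index is 0x1040 >> 2 == 0x410. Its low
// five bits (16) select the bit within a cell, so mask_reg == 1 << 16; the
// remaining bits (0x1040 >> 7 == 32) select the cell, so bitmap_reg ends up
// addressing cell 32 of the page's marking bitmap.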
5666 void MacroAssembler::EnsureNotWhite(
5667 Register value,
5668 Register bitmap_scratch,
5669 Register mask_scratch,
5670 Register load_scratch,
5671 Label* value_is_white_and_not_data) {
5672 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5673 GetMarkBits(value, bitmap_scratch, mask_scratch);
5675 // If the value is black or grey we don't need to do anything.
5676 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5677 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5678 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5679 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5681 Label done;
5683 // Since both black and grey have a 1 in the first position and white does
5684 // not have a 1 there we only need to check one bit.
5685 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5686 And(t8, mask_scratch, load_scratch);
5687 Branch(&done, ne, t8, Operand(zero_reg));
5689 if (emit_debug_code()) {
5690 // Check for impossible bit pattern.
5691 Label ok;
5692 // sll may overflow, making the check conservative.
5693 sll(t8, mask_scratch, 1);
5694 And(t8, load_scratch, t8);
5695 Branch(&ok, eq, t8, Operand(zero_reg));
5696 stop("Impossible marking bit pattern");
5697 bind(&ok);
5700 // Value is white. We check whether it is data that doesn't need scanning.
5701 // Currently only checks for HeapNumber and non-cons strings.
5702 Register map = load_scratch; // Holds map while checking type.
5703 Register length = load_scratch; // Holds length of object after testing type.
5704 Label is_data_object;
5706 // Check for heap-number
5707 lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5708 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5709 {
5710 Label skip;
5711 Branch(&skip, ne, t8, Operand(map));
5712 li(length, HeapNumber::kSize);
5713 Branch(&is_data_object);
5714 bind(&skip);
5715 }
5717 // Check for strings.
5718 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5719 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5720 // If it's a string and it's not a cons string then it's an object containing
5721 // no GC pointers.
5722 Register instance_type = load_scratch;
5723 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5724 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5725 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5726 // It's a non-indirect (non-cons and non-slice) string.
5727 // If it's external, the length is just ExternalString::kSize.
5728 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5729 // External strings are the only ones with the kExternalStringTag bit set.
5731 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
5732 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
5733 And(t8, instance_type, Operand(kExternalStringTag));
5734 {
5735 Label skip;
5736 Branch(&skip, eq, t8, Operand(zero_reg));
5737 li(length, ExternalString::kSize);
5738 Branch(&is_data_object);
5739 bind(&skip);
5740 }
5742 // Sequential string, either Latin1 or UC16.
5743 // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
5744 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5745 // getting the length multiplied by 2.
5746 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5747 DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
5748 lw(t9, FieldMemOperand(value, String::kLengthOffset));
5749 And(t8, instance_type, Operand(kStringEncodingMask));
5750 {
5751 Label skip;
5752 Branch(&skip, eq, t8, Operand(zero_reg));
5753 srl(t9, t9, 1);
5754 bind(&skip);
5755 }
5756 Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5757 And(length, length, Operand(~kObjectAlignmentMask));
5759 bind(&is_data_object);
5760 // Value is a data object, and it is white. Mark it black. Since we know
5761 // that the object is white we can make it black by flipping one bit.
5762 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5763 Or(t8, t8, Operand(mask_scratch));
5764 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5766 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5767 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5768 Addu(t8, t8, Operand(length));
5769 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5771 bind(&done);
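// Editorial example of the size computation above: a sequential one-byte
// string of length 5 carries the Smi 10 in its length field; the one-byte
// path shifts it right to 5, and SeqString::kHeaderSize + 5 is then rounded
// up to the next kObjectAlignment boundary by the add-and-mask pair.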
5775 void MacroAssembler::LoadInstanceDescriptors(Register map,
5776 Register descriptors) {
5777 lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5781 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5782 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5783 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5787 void MacroAssembler::EnumLength(Register dst, Register map) {
5788 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5789 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5790 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
5795 void MacroAssembler::LoadAccessor(Register dst, Register holder,
5796 int accessor_index,
5797 AccessorComponent accessor) {
5798 lw(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
5799 LoadInstanceDescriptors(dst, dst);
5800 lw(dst,
5801 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
5802 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
5803 : AccessorPair::kSetterOffset;
5804 lw(dst, FieldMemOperand(dst, offset));
5808 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5809 Register empty_fixed_array_value = t2;
5810 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5811 Label next, start;
5812 mov(a2, a0);
5814 // Check if the enum length field is properly initialized, indicating that
5815 // there is an enum cache.
5816 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5818 EnumLength(a3, a1);
5819 Branch(
5820 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5822 jmp(&start);
5824 bind(&next);
5825 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5827 // For all objects but the receiver, check that the cache is empty.
5828 EnumLength(a3, a1);
5829 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5831 bind(&start);
5833 // Check that there are no elements. Register a2 contains the current JS
5834 // object we've reached through the prototype chain.
5835 Label no_elements;
5836 lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5837 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5839 // Second chance, the object may be using the empty slow element dictionary.
5840 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5841 Branch(call_runtime, ne, a2, Operand(at));
5843 bind(&no_elements);
5844 lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5845 Branch(&next, ne, a2, Operand(null_value));
5846 jmp(call_runtime);
5849 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5850 DCHECK(!output_reg.is(input_reg));
5851 Label done;
5852 li(output_reg, Operand(255));
5853 // Normal branch: nop in delay slot.
5854 Branch(&done, gt, input_reg, Operand(output_reg));
5855 // Use delay slot in this branch.
5856 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5857 mov(output_reg, zero_reg); // In delay slot.
5858 mov(output_reg, input_reg); // Value is in range 0..255.
5859 bind(&done);
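// Editorial note on the delay-slot trick above: the mov to zero executes
// whether or not the lt branch is taken. Input 300 exits early with 255;
// input -7 branches with 0 already in place; input 42 falls through, where
// the final mov overwrites the speculative zero with the input value.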
5863 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5864 DoubleRegister input_reg,
5865 DoubleRegister temp_double_reg) {
5866 Label above_zero;
5867 Label done;
5868 Label in_bounds;
5870 Move(temp_double_reg, 0.0);
5871 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5873 // Double value is less than zero, NaN or Inf, return 0.
5874 mov(result_reg, zero_reg);
5875 Branch(&done);
5877 // Double value is >= 255, return 255.
5878 bind(&above_zero);
5879 Move(temp_double_reg, 255.0);
5880 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5881 li(result_reg, Operand(255));
5882 Branch(&done);
5884 // In 0-255 range, round and truncate.
5885 bind(&in_bounds);
5886 cvt_w_d(temp_double_reg, input_reg);
5887 mfc1(result_reg, temp_double_reg);
5888 bind(&done);
5892 void MacroAssembler::TestJSArrayForAllocationMemento(
5893 Register receiver_reg,
5894 Register scratch_reg,
5895 Label* no_memento_found,
5896 Condition cond,
5897 Label* allocation_memento_present) {
5898 ExternalReference new_space_start =
5899 ExternalReference::new_space_start(isolate());
5900 ExternalReference new_space_allocation_top =
5901 ExternalReference::new_space_allocation_top_address(isolate());
5902 Addu(scratch_reg, receiver_reg,
5903 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5904 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5905 li(at, Operand(new_space_allocation_top));
5906 lw(at, MemOperand(at));
5907 Branch(no_memento_found, gt, scratch_reg, Operand(at));
5908 lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5909 if (allocation_memento_present) {
5910 Branch(allocation_memento_present, cond, scratch_reg,
5911 Operand(isolate()->factory()->allocation_memento_map()));
5916 Register GetRegisterThatIsNotOneOf(Register reg1,
5917 Register reg2,
5918 Register reg3,
5919 Register reg4,
5920 Register reg5,
5921 Register reg6) {
5922 RegList regs = 0;
5923 if (reg1.is_valid()) regs |= reg1.bit();
5924 if (reg2.is_valid()) regs |= reg2.bit();
5925 if (reg3.is_valid()) regs |= reg3.bit();
5926 if (reg4.is_valid()) regs |= reg4.bit();
5927 if (reg5.is_valid()) regs |= reg5.bit();
5928 if (reg6.is_valid()) regs |= reg6.bit();
5930 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
5931 Register candidate = Register::FromAllocationIndex(i);
5932 if (regs & candidate.bit()) continue;
5933 return candidate;
5934 }
5935 UNREACHABLE();
5936 return no_reg;
5940 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5941 Register object,
5942 Register scratch0,
5943 Register scratch1,
5944 Label* found) {
5945 DCHECK(!scratch1.is(scratch0));
5946 Factory* factory = isolate()->factory();
5947 Register current = scratch0;
5948 Label loop_again;
5950 // Start the walk at the object itself; current then follows the map chain.
5951 Move(current, object);
5953 // Loop based on the map going up the prototype chain.
5954 bind(&loop_again);
5955 lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
5956 lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
5957 DecodeField<Map::ElementsKindBits>(scratch1);
5958 Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
5959 lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
5960 Branch(&loop_again, ne, current, Operand(factory->null_value()));
5964 bool AreAliased(Register reg1,
5965 Register reg2,
5966 Register reg3,
5967 Register reg4,
5968 Register reg5,
5969 Register reg6,
5970 Register reg7,
5971 Register reg8) {
5972 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
5973 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
5974 reg7.is_valid() + reg8.is_valid();
5976 RegList regs = 0;
5977 if (reg1.is_valid()) regs |= reg1.bit();
5978 if (reg2.is_valid()) regs |= reg2.bit();
5979 if (reg3.is_valid()) regs |= reg3.bit();
5980 if (reg4.is_valid()) regs |= reg4.bit();
5981 if (reg5.is_valid()) regs |= reg5.bit();
5982 if (reg6.is_valid()) regs |= reg6.bit();
5983 if (reg7.is_valid()) regs |= reg7.bit();
5984 if (reg8.is_valid()) regs |= reg8.bit();
5985 int n_of_non_aliasing_regs = NumRegs(regs);
5987 return n_of_valid_regs != n_of_non_aliasing_regs;
5991 CodePatcher::CodePatcher(byte* address,
5992 int instructions,
5993 FlushICache flush_cache)
5994 : address_(address),
5995 size_(instructions * Assembler::kInstrSize),
5996 masm_(NULL, address, size_ + Assembler::kGap),
5997 flush_cache_(flush_cache) {
5998 // Create a new macro assembler pointing to the address of the code to patch.
5999 // The size is adjusted with kGap in order for the assembler to generate size
6000 // bytes of instructions without failing with buffer size constraints.
6001 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6005 CodePatcher::~CodePatcher() {
6006 // Indicate that code has changed.
6007 if (flush_cache_ == FLUSH) {
6008 CpuFeatures::FlushICache(address_, size_);
6011 // Check that the code was patched as expected.
6012 DCHECK(masm_.pc_ == address_ + size_);
6013 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6017 void CodePatcher::Emit(Instr instr) {
6018 masm()->emit(instr);
6022 void CodePatcher::Emit(Address addr) {
6023 masm()->emit(reinterpret_cast<Instr>(addr));
6027 void CodePatcher::ChangeBranchCondition(Condition cond) {
6028 Instr instr = Assembler::instr_at(masm_.pc_);
6029 DCHECK(Assembler::IsBranch(instr));
6030 uint32_t opcode = Assembler::GetOpcodeField(instr);
6031 // Currently only the 'eq' and 'ne' cond values are supported and the simple
6032 // branch instructions (with opcode being the branch type).
6033 // There are some special cases (see Assembler::IsBranch()) so extending this
6034 // would be tricky.
6035 DCHECK(opcode == BEQ ||
6036 opcode == BNE ||
6037 opcode == BLEZ ||
6038 opcode == BGTZ ||
6039 opcode == BEQL ||
6040 opcode == BNEL ||
6041 opcode == BLEZL ||
6042 opcode == BGTZL);
6043 opcode = (cond == eq) ? BEQ : BNE;
6044 instr = (instr & ~kOpcodeMask) | opcode;
6045 masm_.emit(instr);
6049 void MacroAssembler::TruncatingDiv(Register result,
6050 Register dividend,
6051 int32_t divisor) {
6052 DCHECK(!dividend.is(result));
6053 DCHECK(!dividend.is(at));
6054 DCHECK(!result.is(at));
6055 base::MagicNumbersForDivision<uint32_t> mag =
6056 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
6057 li(at, Operand(mag.multiplier));
6058 Mulh(result, dividend, Operand(at));
6059 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
6060 if (divisor > 0 && neg) {
6061 Addu(result, result, Operand(dividend));
6063 if (divisor < 0 && !neg && mag.multiplier > 0) {
6064 Subu(result, result, Operand(dividend));
6066 if (mag.shift > 0) sra(result, result, mag.shift);
6067 srl(at, dividend, 31);
6068 Addu(result, result, Operand(at));
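// Worked example (editorial sketch): for divisor == 3 the magic constants
// are multiplier 0x55555556 and shift 0. With dividend == 7, Mulh yields
// (7 * 0x55555556) >> 32 == 2 and the final srl/Addu adds the sign bit (0),
// so result == 2 == 7 / 3 truncated; for dividend == -7 the high product is
// -3 and adding the sign bit (1) gives -2, matching C's truncated division.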
6072 } } // namespace v8::internal
6074 #endif // V8_TARGET_ARCH_MIPS