2 // Copyright 2012 the V8 project authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
6 #include <limits.h> // For LONG_MIN, LONG_MAX.
8 #if V8_TARGET_ARCH_MIPS
10 #include "src/base/bits.h"
11 #include "src/base/division-by-constant.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/cpu-profiler.h"
15 #include "src/debug/debug.h"
16 #include "src/mips/macro-assembler-mips.h"
17 #include "src/runtime/runtime.h"
22 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
23 : Assembler(arg_isolate, buffer, size),
24 generating_stub_(false),
26 has_double_zero_reg_set_(false) {
27 if (isolate() != NULL) {
28 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
34 void MacroAssembler::Load(Register dst,
35 const MemOperand& src,
37 DCHECK(!r.IsDouble());
40 } else if (r.IsUInteger8()) {
42 } else if (r.IsInteger16()) {
44 } else if (r.IsUInteger16()) {
52 void MacroAssembler::Store(Register src,
53 const MemOperand& dst,
55 DCHECK(!r.IsDouble());
56 if (r.IsInteger8() || r.IsUInteger8()) {
58 } else if (r.IsInteger16() || r.IsUInteger16()) {
61 if (r.IsHeapObject()) {
63 } else if (r.IsSmi()) {
71 void MacroAssembler::LoadRoot(Register destination,
72 Heap::RootListIndex index) {
73 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
77 void MacroAssembler::LoadRoot(Register destination,
78 Heap::RootListIndex index,
80 Register src1, const Operand& src2) {
81 Branch(2, NegateCondition(cond), src1, src2);
82 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
86 void MacroAssembler::StoreRoot(Register source,
87 Heap::RootListIndex index) {
88 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
89 sw(source, MemOperand(s6, index << kPointerSizeLog2));
93 void MacroAssembler::StoreRoot(Register source,
94 Heap::RootListIndex index,
96 Register src1, const Operand& src2) {
97 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
98 Branch(2, NegateCondition(cond), src1, src2);
99 sw(source, MemOperand(s6, index << kPointerSizeLog2));
103 // Push and pop all registers that can hold pointers.
104 void MacroAssembler::PushSafepointRegisters() {
105 // Safepoints expect a block of kNumSafepointRegisters values on the
106 // stack, so adjust the stack for unsaved registers.
107 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
108 DCHECK(num_unsaved >= 0);
109 if (num_unsaved > 0) {
110 Subu(sp, sp, Operand(num_unsaved * kPointerSize));
112 MultiPush(kSafepointSavedRegisters);
116 void MacroAssembler::PopSafepointRegisters() {
117 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
118 MultiPop(kSafepointSavedRegisters);
119 if (num_unsaved > 0) {
120 Addu(sp, sp, Operand(num_unsaved * kPointerSize));
125 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
126 sw(src, SafepointRegisterSlot(dst));
130 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
131 lw(dst, SafepointRegisterSlot(src));
135 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
136 // The registers are pushed starting with the highest encoding,
137 // which means that lowest encodings are closest to the stack pointer.
138 return kSafepointRegisterStackIndexMap[reg_code];
142 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
143 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
147 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
148 UNIMPLEMENTED_MIPS();
149 // General purpose registers are pushed last on the stack.
150 int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
151 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
152 return MemOperand(sp, doubles_size + register_offset);
156 void MacroAssembler::InNewSpace(Register object,
160 DCHECK(cc == eq || cc == ne);
161 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
162 Branch(branch, cc, scratch,
163 Operand(ExternalReference::new_space_start(isolate())));
167 // Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
168 // The register 'object' contains a heap object pointer. The heap object
169 // tag is shifted away.
170 void MacroAssembler::RecordWriteField(
176 SaveFPRegsMode save_fp,
177 RememberedSetAction remembered_set_action,
179 PointersToHereCheck pointers_to_here_check_for_value) {
180 DCHECK(!AreAliased(value, dst, t8, object));
181 // First, check if a write barrier is even needed. The tests below
182 // catch stores of Smis.
185 // Skip barrier if writing a smi.
186 if (smi_check == INLINE_SMI_CHECK) {
187 JumpIfSmi(value, &done);
190 // Although the object register is tagged, the offset is relative to the start
191 // of the object, so the offset must be a multiple of kPointerSize.
192 DCHECK(IsAligned(offset, kPointerSize));
194 Addu(dst, object, Operand(offset - kHeapObjectTag));
195 if (emit_debug_code()) {
197 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
198 Branch(&ok, eq, t8, Operand(zero_reg));
199 stop("Unaligned cell in write barrier");
208 remembered_set_action,
210 pointers_to_here_check_for_value);
214 // Clobber clobbered input registers when running with the debug-code flag
215 // turned on to provoke errors.
216 if (emit_debug_code()) {
217 li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
218 li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
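// Typical usage (sketch; the registers, the field offset and the reliance on
// the declaration's trailing default arguments are illustrative, not taken
// from this file): a tagged field store such as
//   sw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
// would be followed by
//   RecordWriteField(a1, JSObject::kPropertiesOffset, a0, a2,
//                    kRAHasNotBeenSaved, kDontSaveFPRegs);
// so that the incremental-marking write barrier sees the new value.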
223 // Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
224 void MacroAssembler::RecordWriteForMap(Register object,
228 SaveFPRegsMode fp_mode) {
229 if (emit_debug_code()) {
231 lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
233 kWrongAddressOrValuePassedToRecordWrite,
235 Operand(isolate()->factory()->meta_map()));
238 if (!FLAG_incremental_marking) {
242 if (emit_debug_code()) {
243 lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
245 kWrongAddressOrValuePassedToRecordWrite,
252 // A single check of the map's page's interesting flag suffices, since it is
253 // only set during incremental collection, and then it's also guaranteed that
254 // the from object's page's interesting flag is also set. This optimization
255 // relies on the fact that maps can never be in new space.
257 map, // Used as scratch.
258 MemoryChunk::kPointersToHereAreInterestingMask,
262 Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
263 if (emit_debug_code()) {
265 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
266 Branch(&ok, eq, at, Operand(zero_reg));
267 stop("Unaligned cell in write barrier");
271 // Record the actual write.
272 if (ra_status == kRAHasNotBeenSaved) {
275 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
278 if (ra_status == kRAHasNotBeenSaved) {
284 // Count number of write barriers in generated code.
285 isolate()->counters()->write_barriers_static()->Increment();
286 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
288 // Clobber clobbered registers when running with the debug-code flag
289 // turned on to provoke errors.
290 if (emit_debug_code()) {
291 li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
292 li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
297 // Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
298 // The register 'object' contains a heap object pointer. The heap object
299 // tag is shifted away.
300 void MacroAssembler::RecordWrite(
305 SaveFPRegsMode fp_mode,
306 RememberedSetAction remembered_set_action,
308 PointersToHereCheck pointers_to_here_check_for_value) {
309 DCHECK(!AreAliased(object, address, value, t8));
310 DCHECK(!AreAliased(object, address, value, t9));
312 if (emit_debug_code()) {
313 lw(at, MemOperand(address));
315 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
318 if (remembered_set_action == OMIT_REMEMBERED_SET &&
319 !FLAG_incremental_marking) {
323 // First, check if a write barrier is even needed. The tests below
324 // catch stores of smis and stores into the young generation.
327 if (smi_check == INLINE_SMI_CHECK) {
328 DCHECK_EQ(0, kSmiTag);
329 JumpIfSmi(value, &done);
332 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
334 value, // Used as scratch.
335 MemoryChunk::kPointersToHereAreInterestingMask,
339 CheckPageFlag(object,
340 value, // Used as scratch.
341 MemoryChunk::kPointersFromHereAreInterestingMask,
345 // Record the actual write.
346 if (ra_status == kRAHasNotBeenSaved) {
349 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
352 if (ra_status == kRAHasNotBeenSaved) {
358 // Count number of write barriers in generated code.
359 isolate()->counters()->write_barriers_static()->Increment();
360 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
363 // Clobber clobbered registers when running with the debug-code flag
364 // turned on to provoke errors.
365 if (emit_debug_code()) {
366 li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
367 li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
372 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
375 SaveFPRegsMode fp_mode,
376 RememberedSetFinalAction and_then) {
378 if (emit_debug_code()) {
380 JumpIfNotInNewSpace(object, scratch, &ok);
381 stop("Remembered set pointer is in new space");
384 // Load store buffer top.
385 ExternalReference store_buffer =
386 ExternalReference::store_buffer_top(isolate());
387 li(t8, Operand(store_buffer));
388 lw(scratch, MemOperand(t8));
389 // Store pointer to buffer and increment buffer top.
390 sw(address, MemOperand(scratch));
391 Addu(scratch, scratch, kPointerSize);
392 // Write back new top of buffer.
393 sw(scratch, MemOperand(t8));
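// In effect (sketch): the loads and stores above append the address to the
// store buffer, i.e. *store_buffer_top++ = address; the check below then
// calls the StoreBufferOverflowStub when the incremented top has the
// kStoreBufferOverflowBit set.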
394 // Call stub on end of buffer.
395 // Check for end of buffer.
396 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
397 if (and_then == kFallThroughAtEnd) {
398 Branch(&done, eq, t8, Operand(zero_reg));
400 DCHECK(and_then == kReturnAtEnd);
401 Ret(eq, t8, Operand(zero_reg));
404 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
405 CallStub(&store_buffer_overflow);
408 if (and_then == kReturnAtEnd) {
414 // -----------------------------------------------------------------------------
415 // Allocation support.
418 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
423 DCHECK(!holder_reg.is(scratch));
424 DCHECK(!holder_reg.is(at));
425 DCHECK(!scratch.is(at));
427 // Load current lexical context from the stack frame.
428 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
429 // In debug mode, make sure the lexical context is set.
431 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
432 scratch, Operand(zero_reg));
435 // Load the native context of the current context.
437 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
438 lw(scratch, FieldMemOperand(scratch, offset));
439 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
441 // Check the context is a native context.
442 if (emit_debug_code()) {
443 push(holder_reg); // Temporarily save holder on the stack.
444 // Read the first word and compare to the native_context_map.
445 lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
446 LoadRoot(at, Heap::kNativeContextMapRootIndex);
447 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
448 holder_reg, Operand(at));
449 pop(holder_reg); // Restore holder.
452 // Check if both contexts are the same.
453 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
454 Branch(&same_contexts, eq, scratch, Operand(at));
456 // Check the context is a native context.
457 if (emit_debug_code()) {
458 push(holder_reg); // Temporarily save holder on the stack.
459 mov(holder_reg, at); // Move at to its holding place.
460 LoadRoot(at, Heap::kNullValueRootIndex);
461 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
462 holder_reg, Operand(at));
464 lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
465 LoadRoot(at, Heap::kNativeContextMapRootIndex);
466 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
467 holder_reg, Operand(at));
468 // Restoring at is not needed; at is reloaded below.
469 pop(holder_reg); // Restore holder.
470 // Restore at to holder's context.
471 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
474 // Check that the security token in the calling global object is
475 // compatible with the security token in the receiving global object.
477 int token_offset = Context::kHeaderSize +
478 Context::SECURITY_TOKEN_INDEX * kPointerSize;
480 lw(scratch, FieldMemOperand(scratch, token_offset));
481 lw(at, FieldMemOperand(at, token_offset));
482 Branch(miss, ne, scratch, Operand(at));
484 bind(&same_contexts);
488 // Compute the hash code from the untagged key. This must be kept in sync with
489 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
490 // code-stub-hydrogen.cc
491 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
492 // First of all we assign the hash seed to scratch.
493 LoadRoot(scratch, Heap::kHashSeedRootIndex);
496 // Xor original key with a seed.
497 xor_(reg0, reg0, scratch);
499 // Compute the hash code from the untagged key. This must be kept in sync
500 // with ComputeIntegerHash in utils.h.
502 // hash = ~hash + (hash << 15);
503 nor(scratch, reg0, zero_reg);
505 addu(reg0, scratch, at);
507 // hash = hash ^ (hash >> 12);
509 xor_(reg0, reg0, at);
511 // hash = hash + (hash << 2);
513 addu(reg0, reg0, at);
515 // hash = hash ^ (hash >> 4);
517 xor_(reg0, reg0, at);
519 // hash = hash * 2057;
520 sll(scratch, reg0, 11);
522 addu(reg0, reg0, at);
523 addu(reg0, reg0, scratch);
525 // hash = hash ^ (hash >> 16);
527 xor_(reg0, reg0, at);
528 And(reg0, reg0, Operand(0x3fffffff));
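// Equivalent C++ for the mixing steps above (a sketch, mirroring the
// per-step comments and ComputeIntegerHash in utils.h):
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;  // i.e. hash + (hash << 3) + (hash << 11)
//   hash = hash ^ (hash >> 16);
//   hash = hash & 0x3fffffff;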
532 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
541 // elements - holds the slow-case elements of the receiver on entry.
542 // Unchanged unless 'result' is the same register.
544 // key - holds the smi key on entry.
545 // Unchanged unless 'result' is the same register.
548 // result - holds the result on exit if the load succeeded.
549 // Allowed to be the same as 'key' or 'result'.
550 // Unchanged on bailout so 'key' or 'result' can be used
551 // in further computation.
553 // Scratch registers:
555 // reg0 - holds the untagged key on entry and holds the hash once computed.
557 // reg1 - Used to hold the capacity mask of the dictionary.
559 // reg2 - Used for the index into the dictionary.
560 // at - Temporary (avoid MacroAssembler instructions also using 'at').
563 GetNumberHash(reg0, reg1);
565 // Compute the capacity mask.
566 lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
567 sra(reg1, reg1, kSmiTagSize);
568 Subu(reg1, reg1, Operand(1));
570 // Generate an unrolled loop that performs a few probes before giving up.
571 for (int i = 0; i < kNumberDictionaryProbes; i++) {
572 // Use reg2 for index calculations and keep the hash intact in reg0.
574 // Compute the masked index: (hash + i + i * i) & mask.
576 Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
578 and_(reg2, reg2, reg1);
580 // Scale the index by multiplying by the element size.
581 DCHECK(SeededNumberDictionary::kEntrySize == 3);
582 sll(at, reg2, 1); // 2x.
583 addu(reg2, reg2, at); // reg2 = reg2 * 3.
585 // Check if the key is identical to the name.
586 sll(at, reg2, kPointerSizeLog2);
587 addu(reg2, elements, at);
589 lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
590 if (i != kNumberDictionaryProbes - 1) {
591 Branch(&done, eq, key, Operand(at));
593 Branch(miss, ne, key, Operand(at));
598 // Check that the value is a field property.
599 // reg2: elements + (index * kPointerSize).
600 const int kDetailsOffset =
601 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
602 lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
604 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
605 Branch(miss, ne, at, Operand(zero_reg));
607 // Get the value at the masked, scaled index and return.
608 const int kValueOffset =
609 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
610 lw(result, FieldMemOperand(reg2, kValueOffset));
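// Probing scheme (sketch, restating the code above): for probe i the masked
// index is
//   index = (hash + i + i * i) & capacity_mask
// and, with kEntrySize == 3, the probed entry starts at
//   elements + kElementsStartOffset + index * 3 * kPointerSize,
// holding the key at offset 0, the value at kPointerSize and the details
// word at 2 * kPointerSize, which is what the loads above read.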
614 // ---------------------------------------------------------------------------
615 // Instruction macros.
617 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
619 addu(rd, rs, rt.rm());
621 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
622 addiu(rd, rs, rt.imm32_);
624 // li handles the relocation.
633 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
635 subu(rd, rs, rt.rm());
637 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
638 addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
640 // li handles the relocation.
649 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
651 if (IsMipsArchVariant(kLoongson)) {
655 mul(rd, rs, rt.rm());
658 // li handles the relocation.
661 if (IsMipsArchVariant(kLoongson)) {
671 void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
672 Register rs, const Operand& rt) {
674 if (!IsMipsArchVariant(kMips32r6)) {
680 DCHECK(!rd_hi.is(rs));
681 DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
682 muh(rd_hi, rs, rt.rm());
683 mul(rd_lo, rs, rt.rm());
685 DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
686 mul(rd_lo, rs, rt.rm());
687 muh(rd_hi, rs, rt.rm());
691 // li handles the relocation.
694 if (!IsMipsArchVariant(kMips32r6)) {
700 DCHECK(!rd_hi.is(rs));
701 DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
705 DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
714 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
716 if (!IsMipsArchVariant(kMips32r6)) {
720 muh(rd, rs, rt.rm());
723 // li handles the relocation.
726 if (!IsMipsArchVariant(kMips32r6)) {
736 void MacroAssembler::Mult(Register rs, const Operand& rt) {
740 // li handles the relocation.
748 void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
750 if (!IsMipsArchVariant(kMips32r6)) {
754 muhu(rd, rs, rt.rm());
757 // li handles the relocation.
760 if (!IsMipsArchVariant(kMips32r6)) {
770 void MacroAssembler::Multu(Register rs, const Operand& rt) {
774 // li handles the relocation.
782 void MacroAssembler::Div(Register rs, const Operand& rt) {
786 // li handles the relocation.
794 void MacroAssembler::Div(Register rem, Register res,
795 Register rs, const Operand& rt) {
797 if (!IsMipsArchVariant(kMips32r6)) {
802 div(res, rs, rt.rm());
803 mod(rem, rs, rt.rm());
806 // li handles the relocation.
809 if (!IsMipsArchVariant(kMips32r6)) {
821 void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
823 if (!IsMipsArchVariant(kMips32r6)) {
827 div(res, rs, rt.rm());
830 // li handles the relocation.
833 if (!IsMipsArchVariant(kMips32r6)) {
843 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
845 if (!IsMipsArchVariant(kMips32r6)) {
849 mod(rd, rs, rt.rm());
852 // li handles the relocation.
855 if (!IsMipsArchVariant(kMips32r6)) {
865 void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
867 if (!IsMipsArchVariant(kMips32r6)) {
871 modu(rd, rs, rt.rm());
874 // li handles the relocation.
877 if (!IsMipsArchVariant(kMips32r6)) {
887 void MacroAssembler::Divu(Register rs, const Operand& rt) {
891 // li handles the relocation.
899 void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
901 if (!IsMipsArchVariant(kMips32r6)) {
905 divu(res, rs, rt.rm());
908 // li handles the relocation.
911 if (!IsMipsArchVariant(kMips32r6)) {
921 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
923 and_(rd, rs, rt.rm());
925 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
926 andi(rd, rs, rt.imm32_);
928 // li handles the relocation.
937 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
939 or_(rd, rs, rt.rm());
941 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
942 ori(rd, rs, rt.imm32_);
944 // li handles the relocation.
953 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
955 xor_(rd, rs, rt.rm());
957 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
958 xori(rd, rs, rt.imm32_);
960 // li handles the relocation.
969 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
971 nor(rd, rs, rt.rm());
973 // li handles the relocation.
981 void MacroAssembler::Neg(Register rs, const Operand& rt) {
984 DCHECK(!at.is(rt.rm()));
986 xor_(rs, rt.rm(), at);
990 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
992 slt(rd, rs, rt.rm());
994 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
995 slti(rd, rs, rt.imm32_);
997 // li handles the relocation.
1006 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1008 sltu(rd, rs, rt.rm());
1010 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
1011 sltiu(rd, rs, rt.imm32_);
1013 // li handles the relocation.
1022 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1023 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1025 rotrv(rd, rs, rt.rm());
1027 rotr(rd, rs, rt.imm32_);
1031 subu(at, zero_reg, rt.rm());
1033 srlv(rd, rs, rt.rm());
1036 if (rt.imm32_ == 0) {
1039 srl(at, rs, rt.imm32_);
1040 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
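// Worked example (sketch): on cores without rotr, rotating right by an
// immediate r is synthesized from srl(at, rs, r) and
// sll(rd, rs, (32 - r) & 0x1f), whose results are then combined; for r = 8
// the bits shifted out on the right reappear in bits 31..24 of rd.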
1048 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1049 if (IsMipsArchVariant(kLoongson)) {
1057 // ------------Pseudo-instructions-------------
1059 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1061 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1065 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1067 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1071 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1072 AllowDeferredHandleDereference smi_check;
1073 if (value->IsSmi()) {
1074 li(dst, Operand(value), mode);
1076 DCHECK(value->IsHeapObject());
1077 if (isolate()->heap()->InNewSpace(*value)) {
1078 Handle<Cell> cell = isolate()->factory()->NewCell(value);
1079 li(dst, Operand(cell));
1080 lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
1082 li(dst, Operand(value));
1088 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1089 DCHECK(!j.is_reg());
1090 BlockTrampolinePoolScope block_trampoline_pool(this);
1091 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1092 // Normal load of an immediate value which does not need Relocation Info.
1093 if (is_int16(j.imm32_)) {
1094 addiu(rd, zero_reg, j.imm32_);
1095 } else if (!(j.imm32_ & kHiMask)) {
1096 ori(rd, zero_reg, j.imm32_);
1097 } else if (!(j.imm32_ & kImm16Mask)) {
1098 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1100 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1101 ori(rd, rd, (j.imm32_ & kImm16Mask));
1104 if (MustUseReg(j.rmode_)) {
1105 RecordRelocInfo(j.rmode_, j.imm32_);
1107 // We always need the same number of instructions as we may need to patch
1108 // this code to load another value which may need 2 instructions to load.
1109 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1110 ori(rd, rd, (j.imm32_ & kImm16Mask));
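// Examples (sketch): with OPTIMIZE_SIZE and no relocation info, li(rd, 42)
// emits a single addiu, li(rd, 0x12340000) a single lui, and
// li(rd, 0x12345678) the pair lui rd, 0x1234 / ori rd, rd, 0x5678. With
// relocation info or CONSTANT_SIZE the two-instruction lui/ori form is
// always emitted so the constant can later be patched in place.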
1115 void MacroAssembler::MultiPush(RegList regs) {
1116 int16_t num_to_push = NumberOfBitsSet(regs);
1117 int16_t stack_offset = num_to_push * kPointerSize;
1119 Subu(sp, sp, Operand(stack_offset));
1120 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1121 if ((regs & (1 << i)) != 0) {
1122 stack_offset -= kPointerSize;
1123 sw(ToRegister(i), MemOperand(sp, stack_offset));
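// For illustration (sketch): MultiPush(v0.bit() | a0.bit()) reserves 8 bytes
// and stores a0 (code 4) at sp + 4 and v0 (code 2) at sp + 0, so registers
// with lower codes end up closest to the stack pointer, as
// SafepointRegisterStackIndex above assumes.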
1129 void MacroAssembler::MultiPushReversed(RegList regs) {
1130 int16_t num_to_push = NumberOfBitsSet(regs);
1131 int16_t stack_offset = num_to_push * kPointerSize;
1133 Subu(sp, sp, Operand(stack_offset));
1134 for (int16_t i = 0; i < kNumRegisters; i++) {
1135 if ((regs & (1 << i)) != 0) {
1136 stack_offset -= kPointerSize;
1137 sw(ToRegister(i), MemOperand(sp, stack_offset));
1143 void MacroAssembler::MultiPop(RegList regs) {
1144 int16_t stack_offset = 0;
1146 for (int16_t i = 0; i < kNumRegisters; i++) {
1147 if ((regs & (1 << i)) != 0) {
1148 lw(ToRegister(i), MemOperand(sp, stack_offset));
1149 stack_offset += kPointerSize;
1152 addiu(sp, sp, stack_offset);
1156 void MacroAssembler::MultiPopReversed(RegList regs) {
1157 int16_t stack_offset = 0;
1159 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1160 if ((regs & (1 << i)) != 0) {
1161 lw(ToRegister(i), MemOperand(sp, stack_offset));
1162 stack_offset += kPointerSize;
1165 addiu(sp, sp, stack_offset);
1169 void MacroAssembler::MultiPushFPU(RegList regs) {
1170 int16_t num_to_push = NumberOfBitsSet(regs);
1171 int16_t stack_offset = num_to_push * kDoubleSize;
1173 Subu(sp, sp, Operand(stack_offset));
1174 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1175 if ((regs & (1 << i)) != 0) {
1176 stack_offset -= kDoubleSize;
1177 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1183 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1184 int16_t num_to_push = NumberOfBitsSet(regs);
1185 int16_t stack_offset = num_to_push * kDoubleSize;
1187 Subu(sp, sp, Operand(stack_offset));
1188 for (int16_t i = 0; i < kNumRegisters; i++) {
1189 if ((regs & (1 << i)) != 0) {
1190 stack_offset -= kDoubleSize;
1191 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1197 void MacroAssembler::MultiPopFPU(RegList regs) {
1198 int16_t stack_offset = 0;
1200 for (int16_t i = 0; i < kNumRegisters; i++) {
1201 if ((regs & (1 << i)) != 0) {
1202 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1203 stack_offset += kDoubleSize;
1206 addiu(sp, sp, stack_offset);
1210 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1211 int16_t stack_offset = 0;
1213 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1214 if ((regs & (1 << i)) != 0) {
1215 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1216 stack_offset += kDoubleSize;
1219 addiu(sp, sp, stack_offset);
1223 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
1224 RegList saved_regs = kJSCallerSaved | ra.bit();
1225 MultiPush(saved_regs);
1226 AllowExternalCallThatCantCauseGC scope(this);
1228 // Save to a0 in case address == t0.
1230 PrepareCallCFunction(2, t0);
1232 li(a1, instructions * kInstrSize);
1233 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
1234 MultiPop(saved_regs);
1238 void MacroAssembler::Ext(Register rt,
1243 DCHECK(pos + size < 33);
1245 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1246 ext_(rt, rs, pos, size);
1248 // Move rs to rt and shift it left then right to get the
1249 // desired bitfield on the right side and zeroes on the left.
1250 int shift_left = 32 - (pos + size);
1251 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
1253 int shift_right = 32 - size;
1254 if (shift_right > 0) {
1255 srl(rt, rt, shift_right);
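// Worked example (sketch): Ext(rt, rs, 4, 8) on a pre-R2 core emits
// sll(rt, rs, 20) followed by srl(rt, rt, 24), leaving bits 11..4 of rs in
// the low byte of rt with all higher bits cleared.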
1261 void MacroAssembler::Ins(Register rt,
1266 DCHECK(pos + size <= 32);
1269 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1270 ins_(rt, rs, pos, size);
1272 DCHECK(!rt.is(t8) && !rs.is(t8));
1273 Subu(at, zero_reg, Operand(1));
1274 srl(at, at, 32 - size);
1278 nor(at, at, zero_reg);
1285 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1287 FPURegister scratch) {
1288 // Move the data from fs to t8.
1290 Cvt_d_uw(fd, t8, scratch);
1294 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1296 FPURegister scratch) {
1297 // Convert rs to a FP value in fd (and fd + 1).
1298 // We do this by converting rs minus the MSB to avoid sign conversion,
1299 // then adding 2^31 to the result (if needed).
1301 DCHECK(!fd.is(scratch));
1305 // Save rs's MSB to t9.
1309 // Move the result to fd.
1312 // Convert fd to a real FP value.
1315 Label conversion_done;
1317 // If rs's MSB was 0, it's done.
1318 // Otherwise we need to add that to the FP register.
1319 Branch(&conversion_done, eq, t9, Operand(zero_reg));
1321 // Load 2^31 into scratch as its float representation.
1323 mtc1(zero_reg, scratch);
1326 add_d(fd, fd, scratch);
1328 bind(&conversion_done);
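// Worked example (sketch): for rs = 0x80000003 the MSB is set, so the
// conversion is done on 0x00000003 (giving 3.0) and 2^31 (2147483648.0) is
// then added, producing 2147483651.0, the unsigned value of rs.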
1332 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1334 FPURegister scratch) {
1335 Trunc_uw_d(fs, t8, scratch);
1340 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1341 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1351 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1352 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1362 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1363 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1373 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1374 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1384 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1386 FPURegister scratch) {
1387 DCHECK(!fd.is(scratch));
1390 // Load 2^31 into scratch as its float representation.
1392 mtc1(zero_reg, scratch);
1394 // Test if scratch > fd.
1395 // If fd < 2^31 we can convert it normally.
1396 Label simple_convert;
1397 BranchF(&simple_convert, NULL, lt, fd, scratch);
1399 // First we subtract 2^31 from fd, then trunc it to rs
1400 // and add 2^31 to rs.
1401 sub_d(scratch, fd, scratch);
1402 trunc_w_d(scratch, scratch);
1404 Or(rs, rs, 1 << 31);
1408 // Simple conversion.
1409 bind(&simple_convert);
1410 trunc_w_d(scratch, fd);
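// Worked example (sketch): for an input of 3000000000.0 (>= 2^31) the code
// subtracts 2^31 (leaving 852516352.0), truncates that to 852516352 and ORs
// bit 31 back in, producing 0xB2D05E00 == 3000000000.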
1417 void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
1421 mtc1(rt, fs.high());
1426 void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
1430 mfc1(rt, fs.high());
1435 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
1436 Label* nan, Condition cond, FPURegister cmp1,
1437 FPURegister cmp2, BranchDelaySlot bd) {
1439 BlockTrampolinePoolScope block_trampoline_pool(this);
1445 if (IsMipsArchVariant(kMips32r6)) {
1446 sizeField = sizeField == D ? L : W;
1448 DCHECK(nan || target);
1449 // Check for unordered (NaN) cases.
1452 nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
1453 if (!IsMipsArchVariant(kMips32r6)) {
1456 c(UN, D, cmp1, cmp2);
1462 c(UN, D, cmp1, cmp2);
1464 if (bd == PROTECT) {
1469 // Use kDoubleCompareReg for comparison result. It has to be unavailable
1470 // to the lithium register allocator.
1471 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1474 cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
1475 bc1eqz(&skip, kDoubleCompareReg);
1480 cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
1481 bc1nez(nan, kDoubleCompareReg);
1482 if (bd == PROTECT) {
1491 target->is_bound() ? is_near(target) : is_trampoline_emitted();
1494 Condition neg_cond = NegateFpuCondition(cond);
1495 BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
1499 BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
1505 void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
1506 Condition cc, FPURegister cmp1,
1507 FPURegister cmp2, BranchDelaySlot bd) {
1508 if (!IsMipsArchVariant(kMips32r6)) {
1509 BlockTrampolinePoolScope block_trampoline_pool(this);
1511 // Here NaN cases were either handled by this function or are assumed to
1512 // have been handled by the caller.
1515 c(OLT, sizeField, cmp1, cmp2);
1519 c(ULT, sizeField, cmp1, cmp2);
1523 c(ULE, sizeField, cmp1, cmp2);
1527 c(OLE, sizeField, cmp1, cmp2);
1531 c(ULT, sizeField, cmp1, cmp2);
1535 c(OLT, sizeField, cmp1, cmp2);
1539 c(OLE, sizeField, cmp1, cmp2);
1543 c(ULE, sizeField, cmp1, cmp2);
1547 c(EQ, sizeField, cmp1, cmp2);
1551 c(UEQ, sizeField, cmp1, cmp2);
1554 case ne: // Unordered or not equal.
1555 c(EQ, sizeField, cmp1, cmp2);
1559 c(UEQ, sizeField, cmp1, cmp2);
1567 BlockTrampolinePoolScope block_trampoline_pool(this);
1569 // Here NaN cases were either handled by this function or are assumed to
1570 // have been handled by the caller.
1571 // Unsigned conditions are treated as their signed counterpart.
1572 // Use kDoubleCompareReg for the comparison result; it is
1573 // valid in fp64 (FR = 1) mode, which is implied for mips32r6.
1574 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1577 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1578 bc1nez(target, kDoubleCompareReg);
1581 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1582 bc1nez(target, kDoubleCompareReg);
1585 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1586 bc1eqz(target, kDoubleCompareReg);
1589 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1590 bc1eqz(target, kDoubleCompareReg);
1593 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1594 bc1eqz(target, kDoubleCompareReg);
1597 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1598 bc1eqz(target, kDoubleCompareReg);
1601 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1602 bc1nez(target, kDoubleCompareReg);
1605 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1606 bc1nez(target, kDoubleCompareReg);
1609 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1610 bc1nez(target, kDoubleCompareReg);
1613 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1614 bc1nez(target, kDoubleCompareReg);
1617 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1618 bc1eqz(target, kDoubleCompareReg);
1621 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1622 bc1eqz(target, kDoubleCompareReg);
1629 if (bd == PROTECT) {
1635 void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
1637 DCHECK(!src_low.is(at));
1647 void MacroAssembler::Move(FPURegister dst, float imm) {
1648 li(at, Operand(bit_cast<int32_t>(imm)));
1653 void MacroAssembler::Move(FPURegister dst, double imm) {
1654 static const DoubleRepresentation minus_zero(-0.0);
1655 static const DoubleRepresentation zero(0.0);
1656 DoubleRepresentation value_rep(imm);
1657 // Handle special values first.
1658 if (value_rep == zero && has_double_zero_reg_set_) {
1659 mov_d(dst, kDoubleRegZero);
1660 } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
1661 neg_d(dst, kDoubleRegZero);
1664 DoubleAsTwoUInt32(imm, &lo, &hi);
1665 // Move the low part of the double into the lower of the corresponding FPU
1666 // register of FPU register pair.
1668 li(at, Operand(lo));
1671 mtc1(zero_reg, dst);
1673 // Move the high part of the double into the higher of the corresponding FPU
1674 // register of FPU register pair.
1676 li(at, Operand(hi));
1679 Mthc1(zero_reg, dst);
1681 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
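// Worked example (sketch): Move(dst, 1.0) splits the double into
// lo = 0x00000000 and hi = 0x3FF00000, so the low word is written with
// mtc1(zero_reg, dst) and the high word with li(at, 0x3FF00000) followed by
// Mthc1(at, dst).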
1686 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1687 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
1689 Branch(&done, ne, rt, Operand(zero_reg));
1698 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1699 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
1701 Branch(&done, eq, rt, Operand(zero_reg));
1710 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1711 if (IsMipsArchVariant(kLoongson)) {
1712 // Tests an FP condition code and then conditionally moves rs to rd.
1713 // We do not currently use any FPU cc bit other than bit 0.
1715 DCHECK(!(rs.is(t8) || rd.is(t8)));
1717 Register scratch = t8;
1718 // For testing purposes we need to fetch the contents of the FCSR register and
1719 // then test its cc (floating point condition code) bit (for cc = 0, this is
1720 // bit 24 of the FCSR).
1721 cfc1(scratch, FCSR);
1722 // For the MIPS I, II and III architectures, the contents of scratch are
1723 // UNPREDICTABLE for the instruction immediately following CFC1.
1725 srl(scratch, scratch, 16);
1726 andi(scratch, scratch, 0x0080);
1727 Branch(&done, eq, scratch, Operand(zero_reg));
1736 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1737 if (IsMipsArchVariant(kLoongson)) {
1738 // Tests an FP condition code and then conditionally moves rs to rd.
1739 // We do not currently use any FPU cc bit other than bit 0.
1741 DCHECK(!(rs.is(t8) || rd.is(t8)));
1743 Register scratch = t8;
1744 // For testing purposes we need to fetch the contents of the FCSR register and
1745 // then test its cc (floating point condition code) bit (for cc = 0, this is
1746 // bit 24 of the FCSR).
1747 cfc1(scratch, FCSR);
1748 // For the MIPS I, II and III architectures, the contents of scratch are
1749 // UNPREDICTABLE for the instruction immediately following CFC1.
1751 srl(scratch, scratch, 16);
1752 andi(scratch, scratch, 0x0080);
1753 Branch(&done, ne, scratch, Operand(zero_reg));
1762 void MacroAssembler::Clz(Register rd, Register rs) {
1763 if (IsMipsArchVariant(kLoongson)) {
1764 DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1766 Register scratch = t9;
1772 and_(scratch, at, mask);
1773 Branch(&end, ne, scratch, Operand(zero_reg));
1775 Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1784 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1786 DoubleRegister double_input,
1788 DoubleRegister double_scratch,
1789 Register except_flag,
1790 CheckForInexactConversion check_inexact) {
1791 DCHECK(!result.is(scratch));
1792 DCHECK(!double_input.is(double_scratch));
1793 DCHECK(!except_flag.is(scratch));
1797 // Clear the except flag (0 = no exception)
1798 mov(except_flag, zero_reg);
1800 // Test for values that can be exactly represented as a signed 32-bit integer.
1801 cvt_w_d(double_scratch, double_input);
1802 mfc1(result, double_scratch);
1803 cvt_d_w(double_scratch, double_scratch);
1804 BranchF(&done, NULL, eq, double_input, double_scratch);
1806 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1808 if (check_inexact == kDontCheckForInexactConversion) {
1809 // Ignore inexact exceptions.
1810 except_mask &= ~kFCSRInexactFlagMask;
1814 cfc1(scratch, FCSR);
1815 // Disable FPU exceptions.
1816 ctc1(zero_reg, FCSR);
1818 // Do operation based on rounding mode.
1819 switch (rounding_mode) {
1820 case kRoundToNearest:
1821 Round_w_d(double_scratch, double_input);
1824 Trunc_w_d(double_scratch, double_input);
1826 case kRoundToPlusInf:
1827 Ceil_w_d(double_scratch, double_input);
1829 case kRoundToMinusInf:
1830 Floor_w_d(double_scratch, double_input);
1832 } // End of switch-statement.
1835 cfc1(except_flag, FCSR);
1837 ctc1(scratch, FCSR);
1838 // Move the converted value into the result register.
1839 mfc1(result, double_scratch);
1841 // Check for fpu exceptions.
1842 And(except_flag, except_flag, Operand(except_mask));
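// Typical usage (sketch; register and label names are illustrative and the
// trailing kDontCheckForInexactConversion default is assumed):
//   EmitFPUTruncate(kRoundToZero, result, double_input, scratch,
//                   double_scratch, except_flag);
//   Branch(&not_int32, ne, except_flag, Operand(zero_reg));
// i.e. callers branch to a slow path whenever except_flag is non-zero.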
1848 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1849 DoubleRegister double_input,
1851 DoubleRegister single_scratch = kLithiumScratchDouble.low();
1852 Register scratch = at;
1853 Register scratch2 = t9;
1855 // Clear cumulative exception flags and save the FCSR.
1856 cfc1(scratch2, FCSR);
1857 ctc1(zero_reg, FCSR);
1858 // Try a conversion to a signed integer.
1859 trunc_w_d(single_scratch, double_input);
1860 mfc1(result, single_scratch);
1861 // Retrieve and restore the FCSR.
1862 cfc1(scratch, FCSR);
1863 ctc1(scratch2, FCSR);
1864 // Check for overflow and NaNs.
1867 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1868 // If we had no exceptions we are done.
1869 Branch(done, eq, scratch, Operand(zero_reg));
1873 void MacroAssembler::TruncateDoubleToI(Register result,
1874 DoubleRegister double_input) {
1877 TryInlineTruncateDoubleToI(result, double_input, &done);
1879 // If we fell through then inline version didn't succeed - call stub instead.
1881 Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
1882 sdc1(double_input, MemOperand(sp, 0));
1884 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
1887 Addu(sp, sp, Operand(kDoubleSize));
1894 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1896 DoubleRegister double_scratch = f12;
1897 DCHECK(!result.is(object));
1899 ldc1(double_scratch,
1900 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1901 TryInlineTruncateDoubleToI(result, double_scratch, &done);
1903 // If we fell through then inline version didn't succeed - call stub instead.
1905 DoubleToIStub stub(isolate(),
1908 HeapNumber::kValueOffset - kHeapObjectTag,
1918 void MacroAssembler::TruncateNumberToI(Register object,
1920 Register heap_number_map,
1922 Label* not_number) {
1924 DCHECK(!result.is(object));
1926 UntagAndJumpIfSmi(result, object, &done);
1927 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1928 TruncateHeapNumberToI(result, object);
1934 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1936 int num_least_bits) {
1937 Ext(dst, src, kSmiTagSize, num_least_bits);
1941 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1943 int num_least_bits) {
1944 And(dst, src, Operand((1 << num_least_bits) - 1));
1948 // Emulated conditional branches do not emit a nop in the branch delay slot.
1950 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1951 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
1952 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1953 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1956 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1957 BranchShort(offset, bdslot);
1961 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1963 BranchDelaySlot bdslot) {
1964 BranchShort(offset, cond, rs, rt, bdslot);
1968 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1969 if (L->is_bound()) {
1971 BranchShort(L, bdslot);
1976 if (is_trampoline_emitted()) {
1979 BranchShort(L, bdslot);
1985 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1987 BranchDelaySlot bdslot) {
1988 if (L->is_bound()) {
1990 BranchShort(L, cond, rs, rt, bdslot);
1992 if (cond != cc_always) {
1994 Condition neg_cond = NegateCondition(cond);
1995 BranchShort(&skip, neg_cond, rs, rt);
2003 if (is_trampoline_emitted()) {
2004 if (cond != cc_always) {
2006 Condition neg_cond = NegateCondition(cond);
2007 BranchShort(&skip, neg_cond, rs, rt);
2014 BranchShort(L, cond, rs, rt, bdslot);
2020 void MacroAssembler::Branch(Label* L,
2023 Heap::RootListIndex index,
2024 BranchDelaySlot bdslot) {
2025 LoadRoot(at, index);
2026 Branch(L, cond, rs, Operand(at), bdslot);
2030 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
2033 // Emit a nop in the branch delay slot if required.
2034 if (bdslot == PROTECT)
2039 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
2041 BranchDelaySlot bdslot) {
2042 BRANCH_ARGS_CHECK(cond, rs, rt);
2043 DCHECK(!rs.is(zero_reg));
2044 Register r2 = no_reg;
2045 Register scratch = at;
2048 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or rt.
2050 BlockTrampolinePoolScope block_trampoline_pool(this);
2057 beq(rs, r2, offset);
2060 bne(rs, r2, offset);
2062 // Signed comparison.
2064 if (r2.is(zero_reg)) {
2067 slt(scratch, r2, rs);
2068 bne(scratch, zero_reg, offset);
2072 if (r2.is(zero_reg)) {
2075 slt(scratch, rs, r2);
2076 beq(scratch, zero_reg, offset);
2080 if (r2.is(zero_reg)) {
2083 slt(scratch, rs, r2);
2084 bne(scratch, zero_reg, offset);
2088 if (r2.is(zero_reg)) {
2091 slt(scratch, r2, rs);
2092 beq(scratch, zero_reg, offset);
2095 // Unsigned comparison.
2097 if (r2.is(zero_reg)) {
2098 bne(rs, zero_reg, offset);
2100 sltu(scratch, r2, rs);
2101 bne(scratch, zero_reg, offset);
2104 case Ugreater_equal:
2105 if (r2.is(zero_reg)) {
2108 sltu(scratch, rs, r2);
2109 beq(scratch, zero_reg, offset);
2113 if (r2.is(zero_reg)) {
2114 // No code needs to be emitted.
2117 sltu(scratch, rs, r2);
2118 bne(scratch, zero_reg, offset);
2122 if (r2.is(zero_reg)) {
2123 beq(rs, zero_reg, offset);
2125 sltu(scratch, r2, rs);
2126 beq(scratch, zero_reg, offset);
2133 // Be careful to always use shifted_branch_offset only just before the
2134 // branch instruction, as the location will be remembered for patching the
2136 BlockTrampolinePoolScope block_trampoline_pool(this);
2142 if (rt.imm32_ == 0) {
2143 beq(rs, zero_reg, offset);
2145 // We don't want any other register but scratch clobbered.
2146 DCHECK(!scratch.is(rs));
2149 beq(rs, r2, offset);
2153 if (rt.imm32_ == 0) {
2154 bne(rs, zero_reg, offset);
2156 // We don't want any other register but scratch clobbered.
2157 DCHECK(!scratch.is(rs));
2160 bne(rs, r2, offset);
2163 // Signed comparison.
2165 if (rt.imm32_ == 0) {
2170 slt(scratch, r2, rs);
2171 bne(scratch, zero_reg, offset);
2175 if (rt.imm32_ == 0) {
2177 } else if (is_int16(rt.imm32_)) {
2178 slti(scratch, rs, rt.imm32_);
2179 beq(scratch, zero_reg, offset);
2183 slt(scratch, rs, r2);
2184 beq(scratch, zero_reg, offset);
2188 if (rt.imm32_ == 0) {
2190 } else if (is_int16(rt.imm32_)) {
2191 slti(scratch, rs, rt.imm32_);
2192 bne(scratch, zero_reg, offset);
2196 slt(scratch, rs, r2);
2197 bne(scratch, zero_reg, offset);
2201 if (rt.imm32_ == 0) {
2206 slt(scratch, r2, rs);
2207 beq(scratch, zero_reg, offset);
2210 // Unsigned comparison.
2212 if (rt.imm32_ == 0) {
2213 bne(rs, zero_reg, offset);
2217 sltu(scratch, r2, rs);
2218 bne(scratch, zero_reg, offset);
2221 case Ugreater_equal:
2222 if (rt.imm32_ == 0) {
2224 } else if (is_int16(rt.imm32_)) {
2225 sltiu(scratch, rs, rt.imm32_);
2226 beq(scratch, zero_reg, offset);
2230 sltu(scratch, rs, r2);
2231 beq(scratch, zero_reg, offset);
2235 if (rt.imm32_ == 0) {
2236 // No code needs to be emitted.
2238 } else if (is_int16(rt.imm32_)) {
2239 sltiu(scratch, rs, rt.imm32_);
2240 bne(scratch, zero_reg, offset);
2244 sltu(scratch, rs, r2);
2245 bne(scratch, zero_reg, offset);
2249 if (rt.imm32_ == 0) {
2250 beq(rs, zero_reg, offset);
2254 sltu(scratch, r2, rs);
2255 beq(scratch, zero_reg, offset);
2262 // Emit a nop in the branch delay slot if required.
2263 if (bdslot == PROTECT)
2268 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2269 // We use branch_offset as an argument for the branch instructions to be sure
2270 // it is called just before generating the branch instruction, as needed.
2272 b(shifted_branch_offset(L, false));
2274 // Emit a nop in the branch delay slot if required.
2275 if (bdslot == PROTECT)
2280 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
2282 BranchDelaySlot bdslot) {
2283 BRANCH_ARGS_CHECK(cond, rs, rt);
2286 Register r2 = no_reg;
2287 Register scratch = at;
2289 BlockTrampolinePoolScope block_trampoline_pool(this);
2291 // Be careful to always use shifted_branch_offset only just before the
2292 // branch instruction, as the location will be remembered for patching the
2296 offset = shifted_branch_offset(L, false);
2300 offset = shifted_branch_offset(L, false);
2301 beq(rs, r2, offset);
2304 offset = shifted_branch_offset(L, false);
2305 bne(rs, r2, offset);
2307 // Signed comparison.
2309 if (r2.is(zero_reg)) {
2310 offset = shifted_branch_offset(L, false);
2313 slt(scratch, r2, rs);
2314 offset = shifted_branch_offset(L, false);
2315 bne(scratch, zero_reg, offset);
2319 if (r2.is(zero_reg)) {
2320 offset = shifted_branch_offset(L, false);
2323 slt(scratch, rs, r2);
2324 offset = shifted_branch_offset(L, false);
2325 beq(scratch, zero_reg, offset);
2329 if (r2.is(zero_reg)) {
2330 offset = shifted_branch_offset(L, false);
2333 slt(scratch, rs, r2);
2334 offset = shifted_branch_offset(L, false);
2335 bne(scratch, zero_reg, offset);
2339 if (r2.is(zero_reg)) {
2340 offset = shifted_branch_offset(L, false);
2343 slt(scratch, r2, rs);
2344 offset = shifted_branch_offset(L, false);
2345 beq(scratch, zero_reg, offset);
2348 // Unsigned comparison.
2350 if (r2.is(zero_reg)) {
2351 offset = shifted_branch_offset(L, false);
2352 bne(rs, zero_reg, offset);
2354 sltu(scratch, r2, rs);
2355 offset = shifted_branch_offset(L, false);
2356 bne(scratch, zero_reg, offset);
2359 case Ugreater_equal:
2360 if (r2.is(zero_reg)) {
2361 offset = shifted_branch_offset(L, false);
2364 sltu(scratch, rs, r2);
2365 offset = shifted_branch_offset(L, false);
2366 beq(scratch, zero_reg, offset);
2370 if (r2.is(zero_reg)) {
2371 // No code needs to be emitted.
2374 sltu(scratch, rs, r2);
2375 offset = shifted_branch_offset(L, false);
2376 bne(scratch, zero_reg, offset);
2380 if (r2.is(zero_reg)) {
2381 offset = shifted_branch_offset(L, false);
2382 beq(rs, zero_reg, offset);
2384 sltu(scratch, r2, rs);
2385 offset = shifted_branch_offset(L, false);
2386 beq(scratch, zero_reg, offset);
2393 // Be careful to always use shifted_branch_offset only just before the
2394 // branch instruction, as the location will be remembered for patching the
2396 BlockTrampolinePoolScope block_trampoline_pool(this);
2399 offset = shifted_branch_offset(L, false);
2403 if (rt.imm32_ == 0) {
2404 offset = shifted_branch_offset(L, false);
2405 beq(rs, zero_reg, offset);
2407 DCHECK(!scratch.is(rs));
2410 offset = shifted_branch_offset(L, false);
2411 beq(rs, r2, offset);
2415 if (rt.imm32_ == 0) {
2416 offset = shifted_branch_offset(L, false);
2417 bne(rs, zero_reg, offset);
2419 DCHECK(!scratch.is(rs));
2422 offset = shifted_branch_offset(L, false);
2423 bne(rs, r2, offset);
2426 // Signed comparison.
2428 if (rt.imm32_ == 0) {
2429 offset = shifted_branch_offset(L, false);
2432 DCHECK(!scratch.is(rs));
2435 slt(scratch, r2, rs);
2436 offset = shifted_branch_offset(L, false);
2437 bne(scratch, zero_reg, offset);
2441 if (rt.imm32_ == 0) {
2442 offset = shifted_branch_offset(L, false);
2444 } else if (is_int16(rt.imm32_)) {
2445 slti(scratch, rs, rt.imm32_);
2446 offset = shifted_branch_offset(L, false);
2447 beq(scratch, zero_reg, offset);
2449 DCHECK(!scratch.is(rs));
2452 slt(scratch, rs, r2);
2453 offset = shifted_branch_offset(L, false);
2454 beq(scratch, zero_reg, offset);
2458 if (rt.imm32_ == 0) {
2459 offset = shifted_branch_offset(L, false);
2461 } else if (is_int16(rt.imm32_)) {
2462 slti(scratch, rs, rt.imm32_);
2463 offset = shifted_branch_offset(L, false);
2464 bne(scratch, zero_reg, offset);
2466 DCHECK(!scratch.is(rs));
2469 slt(scratch, rs, r2);
2470 offset = shifted_branch_offset(L, false);
2471 bne(scratch, zero_reg, offset);
2475 if (rt.imm32_ == 0) {
2476 offset = shifted_branch_offset(L, false);
2479 DCHECK(!scratch.is(rs));
2482 slt(scratch, r2, rs);
2483 offset = shifted_branch_offset(L, false);
2484 beq(scratch, zero_reg, offset);
2487 // Unsigned comparison.
2489 if (rt.imm32_ == 0) {
2490 offset = shifted_branch_offset(L, false);
2491 bne(rs, zero_reg, offset);
2493 DCHECK(!scratch.is(rs));
2496 sltu(scratch, r2, rs);
2497 offset = shifted_branch_offset(L, false);
2498 bne(scratch, zero_reg, offset);
2501 case Ugreater_equal:
2502 if (rt.imm32_ == 0) {
2503 offset = shifted_branch_offset(L, false);
2505 } else if (is_int16(rt.imm32_)) {
2506 sltiu(scratch, rs, rt.imm32_);
2507 offset = shifted_branch_offset(L, false);
2508 beq(scratch, zero_reg, offset);
2510 DCHECK(!scratch.is(rs));
2513 sltu(scratch, rs, r2);
2514 offset = shifted_branch_offset(L, false);
2515 beq(scratch, zero_reg, offset);
2519 if (rt.imm32_ == 0) {
2520 // No code needs to be emitted.
2522 } else if (is_int16(rt.imm32_)) {
2523 sltiu(scratch, rs, rt.imm32_);
2524 offset = shifted_branch_offset(L, false);
2525 bne(scratch, zero_reg, offset);
2527 DCHECK(!scratch.is(rs));
2530 sltu(scratch, rs, r2);
2531 offset = shifted_branch_offset(L, false);
2532 bne(scratch, zero_reg, offset);
2536 if (rt.imm32_ == 0) {
2537 offset = shifted_branch_offset(L, false);
2538 beq(rs, zero_reg, offset);
2540 DCHECK(!scratch.is(rs));
2543 sltu(scratch, r2, rs);
2544 offset = shifted_branch_offset(L, false);
2545 beq(scratch, zero_reg, offset);
2552 // Check that offset could actually hold in an int16_t.
2553 DCHECK(is_int16(offset));
2554 // Emit a nop in the branch delay slot if required.
2555 if (bdslot == PROTECT)
2560 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2561 BranchAndLinkShort(offset, bdslot);
2565 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2567 BranchDelaySlot bdslot) {
2568 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2572 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2573 if (L->is_bound()) {
2575 BranchAndLinkShort(L, bdslot);
2580 if (is_trampoline_emitted()) {
2583 BranchAndLinkShort(L, bdslot);
2589 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2591 BranchDelaySlot bdslot) {
2592 if (L->is_bound()) {
2594 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2597 Condition neg_cond = NegateCondition(cond);
2598 BranchShort(&skip, neg_cond, rs, rt);
2603 if (is_trampoline_emitted()) {
2605 Condition neg_cond = NegateCondition(cond);
2606 BranchShort(&skip, neg_cond, rs, rt);
2610 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2616 // We need to use a bgezal or bltzal, but they can't be used directly with the
2617 // slt instructions. We could use sub or add instead but we would miss overflow
2618 // cases, so we keep slt and add an intermediate third instruction.
2619 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2620 BranchDelaySlot bdslot) {
2623 // Emit a nop in the branch delay slot if required.
2624 if (bdslot == PROTECT)
2629 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2630 Register rs, const Operand& rt,
2631 BranchDelaySlot bdslot) {
2632 BRANCH_ARGS_CHECK(cond, rs, rt);
2633 Register r2 = no_reg;
2634 Register scratch = at;
2638 } else if (cond != cc_always) {
2643 if (!IsMipsArchVariant(kMips32r6)) {
2644 BlockTrampolinePoolScope block_trampoline_pool(this);
2660 // Signed comparison.
2662 slt(scratch, r2, rs);
2663 addiu(scratch, scratch, -1);
2664 bgezal(scratch, offset);
2667 slt(scratch, rs, r2);
2668 addiu(scratch, scratch, -1);
2669 bltzal(scratch, offset);
2672 slt(scratch, rs, r2);
2673 addiu(scratch, scratch, -1);
2674 bgezal(scratch, offset);
2677 slt(scratch, r2, rs);
2678 addiu(scratch, scratch, -1);
2679 bltzal(scratch, offset);
2682 // Unsigned comparison.
2684 sltu(scratch, r2, rs);
2685 addiu(scratch, scratch, -1);
2686 bgezal(scratch, offset);
2688 case Ugreater_equal:
2689 sltu(scratch, rs, r2);
2690 addiu(scratch, scratch, -1);
2691 bltzal(scratch, offset);
2694 sltu(scratch, rs, r2);
2695 addiu(scratch, scratch, -1);
2696 bgezal(scratch, offset);
2699 sltu(scratch, r2, rs);
2700 addiu(scratch, scratch, -1);
2701 bltzal(scratch, offset);
2708 BlockTrampolinePoolScope block_trampoline_pool(this);
2724 // Signed comparison.
2727 slt(scratch, r2, rs);
2728 beq(scratch, zero_reg, 2);
2734 slt(scratch, rs, r2);
2735 bne(scratch, zero_reg, 2);
2741 slt(scratch, rs, r2);
2742 bne(scratch, zero_reg, 2);
2748 slt(scratch, r2, rs);
2749 bne(scratch, zero_reg, 2);
2755 // Unsigned comparison.
2758 sltu(scratch, r2, rs);
2759 beq(scratch, zero_reg, 2);
2763 case Ugreater_equal:
2765 sltu(scratch, rs, r2);
2766 bne(scratch, zero_reg, 2);
2772 sltu(scratch, rs, r2);
2773 bne(scratch, zero_reg, 2);
2779 sltu(scratch, r2, rs);
2780 bne(scratch, zero_reg, 2);
2789 // Emit a nop in the branch delay slot if required.
2790 if (bdslot == PROTECT)
2795 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2796 bal(shifted_branch_offset(L, false));
2798 // Emit a nop in the branch delay slot if required.
2799 if (bdslot == PROTECT)
2804 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2806 BranchDelaySlot bdslot) {
2807 BRANCH_ARGS_CHECK(cond, rs, rt);
2810 Register r2 = no_reg;
2811 Register scratch = at;
2814 } else if (cond != cc_always) {
2819 if (!IsMipsArchVariant(kMips32r6)) {
2820 BlockTrampolinePoolScope block_trampoline_pool(this);
2823 offset = shifted_branch_offset(L, false);
2829 offset = shifted_branch_offset(L, false);
2835 offset = shifted_branch_offset(L, false);
2839 // Signed comparison.
2841 slt(scratch, r2, rs);
2842 addiu(scratch, scratch, -1);
2843 offset = shifted_branch_offset(L, false);
2844 bgezal(scratch, offset);
2847 slt(scratch, rs, r2);
2848 addiu(scratch, scratch, -1);
2849 offset = shifted_branch_offset(L, false);
2850 bltzal(scratch, offset);
2853 slt(scratch, rs, r2);
2854 addiu(scratch, scratch, -1);
2855 offset = shifted_branch_offset(L, false);
2856 bgezal(scratch, offset);
2859 slt(scratch, r2, rs);
2860 addiu(scratch, scratch, -1);
2861 offset = shifted_branch_offset(L, false);
2862 bltzal(scratch, offset);
2865 // Unsigned comparison.
2867 sltu(scratch, r2, rs);
2868 addiu(scratch, scratch, -1);
2869 offset = shifted_branch_offset(L, false);
2870 bgezal(scratch, offset);
2872 case Ugreater_equal:
2873 sltu(scratch, rs, r2);
2874 addiu(scratch, scratch, -1);
2875 offset = shifted_branch_offset(L, false);
2876 bltzal(scratch, offset);
2879 sltu(scratch, rs, r2);
2880 addiu(scratch, scratch, -1);
2881 offset = shifted_branch_offset(L, false);
2882 bgezal(scratch, offset);
2885 sltu(scratch, r2, rs);
2886 addiu(scratch, scratch, -1);
2887 offset = shifted_branch_offset(L, false);
2888 bltzal(scratch, offset);
2895 BlockTrampolinePoolScope block_trampoline_pool(this);
2898 offset = shifted_branch_offset(L, false);
2904 offset = shifted_branch_offset(L, false);
2910 offset = shifted_branch_offset(L, false);
2914 // Signed comparison.
2917 slt(scratch, r2, rs);
2918 beq(scratch, zero_reg, 2);
2920 offset = shifted_branch_offset(L, false);
2925 slt(scratch, rs, r2);
2926 bne(scratch, zero_reg, 2);
2928 offset = shifted_branch_offset(L, false);
2933 slt(scratch, rs, r2);
2934 bne(scratch, zero_reg, 2);
2936 offset = shifted_branch_offset(L, false);
2941 slt(scratch, r2, rs);
2942 bne(scratch, zero_reg, 2);
2944 offset = shifted_branch_offset(L, false);
2949 // Unsigned comparison.
2952 sltu(scratch, r2, rs);
2953 beq(scratch, zero_reg, 2);
2955 offset = shifted_branch_offset(L, false);
2958 case Ugreater_equal:
2960 sltu(scratch, rs, r2);
2961 bne(scratch, zero_reg, 2);
2963 offset = shifted_branch_offset(L, false);
2968 sltu(scratch, rs, r2);
2969 bne(scratch, zero_reg, 2);
2971 offset = shifted_branch_offset(L, false);
2976 sltu(scratch, r2, rs);
2977 bne(scratch, zero_reg, 2);
2979 offset = shifted_branch_offset(L, false);
2988 // Check that the offset actually fits in an int16_t.
2989 DCHECK(is_int16(offset));
2991 // Emit a nop in the branch delay slot if required.
2992 if (bdslot == PROTECT)
2997 void MacroAssembler::Jump(Register target,
3001 BranchDelaySlot bd) {
3002 BlockTrampolinePoolScope block_trampoline_pool(this);
3003 if (cond == cc_always) {
3006 BRANCH_ARGS_CHECK(cond, rs, rt);
3007 Branch(2, NegateCondition(cond), rs, rt);
3010 // Emit a nop in the branch delay slot if required.
3016 void MacroAssembler::Jump(intptr_t target,
3017 RelocInfo::Mode rmode,
3021 BranchDelaySlot bd) {
3023 if (cond != cc_always) {
3024 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3026 // The first instruction of 'li' may be placed in the delay slot.
3027 // This is not an issue; t9 is expected to be clobbered anyway.
3028 li(t9, Operand(target, rmode));
3029 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
3034 void MacroAssembler::Jump(Address target,
3035 RelocInfo::Mode rmode,
3039 BranchDelaySlot bd) {
3040 DCHECK(!RelocInfo::IsCodeTarget(rmode));
3041 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3045 void MacroAssembler::Jump(Handle<Code> code,
3046 RelocInfo::Mode rmode,
3050 BranchDelaySlot bd) {
3051 DCHECK(RelocInfo::IsCodeTarget(rmode));
3052 AllowDeferredHandleDereference embedding_raw_address;
3053 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
3057 int MacroAssembler::CallSize(Register target,
3061 BranchDelaySlot bd) {
3064 if (cond == cc_always) {
3073 return size * kInstrSize;
3077 // Note: To call gcc-compiled C code on MIPS, you must call through t9.
3078 void MacroAssembler::Call(Register target,
3082 BranchDelaySlot bd) {
3083 BlockTrampolinePoolScope block_trampoline_pool(this);
3086 if (cond == cc_always) {
3089 BRANCH_ARGS_CHECK(cond, rs, rt);
3090 Branch(2, NegateCondition(cond), rs, rt);
3093 // Emit a nop in the branch delay slot if required.
3097 DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
3098 SizeOfCodeGeneratedSince(&start));
3102 int MacroAssembler::CallSize(Address target,
3103 RelocInfo::Mode rmode,
3107 BranchDelaySlot bd) {
3108 int size = CallSize(t9, cond, rs, rt, bd);
3109 return size + 2 * kInstrSize;
3113 void MacroAssembler::Call(Address target,
3114 RelocInfo::Mode rmode,
3118 BranchDelaySlot bd) {
3119 BlockTrampolinePoolScope block_trampoline_pool(this);
3122 int32_t target_int = reinterpret_cast<int32_t>(target);
3123 // Must record previous source positions before the
3124 // li() generates a new code target.
3125 positions_recorder()->WriteRecordedPositions();
3126 li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
3127 Call(t9, cond, rs, rt, bd);
3128 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3129 SizeOfCodeGeneratedSince(&start));
3133 int MacroAssembler::CallSize(Handle<Code> code,
3134 RelocInfo::Mode rmode,
3135 TypeFeedbackId ast_id,
3139 BranchDelaySlot bd) {
3140 AllowDeferredHandleDereference using_raw_address;
3141 return CallSize(reinterpret_cast<Address>(code.location()),
3142 rmode, cond, rs, rt, bd);
3146 void MacroAssembler::Call(Handle<Code> code,
3147 RelocInfo::Mode rmode,
3148 TypeFeedbackId ast_id,
3152 BranchDelaySlot bd) {
3153 BlockTrampolinePoolScope block_trampoline_pool(this);
3156 DCHECK(RelocInfo::IsCodeTarget(rmode));
3157 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3158 SetRecordedAstId(ast_id);
3159 rmode = RelocInfo::CODE_TARGET_WITH_ID;
3161 AllowDeferredHandleDereference embedding_raw_address;
3162 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3163 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3164 SizeOfCodeGeneratedSince(&start));
3168 void MacroAssembler::Ret(Condition cond,
3171 BranchDelaySlot bd) {
3172 Jump(ra, cond, rs, rt, bd);
3176 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
3177 BlockTrampolinePoolScope block_trampoline_pool(this);
3180 imm32 = jump_address(L);
3181 { BlockGrowBufferScope block_buf_growth(this);
3182 // Buffer growth (and relocation) must be blocked for internal references
3183 // until associated instructions are emitted and available to be patched.
3184 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3185 lui(at, (imm32 & kHiMask) >> kLuiShift);
3186 ori(at, at, (imm32 & kImm16Mask));
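// lui/ori is the standard two-instruction sequence for materializing a full
// 32-bit constant: lui sets the upper 16 bits (clearing the lower half) and
// ori fills in the lower 16 bits. For example, for imm32 == 0x12345678:
//
//   lui at, 0x1234      // at = 0x12340000
//   ori at, at, 0x5678  // at = 0x12345678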
3190 // Emit a nop in the branch delay slot if required.
3191 if (bdslot == PROTECT)
3196 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
3197 BlockTrampolinePoolScope block_trampoline_pool(this);
3200 imm32 = jump_address(L);
3201 { BlockGrowBufferScope block_buf_growth(this);
3202 // Buffer growth (and relocation) must be blocked for internal references
3203 // until associated instructions are emitted and available to be patched.
3204 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3205 lui(at, (imm32 & kHiMask) >> kLuiShift);
3206 ori(at, at, (imm32 & kImm16Mask));
3210 // Emit a nop in the branch delay slot if required.
3211 if (bdslot == PROTECT)
3216 void MacroAssembler::DropAndRet(int drop) {
3217 DCHECK(is_int16(drop * kPointerSize));
3218 Ret(USE_DELAY_SLOT);
3219 addiu(sp, sp, drop * kPointerSize);
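// Note on the USE_DELAY_SLOT idiom above: on MIPS the instruction following a
// jump or branch (the delay slot) executes before control transfers, so the
// addiu emitted after Ret(USE_DELAY_SLOT) adjusts sp "for free" in the delay
// slot of the jr ra. Without the delay slot this would roughly be:
//
//   addiu(sp, sp, drop * kPointerSize);
//   Ret();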
3222 void MacroAssembler::DropAndRet(int drop,
3225 const Operand& r2) {
3226 // Both Drop and Ret need to be conditional.
3228 if (cond != cc_always) {
3229 Branch(&skip, NegateCondition(cond), r1, r2);
3235 if (cond != cc_always) {
3241 void MacroAssembler::Drop(int count,
3244 const Operand& op) {
3252 Branch(&skip, NegateCondition(cond), reg, op);
3255 Addu(sp, sp, Operand(count * kPointerSize));
3264 void MacroAssembler::Swap(Register reg1,
3267 if (scratch.is(no_reg)) {
3268 Xor(reg1, reg1, Operand(reg2));
3269 Xor(reg2, reg2, Operand(reg1));
3270 Xor(reg1, reg1, Operand(reg2));
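// With no scratch register available, the three XORs above swap the registers
// in place using the classic identity:
//
//   a ^= b;  b ^= a;  a ^= b;  // a and b end up exchanged
//
// This only works for distinct registers; XOR-swapping a register with itself
// would clear it.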
3279 void MacroAssembler::Call(Label* target) {
3280 BranchAndLink(target);
3284 void MacroAssembler::Push(Handle<Object> handle) {
3285 li(at, Operand(handle));
3290 void MacroAssembler::DebugBreak() {
3291 PrepareCEntryArgs(0);
3292 PrepareCEntryFunction(
3293 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
3294 CEntryStub ces(isolate(), 1);
3295 DCHECK(AllowThisStubCall(&ces));
3296 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
3300 // ---------------------------------------------------------------------------
3301 // Exception handling.
3303 void MacroAssembler::PushStackHandler() {
3304 // Adjust this code if the stack handler layout asserted below changes.
3305 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3306 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3308 // Link the current handler as the next handler.
3309 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3310 lw(t1, MemOperand(t2));
3313 // Set this new handler as the current one.
3314 sw(sp, MemOperand(t2));
3318 void MacroAssembler::PopStackHandler() {
3319 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3321 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
3322 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3323 sw(a1, MemOperand(at));
3327 void MacroAssembler::Allocate(int object_size,
3332 AllocationFlags flags) {
3333 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3334 if (!FLAG_inline_new) {
3335 if (emit_debug_code()) {
3336 // Trash the registers to simulate an allocation failure.
3338 li(scratch1, 0x7191);
3339 li(scratch2, 0x7291);
3345 DCHECK(!result.is(scratch1));
3346 DCHECK(!result.is(scratch2));
3347 DCHECK(!scratch1.is(scratch2));
3348 DCHECK(!scratch1.is(t9));
3349 DCHECK(!scratch2.is(t9));
3350 DCHECK(!result.is(t9));
3352 // Make object size into bytes.
3353 if ((flags & SIZE_IN_WORDS) != 0) {
3354 object_size *= kPointerSize;
3356 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
3358 // Check relative positions of allocation top and limit addresses.
3359 // ARM adds additional checks to make sure the ldm instruction can be
3360 // used. On MIPS we don't have ldm so we don't need additional checks either.
3361 ExternalReference allocation_top =
3362 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3363 ExternalReference allocation_limit =
3364 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3367 reinterpret_cast<intptr_t>(allocation_top.address());
3369 reinterpret_cast<intptr_t>(allocation_limit.address());
3370 DCHECK((limit - top) == kPointerSize);
3372 // Set up allocation top address and object size registers.
3373 Register topaddr = scratch1;
3374 li(topaddr, Operand(allocation_top));
3376 // This code stores a temporary value in t9.
3377 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3378 // Load allocation top into result and allocation limit into t9.
3379 lw(result, MemOperand(topaddr));
3380 lw(t9, MemOperand(topaddr, kPointerSize));
3382 if (emit_debug_code()) {
3383 // Assert that result actually contains top on entry. t9 is used
3384 // immediately below, so this use of t9 does not cause a difference in
3385 // register content between debug and release mode.
3386 lw(t9, MemOperand(topaddr));
3387 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3389 // Load allocation limit into t9. Result already contains allocation top.
3390 lw(t9, MemOperand(topaddr, limit - top));
3393 if ((flags & DOUBLE_ALIGNMENT) != 0) {
3394 // Align the next allocation. Storing the filler map without checking top is
3395 // safe in new-space because the limit of the heap is aligned there.
3396 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
3397 And(scratch2, result, Operand(kDoubleAlignmentMask));
3399 Branch(&aligned, eq, scratch2, Operand(zero_reg));
3400 if ((flags & PRETENURE) != 0) {
3401 Branch(gc_required, Ugreater_equal, result, Operand(t9));
3403 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3404 sw(scratch2, MemOperand(result));
3405 Addu(result, result, Operand(kDoubleSize / 2));
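// If the current top was only pointer aligned (4 bytes) rather than double
// aligned (8 bytes), a one-word filler object is written at the old top and
// the allocation start is bumped by kDoubleSize / 2 (== kPointerSize), so the
// allocated object itself ends up 8-byte aligned.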
3409 // Calculate new top and bail out if new space is exhausted. Use result
3410 // to calculate the new top.
3411 Addu(scratch2, result, Operand(object_size));
3412 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3413 sw(scratch2, MemOperand(topaddr));
3415 // Tag object if requested.
3416 if ((flags & TAG_OBJECT) != 0) {
3417 Addu(result, result, Operand(kHeapObjectTag));
3422 void MacroAssembler::Allocate(Register object_size,
3427 AllocationFlags flags) {
3428 if (!FLAG_inline_new) {
3429 if (emit_debug_code()) {
3430 // Trash the registers to simulate an allocation failure.
3432 li(scratch1, 0x7191);
3433 li(scratch2, 0x7291);
3439 DCHECK(!result.is(scratch1));
3440 DCHECK(!result.is(scratch2));
3441 DCHECK(!scratch1.is(scratch2));
3442 DCHECK(!object_size.is(t9));
3443 DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3445 // Check relative positions of allocation top and limit addresses.
3446 // ARM adds additional checks to make sure the ldm instruction can be
3447 // used. On MIPS we don't have ldm so we don't need additional checks either.
3448 ExternalReference allocation_top =
3449 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3450 ExternalReference allocation_limit =
3451 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3453 reinterpret_cast<intptr_t>(allocation_top.address());
3455 reinterpret_cast<intptr_t>(allocation_limit.address());
3456 DCHECK((limit - top) == kPointerSize);
3458 // Set up allocation top address and object size registers.
3459 Register topaddr = scratch1;
3460 li(topaddr, Operand(allocation_top));
3462 // This code stores a temporary value in t9.
3463 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3464 // Load allocation top into result and allocation limit into t9.
3465 lw(result, MemOperand(topaddr));
3466 lw(t9, MemOperand(topaddr, kPointerSize));
3468 if (emit_debug_code()) {
3469 // Assert that result actually contains top on entry. t9 is used
3470 // immediately below, so this use of t9 does not cause a difference in
3471 // register content between debug and release mode.
3472 lw(t9, MemOperand(topaddr));
3473 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3475 // Load allocation limit into t9. Result already contains allocation top.
3476 lw(t9, MemOperand(topaddr, limit - top));
3479 if ((flags & DOUBLE_ALIGNMENT) != 0) {
3480 // Align the next allocation. Storing the filler map without checking top is
3481 // safe in new-space because the limit of the heap is aligned there.
3482 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
3483 And(scratch2, result, Operand(kDoubleAlignmentMask));
3485 Branch(&aligned, eq, scratch2, Operand(zero_reg));
3486 if ((flags & PRETENURE) != 0) {
3487 Branch(gc_required, Ugreater_equal, result, Operand(t9));
3489 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3490 sw(scratch2, MemOperand(result));
3491 Addu(result, result, Operand(kDoubleSize / 2));
3495 // Calculate new top and bail out if new space is exhausted. Use result
3496 // to calculate the new top. Object size may be in words so a shift is
3497 // required to get the number of bytes.
3498 if ((flags & SIZE_IN_WORDS) != 0) {
3499 sll(scratch2, object_size, kPointerSizeLog2);
3500 Addu(scratch2, result, scratch2);
3502 Addu(scratch2, result, Operand(object_size));
3504 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3506 // Update allocation top. result temporarily holds the new top.
3507 if (emit_debug_code()) {
3508 And(t9, scratch2, Operand(kObjectAlignmentMask));
3509 Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3511 sw(scratch2, MemOperand(topaddr));
3513 // Tag object if requested.
3514 if ((flags & TAG_OBJECT) != 0) {
3515 Addu(result, result, Operand(kHeapObjectTag));
3520 void MacroAssembler::AllocateTwoByteString(Register result,
3525 Label* gc_required) {
3526 // Calculate the number of bytes needed for the characters in the string while
3527 // observing object alignment.
3528 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3529 sll(scratch1, length, 1); // Length in bytes, not chars.
3530 addiu(scratch1, scratch1,
3531 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3532 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
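// The three instructions above compute the allocation size rounded up to the
// object alignment, roughly:
//
//   size = (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
//          & ~kObjectAlignmentMask;
//
// Adding the alignment mask before clearing the low bits is the usual
// round-up trick for power-of-two alignments (e.g. with 8-byte alignment,
// 21 bytes rounds up to 24).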
3534 // Allocate two-byte string in new space.
3542 // Set the map, length and hash field.
3543 InitializeNewString(result,
3545 Heap::kStringMapRootIndex,
3551 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3552 Register scratch1, Register scratch2,
3554 Label* gc_required) {
3555 // Calculate the number of bytes needed for the characters in the string
3556 // while observing object alignment.
3557 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3558 DCHECK(kCharSize == 1);
3559 addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3560 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3562 // Allocate one-byte string in new space.
3570 // Set the map, length and hash field.
3571 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3572 scratch1, scratch2);
3576 void MacroAssembler::AllocateTwoByteConsString(Register result,
3580 Label* gc_required) {
3581 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3583 InitializeNewString(result,
3585 Heap::kConsStringMapRootIndex,
3591 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3594 Label* gc_required) {
3595 Allocate(ConsString::kSize,
3602 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3603 scratch1, scratch2);
3607 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3611 Label* gc_required) {
3612 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3615 InitializeNewString(result,
3617 Heap::kSlicedStringMapRootIndex,
3623 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3627 Label* gc_required) {
3628 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3631 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3632 scratch1, scratch2);
3636 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3637 Label* not_unique_name) {
3638 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3640 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3641 Branch(&succeed, eq, at, Operand(zero_reg));
3642 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3648 // Allocates a heap number or jumps to the label if the young space is full and
3649 // a scavenge is needed.
3650 void MacroAssembler::AllocateHeapNumber(Register result,
3653 Register heap_number_map,
3655 TaggingMode tagging_mode,
3657 // Allocate an object in the heap for the heap number and tag it as a heap object.
3659 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3660 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3662 Heap::RootListIndex map_index = mode == MUTABLE
3663 ? Heap::kMutableHeapNumberMapRootIndex
3664 : Heap::kHeapNumberMapRootIndex;
3665 AssertIsRoot(heap_number_map, map_index);
3667 // Store heap number map in the allocated object.
3668 if (tagging_mode == TAG_RESULT) {
3669 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3671 sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3676 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3680 Label* gc_required) {
3681 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3682 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3683 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3687 // Copies a fixed number of fields of heap objects from src to dst.
3688 void MacroAssembler::CopyFields(Register dst,
3692 DCHECK((temps & dst.bit()) == 0);
3693 DCHECK((temps & src.bit()) == 0);
3694 // Primitive implementation using only one temporary register.
3696 Register tmp = no_reg;
3697 // Find a temp register in temps list.
3698 for (int i = 0; i < kNumRegisters; i++) {
3699 if ((temps & (1 << i)) != 0) {
3704 DCHECK(!tmp.is(no_reg));
3706 for (int i = 0; i < field_count; i++) {
3707 lw(tmp, FieldMemOperand(src, i * kPointerSize));
3708 sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3713 void MacroAssembler::CopyBytes(Register src,
3717 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3719 // Align src before copying in word size chunks.
3720 Branch(&byte_loop, le, length, Operand(kPointerSize));
3721 bind(&align_loop_1);
3722 And(scratch, src, kPointerSize - 1);
3723 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3724 lbu(scratch, MemOperand(src));
3726 sb(scratch, MemOperand(dst));
3728 Subu(length, length, Operand(1));
3729 Branch(&align_loop_1, ne, length, Operand(zero_reg));
3731 // Copy bytes in word size chunks.
3733 if (emit_debug_code()) {
3734 And(scratch, src, kPointerSize - 1);
3735 Assert(eq, kExpectingAlignmentForCopyBytes,
3736 scratch, Operand(zero_reg));
3738 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3739 lw(scratch, MemOperand(src));
3740 Addu(src, src, kPointerSize);
3742 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3743 // Can't use unaligned access; copy byte by byte.
3744 if (kArchEndian == kLittle) {
3745 sb(scratch, MemOperand(dst, 0));
3746 srl(scratch, scratch, 8);
3747 sb(scratch, MemOperand(dst, 1));
3748 srl(scratch, scratch, 8);
3749 sb(scratch, MemOperand(dst, 2));
3750 srl(scratch, scratch, 8);
3751 sb(scratch, MemOperand(dst, 3));
3753 sb(scratch, MemOperand(dst, 3));
3754 srl(scratch, scratch, 8);
3755 sb(scratch, MemOperand(dst, 2));
3756 srl(scratch, scratch, 8);
3757 sb(scratch, MemOperand(dst, 1));
3758 srl(scratch, scratch, 8);
3759 sb(scratch, MemOperand(dst, 0));
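// The word loaded above is stored a byte at a time because dst may not be
// word aligned. Each srl by 8 exposes the next byte of the word in the low
// bits; the store order differs by endianness so the bytes land in the same
// memory order a plain word store would produce. For example, with
// scratch == 0x11223344 on little-endian, dst[0..3] receive
// 0x44, 0x33, 0x22, 0x11.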
3764 Subu(length, length, Operand(kPointerSize));
3767 // Copy the last bytes, if any are left.
3769 Branch(&done, eq, length, Operand(zero_reg));
3771 lbu(scratch, MemOperand(src));
3773 sb(scratch, MemOperand(dst));
3775 Subu(length, length, Operand(1));
3776 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3781 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3782 Register end_offset,
3787 sw(filler, MemOperand(start_offset));
3788 Addu(start_offset, start_offset, kPointerSize);
3790 Branch(&loop, ult, start_offset, Operand(end_offset));
3794 void MacroAssembler::CheckFastElements(Register map,
3797 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3798 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3799 STATIC_ASSERT(FAST_ELEMENTS == 2);
3800 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3801 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3802 Branch(fail, hi, scratch,
3803 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3807 void MacroAssembler::CheckFastObjectElements(Register map,
3810 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3811 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3812 STATIC_ASSERT(FAST_ELEMENTS == 2);
3813 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3814 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3815 Branch(fail, ls, scratch,
3816 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3817 Branch(fail, hi, scratch,
3818 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3822 void MacroAssembler::CheckFastSmiElements(Register map,
3825 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3826 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3827 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3828 Branch(fail, hi, scratch,
3829 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
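// The STATIC_ASSERTs in the three checks above pin the fast elements kinds to
// consecutive small values (FAST_SMI_ELEMENTS == 0 ... FAST_HOLEY_ELEMENTS == 3),
// so each check reduces to one or two unsigned comparisons of the encoded
// bit field 2 byte against a precomputed maximum instead of testing each kind
// individually.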
3833 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3835 Register elements_reg,
3840 int elements_offset) {
3841 Label smi_value, maybe_nan, have_double_value, is_nan, done;
3842 Register mantissa_reg = scratch2;
3843 Register exponent_reg = scratch3;
3845 // Handle smi values specially.
3846 JumpIfSmi(value_reg, &smi_value);
3848 // Ensure that the object is a heap number
3851 Heap::kHeapNumberMapRootIndex,
3855 // Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000 in the exponent word.
3857 li(scratch1, Operand(kHoleNanUpper32 & HeapNumber::kExponentMask));
3858 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3859 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
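// exponent_reg holds the upper word of the IEEE-754 double (sign, exponent
// and the high bits of the mantissa). Doubles whose exponent field is all
// ones are NaNs or infinities, so anything signed-greater-or-equal to the
// exponent mask (0x7ff00000) is routed to maybe_nan, where the mantissa
// distinguishes Infinity (mantissa == 0) from NaN.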
3861 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3863 bind(&have_double_value);
3864 sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3865 Addu(scratch1, scratch1, elements_reg);
3867 FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3868 + kHoleNanLower32Offset));
3870 FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3871 + kHoleNanUpper32Offset));
3875 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3876 // it's an Infinity, and the non-NaN code path applies.
3877 Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3878 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3879 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3881 // Load canonical NaN for storing into the double array.
3882 LoadRoot(at, Heap::kNanValueRootIndex);
3883 lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
3884 lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
3885 jmp(&have_double_value);
3888 Addu(scratch1, elements_reg,
3889 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3891 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3892 Addu(scratch1, scratch1, scratch2);
3893 // scratch1 is now the effective address of the double element.
3895 Register untagged_value = elements_reg;
3896 SmiUntag(untagged_value, value_reg);
3897 mtc1(untagged_value, f2);
3899 sdc1(f0, MemOperand(scratch1, 0));
3904 void MacroAssembler::CompareMapAndBranch(Register obj,
3907 Label* early_success,
3910 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3911 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3915 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3917 Label* early_success,
3920 Branch(branch_to, cond, obj_map, Operand(map));
3924 void MacroAssembler::CheckMap(Register obj,
3928 SmiCheckType smi_check_type) {
3929 if (smi_check_type == DO_SMI_CHECK) {
3930 JumpIfSmi(obj, fail);
3933 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3938 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3939 Register scratch2, Handle<WeakCell> cell,
3940 Handle<Code> success,
3941 SmiCheckType smi_check_type) {
3943 if (smi_check_type == DO_SMI_CHECK) {
3944 JumpIfSmi(obj, &fail);
3946 lw(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3947 GetWeakValue(scratch2, cell);
3948 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
3953 void MacroAssembler::CheckMap(Register obj,
3955 Heap::RootListIndex index,
3957 SmiCheckType smi_check_type) {
3958 if (smi_check_type == DO_SMI_CHECK) {
3959 JumpIfSmi(obj, fail);
3961 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3962 LoadRoot(at, index);
3963 Branch(fail, ne, scratch, Operand(at));
3967 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3968 li(value, Operand(cell));
3969 lw(value, FieldMemOperand(value, WeakCell::kValueOffset));
3973 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3975 GetWeakValue(value, cell);
3976 JumpIfSmi(value, miss);
3980 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
3981 if (IsMipsSoftFloatABI) {
3982 if (kArchEndian == kLittle) {
3988 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3993 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
3994 if (IsMipsSoftFloatABI) {
3995 if (kArchEndian == kLittle) {
4001 Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
4006 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4007 if (!IsMipsSoftFloatABI) {
4010 if (kArchEndian == kLittle) {
4019 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4020 if (!IsMipsSoftFloatABI) {
4023 if (kArchEndian == kLittle) {
4032 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4033 DoubleRegister src2) {
4034 if (!IsMipsSoftFloatABI) {
4036 DCHECK(!src1.is(f14));
4044 if (kArchEndian == kLittle) {
4055 // -----------------------------------------------------------------------------
4056 // JavaScript invokes.
4058 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4059 const ParameterCount& actual,
4060 Handle<Code> code_constant,
4063 bool* definitely_mismatches,
4065 const CallWrapper& call_wrapper) {
4066 bool definitely_matches = false;
4067 *definitely_mismatches = false;
4068 Label regular_invoke;
4070 // Check whether the expected and actual argument counts match. If not,
4071 // set up registers according to the contract with ArgumentsAdaptorTrampoline:
4072 // a0: actual arguments count
4073 // a1: function (passed through to callee)
4074 // a2: expected arguments count
4076 // The code below is made a lot easier because the calling code already sets
4077 // up actual and expected registers according to the contract if values are
4078 // passed in registers.
4079 DCHECK(actual.is_immediate() || actual.reg().is(a0));
4080 DCHECK(expected.is_immediate() || expected.reg().is(a2));
4081 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
4083 if (expected.is_immediate()) {
4084 DCHECK(actual.is_immediate());
4085 if (expected.immediate() == actual.immediate()) {
4086 definitely_matches = true;
4088 li(a0, Operand(actual.immediate()));
4089 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4090 if (expected.immediate() == sentinel) {
4091 // Don't worry about adapting arguments for builtins that
4092 // don't want that done. Skip adaptation code by making it look
4093 // like we have a match between expected and actual number of arguments.
4095 definitely_matches = true;
4097 *definitely_mismatches = true;
4098 li(a2, Operand(expected.immediate()));
4101 } else if (actual.is_immediate()) {
4102 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.immediate()));
4103 li(a0, Operand(actual.immediate()));
4105 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.reg()));
4108 if (!definitely_matches) {
4109 if (!code_constant.is_null()) {
4110 li(a3, Operand(code_constant));
4111 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
4114 Handle<Code> adaptor =
4115 isolate()->builtins()->ArgumentsAdaptorTrampoline();
4116 if (flag == CALL_FUNCTION) {
4117 call_wrapper.BeforeCall(CallSize(adaptor));
4119 call_wrapper.AfterCall();
4120 if (!*definitely_mismatches) {
4124 Jump(adaptor, RelocInfo::CODE_TARGET);
4126 bind(®ular_invoke);
4131 void MacroAssembler::InvokeCode(Register code,
4132 const ParameterCount& expected,
4133 const ParameterCount& actual,
4135 const CallWrapper& call_wrapper) {
4136 // You can't call a function without a valid frame.
4137 DCHECK(flag == JUMP_FUNCTION || has_frame());
4141 bool definitely_mismatches = false;
4142 InvokePrologue(expected, actual, Handle<Code>::null(), code,
4143 &done, &definitely_mismatches, flag,
4145 if (!definitely_mismatches) {
4146 if (flag == CALL_FUNCTION) {
4147 call_wrapper.BeforeCall(CallSize(code));
4149 call_wrapper.AfterCall();
4151 DCHECK(flag == JUMP_FUNCTION);
4154 // Continue here if InvokePrologue does handle the invocation due to
4155 // mismatched parameter counts.
4161 void MacroAssembler::InvokeFunction(Register function,
4162 const ParameterCount& actual,
4164 const CallWrapper& call_wrapper) {
4165 // You can't call a function without a valid frame.
4166 DCHECK(flag == JUMP_FUNCTION || has_frame());
4168 // Contract with called JS functions requires that function is passed in a1.
4169 DCHECK(function.is(a1));
4170 Register expected_reg = a2;
4171 Register code_reg = a3;
4173 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4174 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4176 FieldMemOperand(code_reg,
4177 SharedFunctionInfo::kFormalParameterCountOffset));
4178 sra(expected_reg, expected_reg, kSmiTagSize);
4179 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4181 ParameterCount expected(expected_reg);
4182 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
4186 void MacroAssembler::InvokeFunction(Register function,
4187 const ParameterCount& expected,
4188 const ParameterCount& actual,
4190 const CallWrapper& call_wrapper) {
4191 // You can't call a function without a valid frame.
4192 DCHECK(flag == JUMP_FUNCTION || has_frame());
4194 // Contract with called JS functions requires that function is passed in a1.
4195 DCHECK(function.is(a1));
4197 // Get the function and set up the context.
4198 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4200 // We call indirectly through the code field in the function to
4201 // allow recompilation to take effect without changing any of the call sites.
4203 lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4204 InvokeCode(a3, expected, actual, flag, call_wrapper);
4208 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4209 const ParameterCount& expected,
4210 const ParameterCount& actual,
4212 const CallWrapper& call_wrapper) {
4214 InvokeFunction(a1, expected, actual, flag, call_wrapper);
4218 void MacroAssembler::IsObjectJSStringType(Register object,
4221 DCHECK(kNotStringTag != 0);
4223 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4224 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4225 And(scratch, scratch, Operand(kIsNotStringMask));
4226 Branch(fail, ne, scratch, Operand(zero_reg));
4230 void MacroAssembler::IsObjectNameType(Register object,
4233 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4234 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4235 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4239 // ---------------------------------------------------------------------------
4240 // Support functions.
4243 void MacroAssembler::GetMapConstructor(Register result, Register map,
4244 Register temp, Register temp2) {
4246 lw(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
4248 JumpIfSmi(result, &done);
4249 GetObjectType(result, temp, temp2);
4250 Branch(&done, ne, temp2, Operand(MAP_TYPE));
4251 lw(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
4257 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
4258 Register scratch, Label* miss) {
4259 // Get the prototype or initial map from the function.
4261 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4263 // If the prototype or initial map is the hole, don't return it and
4264 // simply miss the cache instead. This will allow us to allocate a
4265 // prototype object on-demand in the runtime system.
4266 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4267 Branch(miss, eq, result, Operand(t8));
4269 // If the function does not have an initial map, we're done.
4271 GetObjectType(result, scratch, scratch);
4272 Branch(&done, ne, scratch, Operand(MAP_TYPE));
4274 // Get the prototype from the initial map.
4275 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
4282 void MacroAssembler::GetObjectType(Register object,
4284 Register type_reg) {
4285 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
4286 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4290 // -----------------------------------------------------------------------------
4293 void MacroAssembler::CallStub(CodeStub* stub,
4294 TypeFeedbackId ast_id,
4298 BranchDelaySlot bd) {
4299 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
4300 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4305 void MacroAssembler::TailCallStub(CodeStub* stub,
4309 BranchDelaySlot bd) {
4310 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4314 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4315 return has_frame_ || !stub->SometimesSetsUpAFrame();
4319 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4320 // If the hash field contains an array index, pick it out. The assert checks
4321 // that the constants for the maximum number of digits for an array index
4322 // cached in the hash field and the number of bits reserved for it do not conflict.
4324 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4325 (1 << String::kArrayIndexValueBits));
4326 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4330 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4334 Register heap_number_map,
4336 ObjectToDoubleFlags flags) {
4338 if ((flags & OBJECT_NOT_SMI) == 0) {
4340 JumpIfNotSmi(object, ¬_smi);
4341 // Remove smi tag and convert to double.
4342 sra(scratch1, object, kSmiTagSize);
4343 mtc1(scratch1, result);
4344 cvt_d_w(result, result);
4348 // Check for heap number and load double value from it.
4349 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4350 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4352 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4353 // If the exponent is all ones, the number is either a NaN or +/-Infinity.
4354 Register exponent = scratch1;
4355 Register mask_reg = scratch2;
4356 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4357 li(mask_reg, HeapNumber::kExponentMask);
4359 And(exponent, exponent, mask_reg);
4360 Branch(not_number, eq, exponent, Operand(mask_reg));
4362 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4367 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4369 Register scratch1) {
4370 sra(scratch1, smi, kSmiTagSize);
4371 mtc1(scratch1, value);
4372 cvt_d_w(value, value);
4376 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4377 const Operand& right,
4378 Register overflow_dst,
4380 if (right.is_reg()) {
4381 AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4384 mov(scratch, left); // Preserve left.
4385 Addu(dst, left, right.immediate()); // Left is overwritten.
4386 xor_(scratch, dst, scratch); // Original left.
4387 // Load right since xori takes uint16 as immediate.
4388 Addu(t9, zero_reg, right);
4389 xor_(overflow_dst, dst, t9);
4390 and_(overflow_dst, overflow_dst, scratch);
4392 Addu(dst, left, right.immediate());
4393 xor_(overflow_dst, dst, left);
4394 // Load right since xori takes uint16 as immediate.
4395 Addu(t9, zero_reg, right);
4396 xor_(scratch, dst, t9);
4397 and_(overflow_dst, scratch, overflow_dst);
4403 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4405 Register overflow_dst,
4407 DCHECK(!dst.is(overflow_dst));
4408 DCHECK(!dst.is(scratch));
4409 DCHECK(!overflow_dst.is(scratch));
4410 DCHECK(!overflow_dst.is(left));
4411 DCHECK(!overflow_dst.is(right));
4413 if (left.is(right) && dst.is(left)) {
4414 DCHECK(!dst.is(t9));
4415 DCHECK(!scratch.is(t9));
4416 DCHECK(!left.is(t9));
4417 DCHECK(!right.is(t9));
4418 DCHECK(!overflow_dst.is(t9));
4424 mov(scratch, left); // Preserve left.
4425 addu(dst, left, right); // Left is overwritten.
4426 xor_(scratch, dst, scratch); // Original left.
4427 xor_(overflow_dst, dst, right);
4428 and_(overflow_dst, overflow_dst, scratch);
4429 } else if (dst.is(right)) {
4430 mov(scratch, right); // Preserve right.
4431 addu(dst, left, right); // Right is overwritten.
4432 xor_(scratch, dst, scratch); // Original right.
4433 xor_(overflow_dst, dst, left);
4434 and_(overflow_dst, overflow_dst, scratch);
4436 addu(dst, left, right);
4437 xor_(overflow_dst, dst, left);
4438 xor_(scratch, dst, right);
4439 and_(overflow_dst, scratch, overflow_dst);
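// The xor/and sequences above implement the standard two's-complement test
// for signed addition overflow. In C terms, roughly:
//
//   int32_t sum = left + right;  // wraps with two's-complement arithmetic
//   bool overflow = ((sum ^ left) & (sum ^ right)) < 0;
//
// i.e. overflow occurred iff both operands have the same sign and the result
// has the opposite sign. Callers branch on the sign bit of overflow_dst.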
4444 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4445 const Operand& right,
4446 Register overflow_dst,
4448 if (right.is_reg()) {
4449 SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4452 mov(scratch, left); // Preserve left.
4453 Subu(dst, left, right); // Left is overwritten.
4454 xor_(overflow_dst, dst, scratch); // scratch is original left.
4455 // Load right since xori takes uint16 as immediate.
4456 Addu(t9, zero_reg, right);
4457 xor_(scratch, scratch, t9); // scratch is original left.
4458 and_(overflow_dst, scratch, overflow_dst);
4460 Subu(dst, left, right);
4461 xor_(overflow_dst, dst, left);
4462 // Load right since xori takes uint16 as immediate.
4463 Addu(t9, zero_reg, right);
4464 xor_(scratch, left, t9);
4465 and_(overflow_dst, scratch, overflow_dst);
4471 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4473 Register overflow_dst,
4475 DCHECK(!dst.is(overflow_dst));
4476 DCHECK(!dst.is(scratch));
4477 DCHECK(!overflow_dst.is(scratch));
4478 DCHECK(!overflow_dst.is(left));
4479 DCHECK(!overflow_dst.is(right));
4480 DCHECK(!scratch.is(left));
4481 DCHECK(!scratch.is(right));
4483 // This happens with some Crankshaft code. Since Subu works fine if
4484 // left == right, let's not make that restriction here.
4485 if (left.is(right)) {
4487 mov(overflow_dst, zero_reg);
4492 mov(scratch, left); // Preserve left.
4493 subu(dst, left, right); // Left is overwritten.
4494 xor_(overflow_dst, dst, scratch); // scratch is original left.
4495 xor_(scratch, scratch, right); // scratch is original left.
4496 and_(overflow_dst, scratch, overflow_dst);
4497 } else if (dst.is(right)) {
4498 mov(scratch, right); // Preserve right.
4499 subu(dst, left, right); // Right is overwritten.
4500 xor_(overflow_dst, dst, left);
4501 xor_(scratch, left, scratch); // Original right.
4502 and_(overflow_dst, scratch, overflow_dst);
4504 subu(dst, left, right);
4505 xor_(overflow_dst, dst, left);
4506 xor_(scratch, left, right);
4507 and_(overflow_dst, scratch, overflow_dst);
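// The corresponding test for signed subtraction overflow is, roughly:
//
//   int32_t diff = left - right;
//   bool overflow = ((diff ^ left) & (left ^ right)) < 0;
//
// i.e. overflow occurred iff the operands have different signs and the result
// has a different sign than left.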
4512 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
4513 SaveFPRegsMode save_doubles,
4514 BranchDelaySlot bd) {
4515 // All parameters are on the stack. v0 has the return value after call.
4517 // If the expected number of arguments of the runtime function is
4518 // constant, we check that the actual number of arguments matches the expectation.
4520 CHECK(f->nargs < 0 || f->nargs == num_arguments);
4522 // TODO(1236192): Most runtime routines don't need the number of
4523 // arguments passed in because it is constant. At some point we
4524 // should remove this need and make the runtime routine entry code smarter.
4526 PrepareCEntryArgs(num_arguments);
4527 PrepareCEntryFunction(ExternalReference(f, isolate()));
4528 CEntryStub stub(isolate(), 1, save_doubles);
4529 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4533 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4535 BranchDelaySlot bd) {
4536 PrepareCEntryArgs(num_arguments);
4537 PrepareCEntryFunction(ext);
4539 CEntryStub stub(isolate(), 1);
4540 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4544 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4547 // TODO(1236192): Most runtime routines don't need the number of
4548 // arguments passed in because it is constant. At some point we
4549 // should remove this need and make the runtime routine entry code smarter.
4551 PrepareCEntryArgs(num_arguments);
4552 JumpToExternalReference(ext);
4556 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4559 TailCallExternalReference(ExternalReference(fid, isolate()),
4565 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4566 BranchDelaySlot bd) {
4567 PrepareCEntryFunction(builtin);
4568 CEntryStub stub(isolate(), 1);
4569 Jump(stub.GetCode(),
4570 RelocInfo::CODE_TARGET,
4578 void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
4579 const CallWrapper& call_wrapper) {
4580 // You can't call a builtin without a valid frame.
4581 DCHECK(flag == JUMP_FUNCTION || has_frame());
4583 GetBuiltinEntry(t9, native_context_index);
4584 if (flag == CALL_FUNCTION) {
4585 call_wrapper.BeforeCall(CallSize(t9));
4587 call_wrapper.AfterCall();
4589 DCHECK(flag == JUMP_FUNCTION);
4595 void MacroAssembler::GetBuiltinFunction(Register target,
4596 int native_context_index) {
4597 // Load the builtins object into the target register.
4598 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4599 lw(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
4600 // Load the JavaScript builtin function from the builtins object.
4601 lw(target, ContextOperand(target, native_context_index));
4605 void MacroAssembler::GetBuiltinEntry(Register target,
4606 int native_context_index) {
4607 DCHECK(!target.is(a1));
4608 GetBuiltinFunction(a1, native_context_index);
4609 // Load the code entry point from the builtins object.
4610 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4614 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4615 Register scratch1, Register scratch2) {
4616 if (FLAG_native_code_counters && counter->Enabled()) {
4617 li(scratch1, Operand(value));
4618 li(scratch2, Operand(ExternalReference(counter)));
4619 sw(scratch1, MemOperand(scratch2));
4624 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4625 Register scratch1, Register scratch2) {
4627 if (FLAG_native_code_counters && counter->Enabled()) {
4628 li(scratch2, Operand(ExternalReference(counter)));
4629 lw(scratch1, MemOperand(scratch2));
4630 Addu(scratch1, scratch1, Operand(value));
4631 sw(scratch1, MemOperand(scratch2));
4636 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4637 Register scratch1, Register scratch2) {
4639 if (FLAG_native_code_counters && counter->Enabled()) {
4640 li(scratch2, Operand(ExternalReference(counter)));
4641 lw(scratch1, MemOperand(scratch2));
4642 Subu(scratch1, scratch1, Operand(value));
4643 sw(scratch1, MemOperand(scratch2));
4648 // -----------------------------------------------------------------------------
4651 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4652 Register rs, Operand rt) {
4653 if (emit_debug_code())
4654 Check(cc, reason, rs, rt);
4658 void MacroAssembler::AssertFastElements(Register elements) {
4659 if (emit_debug_code()) {
4660 DCHECK(!elements.is(at));
4663 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4664 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4665 Branch(&ok, eq, elements, Operand(at));
4666 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4667 Branch(&ok, eq, elements, Operand(at));
4668 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4669 Branch(&ok, eq, elements, Operand(at));
4670 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4677 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4678 Register rs, Operand rt) {
4680 Branch(&L, cc, rs, rt);
4682 // Will not return here.
4687 void MacroAssembler::Abort(BailoutReason reason) {
4691 const char* msg = GetBailoutReason(reason);
4693 RecordComment("Abort message: ");
4697 if (FLAG_trap_on_abort) {
4703 li(a0, Operand(Smi::FromInt(reason)));
4705 // Disable stub call restrictions to always allow calls to abort.
4707 // We don't actually want to generate a pile of code for this, so just
4708 // claim there is a stack frame, without generating one.
4709 FrameScope scope(this, StackFrame::NONE);
4710 CallRuntime(Runtime::kAbort, 1);
4712 CallRuntime(Runtime::kAbort, 1);
4714 // Will not return here.
4715 if (is_trampoline_pool_blocked()) {
4716 // If the calling code cares about the exact number of
4717 // instructions generated, we insert padding here to keep the size
4718 // of the Abort macro constant.
4719 // Currently, in debug mode with debug_code enabled, the number of
4720 // generated instructions is 10, so we use this as a maximum value.
4721 static const int kExpectedAbortInstructions = 10;
4722 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4723 DCHECK(abort_instructions <= kExpectedAbortInstructions);
4724 while (abort_instructions++ < kExpectedAbortInstructions) {
4731 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4732 if (context_chain_length > 0) {
4733 // Move up the chain of contexts to the context containing the slot.
4734 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4735 for (int i = 1; i < context_chain_length; i++) {
4736 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4739 // Slot is in the current function context. Move it into the
4740 // destination register in case we store into it (the write barrier
4741 // cannot be allowed to destroy the context in cp).
4747 void MacroAssembler::LoadGlobalProxy(Register dst) {
4748 lw(dst, GlobalObjectOperand());
4749 lw(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
4753 void MacroAssembler::LoadTransitionedArrayMapConditional(
4754 ElementsKind expected_kind,
4755 ElementsKind transitioned_kind,
4756 Register map_in_out,
4758 Label* no_map_match) {
4759 // Load the global or builtins object from the current context.
4761 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4762 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4764 // Check that the function's map is the same as the expected cached map.
4767 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4768 size_t offset = expected_kind * kPointerSize +
4769 FixedArrayBase::kHeaderSize;
4770 lw(at, FieldMemOperand(scratch, offset));
4771 Branch(no_map_match, ne, map_in_out, Operand(at));
4773 // Use the transitioned cached map.
4774 offset = transitioned_kind * kPointerSize +
4775 FixedArrayBase::kHeaderSize;
4776 lw(map_in_out, FieldMemOperand(scratch, offset));
4780 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4781 // Load the global or builtins object from the current context.
4783 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4784 // Load the native context from the global or builtins object.
4785 lw(function, FieldMemOperand(function,
4786 GlobalObject::kNativeContextOffset));
4787 // Load the function from the native context.
4788 lw(function, MemOperand(function, Context::SlotOffset(index)));
4792 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4795 // Load the initial map. The global functions all have initial maps.
4796 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4797 if (emit_debug_code()) {
4799 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4802 Abort(kGlobalFunctionsMustHaveInitialMap);
4808 void MacroAssembler::StubPrologue() {
4810 Push(Smi::FromInt(StackFrame::STUB));
4811 // Adjust FP to point to saved FP.
4812 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4816 void MacroAssembler::Prologue(bool code_pre_aging) {
4817 PredictableCodeSizeScope predictible_code_size_scope(
4818 this, kNoCodeAgeSequenceLength);
4819 // The following three instructions must remain together and unmodified
4820 // for code aging to work properly.
4821 if (code_pre_aging) {
4822 // Pre-age the code.
4823 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4824 nop(Assembler::CODE_AGE_MARKER_NOP);
4825 // Load the stub address to t9 and call it;
4826 // GetCodeAgeAndParity() extracts the stub address from this instruction.
4828 Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
4830 nop(); // Prevent jalr to jal optimization.
4832 nop(); // Branch delay slot nop.
4833 nop(); // Pad the empty space.
4835 Push(ra, fp, cp, a1);
4836 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4837 // Adjust fp to point to caller's fp.
4838 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4843 void MacroAssembler::EnterFrame(StackFrame::Type type,
4844 bool load_constant_pool_pointer_reg) {
4845 // Out-of-line constant pool not implemented on MIPS.
4850 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4851 addiu(sp, sp, -5 * kPointerSize);
4852 li(t8, Operand(Smi::FromInt(type)));
4853 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4854 sw(ra, MemOperand(sp, 4 * kPointerSize));
4855 sw(fp, MemOperand(sp, 3 * kPointerSize));
4856 sw(cp, MemOperand(sp, 2 * kPointerSize));
4857 sw(t8, MemOperand(sp, 1 * kPointerSize));
4858 sw(t9, MemOperand(sp, 0 * kPointerSize));
4859 // Adjust FP to point to saved FP.
4861 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4865 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4867 lw(fp, MemOperand(sp, 0 * kPointerSize));
4868 lw(ra, MemOperand(sp, 1 * kPointerSize));
4869 addiu(sp, sp, 2 * kPointerSize);
4873 void MacroAssembler::EnterExitFrame(bool save_doubles,
4875 // Set up the frame structure on the stack.
4876 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4877 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4878 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4880 // This is how the stack will look:
4881 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4882 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4883 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4884 // [fp - 1 (==kSPOffset)] - sp of the called function
4885 // [fp - 2 (==kCodeOffset)] - CodeObject
4886 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4887 // new stack (will contain saved ra)
4890 addiu(sp, sp, -4 * kPointerSize);
4891 sw(ra, MemOperand(sp, 3 * kPointerSize));
4892 sw(fp, MemOperand(sp, 2 * kPointerSize));
4893 addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4895 if (emit_debug_code()) {
4896 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4899 // Accessed from ExitFrame::code_slot.
4900 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4901 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4903 // Save the frame pointer and the context in top.
4904 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4905 sw(fp, MemOperand(t8));
4906 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4907 sw(cp, MemOperand(t8));
4909 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4911 // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4912 DCHECK(kDoubleSize == frame_alignment);
4913 if (frame_alignment > 0) {
4914 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4915 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4917 int space = FPURegister::kMaxNumRegisters * kDoubleSize;
4918 Subu(sp, sp, Operand(space));
4919 // Remember: we only need to save every 2nd double FPU value.
4920 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4921 FPURegister reg = FPURegister::from_code(i);
4922 sdc1(reg, MemOperand(sp, i * kDoubleSize));
4926 // Reserve space for the return address, stack space and an optional slot
4927 // (used by the DirectCEntryStub to hold the return value if a struct is
4928 // returned), and align the frame in preparation for calling the runtime function.
4929 DCHECK(stack_space >= 0);
4930 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4931 if (frame_alignment > 0) {
4932 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4933 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4936 // Set the exit frame sp value to point just before the return address slot.
4938 addiu(at, sp, kPointerSize);
4939 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4943 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
4944 bool restore_context, bool do_return,
4945 bool argument_count_is_length) {
4946 // Optionally restore all double registers.
4948 // Remember: we only need to restore every 2nd double FPU value.
4949 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4950 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4951 FPURegister reg = FPURegister::from_code(i);
4952 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4957 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4958 sw(zero_reg, MemOperand(t8));
4960 // Restore current context from top and clear it in debug mode.
4961 if (restore_context) {
4962 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4963 lw(cp, MemOperand(t8));
4966 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4967 sw(a3, MemOperand(t8));
4970 // Pop the arguments, restore registers, and return.
4971 mov(sp, fp); // Respect ABI stack constraint.
4972 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4973 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4975 if (argument_count.is_valid()) {
4976 if (argument_count_is_length) {
4977 addu(sp, sp, argument_count);
4979 sll(t8, argument_count, kPointerSizeLog2);
4985 Ret(USE_DELAY_SLOT);
4986 // If returning, the instruction in the delay slot will be the addiu below.
4992 void MacroAssembler::InitializeNewString(Register string,
4994 Heap::RootListIndex map_index,
4996 Register scratch2) {
4997 sll(scratch1, length, kSmiTagSize);
4998 LoadRoot(scratch2, map_index);
4999 sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
5000 li(scratch1, Operand(String::kEmptyHashField));
5001 sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
5002 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
5006 int MacroAssembler::ActivationFrameAlignment() {
5007 #if V8_HOST_ARCH_MIPS
5008 // Running on the real platform. Use the alignment as mandated by the local environment.
5010 // Note: This will break if we ever start generating snapshots on one MIPS
5011 // platform for another MIPS platform with a different alignment.
5012 return base::OS::ActivationFrameAlignment();
5013 #else // V8_HOST_ARCH_MIPS
5014 // If we are using the simulator, then we should always align to the expected
5015 // alignment. As the simulator is used to generate snapshots, we do not know
5016 // if the target platform will need alignment, so this is controlled from a flag.
5018 return FLAG_sim_stack_alignment;
5019 #endif // V8_HOST_ARCH_MIPS
5023 void MacroAssembler::AssertStackIsAligned() {
5024 if (emit_debug_code()) {
5025 const int frame_alignment = ActivationFrameAlignment();
5026 const int frame_alignment_mask = frame_alignment - 1;
5028 if (frame_alignment > kPointerSize) {
5029 Label alignment_as_expected;
5030 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5031 andi(at, sp, frame_alignment_mask);
5032 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5033 // Don't use Check here, as it will call Runtime_Abort re-entering here.
5034 stop("Unexpected stack alignment");
5035 bind(&alignment_as_expected);
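// A minimal C++ sketch of the alignment test above, assuming a power-of-two
// frame alignment (the helper name is illustrative and not part of the
// assembler):
static inline bool ExampleStackIsAligned(uint32_t stack_pointer,
                                         uint32_t frame_alignment) {
  // Illustrative only: the low bits select the misalignment.
  return (stack_pointer & (frame_alignment - 1)) == 0;
}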
5041 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
5044 Label* not_power_of_two_or_zero) {
5045 Subu(scratch, reg, Operand(1));
5046 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
5047 scratch, Operand(zero_reg));
5048 and_(at, scratch, reg); // In the delay slot.
5049 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
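// The branch sequence above is the classic power-of-two test: for a positive
// x, x & (x - 1) clears the single set bit, so the result is zero exactly
// when x is a power of two.  A C++ sketch of the same predicate (illustrative
// helper, not used by the assembler):
static inline bool ExampleIsNonZeroPowerOfTwo(int32_t x) {
  // Illustrative only; mirrors the two branches above.
  return x > 0 && (x & (x - 1)) == 0;
}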
5053 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
5054 DCHECK(!reg.is(overflow));
5055 mov(overflow, reg); // Save original value.
5057 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
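// Smi-tagging a 32-bit value computes 2 * value, so the tag overflows exactly
// when the value and its doubled form disagree in the sign bit.  A C++ sketch
// of the check generated above (illustrative helper only):
static inline bool ExampleSmiTagOverflows(int32_t value) {
  int32_t doubled = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return (value ^ doubled) < 0;  // Sign bits differ iff 2 * value overflowed.
}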
5061 void MacroAssembler::SmiTagCheckOverflow(Register dst,
5063 Register overflow) {
5065 // Fall back to slower case.
5066 SmiTagCheckOverflow(dst, overflow);
5068 DCHECK(!dst.is(src));
5069 DCHECK(!dst.is(overflow));
5070 DCHECK(!src.is(overflow));
5072 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
5077 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5080 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5085 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5087 Label* non_smi_case) {
5088 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
5092 void MacroAssembler::JumpIfSmi(Register value,
5095 BranchDelaySlot bd) {
5096 DCHECK_EQ(0, kSmiTag);
5097 andi(scratch, value, kSmiTagMask);
5098 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5101 void MacroAssembler::JumpIfNotSmi(Register value,
5102 Label* not_smi_label,
5104 BranchDelaySlot bd) {
5105 DCHECK_EQ(0, kSmiTag);
5106 andi(scratch, value, kSmiTagMask);
5107 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5111 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5113 Label* on_not_both_smi) {
5114 STATIC_ASSERT(kSmiTag == 0);
5115 DCHECK_EQ(1, kSmiTagMask);
5116 or_(at, reg1, reg2);
5117 JumpIfNotSmi(at, on_not_both_smi);
5121 void MacroAssembler::JumpIfEitherSmi(Register reg1,
5123 Label* on_either_smi) {
5124 STATIC_ASSERT(kSmiTag == 0);
5125 DCHECK_EQ(1, kSmiTagMask);
5126 // The AND below looks like a Smi unless both tag bits are 1 (neither a Smi).
5127 and_(at, reg1, reg2);
5128 JumpIfSmi(at, on_either_smi);
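// With kSmiTag == 0 the low bit of a Smi is clear, so OR-ing two values
// yields a Smi-looking word only when both are Smis, while AND-ing yields one
// when either is.  A C++ sketch of the two combined tests (illustrative
// helpers, assuming kSmiTagMask == 1):
static inline bool ExampleBothSmi(uint32_t a, uint32_t b) {
  return ((a | b) & 1) == 0;
}
static inline bool ExampleEitherSmi(uint32_t a, uint32_t b) {
  return ((a & b) & 1) == 0;
}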
5132 void MacroAssembler::AssertNotSmi(Register object) {
5133 if (emit_debug_code()) {
5134 STATIC_ASSERT(kSmiTag == 0);
5135 andi(at, object, kSmiTagMask);
5136 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
5141 void MacroAssembler::AssertSmi(Register object) {
5142 if (emit_debug_code()) {
5143 STATIC_ASSERT(kSmiTag == 0);
5144 andi(at, object, kSmiTagMask);
5145 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
5150 void MacroAssembler::AssertString(Register object) {
5151 if (emit_debug_code()) {
5152 STATIC_ASSERT(kSmiTag == 0);
5154 Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
5156 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5157 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5158 Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
5164 void MacroAssembler::AssertName(Register object) {
5165 if (emit_debug_code()) {
5166 STATIC_ASSERT(kSmiTag == 0);
5168 Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
5170 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5171 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5172 Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
5178 void MacroAssembler::AssertFunction(Register object) {
5179 if (emit_debug_code()) {
5180 STATIC_ASSERT(kSmiTag == 0);
5182 Check(ne, kOperandIsASmiAndNotAFunction, t0, Operand(zero_reg));
5184 GetObjectType(object, object, object);
5186 Check(ne, kOperandIsNotAFunction, object, Operand(JS_FUNCTION_TYPE));
5191 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5193 if (emit_debug_code()) {
5194 Label done_checking;
5195 AssertNotSmi(object);
5196 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5197 Branch(&done_checking, eq, object, Operand(scratch));
5199 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5200 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
5201 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
5203 bind(&done_checking);
5208 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
5209 if (emit_debug_code()) {
5210 DCHECK(!reg.is(at));
5211 LoadRoot(at, index);
5212 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
5217 void MacroAssembler::JumpIfNotHeapNumber(Register object,
5218 Register heap_number_map,
5220 Label* on_not_heap_number) {
5221 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5222 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
5223 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
5227 void MacroAssembler::LookupNumberStringCache(Register object,
5233 // Register usage: result is used as a temporary.
5234 Register number_string_cache = result;
5235 Register mask = scratch3;
5237 // Load the number string cache.
5238 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
5240 // Make the hash mask from the length of the number string cache. It
5241 // contains two elements (number and string) for each cache entry.
5242 lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
5243 // Divide length by two (length is a smi).
5244 sra(mask, mask, kSmiTagSize + 1);
5245 Addu(mask, mask, -1); // Make mask.
5247 // Calculate the entry in the number string cache. The hash value in the
5248 // number string cache for smis is just the smi value, and the hash for
5249 // doubles is the xor of the upper and lower words. See
5250 // Heap::GetNumberStringCache.
5252 Label load_result_from_cache;
5253 JumpIfSmi(object, &is_smi);
5256 Heap::kHeapNumberMapRootIndex,
5260 STATIC_ASSERT(8 == kDoubleSize);
5263 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
5264 lw(scratch2, MemOperand(scratch1, kPointerSize));
5265 lw(scratch1, MemOperand(scratch1, 0));
5266 Xor(scratch1, scratch1, Operand(scratch2));
5267 And(scratch1, scratch1, Operand(mask));
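// Worked example of the hash above: for the double 2.5 the two 32-bit words
// are 0x40040000 and 0x00000000, so the raw hash is their XOR, 0x40040000,
// which the mask (cache length / 2 - 1) then reduces to a cache index.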
5269 // Calculate address of entry in string cache: each entry consists
5270 // of two pointer sized fields.
5271 sll(scratch1, scratch1, kPointerSizeLog2 + 1);
5272 Addu(scratch1, number_string_cache, scratch1);
5274 Register probe = mask;
5275 lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
5276 JumpIfSmi(probe, not_found);
5277 ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
5278 ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
5279 BranchF(&load_result_from_cache, NULL, eq, f12, f14);
5283 Register scratch = scratch1;
5284 sra(scratch, object, 1); // Shift away the tag.
5285 And(scratch, mask, Operand(scratch));
5287 // Calculate address of entry in string cache: each entry consists
5288 // of two pointer sized fields.
5289 sll(scratch, scratch, kPointerSizeLog2 + 1);
5290 Addu(scratch, number_string_cache, scratch);
5292 // Check if the entry is the smi we are looking for.
5293 lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
5294 Branch(not_found, ne, object, Operand(probe));
5296 // Get the result from the cache.
5297 bind(&load_result_from_cache);
5298 lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
5300 IncrementCounter(isolate()->counters()->number_to_string_native(),
5307 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
5308 Register first, Register second, Register scratch1, Register scratch2,
5310 // Test that both first and second are sequential one-byte strings.
5311 // Assume that they are non-smis.
5312 lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
5313 lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
5314 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
5315 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
5317 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
5322 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
5327 // Check that neither is a smi.
5328 STATIC_ASSERT(kSmiTag == 0);
5329 And(scratch1, first, Operand(second));
5330 JumpIfSmi(scratch1, failure);
5331 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
5336 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
5337 Register first, Register second, Register scratch1, Register scratch2,
5339 const int kFlatOneByteStringMask =
5340 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5341 const int kFlatOneByteStringTag =
5342 kStringTag | kOneByteStringTag | kSeqStringTag;
5343 DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
5344 andi(scratch1, first, kFlatOneByteStringMask);
5345 Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
5346 andi(scratch2, second, kFlatOneByteStringMask);
5347 Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
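// With the usual tag layout (kIsNotStringMask == 0x80, kStringEncodingMask ==
// 0x04 and kStringRepresentationMask == 0x03, assumed here) the mask above is
// 0x87 and the expected tag is 0x04: string, one-byte, sequential.  A C++
// sketch of the per-type predicate (illustrative helper only):
static inline bool ExampleIsSequentialOneByte(uint32_t instance_type) {
  const uint32_t kMask = 0x80 | 0x04 | 0x03;  // Illustrative constants.
  const uint32_t kTag = 0x04;
  return (instance_type & kMask) == kTag;
}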
5351 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
5354 const int kFlatOneByteStringMask =
5355 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5356 const int kFlatOneByteStringTag =
5357 kStringTag | kOneByteStringTag | kSeqStringTag;
5358 And(scratch, type, Operand(kFlatOneByteStringMask));
5359 Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
5363 static const int kRegisterPassedArguments = 4;
5365 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5366 int num_double_arguments) {
5367 int stack_passed_words = 0;
5368 num_reg_arguments += 2 * num_double_arguments;
5370 // Up to four simple arguments are passed in registers a0..a3.
5371 if (num_reg_arguments > kRegisterPassedArguments) {
5372 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5374 stack_passed_words += kCArgSlotCount;
5375 return stack_passed_words;
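// Worked example, assuming kCArgSlotCount == 4 (the O32 ABI): a call with
// three integer and two double arguments counts as 3 + 2 * 2 = 7 register-
// sized arguments, so 7 - 4 = 3 words spill to the stack, plus the four
// always-reserved argument slots, giving 7 stack words in total.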
5379 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5383 uint32_t encoding_mask) {
5386 Check(ne, kNonObject, at, Operand(zero_reg));
5388 lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
5389 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5391 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5392 li(scratch, Operand(encoding_mask));
5393 Check(eq, kUnexpectedStringType, at, Operand(scratch));
5395 // The index is assumed to be untagged coming in; tag it to compare with the
5396 // string length without using a temp register. It is restored at the end of
5397 // this function.
5398 Label index_tag_ok, index_tag_bad;
5399 TrySmiTag(index, scratch, &index_tag_bad);
5400 Branch(&index_tag_ok);
5401 bind(&index_tag_bad);
5402 Abort(kIndexIsTooLarge);
5403 bind(&index_tag_ok);
5405 lw(at, FieldMemOperand(string, String::kLengthOffset));
5406 Check(lt, kIndexIsTooLarge, index, Operand(at));
5408 DCHECK(Smi::FromInt(0) == 0);
5409 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5411 SmiUntag(index, index);
5415 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5416 int num_double_arguments,
5418 int frame_alignment = ActivationFrameAlignment();
5420 // Up to four simple arguments are passed in registers a0..a3.
5421 // Those four arguments must have reserved argument slots on the stack for
5422 // mips, even though those argument slots are not normally used.
5423 // Remaining arguments are pushed on the stack, above (higher address than)
5424 // the argument slots.
5425 int stack_passed_arguments = CalculateStackPassedWords(
5426 num_reg_arguments, num_double_arguments);
5427 if (frame_alignment > kPointerSize) {
5428 // Make the stack end aligned and make room for num_arguments - 4 words and
5429 // the original value of sp.
5431 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5432 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5433 And(sp, sp, Operand(-frame_alignment));
5434 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5436 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
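// A C++ sketch of the aligned-path stack adjustment above, assuming 4-byte
// pointers (illustrative helper only): one extra word is reserved so the
// original sp can be stored just above the outgoing argument slots and
// reloaded after the call.
static inline uint32_t ExamplePrepareStack(uint32_t sp, int stack_words,
                                           uint32_t frame_alignment) {
  uint32_t new_sp = sp - (stack_words + 1) * 4;  // Args plus saved-sp slot.
  new_sp &= ~(frame_alignment - 1);              // Round down to alignment.
  // The caller then stores the old sp at new_sp + stack_words * 4.
  return new_sp;
}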
5441 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5443 PrepareCallCFunction(num_reg_arguments, 0, scratch);
5447 void MacroAssembler::CallCFunction(ExternalReference function,
5448 int num_reg_arguments,
5449 int num_double_arguments) {
5450 li(t8, Operand(function));
5451 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5455 void MacroAssembler::CallCFunction(Register function,
5456 int num_reg_arguments,
5457 int num_double_arguments) {
5458 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5462 void MacroAssembler::CallCFunction(ExternalReference function,
5463 int num_arguments) {
5464 CallCFunction(function, num_arguments, 0);
5468 void MacroAssembler::CallCFunction(Register function,
5469 int num_arguments) {
5470 CallCFunction(function, num_arguments, 0);
5474 void MacroAssembler::CallCFunctionHelper(Register function,
5475 int num_reg_arguments,
5476 int num_double_arguments) {
5477 DCHECK(has_frame());
5478 // Make sure that the stack is aligned before calling a C function unless
5479 // running in the simulator. The simulator has its own alignment check which
5480 // provides more information.
5481 // The argument slots are presumed to have been set up by
5482 // PrepareCallCFunction. The C function must be called via t9, per the MIPS ABI.
5484 #if V8_HOST_ARCH_MIPS
5485 if (emit_debug_code()) {
5486 int frame_alignment = base::OS::ActivationFrameAlignment();
5487 int frame_alignment_mask = frame_alignment - 1;
5488 if (frame_alignment > kPointerSize) {
5489 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5490 Label alignment_as_expected;
5491 And(at, sp, Operand(frame_alignment_mask));
5492 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5493 // Don't use Check here, as it will call Runtime_Abort possibly
5494 // re-entering here.
5495 stop("Unexpected alignment in CallCFunction");
5496 bind(&alignment_as_expected);
5499 #endif // V8_HOST_ARCH_MIPS
5501 // Just call directly. The function called cannot cause a GC, or
5502 // allow preemption, so the return address in the link register
5503 // stays correct.
5505 if (!function.is(t9)) {
5512 int stack_passed_arguments = CalculateStackPassedWords(
5513 num_reg_arguments, num_double_arguments);
5515 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
5516 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5518 Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5523 #undef BRANCH_ARGS_CHECK
5526 void MacroAssembler::CheckPageFlag(
5531 Label* condition_met) {
5532 And(scratch, object, Operand(~Page::kPageAlignmentMask));
5533 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5534 And(scratch, scratch, Operand(mask));
5535 Branch(condition_met, cc, scratch, Operand(zero_reg));
5539 void MacroAssembler::JumpIfBlack(Register object,
5543 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5544 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5548 void MacroAssembler::HasColor(Register object,
5549 Register bitmap_scratch,
5550 Register mask_scratch,
5554 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5555 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5557 GetMarkBits(object, bitmap_scratch, mask_scratch);
5559 Label other_color, word_boundary;
5560 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5561 And(t8, t9, Operand(mask_scratch));
5562 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5563 // Shift left 1 by adding.
5564 Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5565 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5566 And(t8, t9, Operand(mask_scratch));
5567 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5570 bind(&word_boundary);
5571 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5572 And(t9, t9, Operand(1));
5573 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
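// The collector encodes an object's color in two adjacent mark bits:
// white = 00, black = 10, grey = 11.  A C++ sketch of the black test on a
// single bitmap cell (illustrative helper; it ignores the cell-boundary case
// that the code above handles explicitly):
static inline bool ExampleIsBlack(uint32_t cell, uint32_t mask) {
  return (cell & mask) != 0 && (cell & (mask << 1)) == 0;
}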
5578 // Detect some, but not all, common pointer-free objects. This is used by the
5579 // incremental write barrier which doesn't care about oddballs (they are always
5580 // marked black immediately so this code is not hit).
5581 void MacroAssembler::JumpIfDataObject(Register value,
5583 Label* not_data_object) {
5584 DCHECK(!AreAliased(value, scratch, t8, no_reg));
5585 Label is_data_object;
5586 lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5587 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5588 Branch(&is_data_object, eq, t8, Operand(scratch));
5589 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5590 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5591 // If it's a string and it's not a cons string then it's an object containing
5592 // no GC pointers.
5593 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5594 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5595 Branch(not_data_object, ne, t8, Operand(zero_reg));
5596 bind(&is_data_object);
5600 void MacroAssembler::GetMarkBits(Register addr_reg,
5601 Register bitmap_reg,
5602 Register mask_reg) {
5603 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5604 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5605 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5606 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5607 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5608 sll(t8, t8, kPointerSizeLog2);
5609 Addu(bitmap_reg, bitmap_reg, t8);
5611 sllv(mask_reg, t8, mask_reg);
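// A C++ sketch of the mark-bit lookup above for 32-bit pointers (names and
// constants illustrative): each pointer-sized word of a page maps to one mark
// bit, and the bits are grouped into 32-bit cells at the start of the page.
static inline void ExampleGetMarkBits(uint32_t addr, uint32_t page_mask,
                                      uint32_t* cell_index, uint32_t* mask) {
  const int kPtrLog2 = 2;              // 4-byte pointers.
  const int kBitsPerCellLog2 = 5;      // 32 mark bits per cell.
  uint32_t offset = addr & page_mask;  // Offset within the page.
  uint32_t bit = (offset >> kPtrLog2) & ((1u << kBitsPerCellLog2) - 1);
  *cell_index = offset >> (kPtrLog2 + kBitsPerCellLog2);
  *mask = 1u << bit;
}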
5615 void MacroAssembler::EnsureNotWhite(
5617 Register bitmap_scratch,
5618 Register mask_scratch,
5619 Register load_scratch,
5620 Label* value_is_white_and_not_data) {
5621 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5622 GetMarkBits(value, bitmap_scratch, mask_scratch);
5624 // If the value is black or grey we don't need to do anything.
5625 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5626 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5627 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5628 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5632 // Since both black and grey have a 1 in the first position and white does
5633 // not have a 1 there we only need to check one bit.
5634 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5635 And(t8, mask_scratch, load_scratch);
5636 Branch(&done, ne, t8, Operand(zero_reg));
5638 if (emit_debug_code()) {
5639 // Check for impossible bit pattern.
5641 // sll may overflow, making the check conservative.
5642 sll(t8, mask_scratch, 1);
5643 And(t8, load_scratch, t8);
5644 Branch(&ok, eq, t8, Operand(zero_reg));
5645 stop("Impossible marking bit pattern");
5649 // Value is white. We check whether it is data that doesn't need scanning.
5650 // Currently only checks for HeapNumber and non-cons strings.
5651 Register map = load_scratch; // Holds map while checking type.
5652 Register length = load_scratch; // Holds length of object after testing type.
5653 Label is_data_object;
5655 // Check for heap-number
5656 lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5657 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5660 Branch(&skip, ne, t8, Operand(map));
5661 li(length, HeapNumber::kSize);
5662 Branch(&is_data_object);
5666 // Check for strings.
5667 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5668 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5669 // If it's a string and it's not a cons string then it's an object containing
5670 // no GC pointers.
5671 Register instance_type = load_scratch;
5672 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5673 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5674 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5675 // It's a non-indirect (non-cons and non-slice) string.
5676 // If it's external, the length is just ExternalString::kSize.
5677 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5678 // External strings are the only ones with the kExternalStringTag bit
5679 // set.
5680 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
5681 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
5682 And(t8, instance_type, Operand(kExternalStringTag));
5685 Branch(&skip, eq, t8, Operand(zero_reg));
5686 li(length, ExternalString::kSize);
5687 Branch(&is_data_object);
5691 // Sequential string, either Latin1 or UC16.
5692 // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
5693 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5694 // getting the length multiplied by 2.
5695 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5696 DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
5697 lw(t9, FieldMemOperand(value, String::kLengthOffset));
5698 And(t8, instance_type, Operand(kStringEncodingMask));
5701 Branch(&skip, eq, t8, Operand(zero_reg));
5705 Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5706 And(length, length, Operand(~kObjectAlignmentMask));
5708 bind(&is_data_object);
5709 // Value is a data object, and it is white. Mark it black. Since we know
5710 // that the object is white we can make it black by flipping one bit.
5711 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5712 Or(t8, t8, Operand(mask_scratch));
5713 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5715 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5716 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5717 Addu(t8, t8, Operand(length));
5718 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
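// The string-size computation above rounds the byte length up to the
// allocation granularity.  A C++ sketch of that rounding step, assuming an
// 8-byte object alignment, i.e. kObjectAlignmentMask == 7 (illustrative
// helper only):
static inline uint32_t ExampleAlignObjectSize(uint32_t unaligned_size) {
  const uint32_t kMask = 7;  // Assumed object alignment mask.
  return (unaligned_size + kMask) & ~kMask;
}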
5724 void MacroAssembler::LoadInstanceDescriptors(Register map,
5725 Register descriptors) {
5726 lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5730 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5731 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5732 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5736 void MacroAssembler::EnumLength(Register dst, Register map) {
5737 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5738 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5739 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
5744 void MacroAssembler::LoadAccessor(Register dst, Register holder,
5746 AccessorComponent accessor) {
5747 lw(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
5748 LoadInstanceDescriptors(dst, dst);
5750 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
5751 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
5752 : AccessorPair::kSetterOffset;
5753 lw(dst, FieldMemOperand(dst, offset));
5757 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5758 Register empty_fixed_array_value = t2;
5759 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5763 // Check if the enum length field is properly initialized, indicating that
5764 // there is an enum cache.
5765 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5769 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5774 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5776 // For all objects but the receiver, check that the cache is empty.
5778 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5782 // Check that there are no elements. Register a2 contains the current JS
5783 // object we've reached through the prototype chain.
5785 lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5786 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5788 // Second chance, the object may be using the empty slow element dictionary.
5789 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5790 Branch(call_runtime, ne, a2, Operand(at));
5793 lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5794 Branch(&next, ne, a2, Operand(null_value));
5798 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5799 DCHECK(!output_reg.is(input_reg));
5801 li(output_reg, Operand(255));
5802 // Normal branch: nop in delay slot.
5803 Branch(&done, gt, input_reg, Operand(output_reg));
5804 // Use delay slot in this branch.
5805 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5806 mov(output_reg, zero_reg); // In delay slot.
5807 mov(output_reg, input_reg); // Value is in range 0..255.
5812 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5813 DoubleRegister input_reg,
5814 DoubleRegister temp_double_reg) {
5819 Move(temp_double_reg, 0.0);
5820 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5822 // Double value is not above zero (negative, -Inf or NaN): return 0.
5823 mov(result_reg, zero_reg);
5826 // Double value is >= 255, return 255.
5828 Move(temp_double_reg, 255.0);
5829 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5830 li(result_reg, Operand(255));
5833 // In 0-255 range, round and truncate.
5835 cvt_w_d(temp_double_reg, input_reg);
5836 mfc1(result_reg, temp_double_reg);
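// A C++ sketch of the clamp above (illustrative only): negative values and
// NaN map to 0, values of 255 or more map to 255, and in-range values are
// rounded to an integer (the FPU uses its current rounding mode; this sketch
// simply rounds to nearest).
static inline int ExampleClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;          // Covers negatives and NaN.
  if (value >= 255.0) return 255;
  return static_cast<int>(value + 0.5);  // Round to nearest, ties up.
}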
5841 void MacroAssembler::TestJSArrayForAllocationMemento(
5842 Register receiver_reg,
5843 Register scratch_reg,
5844 Label* no_memento_found,
5846 Label* allocation_memento_present) {
5847 ExternalReference new_space_start =
5848 ExternalReference::new_space_start(isolate());
5849 ExternalReference new_space_allocation_top =
5850 ExternalReference::new_space_allocation_top_address(isolate());
5851 Addu(scratch_reg, receiver_reg,
5852 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5853 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5854 li(at, Operand(new_space_allocation_top));
5855 lw(at, MemOperand(at));
5856 Branch(no_memento_found, gt, scratch_reg, Operand(at));
5857 lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5858 if (allocation_memento_present) {
5859 Branch(allocation_memento_present, cond, scratch_reg,
5860 Operand(isolate()->factory()->allocation_memento_map()));
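// A C++ sketch of the new-space range test above (illustrative helper only):
// a memento can only be live if the word just past it lies between the
// new-space start and the current allocation top.
static inline bool ExampleCouldHoldMemento(uint32_t memento_end,
                                           uint32_t new_space_start,
                                           uint32_t allocation_top) {
  return memento_end >= new_space_start && memento_end <= allocation_top;
}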
5865 Register GetRegisterThatIsNotOneOf(Register reg1,
5872 if (reg1.is_valid()) regs |= reg1.bit();
5873 if (reg2.is_valid()) regs |= reg2.bit();
5874 if (reg3.is_valid()) regs |= reg3.bit();
5875 if (reg4.is_valid()) regs |= reg4.bit();
5876 if (reg5.is_valid()) regs |= reg5.bit();
5877 if (reg6.is_valid()) regs |= reg6.bit();
5879 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
5880 Register candidate = Register::FromAllocationIndex(i);
5881 if (regs & candidate.bit()) continue;
5889 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5894 DCHECK(!scratch1.is(scratch0));
5895 Factory* factory = isolate()->factory();
5896 Register current = scratch0;
5897 Label loop_again, end;
5899 // Walk the prototype chain; 'current' holds each prototype in turn.
5900 Move(current, object);
5901 lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
5902 lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
5903 Branch(&end, eq, current, Operand(factory->null_value()));
5905 // Loop based on the map going up the prototype chain.
5907 lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
5908 lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
5909 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
5910 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
5911 Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
5912 lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
5913 DecodeField<Map::ElementsKindBits>(scratch1);
5914 Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
5915 lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
5916 Branch(&loop_again, ne, current, Operand(factory->null_value()));
5922 bool AreAliased(Register reg1,
5930 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
5931 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
5932 reg7.is_valid() + reg8.is_valid();
5935 if (reg1.is_valid()) regs |= reg1.bit();
5936 if (reg2.is_valid()) regs |= reg2.bit();
5937 if (reg3.is_valid()) regs |= reg3.bit();
5938 if (reg4.is_valid()) regs |= reg4.bit();
5939 if (reg5.is_valid()) regs |= reg5.bit();
5940 if (reg6.is_valid()) regs |= reg6.bit();
5941 if (reg7.is_valid()) regs |= reg7.bit();
5942 if (reg8.is_valid()) regs |= reg8.bit();
5943 int n_of_non_aliasing_regs = NumRegs(regs);
5945 return n_of_valid_regs != n_of_non_aliasing_regs;
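// A C++ sketch of the aliasing test above (illustrative helper only): every
// valid register contributes one bit to a mask, so duplicates show up as the
// population count of the mask being smaller than the number of valid
// registers supplied.
static inline bool ExampleAreAliased(const uint32_t* reg_bits, int count) {
  int valid = 0;
  uint32_t mask = 0;
  for (int i = 0; i < count; i++) {
    if (reg_bits[i] == 0) continue;  // Treat 0 as an invalid register.
    valid++;
    mask |= reg_bits[i];
  }
  int distinct = 0;
  for (uint32_t m = mask; m != 0; m &= m - 1) distinct++;  // Popcount.
  return valid != distinct;
}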
5949 CodePatcher::CodePatcher(byte* address,
5951 FlushICache flush_cache)
5952 : address_(address),
5953 size_(instructions * Assembler::kInstrSize),
5954 masm_(NULL, address, size_ + Assembler::kGap),
5955 flush_cache_(flush_cache) {
5956 // Create a new macro assembler pointing to the address of the code to patch.
5957 // The size is adjusted with kGap in order for the assembler to generate size
5958 // bytes of instructions without failing with buffer size constraints.
5959 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5963 CodePatcher::~CodePatcher() {
5964 // Indicate that code has changed.
5965 if (flush_cache_ == FLUSH) {
5966 CpuFeatures::FlushICache(address_, size_);
5969 // Check that the code was patched as expected.
5970 DCHECK(masm_.pc_ == address_ + size_);
5971 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5975 void CodePatcher::Emit(Instr instr) {
5976 masm()->emit(instr);
5980 void CodePatcher::Emit(Address addr) {
5981 masm()->emit(reinterpret_cast<Instr>(addr));
5985 void CodePatcher::ChangeBranchCondition(Condition cond) {
5986 Instr instr = Assembler::instr_at(masm_.pc_);
5987 DCHECK(Assembler::IsBranch(instr));
5988 uint32_t opcode = Assembler::GetOpcodeField(instr);
5989 // Currently only the 'eq' and 'ne' cond values are supported and the simple
5990 // branch instructions (with opcode being the branch type).
5991 // There are some special cases (see Assembler::IsBranch()) so extending this
5992 // would be tricky.
5993 DCHECK(opcode == BEQ ||
6001 opcode = (cond == eq) ? BEQ : BNE;
6002 instr = (instr & ~kOpcodeMask) | opcode;
6007 void MacroAssembler::TruncatingDiv(Register result,
6010 DCHECK(!dividend.is(result));
6011 DCHECK(!dividend.is(at));
6012 DCHECK(!result.is(at));
6013 base::MagicNumbersForDivision<uint32_t> mag =
6014 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
6015 li(at, Operand(mag.multiplier));
6016 Mulh(result, dividend, Operand(at));
6017 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
6018 if (divisor > 0 && neg) {
6019 Addu(result, result, Operand(dividend));
6021 if (divisor < 0 && !neg && mag.multiplier > 0) {
6022 Subu(result, result, Operand(dividend));
6024 if (mag.shift > 0) sra(result, result, mag.shift);
6025 srl(at, dividend, 31);
6026 Addu(result, result, Operand(at));
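// Worked example of the sequence above for divisor 7: the standard signed
// magic constant is 0x92492493 with shift 2, and since the multiplier is
// negative the dividend is added back after the high multiply.  A C++ sketch
// of the same computation (illustrative helper only):
static inline int32_t ExampleTruncatingDivideBy7(int32_t dividend) {
  int64_t product = static_cast<int64_t>(dividend) *
                    static_cast<int32_t>(0x92492493);      // mag.multiplier.
  int32_t quotient = static_cast<int32_t>(product >> 32) + dividend;
  quotient >>= 2;                                          // mag.shift == 2.
  quotient += static_cast<uint32_t>(dividend) >> 31;       // Fix negatives.
  return quotient;
}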
6030 } // namespace internal
6033 #endif // V8_TARGET_ARCH_MIPS