1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
7 #if V8_TARGET_ARCH_MIPS
9 #include "src/base/bits.h"
10 #include "src/base/division-by-constant.h"
11 #include "src/bootstrapper.h"
12 #include "src/codegen.h"
13 #include "src/cpu-profiler.h"
14 #include "src/debug/debug.h"
15 #include "src/mips/macro-assembler-mips.h"
16 #include "src/runtime/runtime.h"
21 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
22 : Assembler(arg_isolate, buffer, size),
23 generating_stub_(false),
25 has_double_zero_reg_set_(false) {
26 if (isolate() != NULL) {
27 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
33 void MacroAssembler::Load(Register dst,
34 const MemOperand& src,
36 DCHECK(!r.IsDouble());
39 } else if (r.IsUInteger8()) {
41 } else if (r.IsInteger16()) {
43 } else if (r.IsUInteger16()) {
51 void MacroAssembler::Store(Register src,
52 const MemOperand& dst,
54 DCHECK(!r.IsDouble());
55 if (r.IsInteger8() || r.IsUInteger8()) {
57 } else if (r.IsInteger16() || r.IsUInteger16()) {
60 if (r.IsHeapObject()) {
62 } else if (r.IsSmi()) {
70 void MacroAssembler::LoadRoot(Register destination,
71 Heap::RootListIndex index) {
72 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
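// (s6 is kRootRegister and points at the roots array, so a root is read or
// written as a plain word at 'index * kPointerSize'; the conditional
// LoadRoot/StoreRoot variants below use the same addressing.)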
76 void MacroAssembler::LoadRoot(Register destination,
77 Heap::RootListIndex index,
79 Register src1, const Operand& src2) {
80 Branch(2, NegateCondition(cond), src1, src2);
81 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
85 void MacroAssembler::StoreRoot(Register source,
86 Heap::RootListIndex index) {
87 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
88 sw(source, MemOperand(s6, index << kPointerSizeLog2));
92 void MacroAssembler::StoreRoot(Register source,
93 Heap::RootListIndex index,
95 Register src1, const Operand& src2) {
96 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
97 Branch(2, NegateCondition(cond), src1, src2);
98 sw(source, MemOperand(s6, index << kPointerSizeLog2));
102 // Push and pop all registers that can hold pointers.
103 void MacroAssembler::PushSafepointRegisters() {
104 // Safepoints expect a block of kNumSafepointRegisters values on the
105 // stack, so adjust the stack for unsaved registers.
106 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
107 DCHECK(num_unsaved >= 0);
108 if (num_unsaved > 0) {
109 Subu(sp, sp, Operand(num_unsaved * kPointerSize));
111 MultiPush(kSafepointSavedRegisters);
115 void MacroAssembler::PopSafepointRegisters() {
116 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
117 MultiPop(kSafepointSavedRegisters);
118 if (num_unsaved > 0) {
119 Addu(sp, sp, Operand(num_unsaved * kPointerSize));
124 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
125 sw(src, SafepointRegisterSlot(dst));
129 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
130 lw(dst, SafepointRegisterSlot(src));
134 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
135 // The registers are pushed starting with the highest encoding,
136 // which means that lowest encodings are closest to the stack pointer.
137 return kSafepointRegisterStackIndexMap[reg_code];
141 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
142 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
146 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
147 UNIMPLEMENTED_MIPS();
148 // General purpose registers are pushed last on the stack.
149 int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
150 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
151 return MemOperand(sp, doubles_size + register_offset);
155 void MacroAssembler::InNewSpace(Register object,
159 DCHECK(cc == eq || cc == ne);
160 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
161 Branch(branch, cc, scratch,
162 Operand(ExternalReference::new_space_start(isolate())));
166 // Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
167 // The register 'object' contains a heap object pointer. The heap object
168 // tag is shifted away.
169 void MacroAssembler::RecordWriteField(
175 SaveFPRegsMode save_fp,
176 RememberedSetAction remembered_set_action,
178 PointersToHereCheck pointers_to_here_check_for_value) {
179 DCHECK(!AreAliased(value, dst, t8, object));
180 // First, check if a write barrier is even needed. The tests below
181 // catch stores of Smis.
184 // Skip barrier if writing a smi.
185 if (smi_check == INLINE_SMI_CHECK) {
186 JumpIfSmi(value, &done);
189 // Although the object register is tagged, the offset is relative to the start
190 // of the object, so the offset must be a multiple of kPointerSize.
191 DCHECK(IsAligned(offset, kPointerSize));
193 Addu(dst, object, Operand(offset - kHeapObjectTag));
194 if (emit_debug_code()) {
196 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
197 Branch(&ok, eq, t8, Operand(zero_reg));
198 stop("Unaligned cell in write barrier");
207 remembered_set_action,
209 pointers_to_here_check_for_value);
213 // Clobber the clobbered input registers when running with the debug-code flag
214 // turned on, to provoke errors.
215 if (emit_debug_code()) {
216 li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
217 li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
222 // Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
223 void MacroAssembler::RecordWriteForMap(Register object,
227 SaveFPRegsMode fp_mode) {
228 if (emit_debug_code()) {
230 lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
232 kWrongAddressOrValuePassedToRecordWrite,
234 Operand(isolate()->factory()->meta_map()));
237 if (!FLAG_incremental_marking) {
241 if (emit_debug_code()) {
242 lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
244 kWrongAddressOrValuePassedToRecordWrite,
251 // A single check of the map's page's interesting flag suffices, since it is
252 // only set during incremental collection, and then it's also guaranteed that
253 // the from object's page's interesting flag is also set. This optimization
254 // relies on the fact that maps can never be in new space.
256 map, // Used as scratch.
257 MemoryChunk::kPointersToHereAreInterestingMask,
261 Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
262 if (emit_debug_code()) {
264 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
265 Branch(&ok, eq, at, Operand(zero_reg));
266 stop("Unaligned cell in write barrier");
270 // Record the actual write.
271 if (ra_status == kRAHasNotBeenSaved) {
274 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
277 if (ra_status == kRAHasNotBeenSaved) {
283 // Count number of write barriers in generated code.
284 isolate()->counters()->write_barriers_static()->Increment();
285 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
287 // Clobber the clobbered registers when running with the debug-code flag
288 // turned on, to provoke errors.
289 if (emit_debug_code()) {
290 li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
291 li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
296 // Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
297 // The register 'object' contains a heap object pointer. The heap object
298 // tag is shifted away.
299 void MacroAssembler::RecordWrite(
304 SaveFPRegsMode fp_mode,
305 RememberedSetAction remembered_set_action,
307 PointersToHereCheck pointers_to_here_check_for_value) {
308 DCHECK(!AreAliased(object, address, value, t8));
309 DCHECK(!AreAliased(object, address, value, t9));
311 if (emit_debug_code()) {
312 lw(at, MemOperand(address));
314 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
317 if (remembered_set_action == OMIT_REMEMBERED_SET &&
318 !FLAG_incremental_marking) {
322 // First, check if a write barrier is even needed. The tests below
323 // catch stores of smis and stores into the young generation.
326 if (smi_check == INLINE_SMI_CHECK) {
327 DCHECK_EQ(0, kSmiTag);
328 JumpIfSmi(value, &done);
331 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
333 value, // Used as scratch.
334 MemoryChunk::kPointersToHereAreInterestingMask,
338 CheckPageFlag(object,
339 value, // Used as scratch.
340 MemoryChunk::kPointersFromHereAreInterestingMask,
344 // Record the actual write.
345 if (ra_status == kRAHasNotBeenSaved) {
348 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
351 if (ra_status == kRAHasNotBeenSaved) {
357 // Count number of write barriers in generated code.
358 isolate()->counters()->write_barriers_static()->Increment();
359 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
362 // Clobber the clobbered registers when running with the debug-code flag
363 // turned on, to provoke errors.
364 if (emit_debug_code()) {
365 li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
366 li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
371 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
374 SaveFPRegsMode fp_mode,
375 RememberedSetFinalAction and_then) {
377 if (emit_debug_code()) {
379 JumpIfNotInNewSpace(object, scratch, &ok);
380 stop("Remembered set pointer is in new space");
383 // Load store buffer top.
384 ExternalReference store_buffer =
385 ExternalReference::store_buffer_top(isolate());
386 li(t8, Operand(store_buffer));
387 lw(scratch, MemOperand(t8));
388 // Store pointer to buffer and increment buffer top.
389 sw(address, MemOperand(scratch));
390 Addu(scratch, scratch, kPointerSize);
391 // Write back new top of buffer.
392 sw(scratch, MemOperand(t8));
393 // Check whether the buffer top reached the end of the buffer; if so, the
394 // store buffer overflow stub is called below.
395 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
396 if (and_then == kFallThroughAtEnd) {
397 Branch(&done, eq, t8, Operand(zero_reg));
399 DCHECK(and_then == kReturnAtEnd);
400 Ret(eq, t8, Operand(zero_reg));
403 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
404 CallStub(&store_buffer_overflow);
407 if (and_then == kReturnAtEnd) {
413 // -----------------------------------------------------------------------------
414 // Allocation support.
417 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
422 DCHECK(!holder_reg.is(scratch));
423 DCHECK(!holder_reg.is(at));
424 DCHECK(!scratch.is(at));
426 // Load current lexical context from the stack frame.
427 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
428 // In debug mode, make sure the lexical context is set.
430 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
431 scratch, Operand(zero_reg));
434 // Load the native context of the current context.
436 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
437 lw(scratch, FieldMemOperand(scratch, offset));
438 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
440 // Check the context is a native context.
441 if (emit_debug_code()) {
442 push(holder_reg); // Temporarily save holder on the stack.
443 // Read the first word and compare to the native_context_map.
444 lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
445 LoadRoot(at, Heap::kNativeContextMapRootIndex);
446 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
447 holder_reg, Operand(at));
448 pop(holder_reg); // Restore holder.
451 // Check if both contexts are the same.
452 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
453 Branch(&same_contexts, eq, scratch, Operand(at));
455 // Check the context is a native context.
456 if (emit_debug_code()) {
457 push(holder_reg); // Temporarily save holder on the stack.
458 mov(holder_reg, at); // Move at to its holding place.
459 LoadRoot(at, Heap::kNullValueRootIndex);
460 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
461 holder_reg, Operand(at));
463 lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
464 LoadRoot(at, Heap::kNativeContextMapRootIndex);
465 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
466 holder_reg, Operand(at));
467 // Restoring at is not needed; at is reloaded below.
468 pop(holder_reg); // Restore holder.
469 // Restore at to holder's context.
470 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
473 // Check that the security token in the calling global object is
474 // compatible with the security token in the receiving global object.
476 int token_offset = Context::kHeaderSize +
477 Context::SECURITY_TOKEN_INDEX * kPointerSize;
479 lw(scratch, FieldMemOperand(scratch, token_offset));
480 lw(at, FieldMemOperand(at, token_offset));
481 Branch(miss, ne, scratch, Operand(at));
483 bind(&same_contexts);
487 // Compute the hash code from the untagged key. This must be kept in sync with
488 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
489 // code-stubs-hydrogen.cc
490 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
491 // First of all we assign the hash seed to scratch.
492 LoadRoot(scratch, Heap::kHashSeedRootIndex);
495 // Xor original key with a seed.
496 xor_(reg0, reg0, scratch);
498 // Compute the hash code from the untagged key. This must be kept in sync
499 // with ComputeIntegerHash in utils.h.
501 // hash = ~hash + (hash << 15);
502 nor(scratch, reg0, zero_reg);
504 addu(reg0, scratch, at);
506 // hash = hash ^ (hash >> 12);
508 xor_(reg0, reg0, at);
510 // hash = hash + (hash << 2);
512 addu(reg0, reg0, at);
514 // hash = hash ^ (hash >> 4);
516 xor_(reg0, reg0, at);
518 // hash = hash * 2057;
519 sll(scratch, reg0, 11);
521 addu(reg0, reg0, at);
522 addu(reg0, reg0, scratch);
524 // hash = hash ^ (hash >> 16);
526 xor_(reg0, reg0, at);
527 And(reg0, reg0, Operand(0x3fffffff));
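// For reference, the sequence above is the integer hash from ComputeIntegerHash
// in utils.h, i.e. (on uint32_t values):
//   hash ^= seed;
//   hash = ~hash + (hash << 15);
//   hash ^= hash >> 12;
//   hash += hash << 2;
//   hash ^= hash >> 4;
//   hash *= 2057;          // == hash + (hash << 3) + (hash << 11)
//   hash ^= hash >> 16;
//   hash &= 0x3fffffff;    // Keep 30 bits so the result fits in a Smi.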
531 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
540 // elements - holds the slow-case elements of the receiver on entry.
541 // Unchanged unless 'result' is the same register.
543 // key - holds the smi key on entry.
544 // Unchanged unless 'result' is the same register.
547 // result - holds the result on exit if the load succeeded.
548 // Allowed to be the same as 'key' or 'elements'.
549 // Unchanged on bailout so 'key' or 'result' can be used
550 // in further computation.
552 // Scratch registers:
554 // reg0 - holds the untagged key on entry and holds the hash once computed.
556 // reg1 - Used to hold the capacity mask of the dictionary.
558 // reg2 - Used for the index into the dictionary.
559 // at - Temporary (avoid MacroAssembler instructions also using 'at').
562 GetNumberHash(reg0, reg1);
564 // Compute the capacity mask.
565 lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
566 sra(reg1, reg1, kSmiTagSize);
567 Subu(reg1, reg1, Operand(1));
569 // Generate an unrolled loop that performs a few probes before giving up.
570 for (int i = 0; i < kNumberDictionaryProbes; i++) {
571 // Use reg2 for index calculations and keep the hash intact in reg0.
573 // Compute the masked index: (hash + i + i * i) & mask.
575 Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
577 and_(reg2, reg2, reg1);
579 // Scale the index by multiplying by the element size.
580 DCHECK(SeededNumberDictionary::kEntrySize == 3);
581 sll(at, reg2, 1); // 2x.
582 addu(reg2, reg2, at); // reg2 = reg2 * 3.
584 // Check if the key is identical to the name.
585 sll(at, reg2, kPointerSizeLog2);
586 addu(reg2, elements, at);
588 lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
589 if (i != kNumberDictionaryProbes - 1) {
590 Branch(&done, eq, key, Operand(at));
592 Branch(miss, ne, key, Operand(at));
597 // Check that the value is a field property.
598 // reg2: elements + (index * kPointerSize).
599 const int kDetailsOffset =
600 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
601 lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
603 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
604 Branch(miss, ne, at, Operand(zero_reg));
606 // Get the value at the masked, scaled index and return.
607 const int kValueOffset =
608 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
609 lw(result, FieldMemOperand(reg2, kValueOffset));
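// Each SeededNumberDictionary entry occupies kEntrySize (3) pointer-sized
// slots: the key at kElementsStartOffset, the value one word later
// (kValueOffset above) and the PropertyDetails two words later
// (kDetailsOffset above).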
613 // ---------------------------------------------------------------------------
614 // Instruction macros.
616 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
618 addu(rd, rs, rt.rm());
620 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
621 addiu(rd, rs, rt.imm32_);
623 // li handles the relocation.
632 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
634 subu(rd, rs, rt.rm());
636 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
637 addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
639 // li handles the relocation.
648 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
650 if (IsMipsArchVariant(kLoongson)) {
654 mul(rd, rs, rt.rm());
657 // li handles the relocation.
660 if (IsMipsArchVariant(kLoongson)) {
670 void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
671 Register rs, const Operand& rt) {
673 if (!IsMipsArchVariant(kMips32r6)) {
679 DCHECK(!rd_hi.is(rs));
680 DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
681 muh(rd_hi, rs, rt.rm());
682 mul(rd_lo, rs, rt.rm());
684 DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
685 mul(rd_lo, rs, rt.rm());
686 muh(rd_hi, rs, rt.rm());
690 // li handles the relocation.
693 if (!IsMipsArchVariant(kMips32r6)) {
699 DCHECK(!rd_hi.is(rs));
700 DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
704 DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
713 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
715 if (!IsMipsArchVariant(kMips32r6)) {
719 muh(rd, rs, rt.rm());
722 // li handles the relocation.
725 if (!IsMipsArchVariant(kMips32r6)) {
735 void MacroAssembler::Mult(Register rs, const Operand& rt) {
739 // li handles the relocation.
747 void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
749 if (!IsMipsArchVariant(kMips32r6)) {
753 muhu(rd, rs, rt.rm());
756 // li handles the relocation.
759 if (!IsMipsArchVariant(kMips32r6)) {
769 void MacroAssembler::Multu(Register rs, const Operand& rt) {
773 // li handles the relocation.
781 void MacroAssembler::Div(Register rs, const Operand& rt) {
785 // li handles the relocation.
793 void MacroAssembler::Div(Register rem, Register res,
794 Register rs, const Operand& rt) {
796 if (!IsMipsArchVariant(kMips32r6)) {
801 div(res, rs, rt.rm());
802 mod(rem, rs, rt.rm());
805 // li handles the relocation.
808 if (!IsMipsArchVariant(kMips32r6)) {
820 void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
822 if (!IsMipsArchVariant(kMips32r6)) {
826 div(res, rs, rt.rm());
829 // li handles the relocation.
832 if (!IsMipsArchVariant(kMips32r6)) {
842 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
844 if (!IsMipsArchVariant(kMips32r6)) {
848 mod(rd, rs, rt.rm());
851 // li handles the relocation.
854 if (!IsMipsArchVariant(kMips32r6)) {
864 void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
866 if (!IsMipsArchVariant(kMips32r6)) {
870 modu(rd, rs, rt.rm());
873 // li handles the relocation.
876 if (!IsMipsArchVariant(kMips32r6)) {
886 void MacroAssembler::Divu(Register rs, const Operand& rt) {
890 // li handles the relocation.
898 void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
900 if (!IsMipsArchVariant(kMips32r6)) {
904 divu(res, rs, rt.rm());
907 // li handles the relocation.
910 if (!IsMipsArchVariant(kMips32r6)) {
920 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
922 and_(rd, rs, rt.rm());
924 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
925 andi(rd, rs, rt.imm32_);
927 // li handles the relocation.
936 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
938 or_(rd, rs, rt.rm());
940 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
941 ori(rd, rs, rt.imm32_);
943 // li handles the relocation.
952 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
954 xor_(rd, rs, rt.rm());
956 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
957 xori(rd, rs, rt.imm32_);
959 // li handles the relocation.
968 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
970 nor(rd, rs, rt.rm());
972 // li handles the relocation.
980 void MacroAssembler::Neg(Register rs, const Operand& rt) {
983 DCHECK(!at.is(rt.rm()));
985 xor_(rs, rt.rm(), at);
989 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
991 slt(rd, rs, rt.rm());
993 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
994 slti(rd, rs, rt.imm32_);
996 // li handles the relocation.
1005 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1007 sltu(rd, rs, rt.rm());
1009 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
1010 sltiu(rd, rs, rt.imm32_);
1012 // li handles the relocation.
1021 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1022 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1024 rotrv(rd, rs, rt.rm());
1026 rotr(rd, rs, rt.imm32_);
1030 subu(at, zero_reg, rt.rm());
1032 srlv(rd, rs, rt.rm());
1035 if (rt.imm32_ == 0) {
1038 srl(at, rs, rt.imm32_);
1039 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
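// This synthesizes the rotate: ror(rs, n) == (rs >> n) | (rs << (32 - n))
// for 0 < n < 32; n == 0 is special-cased above because a shift amount of 32
// is not encodable.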
1047 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1048 if (IsMipsArchVariant(kLoongson)) {
1056 // ------------Pseudo-instructions-------------
1058 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1060 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1064 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1066 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1070 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1071 AllowDeferredHandleDereference smi_check;
1072 if (value->IsSmi()) {
1073 li(dst, Operand(value), mode);
1075 DCHECK(value->IsHeapObject());
1076 if (isolate()->heap()->InNewSpace(*value)) {
1077 Handle<Cell> cell = isolate()->factory()->NewCell(value);
1078 li(dst, Operand(cell));
1079 lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
1081 li(dst, Operand(value));
1087 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1088 DCHECK(!j.is_reg());
1089 BlockTrampolinePoolScope block_trampoline_pool(this);
1090 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1091 // Normal load of an immediate value which does not need Relocation Info.
1092 if (is_int16(j.imm32_)) {
1093 addiu(rd, zero_reg, j.imm32_);
1094 } else if (!(j.imm32_ & kHiMask)) {
1095 ori(rd, zero_reg, j.imm32_);
1096 } else if (!(j.imm32_ & kImm16Mask)) {
1097 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1099 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1100 ori(rd, rd, (j.imm32_ & kImm16Mask));
1103 if (MustUseReg(j.rmode_)) {
1104 RecordRelocInfo(j.rmode_, j.imm32_);
1106 // Always emit the same number of instructions (2), as this code may later be
1107 // patched to load a different value that needs both instructions.
1108 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1109 ori(rd, rd, (j.imm32_ & kImm16Mask));
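// For example, li(t1, 0x12345678) on this path always emits the fixed pair
//   lui   t1, 0x1234
//   ori   t1, t1, 0x5678
// so the load can later be patched with any other 32-bit value.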
1114 void MacroAssembler::MultiPush(RegList regs) {
1115 int16_t num_to_push = NumberOfBitsSet(regs);
1116 int16_t stack_offset = num_to_push * kPointerSize;
1118 Subu(sp, sp, Operand(stack_offset));
1119 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1120 if ((regs & (1 << i)) != 0) {
1121 stack_offset -= kPointerSize;
1122 sw(ToRegister(i), MemOperand(sp, stack_offset));
1128 void MacroAssembler::MultiPushReversed(RegList regs) {
1129 int16_t num_to_push = NumberOfBitsSet(regs);
1130 int16_t stack_offset = num_to_push * kPointerSize;
1132 Subu(sp, sp, Operand(stack_offset));
1133 for (int16_t i = 0; i < kNumRegisters; i++) {
1134 if ((regs & (1 << i)) != 0) {
1135 stack_offset -= kPointerSize;
1136 sw(ToRegister(i), MemOperand(sp, stack_offset));
1142 void MacroAssembler::MultiPop(RegList regs) {
1143 int16_t stack_offset = 0;
1145 for (int16_t i = 0; i < kNumRegisters; i++) {
1146 if ((regs & (1 << i)) != 0) {
1147 lw(ToRegister(i), MemOperand(sp, stack_offset));
1148 stack_offset += kPointerSize;
1151 addiu(sp, sp, stack_offset);
1155 void MacroAssembler::MultiPopReversed(RegList regs) {
1156 int16_t stack_offset = 0;
1158 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1159 if ((regs & (1 << i)) != 0) {
1160 lw(ToRegister(i), MemOperand(sp, stack_offset));
1161 stack_offset += kPointerSize;
1164 addiu(sp, sp, stack_offset);
1168 void MacroAssembler::MultiPushFPU(RegList regs) {
1169 int16_t num_to_push = NumberOfBitsSet(regs);
1170 int16_t stack_offset = num_to_push * kDoubleSize;
1172 Subu(sp, sp, Operand(stack_offset));
1173 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1174 if ((regs & (1 << i)) != 0) {
1175 stack_offset -= kDoubleSize;
1176 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1182 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1183 int16_t num_to_push = NumberOfBitsSet(regs);
1184 int16_t stack_offset = num_to_push * kDoubleSize;
1186 Subu(sp, sp, Operand(stack_offset));
1187 for (int16_t i = 0; i < kNumRegisters; i++) {
1188 if ((regs & (1 << i)) != 0) {
1189 stack_offset -= kDoubleSize;
1190 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1196 void MacroAssembler::MultiPopFPU(RegList regs) {
1197 int16_t stack_offset = 0;
1199 for (int16_t i = 0; i < kNumRegisters; i++) {
1200 if ((regs & (1 << i)) != 0) {
1201 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1202 stack_offset += kDoubleSize;
1205 addiu(sp, sp, stack_offset);
1209 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1210 int16_t stack_offset = 0;
1212 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1213 if ((regs & (1 << i)) != 0) {
1214 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1215 stack_offset += kDoubleSize;
1218 addiu(sp, sp, stack_offset);
1222 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
1223 RegList saved_regs = kJSCallerSaved | ra.bit();
1224 MultiPush(saved_regs);
1225 AllowExternalCallThatCantCauseGC scope(this);
1227 // Save to a0 in case address == t0.
1229 PrepareCallCFunction(2, t0);
1231 li(a1, instructions * kInstrSize);
1232 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
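// The flush_icache_function call takes two arguments: the start address in a0
// (set up above) and the length in bytes in a1 (instructions * kInstrSize).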
1233 MultiPop(saved_regs);
1237 void MacroAssembler::Ext(Register rt,
1242 DCHECK(pos + size < 33);
1244 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1245 ext_(rt, rs, pos, size);
1247 // Move rs to rt and shift it left then right to get the
1248 // desired bitfield on the right side and zeroes on the left.
1249 int shift_left = 32 - (pos + size);
1250 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
1252 int shift_right = 32 - size;
1253 if (shift_right > 0) {
1254 srl(rt, rt, shift_right);
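// Net effect: rt = (rs >> pos) & ((1 << size) - 1), the same field that the
// ext_ instruction extracts on r2/r6.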
1260 void MacroAssembler::Ins(Register rt,
1265 DCHECK(pos + size <= 32);
1268 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1269 ins_(rt, rs, pos, size);
1271 DCHECK(!rt.is(t8) && !rs.is(t8));
1272 Subu(at, zero_reg, Operand(1));
1273 srl(at, at, 32 - size);
1277 nor(at, at, zero_reg);
1284 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1286 FPURegister scratch) {
1287 // Move the data from fs to t8.
1289 Cvt_d_uw(fd, t8, scratch);
1293 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1295 FPURegister scratch) {
1296 // Convert rs to a FP value in fd (and fd + 1).
1297 // We do this by converting rs with its MSB cleared (to avoid a signed
1298 // conversion), then adding 2^31 back to the result if the MSB was set.
1300 DCHECK(!fd.is(scratch));
1304 // Save rs's MSB to t9.
1308 // Move the result to fd.
1311 // Convert fd to a real FP value.
1314 Label conversion_done;
1316 // If rs's MSB was 0, it's done.
1317 // Otherwise we need to add that to the FP register.
1318 Branch(&conversion_done, eq, t9, Operand(zero_reg));
1320 // Load 2^31 into scratch as its floating-point representation.
1322 mtc1(zero_reg, scratch);
1325 add_d(fd, fd, scratch);
1327 bind(&conversion_done);
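// Sketch of the math (not generated code): for rs == 0x80000003 the low 31
// bits give 3.0, and because the MSB was set 2^31 (2147483648.0) is added,
// yielding 2147483651.0, the unsigned value of rs.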
1331 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1333 FPURegister scratch) {
1334 Trunc_uw_d(fs, t8, scratch);
1339 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1340 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1350 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1351 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1361 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1362 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1372 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1373 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1383 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1385 FPURegister scratch) {
1386 DCHECK(!fd.is(scratch));
1389 // Load 2^31 into scratch as its floating-point representation.
1391 mtc1(zero_reg, scratch);
1393 // Test if scratch > fd.
1394 // If fd < 2^31 we can convert it normally.
1395 Label simple_convert;
1396 BranchF(&simple_convert, NULL, lt, fd, scratch);
1398 // First we subtract 2^31 from fd, then trunc it to rs
1399 // and add 2^31 to rs.
1400 sub_d(scratch, fd, scratch);
1401 trunc_w_d(scratch, scratch);
1403 Or(rs, rs, 1 << 31);
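// E.g. (math only): an input of 2147483651.0 becomes 3.0 after subtracting
// 2^31, truncates to 3, and setting bit 31 of rs restores 0x80000003.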
1407 // Simple conversion.
1408 bind(&simple_convert);
1409 trunc_w_d(scratch, fd);
1416 void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
1420 mtc1(rt, fs.high());
1425 void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
1429 mfc1(rt, fs.high());
1434 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
1435 Label* nan, Condition cond, FPURegister cmp1,
1436 FPURegister cmp2, BranchDelaySlot bd) {
1438 BlockTrampolinePoolScope block_trampoline_pool(this);
1444 if (IsMipsArchVariant(kMips32r6)) {
1445 sizeField = sizeField == D ? L : W;
1447 DCHECK(nan || target);
1448 // Check for unordered (NaN) cases.
1451 nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
1452 if (!IsMipsArchVariant(kMips32r6)) {
1455 c(UN, D, cmp1, cmp2);
1461 c(UN, D, cmp1, cmp2);
1463 if (bd == PROTECT) {
1468 // Use kDoubleCompareReg for comparison result. It has to be unavailable
1469 // to the Lithium register allocator.
1470 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1473 cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
1474 bc1eqz(&skip, kDoubleCompareReg);
1479 cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
1480 bc1nez(nan, kDoubleCompareReg);
1481 if (bd == PROTECT) {
1490 target->is_bound() ? is_near(target) : is_trampoline_emitted();
1493 Condition neg_cond = NegateFpuCondition(cond);
1494 BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
1498 BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
1504 void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
1505 Condition cc, FPURegister cmp1,
1506 FPURegister cmp2, BranchDelaySlot bd) {
1507 if (!IsMipsArchVariant(kMips32r6)) {
1508 BlockTrampolinePoolScope block_trampoline_pool(this);
1510 // Here NaN cases were either handled by this function or are assumed to
1511 // have been handled by the caller.
1514 c(OLT, sizeField, cmp1, cmp2);
1518 c(ULT, sizeField, cmp1, cmp2);
1522 c(ULE, sizeField, cmp1, cmp2);
1526 c(OLE, sizeField, cmp1, cmp2);
1530 c(ULT, sizeField, cmp1, cmp2);
1534 c(OLT, sizeField, cmp1, cmp2);
1538 c(OLE, sizeField, cmp1, cmp2);
1542 c(ULE, sizeField, cmp1, cmp2);
1546 c(EQ, sizeField, cmp1, cmp2);
1550 c(UEQ, sizeField, cmp1, cmp2);
1553 case ne: // Unordered or not equal.
1554 c(EQ, sizeField, cmp1, cmp2);
1558 c(UEQ, sizeField, cmp1, cmp2);
1566 BlockTrampolinePoolScope block_trampoline_pool(this);
1568 // Here NaN cases were either handled by this function or are assumed to
1569 // have been handled by the caller.
1570 // Unsigned conditions are treated as their signed counterpart.
1571 // Use kDoubleCompareReg for the comparison result; it is
1572 // valid in fp64 (FR = 1) mode, which is implied for mips32r6.
1573 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1576 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1577 bc1nez(target, kDoubleCompareReg);
1580 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1581 bc1nez(target, kDoubleCompareReg);
1584 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1585 bc1eqz(target, kDoubleCompareReg);
1588 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1589 bc1eqz(target, kDoubleCompareReg);
1592 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1593 bc1eqz(target, kDoubleCompareReg);
1596 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1597 bc1eqz(target, kDoubleCompareReg);
1600 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1601 bc1nez(target, kDoubleCompareReg);
1604 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1605 bc1nez(target, kDoubleCompareReg);
1608 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1609 bc1nez(target, kDoubleCompareReg);
1612 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1613 bc1nez(target, kDoubleCompareReg);
1616 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1617 bc1eqz(target, kDoubleCompareReg);
1620 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1621 bc1eqz(target, kDoubleCompareReg);
1628 if (bd == PROTECT) {
1634 void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
1636 DCHECK(!src_low.is(at));
1646 void MacroAssembler::Move(FPURegister dst, float imm) {
1647 li(at, Operand(bit_cast<int32_t>(imm)));
1652 void MacroAssembler::Move(FPURegister dst, double imm) {
1653 static const DoubleRepresentation minus_zero(-0.0);
1654 static const DoubleRepresentation zero(0.0);
1655 DoubleRepresentation value_rep(imm);
1656 // Handle special values first.
1657 if (value_rep == zero && has_double_zero_reg_set_) {
1658 mov_d(dst, kDoubleRegZero);
1659 } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
1660 neg_d(dst, kDoubleRegZero);
1663 DoubleAsTwoUInt32(imm, &lo, &hi);
1664 // Move the low part of the double into the lower register of the
1665 // corresponding FPU register pair.
1667 li(at, Operand(lo));
1670 mtc1(zero_reg, dst);
1672 // Move the high part of the double into the higher register of the
1673 // corresponding FPU register pair.
1675 li(at, Operand(hi));
1678 Mthc1(zero_reg, dst);
1680 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
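// Illustration (not generated code): Move(f2, 1.0) splits the IEEE-754 bits
// 0x3FF0000000000000 into lo == 0 and hi == 0x3FF00000, so the low word is
// written with mtc1(zero_reg, f2) and the high word via li(at, 0x3FF00000)
// followed by Mthc1(at, f2).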
1685 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1686 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
1688 Branch(&done, ne, rt, Operand(zero_reg));
1697 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1698 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
1700 Branch(&done, eq, rt, Operand(zero_reg));
1709 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1710 if (IsMipsArchVariant(kLoongson)) {
1711 // Tests an FP condition code and then conditionally moves rs to rd.
1712 // We do not currently use any FPU cc bit other than bit 0.
1714 DCHECK(!(rs.is(t8) || rd.is(t8)));
1716 Register scratch = t8;
1717 // To test the condition we need to fetch the contents of the FCSR register
1718 // and then test its cc (floating point condition code) bit (for cc = 0, this
1719 // is bit 23 of the FCSR).
1720 cfc1(scratch, FCSR);
1721 // For the MIPS I, II and III architectures, the contents of scratch are
1722 // UNPREDICTABLE for the instruction immediately following CFC1.
1724 srl(scratch, scratch, 16);
1725 andi(scratch, scratch, 0x0080);
1726 Branch(&done, eq, scratch, Operand(zero_reg));
1735 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1736 if (IsMipsArchVariant(kLoongson)) {
1737 // Tests an FP condition code and then conditionally moves rs to rd.
1738 // We do not currently use any FPU cc bit other than bit 0.
1740 DCHECK(!(rs.is(t8) || rd.is(t8)));
1742 Register scratch = t8;
1743 // To test the condition we need to fetch the contents of the FCSR register
1744 // and then test its cc (floating point condition code) bit (for cc = 0, this
1745 // is bit 23 of the FCSR).
1746 cfc1(scratch, FCSR);
1747 // For the MIPS I, II and III architectures, the contents of scratch are
1748 // UNPREDICTABLE for the instruction immediately following CFC1.
1750 srl(scratch, scratch, 16);
1751 andi(scratch, scratch, 0x0080);
1752 Branch(&done, ne, scratch, Operand(zero_reg));
1761 void MacroAssembler::Clz(Register rd, Register rs) {
1762 if (IsMipsArchVariant(kLoongson)) {
1763 DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1765 Register scratch = t9;
1771 and_(scratch, at, mask);
1772 Branch(&end, ne, scratch, Operand(zero_reg));
1774 Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1783 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1785 DoubleRegister double_input,
1787 DoubleRegister double_scratch,
1788 Register except_flag,
1789 CheckForInexactConversion check_inexact) {
1790 DCHECK(!result.is(scratch));
1791 DCHECK(!double_input.is(double_scratch));
1792 DCHECK(!except_flag.is(scratch));
1796 // Clear the except flag (0 = no exception)
1797 mov(except_flag, zero_reg);
1799 // Test for values that can be exactly represented as a signed 32-bit integer.
1800 cvt_w_d(double_scratch, double_input);
1801 mfc1(result, double_scratch);
1802 cvt_d_w(double_scratch, double_scratch);
1803 BranchF(&done, NULL, eq, double_input, double_scratch);
1805 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1807 if (check_inexact == kDontCheckForInexactConversion) {
1808 // Ignore inexact exceptions.
1809 except_mask &= ~kFCSRInexactFlagMask;
1813 cfc1(scratch, FCSR);
1814 // Disable FPU exceptions.
1815 ctc1(zero_reg, FCSR);
1817 // Do operation based on rounding mode.
1818 switch (rounding_mode) {
1819 case kRoundToNearest:
1820 Round_w_d(double_scratch, double_input);
1823 Trunc_w_d(double_scratch, double_input);
1825 case kRoundToPlusInf:
1826 Ceil_w_d(double_scratch, double_input);
1828 case kRoundToMinusInf:
1829 Floor_w_d(double_scratch, double_input);
1831 } // End of switch-statement.
1834 cfc1(except_flag, FCSR);
1836 ctc1(scratch, FCSR);
1837 // Move the converted value into the result register.
1838 mfc1(result, double_scratch);
1840 // Check for fpu exceptions.
1841 And(except_flag, except_flag, Operand(except_mask));
1847 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1848 DoubleRegister double_input,
1850 DoubleRegister single_scratch = kLithiumScratchDouble.low();
1851 Register scratch = at;
1852 Register scratch2 = t9;
1854 // Clear cumulative exception flags and save the FCSR.
1855 cfc1(scratch2, FCSR);
1856 ctc1(zero_reg, FCSR);
1857 // Try a conversion to a signed integer.
1858 trunc_w_d(single_scratch, double_input);
1859 mfc1(result, single_scratch);
1860 // Retrieve and restore the FCSR.
1861 cfc1(scratch, FCSR);
1862 ctc1(scratch2, FCSR);
1863 // Check for overflow and NaNs.
1866 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1867 // If we had no exceptions we are done.
1868 Branch(done, eq, scratch, Operand(zero_reg));
1872 void MacroAssembler::TruncateDoubleToI(Register result,
1873 DoubleRegister double_input) {
1876 TryInlineTruncateDoubleToI(result, double_input, &done);
1878 // If we fell through, the inline version didn't succeed - call the stub instead.
1880 Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
1881 sdc1(double_input, MemOperand(sp, 0));
1883 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
1886 Addu(sp, sp, Operand(kDoubleSize));
1893 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1895 DoubleRegister double_scratch = f12;
1896 DCHECK(!result.is(object));
1898 ldc1(double_scratch,
1899 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1900 TryInlineTruncateDoubleToI(result, double_scratch, &done);
1902 // If we fell through, the inline version didn't succeed - call the stub instead.
1904 DoubleToIStub stub(isolate(),
1907 HeapNumber::kValueOffset - kHeapObjectTag,
1917 void MacroAssembler::TruncateNumberToI(Register object,
1919 Register heap_number_map,
1921 Label* not_number) {
1923 DCHECK(!result.is(object));
1925 UntagAndJumpIfSmi(result, object, &done);
1926 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1927 TruncateHeapNumberToI(result, object);
1933 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1935 int num_least_bits) {
1936 Ext(dst, src, kSmiTagSize, num_least_bits);
1940 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1942 int num_least_bits) {
1943 And(dst, src, Operand((1 << num_least_bits) - 1));
1947 // Emulated conditional branches do not emit a nop in the branch delay slot.
1949 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1950 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
1951 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1952 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
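// That is: an unconditional branch must be written with cc_always, zero_reg
// and Operand(zero_reg), while any real condition must compare at least one
// operand that is not zero_reg.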
1955 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1956 BranchShort(offset, bdslot);
1960 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1962 BranchDelaySlot bdslot) {
1963 BranchShort(offset, cond, rs, rt, bdslot);
1967 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1968 if (L->is_bound()) {
1970 BranchShort(L, bdslot);
1975 if (is_trampoline_emitted()) {
1978 BranchShort(L, bdslot);
1984 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1986 BranchDelaySlot bdslot) {
1987 if (L->is_bound()) {
1989 BranchShort(L, cond, rs, rt, bdslot);
1991 if (cond != cc_always) {
1993 Condition neg_cond = NegateCondition(cond);
1994 BranchShort(&skip, neg_cond, rs, rt);
2002 if (is_trampoline_emitted()) {
2003 if (cond != cc_always) {
2005 Condition neg_cond = NegateCondition(cond);
2006 BranchShort(&skip, neg_cond, rs, rt);
2013 BranchShort(L, cond, rs, rt, bdslot);
2019 void MacroAssembler::Branch(Label* L,
2022 Heap::RootListIndex index,
2023 BranchDelaySlot bdslot) {
2024 LoadRoot(at, index);
2025 Branch(L, cond, rs, Operand(at), bdslot);
2029 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
2032 // Emit a nop in the branch delay slot if required.
2033 if (bdslot == PROTECT)
2038 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
2040 BranchDelaySlot bdslot) {
2041 BRANCH_ARGS_CHECK(cond, rs, rt);
2042 DCHECK(!rs.is(zero_reg));
2043 Register r2 = no_reg;
2044 Register scratch = at;
2047 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or rt.
2049 BlockTrampolinePoolScope block_trampoline_pool(this);
2056 beq(rs, r2, offset);
2059 bne(rs, r2, offset);
2061 // Signed comparison.
2063 if (r2.is(zero_reg)) {
2066 slt(scratch, r2, rs);
2067 bne(scratch, zero_reg, offset);
2071 if (r2.is(zero_reg)) {
2074 slt(scratch, rs, r2);
2075 beq(scratch, zero_reg, offset);
2079 if (r2.is(zero_reg)) {
2082 slt(scratch, rs, r2);
2083 bne(scratch, zero_reg, offset);
2087 if (r2.is(zero_reg)) {
2090 slt(scratch, r2, rs);
2091 beq(scratch, zero_reg, offset);
2094 // Unsigned comparison.
2096 if (r2.is(zero_reg)) {
2097 bne(rs, zero_reg, offset);
2099 sltu(scratch, r2, rs);
2100 bne(scratch, zero_reg, offset);
2103 case Ugreater_equal:
2104 if (r2.is(zero_reg)) {
2107 sltu(scratch, rs, r2);
2108 beq(scratch, zero_reg, offset);
2112 if (r2.is(zero_reg)) {
2113 // No code needs to be emitted.
2116 sltu(scratch, rs, r2);
2117 bne(scratch, zero_reg, offset);
2121 if (r2.is(zero_reg)) {
2122 beq(rs, zero_reg, offset);
2124 sltu(scratch, r2, rs);
2125 beq(scratch, zero_reg, offset);
2132 // Be careful to always use shifted_branch_offset only just before the
2133 // branch instruction, as the location will be remembered for patching the target.
2135 BlockTrampolinePoolScope block_trampoline_pool(this);
2141 if (rt.imm32_ == 0) {
2142 beq(rs, zero_reg, offset);
2144 // We don't want any other register but scratch clobbered.
2145 DCHECK(!scratch.is(rs));
2148 beq(rs, r2, offset);
2152 if (rt.imm32_ == 0) {
2153 bne(rs, zero_reg, offset);
2155 // We don't want any other register but scratch clobbered.
2156 DCHECK(!scratch.is(rs));
2159 bne(rs, r2, offset);
2162 // Signed comparison.
2164 if (rt.imm32_ == 0) {
2169 slt(scratch, r2, rs);
2170 bne(scratch, zero_reg, offset);
2174 if (rt.imm32_ == 0) {
2176 } else if (is_int16(rt.imm32_)) {
2177 slti(scratch, rs, rt.imm32_);
2178 beq(scratch, zero_reg, offset);
2182 slt(scratch, rs, r2);
2183 beq(scratch, zero_reg, offset);
2187 if (rt.imm32_ == 0) {
2189 } else if (is_int16(rt.imm32_)) {
2190 slti(scratch, rs, rt.imm32_);
2191 bne(scratch, zero_reg, offset);
2195 slt(scratch, rs, r2);
2196 bne(scratch, zero_reg, offset);
2200 if (rt.imm32_ == 0) {
2205 slt(scratch, r2, rs);
2206 beq(scratch, zero_reg, offset);
2209 // Unsigned comparison.
2211 if (rt.imm32_ == 0) {
2212 bne(rs, zero_reg, offset);
2216 sltu(scratch, r2, rs);
2217 bne(scratch, zero_reg, offset);
2220 case Ugreater_equal:
2221 if (rt.imm32_ == 0) {
2223 } else if (is_int16(rt.imm32_)) {
2224 sltiu(scratch, rs, rt.imm32_);
2225 beq(scratch, zero_reg, offset);
2229 sltu(scratch, rs, r2);
2230 beq(scratch, zero_reg, offset);
2234 if (rt.imm32_ == 0) {
2235 // No code needs to be emitted.
2237 } else if (is_int16(rt.imm32_)) {
2238 sltiu(scratch, rs, rt.imm32_);
2239 bne(scratch, zero_reg, offset);
2243 sltu(scratch, rs, r2);
2244 bne(scratch, zero_reg, offset);
2248 if (rt.imm32_ == 0) {
2249 beq(rs, zero_reg, offset);
2253 sltu(scratch, r2, rs);
2254 beq(scratch, zero_reg, offset);
2261 // Emit a nop in the branch delay slot if required.
2262 if (bdslot == PROTECT)
2267 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2268 // We use branch_offset as an argument to the branch instructions so that it is
2269 // evaluated just before the branch instruction is generated, as required.
2271 b(shifted_branch_offset(L, false));
2273 // Emit a nop in the branch delay slot if required.
2274 if (bdslot == PROTECT)
2279 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
2281 BranchDelaySlot bdslot) {
2282 BRANCH_ARGS_CHECK(cond, rs, rt);
2285 Register r2 = no_reg;
2286 Register scratch = at;
2288 BlockTrampolinePoolScope block_trampoline_pool(this);
2290 // Be careful to always use shifted_branch_offset only just before the
2291 // branch instruction, as the location will be remembered for patching the target.
2295 offset = shifted_branch_offset(L, false);
2299 offset = shifted_branch_offset(L, false);
2300 beq(rs, r2, offset);
2303 offset = shifted_branch_offset(L, false);
2304 bne(rs, r2, offset);
2306 // Signed comparison.
2308 if (r2.is(zero_reg)) {
2309 offset = shifted_branch_offset(L, false);
2312 slt(scratch, r2, rs);
2313 offset = shifted_branch_offset(L, false);
2314 bne(scratch, zero_reg, offset);
2318 if (r2.is(zero_reg)) {
2319 offset = shifted_branch_offset(L, false);
2322 slt(scratch, rs, r2);
2323 offset = shifted_branch_offset(L, false);
2324 beq(scratch, zero_reg, offset);
2328 if (r2.is(zero_reg)) {
2329 offset = shifted_branch_offset(L, false);
2332 slt(scratch, rs, r2);
2333 offset = shifted_branch_offset(L, false);
2334 bne(scratch, zero_reg, offset);
2338 if (r2.is(zero_reg)) {
2339 offset = shifted_branch_offset(L, false);
2342 slt(scratch, r2, rs);
2343 offset = shifted_branch_offset(L, false);
2344 beq(scratch, zero_reg, offset);
2347 // Unsigned comparison.
2349 if (r2.is(zero_reg)) {
2350 offset = shifted_branch_offset(L, false);
2351 bne(rs, zero_reg, offset);
2353 sltu(scratch, r2, rs);
2354 offset = shifted_branch_offset(L, false);
2355 bne(scratch, zero_reg, offset);
2358 case Ugreater_equal:
2359 if (r2.is(zero_reg)) {
2360 offset = shifted_branch_offset(L, false);
2363 sltu(scratch, rs, r2);
2364 offset = shifted_branch_offset(L, false);
2365 beq(scratch, zero_reg, offset);
2369 if (r2.is(zero_reg)) {
2370 // No code needs to be emitted.
2373 sltu(scratch, rs, r2);
2374 offset = shifted_branch_offset(L, false);
2375 bne(scratch, zero_reg, offset);
2379 if (r2.is(zero_reg)) {
2380 offset = shifted_branch_offset(L, false);
2381 beq(rs, zero_reg, offset);
2383 sltu(scratch, r2, rs);
2384 offset = shifted_branch_offset(L, false);
2385 beq(scratch, zero_reg, offset);
2392 // Be careful to always use shifted_branch_offset only just before the
2393 // branch instruction, as the location will be remembered for patching the target.
2395 BlockTrampolinePoolScope block_trampoline_pool(this);
2398 offset = shifted_branch_offset(L, false);
2402 if (rt.imm32_ == 0) {
2403 offset = shifted_branch_offset(L, false);
2404 beq(rs, zero_reg, offset);
2406 DCHECK(!scratch.is(rs));
2409 offset = shifted_branch_offset(L, false);
2410 beq(rs, r2, offset);
2414 if (rt.imm32_ == 0) {
2415 offset = shifted_branch_offset(L, false);
2416 bne(rs, zero_reg, offset);
2418 DCHECK(!scratch.is(rs));
2421 offset = shifted_branch_offset(L, false);
2422 bne(rs, r2, offset);
2425 // Signed comparison.
2427 if (rt.imm32_ == 0) {
2428 offset = shifted_branch_offset(L, false);
2431 DCHECK(!scratch.is(rs));
2434 slt(scratch, r2, rs);
2435 offset = shifted_branch_offset(L, false);
2436 bne(scratch, zero_reg, offset);
2440 if (rt.imm32_ == 0) {
2441 offset = shifted_branch_offset(L, false);
2443 } else if (is_int16(rt.imm32_)) {
2444 slti(scratch, rs, rt.imm32_);
2445 offset = shifted_branch_offset(L, false);
2446 beq(scratch, zero_reg, offset);
2448 DCHECK(!scratch.is(rs));
2451 slt(scratch, rs, r2);
2452 offset = shifted_branch_offset(L, false);
2453 beq(scratch, zero_reg, offset);
2457 if (rt.imm32_ == 0) {
2458 offset = shifted_branch_offset(L, false);
2460 } else if (is_int16(rt.imm32_)) {
2461 slti(scratch, rs, rt.imm32_);
2462 offset = shifted_branch_offset(L, false);
2463 bne(scratch, zero_reg, offset);
2465 DCHECK(!scratch.is(rs));
2468 slt(scratch, rs, r2);
2469 offset = shifted_branch_offset(L, false);
2470 bne(scratch, zero_reg, offset);
2474 if (rt.imm32_ == 0) {
2475 offset = shifted_branch_offset(L, false);
2478 DCHECK(!scratch.is(rs));
2481 slt(scratch, r2, rs);
2482 offset = shifted_branch_offset(L, false);
2483 beq(scratch, zero_reg, offset);
2486 // Unsigned comparison.
2488 if (rt.imm32_ == 0) {
2489 offset = shifted_branch_offset(L, false);
2490 bne(rs, zero_reg, offset);
2492 DCHECK(!scratch.is(rs));
2495 sltu(scratch, r2, rs);
2496 offset = shifted_branch_offset(L, false);
2497 bne(scratch, zero_reg, offset);
2500 case Ugreater_equal:
2501 if (rt.imm32_ == 0) {
2502 offset = shifted_branch_offset(L, false);
2504 } else if (is_int16(rt.imm32_)) {
2505 sltiu(scratch, rs, rt.imm32_);
2506 offset = shifted_branch_offset(L, false);
2507 beq(scratch, zero_reg, offset);
2509 DCHECK(!scratch.is(rs));
2512 sltu(scratch, rs, r2);
2513 offset = shifted_branch_offset(L, false);
2514 beq(scratch, zero_reg, offset);
2518 if (rt.imm32_ == 0) {
2519 // No code needs to be emitted.
2521 } else if (is_int16(rt.imm32_)) {
2522 sltiu(scratch, rs, rt.imm32_);
2523 offset = shifted_branch_offset(L, false);
2524 bne(scratch, zero_reg, offset);
2526 DCHECK(!scratch.is(rs));
2529 sltu(scratch, rs, r2);
2530 offset = shifted_branch_offset(L, false);
2531 bne(scratch, zero_reg, offset);
2535 if (rt.imm32_ == 0) {
2536 offset = shifted_branch_offset(L, false);
2537 beq(rs, zero_reg, offset);
2539 DCHECK(!scratch.is(rs));
2542 sltu(scratch, r2, rs);
2543 offset = shifted_branch_offset(L, false);
2544 beq(scratch, zero_reg, offset);
2551 // Check that offset actually fits in an int16_t.
2552 DCHECK(is_int16(offset));
2553 // Emit a nop in the branch delay slot if required.
2554 if (bdslot == PROTECT)
2559 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2560 BranchAndLinkShort(offset, bdslot);
2564 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2566 BranchDelaySlot bdslot) {
2567 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2571 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2572 if (L->is_bound()) {
2574 BranchAndLinkShort(L, bdslot);
2579 if (is_trampoline_emitted()) {
2582 BranchAndLinkShort(L, bdslot);
2588 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2590 BranchDelaySlot bdslot) {
2591 if (L->is_bound()) {
2593 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2596 Condition neg_cond = NegateCondition(cond);
2597 BranchShort(&skip, neg_cond, rs, rt);
2602 if (is_trampoline_emitted()) {
2604 Condition neg_cond = NegateCondition(cond);
2605 BranchShort(&skip, neg_cond, rs, rt);
2609 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2615 // We need to use a bgezal or bltzal, but they can't be used directly with the
2616 // slt instructions. We could use sub or add instead but we would miss overflow
2617 // cases, so we keep slt and add an intermediate third instruction.
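// The pattern: slt leaves 0 or 1 in scratch, addiu(scratch, scratch, -1) maps
// that to -1 or 0, and bgezal (taken when scratch >= 0) links exactly when the
// slt result was 1, while bltzal (taken when scratch < 0) links when it was 0.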
2618 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2619 BranchDelaySlot bdslot) {
2622 // Emit a nop in the branch delay slot if required.
2623 if (bdslot == PROTECT)
2628 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2629 Register rs, const Operand& rt,
2630 BranchDelaySlot bdslot) {
2631 BRANCH_ARGS_CHECK(cond, rs, rt);
2632 Register r2 = no_reg;
2633 Register scratch = at;
2637 } else if (cond != cc_always) {
2642 if (!IsMipsArchVariant(kMips32r6)) {
2643 BlockTrampolinePoolScope block_trampoline_pool(this);
2659 // Signed comparison.
2661 slt(scratch, r2, rs);
2662 addiu(scratch, scratch, -1);
2663 bgezal(scratch, offset);
2666 slt(scratch, rs, r2);
2667 addiu(scratch, scratch, -1);
2668 bltzal(scratch, offset);
2671 slt(scratch, rs, r2);
2672 addiu(scratch, scratch, -1);
2673 bgezal(scratch, offset);
2676 slt(scratch, r2, rs);
2677 addiu(scratch, scratch, -1);
2678 bltzal(scratch, offset);
2681 // Unsigned comparison.
2683 sltu(scratch, r2, rs);
2684 addiu(scratch, scratch, -1);
2685 bgezal(scratch, offset);
2687 case Ugreater_equal:
2688 sltu(scratch, rs, r2);
2689 addiu(scratch, scratch, -1);
2690 bltzal(scratch, offset);
2693 sltu(scratch, rs, r2);
2694 addiu(scratch, scratch, -1);
2695 bgezal(scratch, offset);
2698 sltu(scratch, r2, rs);
2699 addiu(scratch, scratch, -1);
2700 bltzal(scratch, offset);
2707 BlockTrampolinePoolScope block_trampoline_pool(this);
2723 // Signed comparison.
2726 slt(scratch, r2, rs);
2727 beq(scratch, zero_reg, 2);
2733 slt(scratch, rs, r2);
2734 bne(scratch, zero_reg, 2);
2740 slt(scratch, rs, r2);
2741 bne(scratch, zero_reg, 2);
2747 slt(scratch, r2, rs);
2748 bne(scratch, zero_reg, 2);
2754 // Unsigned comparison.
2757 sltu(scratch, r2, rs);
2758 beq(scratch, zero_reg, 2);
2762 case Ugreater_equal:
2764 sltu(scratch, rs, r2);
2765 bne(scratch, zero_reg, 2);
2771 sltu(scratch, rs, r2);
2772 bne(scratch, zero_reg, 2);
2778 sltu(scratch, r2, rs);
2779 bne(scratch, zero_reg, 2);
2788 // Emit a nop in the branch delay slot if required.
2789 if (bdslot == PROTECT)
2794 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2795 bal(shifted_branch_offset(L, false));
2797 // Emit a nop in the branch delay slot if required.
2798 if (bdslot == PROTECT)
2803 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2805 BranchDelaySlot bdslot) {
2806 BRANCH_ARGS_CHECK(cond, rs, rt);
2809 Register r2 = no_reg;
2810 Register scratch = at;
2813 } else if (cond != cc_always) {
2818 if (!IsMipsArchVariant(kMips32r6)) {
2819 BlockTrampolinePoolScope block_trampoline_pool(this);
2822 offset = shifted_branch_offset(L, false);
2828 offset = shifted_branch_offset(L, false);
2834 offset = shifted_branch_offset(L, false);
2838 // Signed comparison.
2840 slt(scratch, r2, rs);
2841 addiu(scratch, scratch, -1);
2842 offset = shifted_branch_offset(L, false);
2843 bgezal(scratch, offset);
2846 slt(scratch, rs, r2);
2847 addiu(scratch, scratch, -1);
2848 offset = shifted_branch_offset(L, false);
2849 bltzal(scratch, offset);
2852 slt(scratch, rs, r2);
2853 addiu(scratch, scratch, -1);
2854 offset = shifted_branch_offset(L, false);
2855 bgezal(scratch, offset);
2858 slt(scratch, r2, rs);
2859 addiu(scratch, scratch, -1);
2860 offset = shifted_branch_offset(L, false);
2861 bltzal(scratch, offset);
2864 // Unsigned comparison.
2866 sltu(scratch, r2, rs);
2867 addiu(scratch, scratch, -1);
2868 offset = shifted_branch_offset(L, false);
2869 bgezal(scratch, offset);
2871 case Ugreater_equal:
2872 sltu(scratch, rs, r2);
2873 addiu(scratch, scratch, -1);
2874 offset = shifted_branch_offset(L, false);
2875 bltzal(scratch, offset);
2878 sltu(scratch, rs, r2);
2879 addiu(scratch, scratch, -1);
2880 offset = shifted_branch_offset(L, false);
2881 bgezal(scratch, offset);
2884 sltu(scratch, r2, rs);
2885 addiu(scratch, scratch, -1);
2886 offset = shifted_branch_offset(L, false);
2887 bltzal(scratch, offset);
2894 BlockTrampolinePoolScope block_trampoline_pool(this);
2897 offset = shifted_branch_offset(L, false);
2903 offset = shifted_branch_offset(L, false);
2909 offset = shifted_branch_offset(L, false);
2913 // Signed comparison.
2916 slt(scratch, r2, rs);
2917 beq(scratch, zero_reg, 2);
2919 offset = shifted_branch_offset(L, false);
2924 slt(scratch, rs, r2);
2925 bne(scratch, zero_reg, 2);
2927 offset = shifted_branch_offset(L, false);
2932 slt(scratch, rs, r2);
2933 bne(scratch, zero_reg, 2);
2935 offset = shifted_branch_offset(L, false);
2940 slt(scratch, r2, rs);
2941 bne(scratch, zero_reg, 2);
2943 offset = shifted_branch_offset(L, false);
2948 // Unsigned comparison.
2951 sltu(scratch, r2, rs);
2952 beq(scratch, zero_reg, 2);
2954 offset = shifted_branch_offset(L, false);
2957 case Ugreater_equal:
2959 sltu(scratch, rs, r2);
2960 bne(scratch, zero_reg, 2);
2962 offset = shifted_branch_offset(L, false);
2967 sltu(scratch, rs, r2);
2968 bne(scratch, zero_reg, 2);
2970 offset = shifted_branch_offset(L, false);
2975 sltu(scratch, r2, rs);
2976 bne(scratch, zero_reg, 2);
2978 offset = shifted_branch_offset(L, false);
2987 // Check that the offset actually fits in an int16_t.
2988 DCHECK(is_int16(offset));
2990 // Emit a nop in the branch delay slot if required.
2991 if (bdslot == PROTECT)
2996 void MacroAssembler::Jump(Register target,
3000 BranchDelaySlot bd) {
3001 BlockTrampolinePoolScope block_trampoline_pool(this);
3002 if (cond == cc_always) {
3005 BRANCH_ARGS_CHECK(cond, rs, rt);
3006 Branch(2, NegateCondition(cond), rs, rt);
3009 // Emit a nop in the branch delay slot if required.
3015 void MacroAssembler::Jump(intptr_t target,
3016 RelocInfo::Mode rmode,
3020 BranchDelaySlot bd) {
3022 if (cond != cc_always) {
3023 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3025 // The first instruction of 'li' may be placed in the delay slot.
3026 // This is not an issue, t9 is expected to be clobbered anyway.
3027 li(t9, Operand(target, rmode));
3028 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
3033 void MacroAssembler::Jump(Address target,
3034 RelocInfo::Mode rmode,
3038 BranchDelaySlot bd) {
3039 DCHECK(!RelocInfo::IsCodeTarget(rmode));
3040 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3044 void MacroAssembler::Jump(Handle<Code> code,
3045 RelocInfo::Mode rmode,
3049 BranchDelaySlot bd) {
3050 DCHECK(RelocInfo::IsCodeTarget(rmode));
3051 AllowDeferredHandleDereference embedding_raw_address;
3052 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
3056 int MacroAssembler::CallSize(Register target,
3060 BranchDelaySlot bd) {
3063 if (cond == cc_always) {
3072 return size * kInstrSize;
3076 // Note: To call gcc-compiled C code on mips, you must call through t9.
3077 void MacroAssembler::Call(Register target,
3081 BranchDelaySlot bd) {
3082 BlockTrampolinePoolScope block_trampoline_pool(this);
3085 if (cond == cc_always) {
3088 BRANCH_ARGS_CHECK(cond, rs, rt);
3089 Branch(2, NegateCondition(cond), rs, rt);
3092 // Emit a nop in the branch delay slot if required.
3096 DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
3097 SizeOfCodeGeneratedSince(&start));
3101 int MacroAssembler::CallSize(Address target,
3102 RelocInfo::Mode rmode,
3106 BranchDelaySlot bd) {
3107 int size = CallSize(t9, cond, rs, rt, bd);
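// Account for the two instructions (lui/ori) that li(t9, ..., CONSTANT_SIZE)
// emits in Call(Address ...) to materialize the 32-bit target address.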
3108 return size + 2 * kInstrSize;
3112 void MacroAssembler::Call(Address target,
3113 RelocInfo::Mode rmode,
3117 BranchDelaySlot bd) {
3118 BlockTrampolinePoolScope block_trampoline_pool(this);
3121 int32_t target_int = reinterpret_cast<int32_t>(target);
3122 // Must record previous source positions before the
3123 // li() generates a new code target.
3124 positions_recorder()->WriteRecordedPositions();
3125 li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
3126 Call(t9, cond, rs, rt, bd);
3127 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3128 SizeOfCodeGeneratedSince(&start));
3132 int MacroAssembler::CallSize(Handle<Code> code,
3133 RelocInfo::Mode rmode,
3134 TypeFeedbackId ast_id,
3138 BranchDelaySlot bd) {
3139 AllowDeferredHandleDereference using_raw_address;
3140 return CallSize(reinterpret_cast<Address>(code.location()),
3141 rmode, cond, rs, rt, bd);
3145 void MacroAssembler::Call(Handle<Code> code,
3146 RelocInfo::Mode rmode,
3147 TypeFeedbackId ast_id,
3151 BranchDelaySlot bd) {
3152 BlockTrampolinePoolScope block_trampoline_pool(this);
3155 DCHECK(RelocInfo::IsCodeTarget(rmode));
3156 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3157 SetRecordedAstId(ast_id);
3158 rmode = RelocInfo::CODE_TARGET_WITH_ID;
3160 AllowDeferredHandleDereference embedding_raw_address;
3161 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3162 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3163 SizeOfCodeGeneratedSince(&start));
3167 void MacroAssembler::Ret(Condition cond,
3170 BranchDelaySlot bd) {
3171 Jump(ra, cond, rs, rt, bd);
3175 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
3176 BlockTrampolinePoolScope block_trampoline_pool(this);
3179 imm32 = jump_address(L);
3180 { BlockGrowBufferScope block_buf_growth(this);
3181 // Buffer growth (and relocation) must be blocked for internal references
3182 // until associated instructions are emitted and available to be patched.
3183 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3184 lui(at, (imm32 & kHiMask) >> kLuiShift);
3185 ori(at, at, (imm32 & kImm16Mask));
3189 // Emit a nop in the branch delay slot if required.
3190 if (bdslot == PROTECT)
3195 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
3196 BlockTrampolinePoolScope block_trampoline_pool(this);
3199 imm32 = jump_address(L);
3200 { BlockGrowBufferScope block_buf_growth(this);
3201 // Buffer growth (and relocation) must be blocked for internal references
3202 // until associated instructions are emitted and available to be patched.
3203 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3204 lui(at, (imm32 & kHiMask) >> kLuiShift);
3205 ori(at, at, (imm32 & kImm16Mask));
3209 // Emit a nop in the branch delay slot if required.
3210 if (bdslot == PROTECT)
3215 void MacroAssembler::DropAndRet(int drop) {
3216 DCHECK(is_int16(drop * kPointerSize));
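// The addiu below lands in the delay slot of the return jump, so the stack
// is adjusted as part of the return itself.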
3217 Ret(USE_DELAY_SLOT);
3218 addiu(sp, sp, drop * kPointerSize);
3221 void MacroAssembler::DropAndRet(int drop,
3224 const Operand& r2) {
3225 // Both Drop and Ret need to be conditional.
3227 if (cond != cc_always) {
3228 Branch(&skip, NegateCondition(cond), r1, r2);
3234 if (cond != cc_always) {
3240 void MacroAssembler::Drop(int count,
3243 const Operand& op) {
3251 Branch(&skip, NegateCondition(cond), reg, op);
3254 Addu(sp, sp, Operand(count * kPointerSize));
3263 void MacroAssembler::Swap(Register reg1,
3266 if (scratch.is(no_reg)) {
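// No scratch register available: use the classic three-XOR swap to exchange
// reg1 and reg2 without a temporary.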
3267 Xor(reg1, reg1, Operand(reg2));
3268 Xor(reg2, reg2, Operand(reg1));
3269 Xor(reg1, reg1, Operand(reg2));
3278 void MacroAssembler::Call(Label* target) {
3279 BranchAndLink(target);
3283 void MacroAssembler::Push(Handle<Object> handle) {
3284 li(at, Operand(handle));
3289 void MacroAssembler::DebugBreak() {
3290 PrepareCEntryArgs(0);
3291 PrepareCEntryFunction(
3292 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
3293 CEntryStub ces(isolate(), 1);
3294 DCHECK(AllowThisStubCall(&ces));
3295 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
3299 // ---------------------------------------------------------------------------
3300 // Exception handling.
3302 void MacroAssembler::PushStackHandler() {
3303 // Adjust this code if the stack handler layout asserted below changes.
3304 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3305 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3307 // Link the current handler as the next handler.
3308 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3309 lw(t1, MemOperand(t2));
3312 // Set this new handler as the current one.
3313 sw(sp, MemOperand(t2));
3317 void MacroAssembler::PopStackHandler() {
3318 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3320 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
3321 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3322 sw(a1, MemOperand(at));
3326 void MacroAssembler::Allocate(int object_size,
3331 AllocationFlags flags) {
3332 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3333 if (!FLAG_inline_new) {
3334 if (emit_debug_code()) {
3335 // Trash the registers to simulate an allocation failure.
3337 li(scratch1, 0x7191);
3338 li(scratch2, 0x7291);
3344 DCHECK(!result.is(scratch1));
3345 DCHECK(!result.is(scratch2));
3346 DCHECK(!scratch1.is(scratch2));
3347 DCHECK(!scratch1.is(t9));
3348 DCHECK(!scratch2.is(t9));
3349 DCHECK(!result.is(t9));
3351 // Make object size into bytes.
3352 if ((flags & SIZE_IN_WORDS) != 0) {
3353 object_size *= kPointerSize;
3355 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
3357 // Check relative positions of allocation top and limit addresses.
3358 // ARM adds additional checks to make sure the ldm instruction can be
3359 // used. On MIPS we don't have ldm, so no additional checks are needed.
3360 ExternalReference allocation_top =
3361 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3362 ExternalReference allocation_limit =
3363 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3366 reinterpret_cast<intptr_t>(allocation_top.address());
3368 reinterpret_cast<intptr_t>(allocation_limit.address());
3369 DCHECK((limit - top) == kPointerSize);
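// Since top and limit are adjacent words (checked above), both can be read
// through the same base register, topaddr, at offsets 0 and kPointerSize.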
3371 // Set up allocation top address and object size registers.
3372 Register topaddr = scratch1;
3373 li(topaddr, Operand(allocation_top));
3375 // This code stores a temporary value in t9.
3376 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3377 // Load allocation top into result and allocation limit into t9.
3378 lw(result, MemOperand(topaddr));
3379 lw(t9, MemOperand(topaddr, kPointerSize));
3381 if (emit_debug_code()) {
3382 // Assert that result actually contains top on entry. t9 is used
3383 // immediately below, so this use of t9 does not cause a difference in
3384 // register contents between debug and release builds.
3385 lw(t9, MemOperand(topaddr));
3386 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3388 // Load allocation limit into t9. Result already contains allocation top.
3389 lw(t9, MemOperand(topaddr, limit - top));
3392 if ((flags & DOUBLE_ALIGNMENT) != 0) {
3393 // Align the next allocation. Storing the filler map without checking top is
3394 // safe in new-space because the limit of the heap is aligned there.
3395 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
3396 And(scratch2, result, Operand(kDoubleAlignmentMask));
3398 Branch(&aligned, eq, scratch2, Operand(zero_reg));
3399 if ((flags & PRETENURE) != 0) {
3400 Branch(gc_required, Ugreater_equal, result, Operand(t9));
3402 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3403 sw(scratch2, MemOperand(result));
3404 Addu(result, result, Operand(kDoubleSize / 2));
3408 // Calculate new top and bail out if new space is exhausted. Use result
3409 // to calculate the new top.
3410 Addu(scratch2, result, Operand(object_size));
3411 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3412 sw(scratch2, MemOperand(topaddr));
3414 // Tag object if requested.
3415 if ((flags & TAG_OBJECT) != 0) {
3416 Addu(result, result, Operand(kHeapObjectTag));
3421 void MacroAssembler::Allocate(Register object_size,
3426 AllocationFlags flags) {
3427 if (!FLAG_inline_new) {
3428 if (emit_debug_code()) {
3429 // Trash the registers to simulate an allocation failure.
3431 li(scratch1, 0x7191);
3432 li(scratch2, 0x7291);
3438 DCHECK(!result.is(scratch1));
3439 DCHECK(!result.is(scratch2));
3440 DCHECK(!scratch1.is(scratch2));
3441 DCHECK(!object_size.is(t9));
3442 DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3444 // Check relative positions of allocation top and limit addresses.
3445 // ARM adds additional checks to make sure the ldm instruction can be
3446 // used. On MIPS we don't have ldm, so no additional checks are needed.
3447 ExternalReference allocation_top =
3448 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3449 ExternalReference allocation_limit =
3450 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3452 reinterpret_cast<intptr_t>(allocation_top.address());
3454 reinterpret_cast<intptr_t>(allocation_limit.address());
3455 DCHECK((limit - top) == kPointerSize);
3457 // Set up allocation top address and object size registers.
3458 Register topaddr = scratch1;
3459 li(topaddr, Operand(allocation_top));
3461 // This code stores a temporary value in t9.
3462 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3463 // Load allocation top into result and allocation limit into t9.
3464 lw(result, MemOperand(topaddr));
3465 lw(t9, MemOperand(topaddr, kPointerSize));
3467 if (emit_debug_code()) {
3468 // Assert that result actually contains top on entry. t9 is used
3469 // immediately below, so this use of t9 does not cause a difference in
3470 // register contents between debug and release builds.
3471 lw(t9, MemOperand(topaddr));
3472 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3474 // Load allocation limit into t9. Result already contains allocation top.
3475 lw(t9, MemOperand(topaddr, limit - top));
3478 if ((flags & DOUBLE_ALIGNMENT) != 0) {
3479 // Align the next allocation. Storing the filler map without checking top is
3480 // safe in new-space because the limit of the heap is aligned there.
3481 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
3482 And(scratch2, result, Operand(kDoubleAlignmentMask));
3484 Branch(&aligned, eq, scratch2, Operand(zero_reg));
3485 if ((flags & PRETENURE) != 0) {
3486 Branch(gc_required, Ugreater_equal, result, Operand(t9));
3488 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3489 sw(scratch2, MemOperand(result));
3490 Addu(result, result, Operand(kDoubleSize / 2));
3494 // Calculate new top and bail out if new space is exhausted. Use result
3495 // to calculate the new top. Object size may be in words so a shift is
3496 // required to get the number of bytes.
3497 if ((flags & SIZE_IN_WORDS) != 0) {
3498 sll(scratch2, object_size, kPointerSizeLog2);
3499 Addu(scratch2, result, scratch2);
3501 Addu(scratch2, result, Operand(object_size));
3503 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3505 // Update allocation top. result temporarily holds the new top.
3506 if (emit_debug_code()) {
3507 And(t9, scratch2, Operand(kObjectAlignmentMask));
3508 Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3510 sw(scratch2, MemOperand(topaddr));
3512 // Tag object if requested.
3513 if ((flags & TAG_OBJECT) != 0) {
3514 Addu(result, result, Operand(kHeapObjectTag));
3519 void MacroAssembler::AllocateTwoByteString(Register result,
3524 Label* gc_required) {
3525 // Calculate the number of bytes needed for the characters in the string while
3526 // observing object alignment.
3527 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3528 sll(scratch1, length, 1); // Length in bytes, not chars.
3529 addiu(scratch1, scratch1,
3530 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3531 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
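// scratch1 = (2 * length + SeqTwoByteString::kHeaderSize +
//             kObjectAlignmentMask) & ~kObjectAlignmentMask,
// i.e. header plus character data, rounded up to the object alignment.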
3533 // Allocate two-byte string in new space.
3541 // Set the map, length and hash field.
3542 InitializeNewString(result,
3544 Heap::kStringMapRootIndex,
3550 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3551 Register scratch1, Register scratch2,
3553 Label* gc_required) {
3554 // Calculate the number of bytes needed for the characters in the string
3555 // while observing object alignment.
3556 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3557 DCHECK(kCharSize == 1);
3558 addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3559 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3561 // Allocate one-byte string in new space.
3569 // Set the map, length and hash field.
3570 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3571 scratch1, scratch2);
3575 void MacroAssembler::AllocateTwoByteConsString(Register result,
3579 Label* gc_required) {
3580 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3582 InitializeNewString(result,
3584 Heap::kConsStringMapRootIndex,
3590 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3593 Label* gc_required) {
3594 Allocate(ConsString::kSize,
3601 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3602 scratch1, scratch2);
3606 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3610 Label* gc_required) {
3611 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3614 InitializeNewString(result,
3616 Heap::kSlicedStringMapRootIndex,
3622 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3626 Label* gc_required) {
3627 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3630 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3631 scratch1, scratch2);
3635 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3636 Label* not_unique_name) {
3637 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3639 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3640 Branch(&succeed, eq, at, Operand(zero_reg));
3641 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3647 // Allocates a heap number or jumps to the label if the young space is full and
3648 // a scavenge is needed.
3649 void MacroAssembler::AllocateHeapNumber(Register result,
3652 Register heap_number_map,
3654 TaggingMode tagging_mode,
3656 // Allocate an object in the heap for the heap number and tag it as a heap
3658 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3659 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3661 Heap::RootListIndex map_index = mode == MUTABLE
3662 ? Heap::kMutableHeapNumberMapRootIndex
3663 : Heap::kHeapNumberMapRootIndex;
3664 AssertIsRoot(heap_number_map, map_index);
3666 // Store heap number map in the allocated object.
3667 if (tagging_mode == TAG_RESULT) {
3668 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3670 sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3675 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3679 Label* gc_required) {
3680 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3681 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3682 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3686 // Copies a fixed number of fields of heap objects from src to dst.
3687 void MacroAssembler::CopyFields(Register dst,
3691 DCHECK((temps & dst.bit()) == 0);
3692 DCHECK((temps & src.bit()) == 0);
3693 // Primitive implementation using only one temporary register.
3695 Register tmp = no_reg;
3696 // Find a temp register in temps list.
3697 for (int i = 0; i < kNumRegisters; i++) {
3698 if ((temps & (1 << i)) != 0) {
3703 DCHECK(!tmp.is(no_reg));
3705 for (int i = 0; i < field_count; i++) {
3706 lw(tmp, FieldMemOperand(src, i * kPointerSize));
3707 sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3712 void MacroAssembler::CopyBytes(Register src,
3716 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3718 // Align src before copying in word size chunks.
3719 Branch(&byte_loop, le, length, Operand(kPointerSize));
3720 bind(&align_loop_1);
3721 And(scratch, src, kPointerSize - 1);
3722 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3723 lbu(scratch, MemOperand(src));
3725 sb(scratch, MemOperand(dst));
3727 Subu(length, length, Operand(1));
3728 Branch(&align_loop_1, ne, length, Operand(zero_reg));
3730 // Copy bytes in word size chunks.
3732 if (emit_debug_code()) {
3733 And(scratch, src, kPointerSize - 1);
3734 Assert(eq, kExpectingAlignmentForCopyBytes,
3735 scratch, Operand(zero_reg));
3737 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3738 lw(scratch, MemOperand(src));
3739 Addu(src, src, kPointerSize);
3741 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3742 // Can't use unaligned access - copy byte by byte.
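// The least-significant byte of the loaded word goes to the lowest address
// on little-endian targets and to the highest address on big-endian ones.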
3743 if (kArchEndian == kLittle) {
3744 sb(scratch, MemOperand(dst, 0));
3745 srl(scratch, scratch, 8);
3746 sb(scratch, MemOperand(dst, 1));
3747 srl(scratch, scratch, 8);
3748 sb(scratch, MemOperand(dst, 2));
3749 srl(scratch, scratch, 8);
3750 sb(scratch, MemOperand(dst, 3));
3752 sb(scratch, MemOperand(dst, 3));
3753 srl(scratch, scratch, 8);
3754 sb(scratch, MemOperand(dst, 2));
3755 srl(scratch, scratch, 8);
3756 sb(scratch, MemOperand(dst, 1));
3757 srl(scratch, scratch, 8);
3758 sb(scratch, MemOperand(dst, 0));
3763 Subu(length, length, Operand(kPointerSize));
3766 // Copy the last bytes, if any are left.
3768 Branch(&done, eq, length, Operand(zero_reg));
3770 lbu(scratch, MemOperand(src));
3772 sb(scratch, MemOperand(dst));
3774 Subu(length, length, Operand(1));
3775 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3780 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3781 Register end_offset,
3786 sw(filler, MemOperand(start_offset));
3787 Addu(start_offset, start_offset, kPointerSize);
3789 Branch(&loop, ult, start_offset, Operand(end_offset));
3793 void MacroAssembler::CheckFastElements(Register map,
3796 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3797 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3798 STATIC_ASSERT(FAST_ELEMENTS == 2);
3799 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3800 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3801 Branch(fail, hi, scratch,
3802 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3806 void MacroAssembler::CheckFastObjectElements(Register map,
3809 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3810 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3811 STATIC_ASSERT(FAST_ELEMENTS == 2);
3812 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3813 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3814 Branch(fail, ls, scratch,
3815 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3816 Branch(fail, hi, scratch,
3817 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3821 void MacroAssembler::CheckFastSmiElements(Register map,
3824 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3825 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3826 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3827 Branch(fail, hi, scratch,
3828 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3832 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3834 Register elements_reg,
3839 int elements_offset) {
3840 Label smi_value, maybe_nan, have_double_value, is_nan, done;
3841 Register mantissa_reg = scratch2;
3842 Register exponent_reg = scratch3;
3844 // Handle smi values specially.
3845 JumpIfSmi(value_reg, &smi_value);
3847 // Ensure that the object is a heap number
3850 Heap::kHeapNumberMapRootIndex,
3854 // Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000.
3856 li(scratch1, Operand(kHoleNanUpper32 & HeapNumber::kExponentMask));
3857 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3858 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3860 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3862 bind(&have_double_value);
3863 sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3864 Addu(scratch1, scratch1, elements_reg);
3866 FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3867 + kHoleNanLower32Offset));
3869 FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3870 + kHoleNanUpper32Offset));
3874 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3875 // it's an Infinity, and the non-NaN code path applies.
3876 Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3877 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3878 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3880 // Load canonical NaN for storing into the double array.
3881 LoadRoot(at, Heap::kNanValueRootIndex);
3882 lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
3883 lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
3884 jmp(&have_double_value);
3887 Addu(scratch1, elements_reg,
3888 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3890 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3891 Addu(scratch1, scratch1, scratch2);
3892 // scratch1 now holds the effective address of the double element.
3894 Register untagged_value = elements_reg;
3895 SmiUntag(untagged_value, value_reg);
3896 mtc1(untagged_value, f2);
3898 sdc1(f0, MemOperand(scratch1, 0));
3903 void MacroAssembler::CompareMapAndBranch(Register obj,
3906 Label* early_success,
3909 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3910 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3914 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3916 Label* early_success,
3919 Branch(branch_to, cond, obj_map, Operand(map));
3923 void MacroAssembler::CheckMap(Register obj,
3927 SmiCheckType smi_check_type) {
3928 if (smi_check_type == DO_SMI_CHECK) {
3929 JumpIfSmi(obj, fail);
3932 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3937 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3938 Register scratch2, Handle<WeakCell> cell,
3939 Handle<Code> success,
3940 SmiCheckType smi_check_type) {
3942 if (smi_check_type == DO_SMI_CHECK) {
3943 JumpIfSmi(obj, &fail);
3945 lw(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3946 GetWeakValue(scratch2, cell);
3947 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
3952 void MacroAssembler::CheckMap(Register obj,
3954 Heap::RootListIndex index,
3956 SmiCheckType smi_check_type) {
3957 if (smi_check_type == DO_SMI_CHECK) {
3958 JumpIfSmi(obj, fail);
3960 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3961 LoadRoot(at, index);
3962 Branch(fail, ne, scratch, Operand(at));
3966 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3967 li(value, Operand(cell));
3968 lw(value, FieldMemOperand(value, WeakCell::kValueOffset));
3972 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3974 GetWeakValue(value, cell);
3975 JumpIfSmi(value, miss);
3979 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
3980 if (IsMipsSoftFloatABI) {
3981 if (kArchEndian == kLittle) {
3987 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3992 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
3993 if (IsMipsSoftFloatABI) {
3994 if (kArchEndian == kLittle) {
4000 Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
4005 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4006 if (!IsMipsSoftFloatABI) {
4009 if (kArchEndian == kLittle) {
4018 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4019 if (!IsMipsSoftFloatABI) {
4022 if (kArchEndian == kLittle) {
4031 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4032 DoubleRegister src2) {
4033 if (!IsMipsSoftFloatABI) {
4035 DCHECK(!src1.is(f14));
4043 if (kArchEndian == kLittle) {
4054 // -----------------------------------------------------------------------------
4055 // JavaScript invokes.
4057 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4058 const ParameterCount& actual,
4059 Handle<Code> code_constant,
4062 bool* definitely_mismatches,
4064 const CallWrapper& call_wrapper) {
4065 bool definitely_matches = false;
4066 *definitely_mismatches = false;
4067 Label regular_invoke;
4069 // Check whether the expected and actual argument counts match. If not,
4070 // set up registers according to the contract with ArgumentsAdaptorTrampoline:
4071 // a0: actual arguments count
4072 // a1: function (passed through to callee)
4073 // a2: expected arguments count
4075 // The code below is made a lot easier because the calling code already sets
4076 // up actual and expected registers according to the contract if values are
4077 // passed in registers.
4078 DCHECK(actual.is_immediate() || actual.reg().is(a0));
4079 DCHECK(expected.is_immediate() || expected.reg().is(a2));
4080 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
4082 if (expected.is_immediate()) {
4083 DCHECK(actual.is_immediate());
4084 if (expected.immediate() == actual.immediate()) {
4085 definitely_matches = true;
4087 li(a0, Operand(actual.immediate()));
4088 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4089 if (expected.immediate() == sentinel) {
4090 // Don't worry about adapting arguments for builtins that
4091 // don't want that done. Skip adaptation code by making it look
4092 // like we have a match between expected and actual number of
4094 definitely_matches = true;
4096 *definitely_mismatches = true;
4097 li(a2, Operand(expected.immediate()));
4100 } else if (actual.is_immediate()) {
4101 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.immediate()));
4102 li(a0, Operand(actual.immediate()));
4104 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.reg()));
4107 if (!definitely_matches) {
4108 if (!code_constant.is_null()) {
4109 li(a3, Operand(code_constant));
4110 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
4113 Handle<Code> adaptor =
4114 isolate()->builtins()->ArgumentsAdaptorTrampoline();
4115 if (flag == CALL_FUNCTION) {
4116 call_wrapper.BeforeCall(CallSize(adaptor));
4118 call_wrapper.AfterCall();
4119 if (!*definitely_mismatches) {
4123 Jump(adaptor, RelocInfo::CODE_TARGET);
4125 bind(®ular_invoke);
4130 void MacroAssembler::InvokeCode(Register code,
4131 const ParameterCount& expected,
4132 const ParameterCount& actual,
4134 const CallWrapper& call_wrapper) {
4135 // You can't call a function without a valid frame.
4136 DCHECK(flag == JUMP_FUNCTION || has_frame());
4140 bool definitely_mismatches = false;
4141 InvokePrologue(expected, actual, Handle<Code>::null(), code,
4142 &done, &definitely_mismatches, flag,
4144 if (!definitely_mismatches) {
4145 if (flag == CALL_FUNCTION) {
4146 call_wrapper.BeforeCall(CallSize(code));
4148 call_wrapper.AfterCall();
4150 DCHECK(flag == JUMP_FUNCTION);
4153 // Continue here if InvokePrologue handled the invocation itself because of
4154 // mismatched parameter counts.
4160 void MacroAssembler::InvokeFunction(Register function,
4161 const ParameterCount& actual,
4163 const CallWrapper& call_wrapper) {
4164 // You can't call a function without a valid frame.
4165 DCHECK(flag == JUMP_FUNCTION || has_frame());
4167 // Contract with called JS functions requires that function is passed in a1.
4168 DCHECK(function.is(a1));
4169 Register expected_reg = a2;
4170 Register code_reg = a3;
4172 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4173 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4175 FieldMemOperand(code_reg,
4176 SharedFunctionInfo::kFormalParameterCountOffset));
4177 sra(expected_reg, expected_reg, kSmiTagSize);
4178 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4180 ParameterCount expected(expected_reg);
4181 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
4185 void MacroAssembler::InvokeFunction(Register function,
4186 const ParameterCount& expected,
4187 const ParameterCount& actual,
4189 const CallWrapper& call_wrapper) {
4190 // You can't call a function without a valid frame.
4191 DCHECK(flag == JUMP_FUNCTION || has_frame());
4193 // Contract with called JS functions requires that function is passed in a1.
4194 DCHECK(function.is(a1));
4196 // Get the function and setup the context.
4197 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4199 // We call indirectly through the code field in the function to
4200 // allow recompilation to take effect without changing any of the
4202 lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4203 InvokeCode(a3, expected, actual, flag, call_wrapper);
4207 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4208 const ParameterCount& expected,
4209 const ParameterCount& actual,
4211 const CallWrapper& call_wrapper) {
4213 InvokeFunction(a1, expected, actual, flag, call_wrapper);
4217 void MacroAssembler::IsObjectJSStringType(Register object,
4220 DCHECK(kNotStringTag != 0);
4222 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4223 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4224 And(scratch, scratch, Operand(kIsNotStringMask));
4225 Branch(fail, ne, scratch, Operand(zero_reg));
4229 void MacroAssembler::IsObjectNameType(Register object,
4232 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4233 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4234 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4238 // ---------------------------------------------------------------------------
4239 // Support functions.
4242 void MacroAssembler::GetMapConstructor(Register result, Register map,
4243 Register temp, Register temp2) {
4245 lw(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
4247 JumpIfSmi(result, &done);
4248 GetObjectType(result, temp, temp2);
4249 Branch(&done, ne, temp2, Operand(MAP_TYPE));
4250 lw(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
4256 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
4257 Register scratch, Label* miss) {
4258 // Get the prototype or initial map from the function.
4260 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4262 // If the prototype or initial map is the hole, don't return it and
4263 // simply miss the cache instead. This will allow us to allocate a
4264 // prototype object on-demand in the runtime system.
4265 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4266 Branch(miss, eq, result, Operand(t8));
4268 // If the function does not have an initial map, we're done.
4270 GetObjectType(result, scratch, scratch);
4271 Branch(&done, ne, scratch, Operand(MAP_TYPE));
4273 // Get the prototype from the initial map.
4274 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
4281 void MacroAssembler::GetObjectType(Register object,
4283 Register type_reg) {
4284 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
4285 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4289 // -----------------------------------------------------------------------------
4292 void MacroAssembler::CallStub(CodeStub* stub,
4293 TypeFeedbackId ast_id,
4297 BranchDelaySlot bd) {
4298 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
4299 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4304 void MacroAssembler::TailCallStub(CodeStub* stub,
4308 BranchDelaySlot bd) {
4309 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4313 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4314 return has_frame_ || !stub->SometimesSetsUpAFrame();
4318 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4319 // If the hash field contains an array index pick it out. The assert checks
4320 // that the constants for the maximum number of digits for an array index
4321 // cached in the hash field and the number of bits reserved for it does not
4323 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4324 (1 << String::kArrayIndexValueBits));
4325 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4329 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4333 Register heap_number_map,
4335 ObjectToDoubleFlags flags) {
4337 if ((flags & OBJECT_NOT_SMI) == 0) {
4339 JumpIfNotSmi(object, ¬_smi);
4340 // Remove smi tag and convert to double.
4341 sra(scratch1, object, kSmiTagSize);
4342 mtc1(scratch1, result);
4343 cvt_d_w(result, result);
4347 // Check for heap number and load double value from it.
4348 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4349 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4351 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4352 // If exponent is all ones the number is either a NaN or +/-Infinity.
4353 Register exponent = scratch1;
4354 Register mask_reg = scratch2;
4355 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4356 li(mask_reg, HeapNumber::kExponentMask);
4358 And(exponent, exponent, mask_reg);
4359 Branch(not_number, eq, exponent, Operand(mask_reg));
4361 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4366 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4368 Register scratch1) {
4369 sra(scratch1, smi, kSmiTagSize);
4370 mtc1(scratch1, value);
4371 cvt_d_w(value, value);
4375 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4376 const Operand& right,
4377 Register overflow_dst,
4379 if (right.is_reg()) {
4380 AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4383 mov(scratch, left); // Preserve left.
4384 Addu(dst, left, right.immediate()); // Left is overwritten.
4385 xor_(scratch, dst, scratch); // Original left.
4386 // Load right since xori takes uint16 as immediate.
4387 Addu(t9, zero_reg, right);
4388 xor_(overflow_dst, dst, t9);
4389 and_(overflow_dst, overflow_dst, scratch);
4391 Addu(dst, left, right.immediate());
4392 xor_(overflow_dst, dst, left);
4393 // Load right since xori takes uint16 as immediate.
4394 Addu(t9, zero_reg, right);
4395 xor_(scratch, dst, t9);
4396 and_(overflow_dst, scratch, overflow_dst);
4402 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4404 Register overflow_dst,
4406 DCHECK(!dst.is(overflow_dst));
4407 DCHECK(!dst.is(scratch));
4408 DCHECK(!overflow_dst.is(scratch));
4409 DCHECK(!overflow_dst.is(left));
4410 DCHECK(!overflow_dst.is(right));
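// In every case below the xor/and sequence sets the sign bit of overflow_dst
// exactly when left and right have the same sign but the sum has the opposite
// one, i.e. when the signed addition overflowed; a negative overflow_dst
// therefore signals overflow.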
4412 if (left.is(right) && dst.is(left)) {
4413 DCHECK(!dst.is(t9));
4414 DCHECK(!scratch.is(t9));
4415 DCHECK(!left.is(t9));
4416 DCHECK(!right.is(t9));
4417 DCHECK(!overflow_dst.is(t9));
4423 mov(scratch, left); // Preserve left.
4424 addu(dst, left, right); // Left is overwritten.
4425 xor_(scratch, dst, scratch); // Original left.
4426 xor_(overflow_dst, dst, right);
4427 and_(overflow_dst, overflow_dst, scratch);
4428 } else if (dst.is(right)) {
4429 mov(scratch, right); // Preserve right.
4430 addu(dst, left, right); // Right is overwritten.
4431 xor_(scratch, dst, scratch); // Original right.
4432 xor_(overflow_dst, dst, left);
4433 and_(overflow_dst, overflow_dst, scratch);
4435 addu(dst, left, right);
4436 xor_(overflow_dst, dst, left);
4437 xor_(scratch, dst, right);
4438 and_(overflow_dst, scratch, overflow_dst);
4443 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4444 const Operand& right,
4445 Register overflow_dst,
4447 if (right.is_reg()) {
4448 SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4451 mov(scratch, left); // Preserve left.
4452 Subu(dst, left, right); // Left is overwritten.
4453 xor_(overflow_dst, dst, scratch); // scratch is original left.
4454 // Load right since xori takes uint16 as immediate.
4455 Addu(t9, zero_reg, right);
4456 xor_(scratch, scratch, t9); // scratch is original left.
4457 and_(overflow_dst, scratch, overflow_dst);
4459 Subu(dst, left, right);
4460 xor_(overflow_dst, dst, left);
4461 // Load right since xori takes uint16 as immediate.
4462 Addu(t9, zero_reg, right);
4463 xor_(scratch, left, t9);
4464 and_(overflow_dst, scratch, overflow_dst);
4470 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4472 Register overflow_dst,
4474 DCHECK(!dst.is(overflow_dst));
4475 DCHECK(!dst.is(scratch));
4476 DCHECK(!overflow_dst.is(scratch));
4477 DCHECK(!overflow_dst.is(left));
4478 DCHECK(!overflow_dst.is(right));
4479 DCHECK(!scratch.is(left));
4480 DCHECK(!scratch.is(right));
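// In every case below the sign bit of overflow_dst ends up set exactly when
// left and right have different signs and the difference has a sign different
// from left's, i.e. when the signed subtraction overflowed.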
4482 // This happens with some crankshaft code. Since Subu works fine if
4483 // left == right, let's not make that restriction here.
4484 if (left.is(right)) {
4486 mov(overflow_dst, zero_reg);
4491 mov(scratch, left); // Preserve left.
4492 subu(dst, left, right); // Left is overwritten.
4493 xor_(overflow_dst, dst, scratch); // scratch is original left.
4494 xor_(scratch, scratch, right); // scratch is original left.
4495 and_(overflow_dst, scratch, overflow_dst);
4496 } else if (dst.is(right)) {
4497 mov(scratch, right); // Preserve right.
4498 subu(dst, left, right); // Right is overwritten.
4499 xor_(overflow_dst, dst, left);
4500 xor_(scratch, left, scratch); // Original right.
4501 and_(overflow_dst, scratch, overflow_dst);
4503 subu(dst, left, right);
4504 xor_(overflow_dst, dst, left);
4505 xor_(scratch, left, right);
4506 and_(overflow_dst, scratch, overflow_dst);
4511 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
4512 SaveFPRegsMode save_doubles,
4513 BranchDelaySlot bd) {
4514 // All parameters are on the stack. v0 has the return value after call.
4516 // If the expected number of arguments of the runtime function is
4517 // constant, we check that the actual number of arguments matches the
4519 CHECK(f->nargs < 0 || f->nargs == num_arguments);
4521 // TODO(1236192): Most runtime routines don't need the number of
4522 // arguments passed in because it is constant. At some point we
4523 // should remove this need and make the runtime routine entry code
4525 PrepareCEntryArgs(num_arguments);
4526 PrepareCEntryFunction(ExternalReference(f, isolate()));
4527 CEntryStub stub(isolate(), 1, save_doubles);
4528 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4532 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4534 BranchDelaySlot bd) {
4535 PrepareCEntryArgs(num_arguments);
4536 PrepareCEntryFunction(ext);
4538 CEntryStub stub(isolate(), 1);
4539 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4543 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4546 // TODO(1236192): Most runtime routines don't need the number of
4547 // arguments passed in because it is constant. At some point we
4548 // should remove this need and make the runtime routine entry code
4550 PrepareCEntryArgs(num_arguments);
4551 JumpToExternalReference(ext);
4555 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4558 TailCallExternalReference(ExternalReference(fid, isolate()),
4564 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4565 BranchDelaySlot bd) {
4566 PrepareCEntryFunction(builtin);
4567 CEntryStub stub(isolate(), 1);
4568 Jump(stub.GetCode(),
4569 RelocInfo::CODE_TARGET,
4577 void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
4578 const CallWrapper& call_wrapper) {
4579 // You can't call a builtin without a valid frame.
4580 DCHECK(flag == JUMP_FUNCTION || has_frame());
4582 GetBuiltinEntry(t9, native_context_index);
4583 if (flag == CALL_FUNCTION) {
4584 call_wrapper.BeforeCall(CallSize(t9));
4586 call_wrapper.AfterCall();
4588 DCHECK(flag == JUMP_FUNCTION);
4594 void MacroAssembler::GetBuiltinFunction(Register target,
4595 int native_context_index) {
4596 // Load the builtins object into target register.
4597 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4598 lw(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
4599 // Load the JavaScript builtin function from the builtins object.
4600 lw(target, ContextOperand(target, native_context_index));
4604 void MacroAssembler::GetBuiltinEntry(Register target,
4605 int native_context_index) {
4606 DCHECK(!target.is(a1));
4607 GetBuiltinFunction(a1, native_context_index);
4608 // Load the code entry point from the builtins object.
4609 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4613 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4614 Register scratch1, Register scratch2) {
4615 if (FLAG_native_code_counters && counter->Enabled()) {
4616 li(scratch1, Operand(value));
4617 li(scratch2, Operand(ExternalReference(counter)));
4618 sw(scratch1, MemOperand(scratch2));
4623 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4624 Register scratch1, Register scratch2) {
4626 if (FLAG_native_code_counters && counter->Enabled()) {
4627 li(scratch2, Operand(ExternalReference(counter)));
4628 lw(scratch1, MemOperand(scratch2));
4629 Addu(scratch1, scratch1, Operand(value));
4630 sw(scratch1, MemOperand(scratch2));
4635 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4636 Register scratch1, Register scratch2) {
4638 if (FLAG_native_code_counters && counter->Enabled()) {
4639 li(scratch2, Operand(ExternalReference(counter)));
4640 lw(scratch1, MemOperand(scratch2));
4641 Subu(scratch1, scratch1, Operand(value));
4642 sw(scratch1, MemOperand(scratch2));
4647 // -----------------------------------------------------------------------------
4650 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4651 Register rs, Operand rt) {
4652 if (emit_debug_code())
4653 Check(cc, reason, rs, rt);
4657 void MacroAssembler::AssertFastElements(Register elements) {
4658 if (emit_debug_code()) {
4659 DCHECK(!elements.is(at));
4662 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4663 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4664 Branch(&ok, eq, elements, Operand(at));
4665 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4666 Branch(&ok, eq, elements, Operand(at));
4667 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4668 Branch(&ok, eq, elements, Operand(at));
4669 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4676 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4677 Register rs, Operand rt) {
4679 Branch(&L, cc, rs, rt);
4681 // Will not return here.
4686 void MacroAssembler::Abort(BailoutReason reason) {
4690 const char* msg = GetBailoutReason(reason);
4692 RecordComment("Abort message: ");
4696 if (FLAG_trap_on_abort) {
4702 li(a0, Operand(Smi::FromInt(reason)));
4704 // Disable stub call restrictions to always allow calls to abort.
4706 // We don't actually want to generate a pile of code for this, so just
4707 // claim there is a stack frame, without generating one.
4708 FrameScope scope(this, StackFrame::NONE);
4709 CallRuntime(Runtime::kAbort, 1);
4711 CallRuntime(Runtime::kAbort, 1);
4713 // Will not return here.
4714 if (is_trampoline_pool_blocked()) {
4715 // If the calling code cares about the exact number of
4716 // instructions generated, we insert padding here to keep the size
4717 // of the Abort macro constant.
4718 // Currently in debug mode with debug_code enabled the number of
4719 // generated instructions is 10, so we use this as a maximum value.
4720 static const int kExpectedAbortInstructions = 10;
4721 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4722 DCHECK(abort_instructions <= kExpectedAbortInstructions);
4723 while (abort_instructions++ < kExpectedAbortInstructions) {
4730 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4731 if (context_chain_length > 0) {
4732 // Move up the chain of contexts to the context containing the slot.
4733 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4734 for (int i = 1; i < context_chain_length; i++) {
4735 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4738 // Slot is in the current function context. Move it into the
4739 // destination register in case we store into it (the write barrier
4740 // cannot be allowed to destroy the context register cp).
4746 void MacroAssembler::LoadTransitionedArrayMapConditional(
4747 ElementsKind expected_kind,
4748 ElementsKind transitioned_kind,
4749 Register map_in_out,
4751 Label* no_map_match) {
4752 // Load the global or builtins object from the current context.
4754 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4755 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4757 // Check that the function's map is the same as the expected cached map.
4760 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4761 size_t offset = expected_kind * kPointerSize +
4762 FixedArrayBase::kHeaderSize;
4763 lw(at, FieldMemOperand(scratch, offset));
4764 Branch(no_map_match, ne, map_in_out, Operand(at));
4766 // Use the transitioned cached map.
4767 offset = transitioned_kind * kPointerSize +
4768 FixedArrayBase::kHeaderSize;
4769 lw(map_in_out, FieldMemOperand(scratch, offset));
4773 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4774 // Load the global or builtins object from the current context.
4776 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4777 // Load the native context from the global or builtins object.
4778 lw(function, FieldMemOperand(function,
4779 GlobalObject::kNativeContextOffset));
4780 // Load the function from the native context.
4781 lw(function, MemOperand(function, Context::SlotOffset(index)));
4785 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4788 // Load the initial map. The global functions all have initial maps.
4789 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4790 if (emit_debug_code()) {
4792 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4795 Abort(kGlobalFunctionsMustHaveInitialMap);
4801 void MacroAssembler::StubPrologue() {
4803 Push(Smi::FromInt(StackFrame::STUB));
4804 // Adjust FP to point to saved FP.
4805 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4809 void MacroAssembler::Prologue(bool code_pre_aging) {
4810 PredictableCodeSizeScope predictible_code_size_scope(
4811 this, kNoCodeAgeSequenceLength);
4812 // The following three instructions must remain together and unmodified
4813 // for code aging to work properly.
4814 if (code_pre_aging) {
4815 // Pre-age the code.
4816 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4817 nop(Assembler::CODE_AGE_MARKER_NOP);
4818 // Load the stub address to t9 and call it,
4819 // GetCodeAgeAndParity() extracts the stub address from this instruction.
4821 Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
4823 nop(); // Prevent jalr to jal optimization.
4825 nop(); // Branch delay slot nop.
4826 nop(); // Pad the empty space.
4828 Push(ra, fp, cp, a1);
4829 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4830 // Adjust fp to point to caller's fp.
4831 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4836 void MacroAssembler::EnterFrame(StackFrame::Type type,
4837 bool load_constant_pool_pointer_reg) {
4838 // Out-of-line constant pool not implemented on mips.
4843 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4844 addiu(sp, sp, -5 * kPointerSize);
4845 li(t8, Operand(Smi::FromInt(type)));
4846 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4847 sw(ra, MemOperand(sp, 4 * kPointerSize));
4848 sw(fp, MemOperand(sp, 3 * kPointerSize));
4849 sw(cp, MemOperand(sp, 2 * kPointerSize));
4850 sw(t8, MemOperand(sp, 1 * kPointerSize));
4851 sw(t9, MemOperand(sp, 0 * kPointerSize));
4852 // Adjust FP to point to saved FP.
4854 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4858 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4860 lw(fp, MemOperand(sp, 0 * kPointerSize));
4861 lw(ra, MemOperand(sp, 1 * kPointerSize));
4862 addiu(sp, sp, 2 * kPointerSize);
4866 void MacroAssembler::EnterExitFrame(bool save_doubles,
4868 // Set up the frame structure on the stack.
4869 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4870 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4871 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4873 // This is how the stack will look:
4874 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4875 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4876 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4877 // [fp - 1 (==kSPOffset)] - sp of the called function
4878 // [fp - 2 (==kCodeOffset)] - CodeObject
4879 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4880 // new stack (will contain saved ra)
4883 addiu(sp, sp, -4 * kPointerSize);
4884 sw(ra, MemOperand(sp, 3 * kPointerSize));
4885 sw(fp, MemOperand(sp, 2 * kPointerSize));
4886 addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4888 if (emit_debug_code()) {
4889 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4892 // Accessed from ExitFrame::code_slot.
4893 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4894 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4896 // Save the frame pointer and the context in top.
4897 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4898 sw(fp, MemOperand(t8));
4899 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4900 sw(cp, MemOperand(t8));
4902 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4904 // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4905 DCHECK(kDoubleSize == frame_alignment);
4906 if (frame_alignment > 0) {
4907 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4908 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4910 int space = FPURegister::kMaxNumRegisters * kDoubleSize;
4911 Subu(sp, sp, Operand(space));
4912 // Remember: we only need to save every 2nd double FPU value.
4913 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4914 FPURegister reg = FPURegister::from_code(i);
4915 sdc1(reg, MemOperand(sp, i * kDoubleSize));
4919 // Reserve space for the return address, the stack space and an optional slot
4920 // (used by the DirectCEntryStub to hold the return value if a struct is
4921 // returned) and align the frame preparing for calling the runtime function.
4922 DCHECK(stack_space >= 0);
4923 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4924 if (frame_alignment > 0) {
4925 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4926 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4929 // Set the exit frame sp value to point just before the return address
4931 addiu(at, sp, kPointerSize);
4932 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4936 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
4937 bool restore_context, bool do_return,
4938 bool argument_count_is_length) {
4939 // Optionally restore all double registers.
4941 // Remember: we only need to restore every 2nd double FPU value.
4942 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4943 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4944 FPURegister reg = FPURegister::from_code(i);
4945 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4950 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4951 sw(zero_reg, MemOperand(t8));
4953 // Restore current context from top and clear it in debug mode.
4954 if (restore_context) {
4955 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4956 lw(cp, MemOperand(t8));
4959 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4960 sw(a3, MemOperand(t8));
4963 // Pop the arguments, restore registers, and return.
4964 mov(sp, fp); // Respect ABI stack constraint.
4965 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4966 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4968 if (argument_count.is_valid()) {
4969 if (argument_count_is_length) {
4970 addu(sp, sp, argument_count);
4972 sll(t8, argument_count, kPointerSizeLog2);
4978 Ret(USE_DELAY_SLOT);
4979 // If returning, the instruction in the delay slot will be the addiu below.
4985 void MacroAssembler::InitializeNewString(Register string,
4987 Heap::RootListIndex map_index,
4989 Register scratch2) {
4990 sll(scratch1, length, kSmiTagSize);
4991 LoadRoot(scratch2, map_index);
4992 sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
4993 li(scratch1, Operand(String::kEmptyHashField));
4994 sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4995 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4999 int MacroAssembler::ActivationFrameAlignment() {
5000 #if V8_HOST_ARCH_MIPS
5001 // Running on the real platform. Use the alignment as mandated by the local
5003 // Note: This will break if we ever start generating snapshots on one Mips
5004 // platform for another Mips platform with a different alignment.
5005 return base::OS::ActivationFrameAlignment();
5006 #else // V8_HOST_ARCH_MIPS
5007 // If we are using the simulator then we should always align to the expected
5008 // alignment. As the simulator is used to generate snapshots we do not know
5009 // if the target platform will need alignment, so this is controlled from a
5011 return FLAG_sim_stack_alignment;
5012 #endif // V8_HOST_ARCH_MIPS
5016 void MacroAssembler::AssertStackIsAligned() {
5017 if (emit_debug_code()) {
5018 const int frame_alignment = ActivationFrameAlignment();
5019 const int frame_alignment_mask = frame_alignment - 1;
5021 if (frame_alignment > kPointerSize) {
5022 Label alignment_as_expected;
5023 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5024 andi(at, sp, frame_alignment_mask);
5025 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5026 // Don't use Check here, as it will call Runtime_Abort, re-entering this code.
5027 stop("Unexpected stack alignment");
5028 bind(&alignment_as_expected);
5034 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
5037 Label* not_power_of_two_or_zero) {
5038 Subu(scratch, reg, Operand(1));
5039 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
5040 scratch, Operand(zero_reg));
5041 and_(at, scratch, reg); // In the delay slot.
5042 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
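// This uses the classic check: x & (x - 1) == 0 exactly when x is zero or a power
// of two. E.g. 8 & 7 == 0 (fall through) while 12 & 11 == 8 (jump); zero and
// negative inputs are caught by the 'lt' branch on x - 1 above.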
5046 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
5047 DCHECK(!reg.is(overflow));
5048 mov(overflow, reg); // Save original value.
5050 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
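// Example: for 0x40000000 the tagged value is 0x80000000 and the xor is
// 0xC0000000 (negative), so overflow is signalled; for 0x3FFFFFFF the xor is
// 0x40000001 (non-negative), so the value fits in a Smi.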
5054 void MacroAssembler::SmiTagCheckOverflow(Register dst,
5056 Register overflow) {
5058 // Fall back to slower case.
5059 SmiTagCheckOverflow(dst, overflow);
5061 DCHECK(!dst.is(src));
5062 DCHECK(!dst.is(overflow));
5063 DCHECK(!src.is(overflow));
5065 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
5070 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5073 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5078 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5080 Label* non_smi_case) {
5081 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
5085 void MacroAssembler::JumpIfSmi(Register value,
5088 BranchDelaySlot bd) {
5089 DCHECK_EQ(0, kSmiTag);
5090 andi(scratch, value, kSmiTagMask);
5091 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5094 void MacroAssembler::JumpIfNotSmi(Register value,
5095 Label* not_smi_label,
5097 BranchDelaySlot bd) {
5098 DCHECK_EQ(0, kSmiTag);
5099 andi(scratch, value, kSmiTagMask);
5100 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5104 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5106 Label* on_not_both_smi) {
5107 STATIC_ASSERT(kSmiTag == 0);
5108 DCHECK_EQ(1, kSmiTagMask);
5109 or_(at, reg1, reg2);
5110 JumpIfNotSmi(at, on_not_both_smi);
5114 void MacroAssembler::JumpIfEitherSmi(Register reg1,
5116 Label* on_either_smi) {
5117 STATIC_ASSERT(kSmiTag == 0);
5118 DCHECK_EQ(1, kSmiTagMask);
5119 // The AND has a zero (Smi) tag bit if either value is a Smi.
5120 and_(at, reg1, reg2);
5121 JumpIfSmi(at, on_either_smi);
5125 void MacroAssembler::AssertNotSmi(Register object) {
5126 if (emit_debug_code()) {
5127 STATIC_ASSERT(kSmiTag == 0);
5128 andi(at, object, kSmiTagMask);
5129 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
5134 void MacroAssembler::AssertSmi(Register object) {
5135 if (emit_debug_code()) {
5136 STATIC_ASSERT(kSmiTag == 0);
5137 andi(at, object, kSmiTagMask);
5138 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
5143 void MacroAssembler::AssertString(Register object) {
5144 if (emit_debug_code()) {
5145 STATIC_ASSERT(kSmiTag == 0);
5147 Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
5149 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5150 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5151 Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
5157 void MacroAssembler::AssertName(Register object) {
5158 if (emit_debug_code()) {
5159 STATIC_ASSERT(kSmiTag == 0);
5161 Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
5163 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5164 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5165 Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
5171 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5173 if (emit_debug_code()) {
5174 Label done_checking;
5175 AssertNotSmi(object);
5176 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5177 Branch(&done_checking, eq, object, Operand(scratch));
5179 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
5180 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
5181 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
5183 bind(&done_checking);
5188 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
5189 if (emit_debug_code()) {
5190 DCHECK(!reg.is(at));
5191 LoadRoot(at, index);
5192 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
5197 void MacroAssembler::JumpIfNotHeapNumber(Register object,
5198 Register heap_number_map,
5200 Label* on_not_heap_number) {
5201 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5202 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
5203 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
5207 void MacroAssembler::LookupNumberStringCache(Register object,
5213 // Use of registers. Register result is used as a temporary.
5214 Register number_string_cache = result;
5215 Register mask = scratch3;
5217 // Load the number string cache.
5218 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
5220 // Make the hash mask from the length of the number string cache. It
5221 // contains two elements (number and string) for each cache entry.
5222 lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
5223 // Divide length by two (length is a smi).
5224 sra(mask, mask, kSmiTagSize + 1);
5225 Addu(mask, mask, -1); // Make mask.
5227 // Calculate the entry in the number string cache. The hash value in the
5228 // number string cache for smis is just the smi value, and the hash for
5229 // doubles is the xor of the upper and lower words. See
5230 // Heap::GetNumberStringCache.
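// For example, a heap number holding 2.5 has bits 0x40040000'00000000, so its hash
// is 0x40040000 ^ 0x00000000 masked by the cache size; with a 128-entry cache
// (mask 127) that selects entry 0. (The values here are purely illustrative.)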
5232 Label load_result_from_cache;
5233 JumpIfSmi(object, &is_smi);
5236 Heap::kHeapNumberMapRootIndex,
5240 STATIC_ASSERT(8 == kDoubleSize);
5243 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
5244 lw(scratch2, MemOperand(scratch1, kPointerSize));
5245 lw(scratch1, MemOperand(scratch1, 0));
5246 Xor(scratch1, scratch1, Operand(scratch2));
5247 And(scratch1, scratch1, Operand(mask));
5249 // Calculate address of entry in string cache: each entry consists
5250 // of two pointer sized fields.
5251 sll(scratch1, scratch1, kPointerSizeLog2 + 1);
5252 Addu(scratch1, number_string_cache, scratch1);
5254 Register probe = mask;
5255 lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
5256 JumpIfSmi(probe, not_found);
5257 ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
5258 ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
5259 BranchF(&load_result_from_cache, NULL, eq, f12, f14);
5263 Register scratch = scratch1;
5264 sra(scratch, object, 1); // Shift away the tag.
5265 And(scratch, mask, Operand(scratch));
5267 // Calculate address of entry in string cache: each entry consists
5268 // of two pointer sized fields.
5269 sll(scratch, scratch, kPointerSizeLog2 + 1);
5270 Addu(scratch, number_string_cache, scratch);
5272 // Check if the entry is the smi we are looking for.
5273 lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
5274 Branch(not_found, ne, object, Operand(probe));
5276 // Get the result from the cache.
5277 bind(&load_result_from_cache);
5278 lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
5280 IncrementCounter(isolate()->counters()->number_to_string_native(),
5287 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
5288 Register first, Register second, Register scratch1, Register scratch2,
5290 // Test that both first and second are sequential one-byte strings.
5291 // Assume that they are non-smis.
5292 lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
5293 lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
5294 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
5295 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
5297 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
5302 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
5307 // Check that neither is a smi.
5308 STATIC_ASSERT(kSmiTag == 0);
5309 And(scratch1, first, Operand(second));
5310 JumpIfSmi(scratch1, failure);
5311 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
5316 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
5317 Register first, Register second, Register scratch1, Register scratch2,
5319 const int kFlatOneByteStringMask =
5320 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5321 const int kFlatOneByteStringTag =
5322 kStringTag | kOneByteStringTag | kSeqStringTag;
5323 DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits in a 16-bit immediate.
5324 andi(scratch1, first, kFlatOneByteStringMask);
5325 Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
5326 andi(scratch2, second, kFlatOneByteStringMask);
5327 Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
5331 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
5334 const int kFlatOneByteStringMask =
5335 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5336 const int kFlatOneByteStringTag =
5337 kStringTag | kOneByteStringTag | kSeqStringTag;
5338 And(scratch, type, Operand(kFlatOneByteStringMask));
5339 Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
5343 static const int kRegisterPassedArguments = 4;
5345 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5346 int num_double_arguments) {
5347 int stack_passed_words = 0;
5348 num_reg_arguments += 2 * num_double_arguments;
5350 // Up to four simple arguments are passed in registers a0..a3.
5351 if (num_reg_arguments > kRegisterPassedArguments) {
5352 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5354 stack_passed_words += kCArgSlotCount;
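// kCArgSlotCount accounts for the four argument slots the O32 ABI requires the
// caller to reserve even when a0..a3 are used. As an illustrative case (not a
// value used in this file), 3 integer args plus 2 doubles count as 3 + 2*2 = 7
// register-sized args; 3 of them spill to the stack, giving 3 + 4 = 7 stack words.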
5355 return stack_passed_words;
5359 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5363 uint32_t encoding_mask) {
5366 Check(ne, kNonObject, at, Operand(zero_reg));
5368 lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
5369 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5371 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5372 li(scratch, Operand(encoding_mask));
5373 Check(eq, kUnexpectedStringType, at, Operand(scratch));
5375 // The index is assumed to be untagged coming in; tag it to compare against the
5376 // string length without using a temp register. The tag is removed again at the end.
5378 Label index_tag_ok, index_tag_bad;
5379 TrySmiTag(index, scratch, &index_tag_bad);
5380 Branch(&index_tag_ok);
5381 bind(&index_tag_bad);
5382 Abort(kIndexIsTooLarge);
5383 bind(&index_tag_ok);
5385 lw(at, FieldMemOperand(string, String::kLengthOffset));
5386 Check(lt, kIndexIsTooLarge, index, Operand(at));
5388 DCHECK(Smi::FromInt(0) == 0);
5389 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5391 SmiUntag(index, index);
5395 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5396 int num_double_arguments,
5398 int frame_alignment = ActivationFrameAlignment();
5400 // Up to four simple arguments are passed in registers a0..a3.
5401 // Those four arguments must have reserved argument slots on the stack for
5402 // mips, even though those argument slots are not normally used.
5403 // Remaining arguments are pushed on the stack, above (higher address than)
5404 // the argument slots.
5405 int stack_passed_arguments = CalculateStackPassedWords(
5406 num_reg_arguments, num_double_arguments);
5407 if (frame_alignment > kPointerSize) {
5408 // Make stack end at alignment and make room for num_arguments - 4 words
5409 // and the original value of sp.
5411 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5412 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5413 And(sp, sp, Operand(-frame_alignment));
5414 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
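// The slot just above the outgoing arguments holds the caller's original sp
// (expected in 'scratch', saved before the adjustment above); CallCFunctionHelper
// reloads sp from that slot after the call when extra alignment was applied.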
5416 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5421 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5423 PrepareCallCFunction(num_reg_arguments, 0, scratch);
5427 void MacroAssembler::CallCFunction(ExternalReference function,
5428 int num_reg_arguments,
5429 int num_double_arguments) {
5430 li(t8, Operand(function));
5431 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5435 void MacroAssembler::CallCFunction(Register function,
5436 int num_reg_arguments,
5437 int num_double_arguments) {
5438 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5442 void MacroAssembler::CallCFunction(ExternalReference function,
5443 int num_arguments) {
5444 CallCFunction(function, num_arguments, 0);
5448 void MacroAssembler::CallCFunction(Register function,
5449 int num_arguments) {
5450 CallCFunction(function, num_arguments, 0);
5454 void MacroAssembler::CallCFunctionHelper(Register function,
5455 int num_reg_arguments,
5456 int num_double_arguments) {
5457 DCHECK(has_frame());
5458 // Make sure that the stack is aligned before calling a C function unless
5459 // running in the simulator. The simulator has its own alignment check which
5460 // provides more information.
5461 // The argument slots are presumed to have been set up by
5462 // PrepareCallCFunction. The C function must be called via t9, per the MIPS ABI.
5464 #if V8_HOST_ARCH_MIPS
5465 if (emit_debug_code()) {
5466 int frame_alignment = base::OS::ActivationFrameAlignment();
5467 int frame_alignment_mask = frame_alignment - 1;
5468 if (frame_alignment > kPointerSize) {
5469 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5470 Label alignment_as_expected;
5471 And(at, sp, Operand(frame_alignment_mask));
5472 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5473 // Don't use Check here, as it will call Runtime_Abort possibly
5474 // re-entering here.
5475 stop("Unexpected alignment in CallCFunction");
5476 bind(&alignment_as_expected);
5479 #endif // V8_HOST_ARCH_MIPS
5481 // Just call directly. The function called cannot cause a GC, or
5482 // allow preemption, so the return address in the link register stays correct.
5485 if (!function.is(t9)) {
5492 int stack_passed_arguments = CalculateStackPassedWords(
5493 num_reg_arguments, num_double_arguments);
5495 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
5496 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5498 Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5503 #undef BRANCH_ARGS_CHECK
5506 void MacroAssembler::CheckPageFlag(
5511 Label* condition_met) {
5512 And(scratch, object, Operand(~Page::kPageAlignmentMask));
5513 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5514 And(scratch, scratch, Operand(mask));
5515 Branch(condition_met, cc, scratch, Operand(zero_reg));
5519 void MacroAssembler::JumpIfBlack(Register object,
5523 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5524 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5528 void MacroAssembler::HasColor(Register object,
5529 Register bitmap_scratch,
5530 Register mask_scratch,
5534 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5535 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5537 GetMarkBits(object, bitmap_scratch, mask_scratch);
5539 Label other_color, word_boundary;
5540 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5541 And(t8, t9, Operand(mask_scratch));
5542 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5543 // Shift the mask left by 1 by adding it to itself.
5544 Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5545 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5546 And(t8, t9, Operand(mask_scratch));
5547 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
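// If doubling the mask wrapped around to zero, the object's second mark bit is
// bit 0 of the next bitmap cell; that case is handled at word_boundary below.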
5550 bind(&word_boundary);
5551 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5552 And(t9, t9, Operand(1));
5553 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5558 // Detect some, but not all, common pointer-free objects. This is used by the
5559 // incremental write barrier which doesn't care about oddballs (they are always
5560 // marked black immediately so this code is not hit).
5561 void MacroAssembler::JumpIfDataObject(Register value,
5563 Label* not_data_object) {
5564 DCHECK(!AreAliased(value, scratch, t8, no_reg));
5565 Label is_data_object;
5566 lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5567 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5568 Branch(&is_data_object, eq, t8, Operand(scratch));
5569 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5570 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5571 // If it's a string and not a cons or sliced string, it contains no GC pointers.
5573 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5574 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5575 Branch(not_data_object, ne, t8, Operand(zero_reg));
5576 bind(&is_data_object);
5580 void MacroAssembler::GetMarkBits(Register addr_reg,
5581 Register bitmap_reg,
5582 Register mask_reg) {
5583 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5584 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5585 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5586 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5587 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5588 sll(t8, t8, kPointerSizeLog2);
5589 Addu(bitmap_reg, bitmap_reg, t8);
5591 sllv(mask_reg, t8, mask_reg);
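// On exit bitmap_reg addresses the mark-bit cell covering addr_reg (page start
// plus cell index * kPointerSize; callers add MemoryChunk::kHeaderSize), and
// mask_reg is the single-bit mask for addr_reg within that cell, i.e. 1 shifted
// left by the bit index (t8 is assumed to hold the constant 1 at this point).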
5595 void MacroAssembler::EnsureNotWhite(
5597 Register bitmap_scratch,
5598 Register mask_scratch,
5599 Register load_scratch,
5600 Label* value_is_white_and_not_data) {
5601 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5602 GetMarkBits(value, bitmap_scratch, mask_scratch);
5604 // If the value is black or grey we don't need to do anything.
5605 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5606 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5607 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5608 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5612 // Since both black and grey have a 1 in the first position and white does
5613 // not have a 1 there, we only need to check one bit.
5614 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5615 And(t8, mask_scratch, load_scratch);
5616 Branch(&done, ne, t8, Operand(zero_reg));
5618 if (emit_debug_code()) {
5619 // Check for impossible bit pattern.
5621 // sll may overflow, making the check conservative.
5622 sll(t8, mask_scratch, 1);
5623 And(t8, load_scratch, t8);
5624 Branch(&ok, eq, t8, Operand(zero_reg));
5625 stop("Impossible marking bit pattern");
5629 // Value is white. We check whether it is data that doesn't need scanning.
5630 // Currently only checks for HeapNumber and non-cons strings.
5631 Register map = load_scratch; // Holds map while checking type.
5632 Register length = load_scratch; // Holds length of object after testing type.
5633 Label is_data_object;
5635 // Check for heap-number
5636 lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5637 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5640 Branch(&skip, ne, t8, Operand(map));
5641 li(length, HeapNumber::kSize);
5642 Branch(&is_data_object);
5646 // Check for strings.
5647 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5648 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5649 // If it's a string and not a cons or sliced string, it contains no GC pointers.
5651 Register instance_type = load_scratch;
5652 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5653 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5654 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5655 // It's a non-indirect (non-cons and non-slice) string.
5656 // If it's external, the length is just ExternalString::kSize.
5657 // Otherwise it's SeqString::kHeaderSize + string->length() * (1 or 2).
5658 // External strings are the only ones with the kExternalStringTag bit set.
5660 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
5661 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
5662 And(t8, instance_type, Operand(kExternalStringTag));
5665 Branch(&skip, eq, t8, Operand(zero_reg));
5666 li(length, ExternalString::kSize);
5667 Branch(&is_data_object);
5671 // Sequential string, either Latin1 or UC16.
5672 // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
5673 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5674 // getting the length multiplied by 2.
5675 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5676 DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
5677 lw(t9, FieldMemOperand(value, String::kLengthOffset));
5678 And(t8, instance_type, Operand(kStringEncodingMask));
5681 Branch(&skip, eq, t8, Operand(zero_reg));
5685 Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5686 And(length, length, Operand(~kObjectAlignmentMask));
5688 bind(&is_data_object);
5689 // Value is a data object, and it is white. Mark it black. Since we know
5690 // that the object is white we can make it black by flipping one bit.
5691 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5692 Or(t8, t8, Operand(mask_scratch));
5693 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
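// White is 00 and black is 10, so setting the first mark bit above is enough to
// turn the white object black; its size (in 'length') is then added to the page's
// live-bytes counter below.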
5695 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5696 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5697 Addu(t8, t8, Operand(length));
5698 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5704 void MacroAssembler::LoadInstanceDescriptors(Register map,
5705 Register descriptors) {
5706 lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5710 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5711 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5712 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5716 void MacroAssembler::EnumLength(Register dst, Register map) {
5717 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5718 lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5719 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
5724 void MacroAssembler::LoadAccessor(Register dst, Register holder,
5726 AccessorComponent accessor) {
5727 lw(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
5728 LoadInstanceDescriptors(dst, dst);
5730 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
5731 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
5732 : AccessorPair::kSetterOffset;
5733 lw(dst, FieldMemOperand(dst, offset));
5737 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5738 Register empty_fixed_array_value = t2;
5739 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5743 // Check if the enum length field is properly initialized, indicating that
5744 // there is an enum cache.
5745 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5749 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5754 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5756 // For all objects but the receiver, check that the cache is empty.
5758 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5762 // Check that there are no elements. Register a2 contains the current JS
5763 // object we've reached through the prototype chain.
5765 lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5766 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5768 // Second chance, the object may be using the empty slow element dictionary.
5769 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5770 Branch(call_runtime, ne, a2, Operand(at));
5773 lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5774 Branch(&next, ne, a2, Operand(null_value));
5778 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5779 DCHECK(!output_reg.is(input_reg));
5781 li(output_reg, Operand(255));
5782 // Normal branch: nop in delay slot.
5783 Branch(&done, gt, input_reg, Operand(output_reg));
5784 // Use delay slot in this branch.
5785 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5786 mov(output_reg, zero_reg); // In delay slot.
5787 mov(output_reg, input_reg); // Value is in range 0..255.
5792 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5793 DoubleRegister input_reg,
5794 DoubleRegister temp_double_reg) {
5799 Move(temp_double_reg, 0.0);
5800 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5802 // Double value is <= 0 or NaN; return 0.
5803 mov(result_reg, zero_reg);
5806 // Double value is > 255; return 255.
5808 Move(temp_double_reg, 255.0);
5809 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5810 li(result_reg, Operand(255));
5813 // In 0-255 range, round and truncate.
5815 cvt_w_d(temp_double_reg, input_reg);
5816 mfc1(result_reg, temp_double_reg);
5821 void MacroAssembler::TestJSArrayForAllocationMemento(
5822 Register receiver_reg,
5823 Register scratch_reg,
5824 Label* no_memento_found,
5826 Label* allocation_memento_present) {
5827 ExternalReference new_space_start =
5828 ExternalReference::new_space_start(isolate());
5829 ExternalReference new_space_allocation_top =
5830 ExternalReference::new_space_allocation_top_address(isolate());
5831 Addu(scratch_reg, receiver_reg,
5832 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
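// scratch_reg now points just past where a trailing AllocationMemento would end.
// Such a memento can only exist if that address is inside new space and below the
// current allocation top, which the two branches below verify before comparing the
// candidate memento's map.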
5833 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5834 li(at, Operand(new_space_allocation_top));
5835 lw(at, MemOperand(at));
5836 Branch(no_memento_found, gt, scratch_reg, Operand(at));
5837 lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5838 if (allocation_memento_present) {
5839 Branch(allocation_memento_present, cond, scratch_reg,
5840 Operand(isolate()->factory()->allocation_memento_map()));
5845 Register GetRegisterThatIsNotOneOf(Register reg1,
5852 if (reg1.is_valid()) regs |= reg1.bit();
5853 if (reg2.is_valid()) regs |= reg2.bit();
5854 if (reg3.is_valid()) regs |= reg3.bit();
5855 if (reg4.is_valid()) regs |= reg4.bit();
5856 if (reg5.is_valid()) regs |= reg5.bit();
5857 if (reg6.is_valid()) regs |= reg6.bit();
5859 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
5860 Register candidate = Register::FromAllocationIndex(i);
5861 if (regs & candidate.bit()) continue;
5869 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5874 DCHECK(!scratch1.is(scratch0));
5875 Factory* factory = isolate()->factory();
5876 Register current = scratch0;
5877 Label loop_again, end;
5879 // Use scratch0 ('current') to walk the prototype chain, starting from the object.
5880 Move(current, object);
5881 lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
5882 lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
5883 Branch(&end, eq, current, Operand(factory->null_value()));
5885 // Loop based on the map going up the prototype chain.
5887 lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
5888 lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
5889 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
5890 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
5891 Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
5892 lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
5893 DecodeField<Map::ElementsKindBits>(scratch1);
5894 Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
5895 lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
5896 Branch(&loop_again, ne, current, Operand(factory->null_value()));
5902 bool AreAliased(Register reg1,
5910 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
5911 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
5912 reg7.is_valid() + reg8.is_valid();
5915 if (reg1.is_valid()) regs |= reg1.bit();
5916 if (reg2.is_valid()) regs |= reg2.bit();
5917 if (reg3.is_valid()) regs |= reg3.bit();
5918 if (reg4.is_valid()) regs |= reg4.bit();
5919 if (reg5.is_valid()) regs |= reg5.bit();
5920 if (reg6.is_valid()) regs |= reg6.bit();
5921 if (reg7.is_valid()) regs |= reg7.bit();
5922 if (reg8.is_valid()) regs |= reg8.bit();
5923 int n_of_non_aliasing_regs = NumRegs(regs);
5925 return n_of_valid_regs != n_of_non_aliasing_regs;
5929 CodePatcher::CodePatcher(byte* address,
5931 FlushICache flush_cache)
5932 : address_(address),
5933 size_(instructions * Assembler::kInstrSize),
5934 masm_(NULL, address, size_ + Assembler::kGap),
5935 flush_cache_(flush_cache) {
5936 // Create a new macro assembler pointing to the address of the code to patch.
5937 // The size is adjusted with kGap in order for the assembler to generate size
5938 // bytes of instructions without failing with buffer size constraints.
5939 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5943 CodePatcher::~CodePatcher() {
5944 // Indicate that code has changed.
5945 if (flush_cache_ == FLUSH) {
5946 CpuFeatures::FlushICache(address_, size_);
5949 // Check that the code was patched as expected.
5950 DCHECK(masm_.pc_ == address_ + size_);
5951 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5955 void CodePatcher::Emit(Instr instr) {
5956 masm()->emit(instr);
5960 void CodePatcher::Emit(Address addr) {
5961 masm()->emit(reinterpret_cast<Instr>(addr));
5965 void CodePatcher::ChangeBranchCondition(Condition cond) {
5966 Instr instr = Assembler::instr_at(masm_.pc_);
5967 DCHECK(Assembler::IsBranch(instr));
5968 uint32_t opcode = Assembler::GetOpcodeField(instr);
5969 // Currently only the 'eq' and 'ne' cond values are supported and the simple
5970 // branch instructions (with opcode being the branch type).
5971 // There are some special cases (see Assembler::IsBranch()) so extending this would be tricky.
5973 DCHECK(opcode == BEQ ||
5981 opcode = (cond == eq) ? BEQ : BNE;
5982 instr = (instr & ~kOpcodeMask) | opcode;
5987 void MacroAssembler::TruncatingDiv(Register result,
5990 DCHECK(!dividend.is(result));
5991 DCHECK(!dividend.is(at));
5992 DCHECK(!result.is(at));
5993 base::MagicNumbersForDivision<uint32_t> mag =
5994 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
5995 li(at, Operand(mag.multiplier));
5996 Mulh(result, dividend, Operand(at));
5997 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
5998 if (divisor > 0 && neg) {
5999 Addu(result, result, Operand(dividend));
6001 if (divisor < 0 && !neg && mag.multiplier > 0) {
6002 Subu(result, result, Operand(dividend));
6004 if (mag.shift > 0) sra(result, result, mag.shift);
6005 srl(at, dividend, 31);
6006 Addu(result, result, Operand(at));
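// This is division by a constant via a precomputed 'magic' multiplier: take the
// high 32 bits of dividend * multiplier, correct by +/- dividend depending on the
// signs, shift arithmetically by mag.shift, and add the dividend's sign bit so the
// result truncates towards zero. Illustrative values (not taken from this code):
// dividing by 3 uses multiplier 0x55555556 and shift 0, so dividend 7 yields a
// high word of 2 (result 2), and dividend -7 yields -3 plus the sign bit, i.e. -2.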
6010 } // namespace internal
6013 #endif // V8_TARGET_ARCH_MIPS