1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
9 #if V8_TARGET_ARCH_MIPS
11 #include "src/base/bits.h"
12 #include "src/base/division-by-constant.h"
13 #include "src/bootstrapper.h"
14 #include "src/codegen.h"
15 #include "src/cpu-profiler.h"
16 #include "src/debug.h"
17 #include "src/isolate-inl.h"
18 #include "src/runtime/runtime.h"
23 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
24 : Assembler(arg_isolate, buffer, size),
25 generating_stub_(false),
27 has_double_zero_reg_set_(false) {
28 if (isolate() != NULL) {
29 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
35 void MacroAssembler::Load(Register dst,
36 const MemOperand& src,
38 DCHECK(!r.IsDouble());
41 } else if (r.IsUInteger8()) {
43 } else if (r.IsInteger16()) {
45 } else if (r.IsUInteger16()) {
53 void MacroAssembler::Store(Register src,
54 const MemOperand& dst,
56 DCHECK(!r.IsDouble());
57 if (r.IsInteger8() || r.IsUInteger8()) {
59 } else if (r.IsInteger16() || r.IsUInteger16()) {
62 if (r.IsHeapObject()) {
64 } else if (r.IsSmi()) {
72 void MacroAssembler::LoadRoot(Register destination,
73 Heap::RootListIndex index) {
74 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
78 void MacroAssembler::LoadRoot(Register destination,
79 Heap::RootListIndex index,
81 Register src1, const Operand& src2) {
82 Branch(2, NegateCondition(cond), src1, src2);
83 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
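// Note (illustrative, not emitted code): with the default PROTECT delay
// slot, Branch(2, ...) above jumps over its delay-slot nop and the lw, so
// the pair behaves like
//   if (cond(src1, src2)) destination = roots[index];
// where roots stands for the root list addressed through s6.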
87 void MacroAssembler::StoreRoot(Register source,
88 Heap::RootListIndex index) {
89 sw(source, MemOperand(s6, index << kPointerSizeLog2));
93 void MacroAssembler::StoreRoot(Register source,
94 Heap::RootListIndex index,
96 Register src1, const Operand& src2) {
97 Branch(2, NegateCondition(cond), src1, src2);
98 sw(source, MemOperand(s6, index << kPointerSizeLog2));
102 // Push and pop all registers that can hold pointers.
103 void MacroAssembler::PushSafepointRegisters() {
104 // Safepoints expect a block of kNumSafepointRegisters values on the
105 // stack, so adjust the stack for unsaved registers.
106 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
107 DCHECK(num_unsaved >= 0);
108 if (num_unsaved > 0) {
109 Subu(sp, sp, Operand(num_unsaved * kPointerSize));
111 MultiPush(kSafepointSavedRegisters);
115 void MacroAssembler::PopSafepointRegisters() {
116 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
117 MultiPop(kSafepointSavedRegisters);
118 if (num_unsaved > 0) {
119 Addu(sp, sp, Operand(num_unsaved * kPointerSize));
124 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
125 sw(src, SafepointRegisterSlot(dst));
129 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
130 lw(dst, SafepointRegisterSlot(src));
134 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
135 // The registers are pushed starting with the highest encoding,
136 // which means that the lowest encodings are closest to the stack pointer.
137 return kSafepointRegisterStackIndexMap[reg_code];
141 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
142 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
146 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
147 UNIMPLEMENTED_MIPS();
148 // General purpose registers are pushed last on the stack.
149 int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
150 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
151 return MemOperand(sp, doubles_size + register_offset);
155 void MacroAssembler::InNewSpace(Register object,
159 DCHECK(cc == eq || cc == ne);
160 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
161 Branch(branch, cc, scratch,
162 Operand(ExternalReference::new_space_start(isolate())));
166 void MacroAssembler::RecordWriteField(
172 SaveFPRegsMode save_fp,
173 RememberedSetAction remembered_set_action,
175 PointersToHereCheck pointers_to_here_check_for_value) {
176 DCHECK(!AreAliased(value, dst, t8, object));
177 // First, check if a write barrier is even needed. The tests below
178 // catch stores of Smis.
181 // Skip barrier if writing a smi.
182 if (smi_check == INLINE_SMI_CHECK) {
183 JumpIfSmi(value, &done);
186 // Although the object register is tagged, the offset is relative to the start
187 // of the object, so the offset must be a multiple of kPointerSize.
188 DCHECK(IsAligned(offset, kPointerSize));
190 Addu(dst, object, Operand(offset - kHeapObjectTag));
191 if (emit_debug_code()) {
193 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
194 Branch(&ok, eq, t8, Operand(zero_reg));
195 stop("Unaligned cell in write barrier");
204 remembered_set_action,
206 pointers_to_here_check_for_value);
210 // Clobber clobbered input registers when running with the debug-code flag
211 // turned on to provoke errors.
212 if (emit_debug_code()) {
213 li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
214 li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
219 // Will clobber 4 registers: object, map, dst, ip. The
220 // register 'object' contains a heap object pointer.
221 void MacroAssembler::RecordWriteForMap(Register object,
225 SaveFPRegsMode fp_mode) {
226 if (emit_debug_code()) {
228 lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
230 kWrongAddressOrValuePassedToRecordWrite,
232 Operand(isolate()->factory()->meta_map()));
235 if (!FLAG_incremental_marking) {
239 if (emit_debug_code()) {
240 lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
242 kWrongAddressOrValuePassedToRecordWrite,
249 // A single check of the map's pages interesting flag suffices, since it is
250 // only set during incremental collection, and then it is guaranteed that
251 // the from object's page's interesting flag is set as well. This optimization
252 // relies on the fact that maps can never be in new space.
254 map, // Used as scratch.
255 MemoryChunk::kPointersToHereAreInterestingMask,
259 Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
260 if (emit_debug_code()) {
262 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
263 Branch(&ok, eq, at, Operand(zero_reg));
264 stop("Unaligned cell in write barrier");
268 // Record the actual write.
269 if (ra_status == kRAHasNotBeenSaved) {
272 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
275 if (ra_status == kRAHasNotBeenSaved) {
281 // Count number of write barriers in generated code.
282 isolate()->counters()->write_barriers_static()->Increment();
283 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
285 // Clobber clobbered registers when running with the debug-code flag
286 // turned on to provoke errors.
287 if (emit_debug_code()) {
288 li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
289 li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
294 // Will clobber 4 registers: object, address, scratch, ip. The
295 // register 'object' contains a heap object pointer. The heap object
296 // tag is shifted away.
297 void MacroAssembler::RecordWrite(
302 SaveFPRegsMode fp_mode,
303 RememberedSetAction remembered_set_action,
305 PointersToHereCheck pointers_to_here_check_for_value) {
306 DCHECK(!AreAliased(object, address, value, t8));
307 DCHECK(!AreAliased(object, address, value, t9));
309 if (emit_debug_code()) {
310 lw(at, MemOperand(address));
312 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
315 if (remembered_set_action == OMIT_REMEMBERED_SET &&
316 !FLAG_incremental_marking) {
320 // First, check if a write barrier is even needed. The tests below
321 // catch stores of smis and stores into the young generation.
324 if (smi_check == INLINE_SMI_CHECK) {
325 DCHECK_EQ(0, kSmiTag);
326 JumpIfSmi(value, &done);
329 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
331 value, // Used as scratch.
332 MemoryChunk::kPointersToHereAreInterestingMask,
336 CheckPageFlag(object,
337 value, // Used as scratch.
338 MemoryChunk::kPointersFromHereAreInterestingMask,
342 // Record the actual write.
343 if (ra_status == kRAHasNotBeenSaved) {
346 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
349 if (ra_status == kRAHasNotBeenSaved) {
355 // Count number of write barriers in generated code.
356 isolate()->counters()->write_barriers_static()->Increment();
357 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
360 // Clobber clobbered registers when running with the debug-code flag
361 // turned on to provoke errors.
362 if (emit_debug_code()) {
363 li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
364 li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
369 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
372 SaveFPRegsMode fp_mode,
373 RememberedSetFinalAction and_then) {
375 if (emit_debug_code()) {
377 JumpIfNotInNewSpace(object, scratch, &ok);
378 stop("Remembered set pointer is in new space");
381 // Load store buffer top.
382 ExternalReference store_buffer =
383 ExternalReference::store_buffer_top(isolate());
384 li(t8, Operand(store_buffer));
385 lw(scratch, MemOperand(t8));
386 // Store pointer to buffer and increment buffer top.
387 sw(address, MemOperand(scratch));
388 Addu(scratch, scratch, kPointerSize);
389 // Write back new top of buffer.
390 sw(scratch, MemOperand(t8));
391 // Check for end of buffer: the overflow stub is called when the end of
392 // the buffer has been reached.
393 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
394 if (and_then == kFallThroughAtEnd) {
395 Branch(&done, eq, t8, Operand(zero_reg));
397 DCHECK(and_then == kReturnAtEnd);
398 Ret(eq, t8, Operand(zero_reg));
401 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
402 CallStub(&store_buffer_overflow);
405 if (and_then == kReturnAtEnd) {
411 // -----------------------------------------------------------------------------
412 // Allocation support.
415 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
420 DCHECK(!holder_reg.is(scratch));
421 DCHECK(!holder_reg.is(at));
422 DCHECK(!scratch.is(at));
424 // Load current lexical context from the stack frame.
425 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
426 // In debug mode, make sure the lexical context is set.
428 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
429 scratch, Operand(zero_reg));
432 // Load the native context of the current context.
434 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
435 lw(scratch, FieldMemOperand(scratch, offset));
436 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
438 // Check the context is a native context.
439 if (emit_debug_code()) {
440 push(holder_reg); // Temporarily save holder on the stack.
441 // Read the first word and compare to the native_context_map.
442 lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
443 LoadRoot(at, Heap::kNativeContextMapRootIndex);
444 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
445 holder_reg, Operand(at));
446 pop(holder_reg); // Restore holder.
449 // Check if both contexts are the same.
450 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
451 Branch(&same_contexts, eq, scratch, Operand(at));
453 // Check the context is a native context.
454 if (emit_debug_code()) {
455 push(holder_reg); // Temporarily save holder on the stack.
456 mov(holder_reg, at); // Move at to its holding place.
457 LoadRoot(at, Heap::kNullValueRootIndex);
458 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
459 holder_reg, Operand(at));
461 lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
462 LoadRoot(at, Heap::kNativeContextMapRootIndex);
463 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
464 holder_reg, Operand(at));
465 // Restore at is not needed. at is reloaded below.
466 pop(holder_reg); // Restore holder.
467 // Restore at to holder's context.
468 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
471 // Check that the security token in the calling global object is
472 // compatible with the security token in the receiving global object.
474 int token_offset = Context::kHeaderSize +
475 Context::SECURITY_TOKEN_INDEX * kPointerSize;
477 lw(scratch, FieldMemOperand(scratch, token_offset));
478 lw(at, FieldMemOperand(at, token_offset));
479 Branch(miss, ne, scratch, Operand(at));
481 bind(&same_contexts);
485 // Compute the hash code from the untagged key. This must be kept in sync with
486 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
487 // code-stubs-hydrogen.cc.
488 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
489 // First of all we assign the hash seed to scratch.
490 LoadRoot(scratch, Heap::kHashSeedRootIndex);
493 // Xor original key with a seed.
494 xor_(reg0, reg0, scratch);
496 // Compute the hash code from the untagged key. This must be kept in sync
497 // with ComputeIntegerHash in utils.h.
499 // hash = ~hash + (hash << 15);
500 nor(scratch, reg0, zero_reg);
502 addu(reg0, scratch, at);
504 // hash = hash ^ (hash >> 12);
506 xor_(reg0, reg0, at);
508 // hash = hash + (hash << 2);
510 addu(reg0, reg0, at);
512 // hash = hash ^ (hash >> 4);
514 xor_(reg0, reg0, at);
516 // hash = hash * 2057;
517 sll(scratch, reg0, 11);
519 addu(reg0, reg0, at);
520 addu(reg0, reg0, scratch);
522 // hash = hash ^ (hash >> 16);
524 xor_(reg0, reg0, at);
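// For reference, the sequence above computes the same function as this
// C-style sketch (mirroring the step comments; ComputeIntegerHash in
// utils.h is the authoritative version):
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;  // i.e. hash + (hash << 3) + (hash << 11)
//   hash = hash ^ (hash >> 16);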
528 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
537 // elements - holds the slow-case elements of the receiver on entry.
538 // Unchanged unless 'result' is the same register.
540 // key - holds the smi key on entry.
541 // Unchanged unless 'result' is the same register.
544 // result - holds the result on exit if the load succeeded.
545 // Allowed to be the same as 'key' or 'result'.
546 // Unchanged on bailout so 'key' or 'result' can be used
547 // in further computation.
549 // Scratch registers:
551 // reg0 - holds the untagged key on entry and holds the hash once computed.
553 // reg1 - Used to hold the capacity mask of the dictionary.
555 // reg2 - Used for the index into the dictionary.
556 // at - Temporary (avoid MacroAssembler instructions also using 'at').
559 GetNumberHash(reg0, reg1);
561 // Compute the capacity mask.
562 lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
563 sra(reg1, reg1, kSmiTagSize);
564 Subu(reg1, reg1, Operand(1));
566 // Generate an unrolled loop that performs a few probes before giving up.
567 for (int i = 0; i < kNumberDictionaryProbes; i++) {
568 // Use reg2 for index calculations and keep the hash intact in reg0.
570 // Compute the masked index: (hash + i + i * i) & mask.
572 Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
574 and_(reg2, reg2, reg1);
576 // Scale the index by multiplying by the element size.
577 DCHECK(SeededNumberDictionary::kEntrySize == 3);
578 sll(at, reg2, 1); // 2x.
579 addu(reg2, reg2, at); // reg2 = reg2 * 3.
581 // Check if the key is identical to the name.
582 sll(at, reg2, kPointerSizeLog2);
583 addu(reg2, elements, at);
585 lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
586 if (i != kNumberDictionaryProbes - 1) {
587 Branch(&done, eq, key, Operand(at));
589 Branch(miss, ne, key, Operand(at));
594 // Check that the value is a field property.
595 // reg2: elements + (index * kPointerSize).
596 const int kDetailsOffset =
597 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
598 lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
600 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
601 Branch(miss, ne, at, Operand(zero_reg));
603 // Get the value at the masked, scaled index and return.
604 const int kValueOffset =
605 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
606 lw(result, FieldMemOperand(reg2, kValueOffset));
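// Illustrative sketch (not emitted code) of the probing scheme above, with
// hypothetical helper names; each entry is kEntrySize == 3 words:
//   uint32_t mask = capacity - 1;
//   for (int i = 0; i < kNumberDictionaryProbes; i++) {
//     uint32_t index = (hash + i + i * i) & mask;  // masked quadratic probe
//     if (entry_key(index) == key) {
//       // verify the details word marks a field property, then load value
//     }
//   }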
610 // ---------------------------------------------------------------------------
611 // Instruction macros.
613 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
615 addu(rd, rs, rt.rm());
617 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
618 addiu(rd, rs, rt.imm32_);
620 // li handles the relocation.
629 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
631 subu(rd, rs, rt.rm());
633 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
634 addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
636 // li handles the relocation.
645 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
647 if (IsMipsArchVariant(kLoongson)) {
651 mul(rd, rs, rt.rm());
654 // li handles the relocation.
657 if (IsMipsArchVariant(kLoongson)) {
667 void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
668 Register rs, const Operand& rt) {
670 if (!IsMipsArchVariant(kMips32r6)) {
676 DCHECK(!rd_hi.is(rs));
677 DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
678 muh(rd_hi, rs, rt.rm());
679 mul(rd_lo, rs, rt.rm());
681 DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
682 mul(rd_lo, rs, rt.rm());
683 muh(rd_hi, rs, rt.rm());
687 // li handles the relocation.
690 if (!IsMipsArchVariant(kMips32r6)) {
696 DCHECK(!rd_hi.is(rs));
697 DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
701 DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
710 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
712 if (!IsMipsArchVariant(kMips32r6)) {
716 muh(rd, rs, rt.rm());
719 // li handles the relocation.
722 if (!IsMipsArchVariant(kMips32r6)) {
732 void MacroAssembler::Mult(Register rs, const Operand& rt) {
736 // li handles the relocation.
744 void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
746 if (!IsMipsArchVariant(kMips32r6)) {
750 muhu(rd, rs, rt.rm());
753 // li handles the relocation.
756 if (!IsMipsArchVariant(kMips32r6)) {
766 void MacroAssembler::Multu(Register rs, const Operand& rt) {
770 // li handles the relocation.
778 void MacroAssembler::Div(Register rs, const Operand& rt) {
782 // li handles the relocation.
790 void MacroAssembler::Div(Register rem, Register res,
791 Register rs, const Operand& rt) {
793 if (!IsMipsArchVariant(kMips32r6)) {
798 div(res, rs, rt.rm());
799 mod(rem, rs, rt.rm());
802 // li handles the relocation.
805 if (!IsMipsArchVariant(kMips32r6)) {
817 void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
819 if (!IsMipsArchVariant(kMips32r6)) {
823 div(res, rs, rt.rm());
826 // li handles the relocation.
829 if (!IsMipsArchVariant(kMips32r6)) {
839 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
841 if (!IsMipsArchVariant(kMips32r6)) {
845 mod(rd, rs, rt.rm());
848 // li handles the relocation.
851 if (!IsMipsArchVariant(kMips32r6)) {
861 void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
863 if (!IsMipsArchVariant(kMips32r6)) {
867 modu(rd, rs, rt.rm());
870 // li handles the relocation.
873 if (!IsMipsArchVariant(kMips32r6)) {
883 void MacroAssembler::Divu(Register rs, const Operand& rt) {
887 // li handles the relocation.
895 void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
897 if (!IsMipsArchVariant(kMips32r6)) {
901 divu(res, rs, rt.rm());
904 // li handles the relocation.
907 if (!IsMipsArchVariant(kMips32r6)) {
917 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
919 and_(rd, rs, rt.rm());
921 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
922 andi(rd, rs, rt.imm32_);
924 // li handles the relocation.
933 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
935 or_(rd, rs, rt.rm());
937 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
938 ori(rd, rs, rt.imm32_);
940 // li handles the relocation.
949 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
951 xor_(rd, rs, rt.rm());
953 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
954 xori(rd, rs, rt.imm32_);
956 // li handles the relocation.
965 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
967 nor(rd, rs, rt.rm());
969 // li handles the relocation.
977 void MacroAssembler::Neg(Register rs, const Operand& rt) {
980 DCHECK(!at.is(rt.rm()));
982 xor_(rs, rt.rm(), at);
986 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
988 slt(rd, rs, rt.rm());
990 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
991 slti(rd, rs, rt.imm32_);
993 // li handles the relocation.
1002 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1004 sltu(rd, rs, rt.rm());
1006 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
1007 sltiu(rd, rs, rt.imm32_);
1009 // li handles the relocation.
1018 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1019 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1021 rotrv(rd, rs, rt.rm());
1023 rotr(rd, rs, rt.imm32_);
1027 subu(at, zero_reg, rt.rm());
1029 srlv(rd, rs, rt.rm());
1032 if (rt.imm32_ == 0) {
1035 srl(at, rs, rt.imm32_);
1036 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
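// The emulated path relies on the rotate identity (for 1 <= n <= 31):
//   rotr(x, n) == (x >> n) | (x << (32 - n))
// e.g. rotr(0x80000001, 1) == 0xC0000000. The n == 0 case degenerates to a
// plain move and is handled separately above.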
1044 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1045 if (IsMipsArchVariant(kLoongson)) {
1053 // ------------Pseudo-instructions-------------
1055 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1057 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1061 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1063 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
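// Ulw/Usw emulate unaligned word accesses with an lwl/lwr (swl/swr) pair.
// On a little-endian target the "left" access at offset + 3 covers the
// high-order bytes and the matching "right" access at the base offset
// (elided here) covers the low-order bytes, e.g. for Ulw:
//   lwl rd, offset + 3(base)
//   lwr rd, offset + 0(base)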
1067 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1068 AllowDeferredHandleDereference smi_check;
1069 if (value->IsSmi()) {
1070 li(dst, Operand(value), mode);
1072 DCHECK(value->IsHeapObject());
1073 if (isolate()->heap()->InNewSpace(*value)) {
1074 Handle<Cell> cell = isolate()->factory()->NewCell(value);
1075 li(dst, Operand(cell));
1076 lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
1078 li(dst, Operand(value));
1084 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1085 DCHECK(!j.is_reg());
1086 BlockTrampolinePoolScope block_trampoline_pool(this);
1087 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1088 // Normal load of an immediate value which does not need Relocation Info.
1089 if (is_int16(j.imm32_)) {
1090 addiu(rd, zero_reg, j.imm32_);
1091 } else if (!(j.imm32_ & kHiMask)) {
1092 ori(rd, zero_reg, j.imm32_);
1093 } else if (!(j.imm32_ & kImm16Mask)) {
1094 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1096 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1097 ori(rd, rd, (j.imm32_ & kImm16Mask));
1100 if (MustUseReg(j.rmode_)) {
1101 RecordRelocInfo(j.rmode_, j.imm32_);
1103 // We always emit the same number of instructions, since this code may be
1104 // patched later to load a different value that may need 2 instructions.
1105 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
1106 ori(rd, rd, (j.imm32_ & kImm16Mask));
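// Example of the fixed two-instruction form: li(t1, 0x12345678, CONSTANT_SIZE)
// would emit
//   lui t1, 0x1234        // upper halfword
//   ori t1, t1, 0x5678    // lower halfword
// keeping the load patchable at a constant size.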
1111 void MacroAssembler::MultiPush(RegList regs) {
1112 int16_t num_to_push = NumberOfBitsSet(regs);
1113 int16_t stack_offset = num_to_push * kPointerSize;
1115 Subu(sp, sp, Operand(stack_offset));
1116 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1117 if ((regs & (1 << i)) != 0) {
1118 stack_offset -= kPointerSize;
1119 sw(ToRegister(i), MemOperand(sp, stack_offset));
1125 void MacroAssembler::MultiPushReversed(RegList regs) {
1126 int16_t num_to_push = NumberOfBitsSet(regs);
1127 int16_t stack_offset = num_to_push * kPointerSize;
1129 Subu(sp, sp, Operand(stack_offset));
1130 for (int16_t i = 0; i < kNumRegisters; i++) {
1131 if ((regs & (1 << i)) != 0) {
1132 stack_offset -= kPointerSize;
1133 sw(ToRegister(i), MemOperand(sp, stack_offset));
1139 void MacroAssembler::MultiPop(RegList regs) {
1140 int16_t stack_offset = 0;
1142 for (int16_t i = 0; i < kNumRegisters; i++) {
1143 if ((regs & (1 << i)) != 0) {
1144 lw(ToRegister(i), MemOperand(sp, stack_offset));
1145 stack_offset += kPointerSize;
1148 addiu(sp, sp, stack_offset);
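// Push/pop layout (sketch): MultiPush stores from the highest register
// encoding down, so e.g. MultiPush(a0.bit() | a1.bit() | ra.bit()) leaves
//   sp + 8: ra  (highest encoding)
//   sp + 4: a1
//   sp + 0: a0  (lowest encoding, closest to sp)
// and MultiPop above walks the encodings upward to restore the same order.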
1152 void MacroAssembler::MultiPopReversed(RegList regs) {
1153 int16_t stack_offset = 0;
1155 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1156 if ((regs & (1 << i)) != 0) {
1157 lw(ToRegister(i), MemOperand(sp, stack_offset));
1158 stack_offset += kPointerSize;
1161 addiu(sp, sp, stack_offset);
1165 void MacroAssembler::MultiPushFPU(RegList regs) {
1166 int16_t num_to_push = NumberOfBitsSet(regs);
1167 int16_t stack_offset = num_to_push * kDoubleSize;
1169 Subu(sp, sp, Operand(stack_offset));
1170 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1171 if ((regs & (1 << i)) != 0) {
1172 stack_offset -= kDoubleSize;
1173 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1179 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1180 int16_t num_to_push = NumberOfBitsSet(regs);
1181 int16_t stack_offset = num_to_push * kDoubleSize;
1183 Subu(sp, sp, Operand(stack_offset));
1184 for (int16_t i = 0; i < kNumRegisters; i++) {
1185 if ((regs & (1 << i)) != 0) {
1186 stack_offset -= kDoubleSize;
1187 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1193 void MacroAssembler::MultiPopFPU(RegList regs) {
1194 int16_t stack_offset = 0;
1196 for (int16_t i = 0; i < kNumRegisters; i++) {
1197 if ((regs & (1 << i)) != 0) {
1198 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1199 stack_offset += kDoubleSize;
1202 addiu(sp, sp, stack_offset);
1206 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1207 int16_t stack_offset = 0;
1209 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1210 if ((regs & (1 << i)) != 0) {
1211 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1212 stack_offset += kDoubleSize;
1215 addiu(sp, sp, stack_offset);
1219 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
1220 RegList saved_regs = kJSCallerSaved | ra.bit();
1221 MultiPush(saved_regs);
1222 AllowExternalCallThatCantCauseGC scope(this);
1224 // Save to a0 in case address == t0.
1226 PrepareCallCFunction(2, t0);
1228 li(a1, instructions * kInstrSize);
1229 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
1230 MultiPop(saved_regs);
1234 void MacroAssembler::Ext(Register rt,
1239 DCHECK(pos + size < 33);
1241 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1242 ext_(rt, rs, pos, size);
1244 // Move rs to rt and shift it left then right to get the
1245 // desired bitfield on the right side and zeroes on the left.
1246 int shift_left = 32 - (pos + size);
1247 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
1249 int shift_right = 32 - size;
1250 if (shift_right > 0) {
1251 srl(rt, rt, shift_right);
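// I.e. the fallback computes, for a 32-bit word x:
//   ext(x, pos, size) == (x << (32 - pos - size)) >> (32 - size)
// pushing the field against the MSB first so the logical right shift
// returns it zero-extended at bit 0.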
1257 void MacroAssembler::Ins(Register rt,
1262 DCHECK(pos + size <= 32);
1265 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
1266 ins_(rt, rs, pos, size);
1268 DCHECK(!rt.is(t8) && !rs.is(t8));
1269 Subu(at, zero_reg, Operand(1));
1270 srl(at, at, 32 - size);
1274 nor(at, at, zero_reg);
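// The emulated insert amounts to a masked merge (sketch; at and t8 are the
// scratch registers, hence the DCHECK above):
//   mask = ((1u << size) - 1) << pos;
//   rt = (rt & ~mask) | ((rs << pos) & mask);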
1281 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1283 FPURegister scratch) {
1284 // Move the data from fs to t8.
1286 Cvt_d_uw(fd, t8, scratch);
1290 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1292 FPURegister scratch) {
1293 // Convert rs to a FP value in fd (and fd + 1).
1294 // We do this by converting rs with its MSB cleared (to avoid a signed
1295 // conversion), then adding 2^31 back to the result if the MSB was set.
1297 DCHECK(!fd.is(scratch));
1301 // Save rs's MSB to t9.
1305 // Move the result to fd.
1308 // Convert fd to a real FP value.
1311 Label conversion_done;
1313 // If rs's MSB was 0, it's done.
1314 // Otherwise we need to add that to the FP register.
1315 Branch(&conversion_done, eq, t9, Operand(zero_reg));
1317 // Load 2^31 into scratch as its float representation.
1319 mtc1(zero_reg, scratch);
1322 add_d(fd, fd, scratch);
1324 bind(&conversion_done);
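// Worked example of the scheme above: for rs == 0x80000003 the MSB is set,
// so 3 is converted to double and 2^31 is added back, yielding
// 2147483651.0, the unsigned value of 0x80000003.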
1328 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1330 FPURegister scratch) {
1331 Trunc_uw_d(fs, t8, scratch);
1336 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1337 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1347 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1348 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1358 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1359 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1369 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1370 if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
1380 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1382 FPURegister scratch) {
1383 DCHECK(!fd.is(scratch));
1386 // Load 2^31 into scratch as its float representation.
1388 mtc1(zero_reg, scratch);
1390 // Test if scratch > fd.
1391 // If fd < 2^31 we can convert it normally.
1392 Label simple_convert;
1393 BranchF(&simple_convert, NULL, lt, fd, scratch);
1395 // First we subtract 2^31 from fd, truncate that into rs,
1396 // and then add 2^31 back to rs.
1397 sub_d(scratch, fd, scratch);
1398 trunc_w_d(scratch, scratch);
1400 Or(rs, rs, 1 << 31);
1404 // Simple conversion.
1405 bind(&simple_convert);
1406 trunc_w_d(scratch, fd);
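// Worked example: for fd == 3000000000.0 (>= 2^31) the slow path computes
// trunc(3000000000.0 - 2^31) == 852516352, and OR-ing in bit 31 restores
// 3000000000 as an unsigned result.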
1413 void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
1417 mtc1(rt, fs.high());
1422 void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
1426 mfc1(rt, fs.high());
1431 void MacroAssembler::BranchF(Label* target,
1436 BranchDelaySlot bd) {
1437 BlockTrampolinePoolScope block_trampoline_pool(this);
1443 DCHECK(nan || target);
1444 // Check for unordered (NaN) cases.
1446 if (!IsMipsArchVariant(kMips32r6)) {
1447 c(UN, D, cmp1, cmp2);
1450 // Use kDoubleCompareReg for comparison result. It has to be unavailable
1451 // to the Lithium register allocator.
1452 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1453 cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
1454 bc1nez(nan, kDoubleCompareReg);
1458 if (!IsMipsArchVariant(kMips32r6)) {
1460 // Here NaN cases were either handled by this function or are assumed to
1461 // have been handled by the caller.
1464 c(OLT, D, cmp1, cmp2);
1468 c(ULE, D, cmp1, cmp2);
1472 c(ULT, D, cmp1, cmp2);
1476 c(OLE, D, cmp1, cmp2);
1480 c(EQ, D, cmp1, cmp2);
1484 c(UEQ, D, cmp1, cmp2);
1488 c(EQ, D, cmp1, cmp2);
1492 c(UEQ, D, cmp1, cmp2);
1501 // Here NaN cases were either handled by this function or are assumed to
1502 // have been handled by the caller.
1503 // Unsigned conditions are treated as their signed counterparts.
1504 // Use kDoubleCompareReg for the comparison result; it is
1505 // valid in fp64 (FR = 1) mode, which is implied for mips32r6.
1506 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1509 cmp(OLT, L, kDoubleCompareReg, cmp1, cmp2);
1510 bc1nez(target, kDoubleCompareReg);
1513 cmp(ULE, L, kDoubleCompareReg, cmp1, cmp2);
1514 bc1eqz(target, kDoubleCompareReg);
1517 cmp(ULT, L, kDoubleCompareReg, cmp1, cmp2);
1518 bc1eqz(target, kDoubleCompareReg);
1521 cmp(OLE, L, kDoubleCompareReg, cmp1, cmp2);
1522 bc1nez(target, kDoubleCompareReg);
1525 cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
1526 bc1nez(target, kDoubleCompareReg);
1529 cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
1530 bc1nez(target, kDoubleCompareReg);
1533 cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
1534 bc1eqz(target, kDoubleCompareReg);
1537 cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
1538 bc1eqz(target, kDoubleCompareReg);
1546 if (bd == PROTECT) {
1552 void MacroAssembler::Move(FPURegister dst, float imm) {
1553 li(at, Operand(bit_cast<int32_t>(imm)));
1558 void MacroAssembler::Move(FPURegister dst, double imm) {
1559 static const DoubleRepresentation minus_zero(-0.0);
1560 static const DoubleRepresentation zero(0.0);
1561 DoubleRepresentation value_rep(imm);
1562 // Handle special values first.
1563 if (value_rep == zero && has_double_zero_reg_set_) {
1564 mov_d(dst, kDoubleRegZero);
1565 } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
1566 neg_d(dst, kDoubleRegZero);
1569 DoubleAsTwoUInt32(imm, &lo, &hi);
1570 // Move the low part of the double into the lower of the corresponding FPU
1571 // register of FPU register pair.
1573 li(at, Operand(lo));
1576 mtc1(zero_reg, dst);
1578 // Move the high part of the double into the higher of the corresponding FPU
1579 // register of FPU register pair.
1581 li(at, Operand(hi));
1584 Mthc1(zero_reg, dst);
1586 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
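// The general path materializes the IEEE-754 bit pattern directly:
// DoubleAsTwoUInt32 splits imm into lo/hi words, e.g. 1.0 gives
// lo == 0x00000000 and hi == 0x3FF00000, which are moved in with
// mtc1/Mthc1 (using zero_reg when a word happens to be zero).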
1591 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1592 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
1594 Branch(&done, ne, rt, Operand(zero_reg));
1603 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1604 if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
1606 Branch(&done, eq, rt, Operand(zero_reg));
1615 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1616 if (IsMipsArchVariant(kLoongson)) {
1617 // Tests an FP condition code and then conditionally move rs to rd.
1618 // We do not currently use any FPU cc bit other than bit 0.
1620 DCHECK(!(rs.is(t8) || rd.is(t8)));
1622 Register scratch = t8;
1623 // For testing purposes we need to fetch the contents of the FCSR register
1624 // and then test its cc (floating point condition code) bit (for cc = 0,
1625 // this is bit 23 of the FCSR).
1626 cfc1(scratch, FCSR);
1627 // For the MIPS I, II and III architectures, the contents of scratch are
1628 // UNPREDICTABLE for the instruction immediately following CFC1.
1630 srl(scratch, scratch, 16);
1631 andi(scratch, scratch, 0x0080);
1632 Branch(&done, eq, scratch, Operand(zero_reg));
1641 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1642 if (IsMipsArchVariant(kLoongson)) {
1643 // Tests an FP condition code and then conditionally move rs to rd.
1644 // We do not currently use any FPU cc bit other than bit 0.
1646 DCHECK(!(rs.is(t8) || rd.is(t8)));
1648 Register scratch = t8;
1649 // For testing purposes we need to fetch the contents of the FCSR register
1650 // and then test its cc (floating point condition code) bit (for cc = 0,
1651 // this is bit 23 of the FCSR).
1652 cfc1(scratch, FCSR);
1653 // For the MIPS I, II and III architectures, the contents of scratch are
1654 // UNPREDICTABLE for the instruction immediately following CFC1.
1656 srl(scratch, scratch, 16);
1657 andi(scratch, scratch, 0x0080);
1658 Branch(&done, ne, scratch, Operand(zero_reg));
1667 void MacroAssembler::Clz(Register rd, Register rs) {
1668 if (IsMipsArchVariant(kLoongson)) {
1669 DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1671 Register scratch = t9;
1677 and_(scratch, at, mask);
1678 Branch(&end, ne, scratch, Operand(zero_reg));
1680 Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1689 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1691 DoubleRegister double_input,
1693 DoubleRegister double_scratch,
1694 Register except_flag,
1695 CheckForInexactConversion check_inexact) {
1696 DCHECK(!result.is(scratch));
1697 DCHECK(!double_input.is(double_scratch));
1698 DCHECK(!except_flag.is(scratch));
1702 // Clear the except flag (0 = no exception)
1703 mov(except_flag, zero_reg);
1705 // Test for values that can be exactly represented as a signed 32-bit integer.
1706 cvt_w_d(double_scratch, double_input);
1707 mfc1(result, double_scratch);
1708 cvt_d_w(double_scratch, double_scratch);
1709 BranchF(&done, NULL, eq, double_input, double_scratch);
1711 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1713 if (check_inexact == kDontCheckForInexactConversion) {
1714 // Ignore inexact exceptions.
1715 except_mask &= ~kFCSRInexactFlagMask;
1719 cfc1(scratch, FCSR);
1720 // Disable FPU exceptions.
1721 ctc1(zero_reg, FCSR);
1723 // Do operation based on rounding mode.
1724 switch (rounding_mode) {
1725 case kRoundToNearest:
1726 Round_w_d(double_scratch, double_input);
1729 Trunc_w_d(double_scratch, double_input);
1731 case kRoundToPlusInf:
1732 Ceil_w_d(double_scratch, double_input);
1734 case kRoundToMinusInf:
1735 Floor_w_d(double_scratch, double_input);
1737 } // End of switch-statement.
1740 cfc1(except_flag, FCSR);
1742 ctc1(scratch, FCSR);
1743 // Move the converted value into the result register.
1744 mfc1(result, double_scratch);
1746 // Check for fpu exceptions.
1747 And(except_flag, except_flag, Operand(except_mask));
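// In summary: the fast path converts with cvt_w_d, converts back with
// cvt_d_w, and compares against the input; equality means the double
// round-trips exactly and except_flag stays 0. Otherwise the value is
// rounded under rounding_mode with FCSR exceptions disabled, and the
// accumulated FCSR flags (masked by except_mask) are left in except_flag.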
1753 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1754 DoubleRegister double_input,
1756 DoubleRegister single_scratch = kLithiumScratchDouble.low();
1757 Register scratch = at;
1758 Register scratch2 = t9;
1760 // Clear cumulative exception flags and save the FCSR.
1761 cfc1(scratch2, FCSR);
1762 ctc1(zero_reg, FCSR);
1763 // Try a conversion to a signed integer.
1764 trunc_w_d(single_scratch, double_input);
1765 mfc1(result, single_scratch);
1766 // Retrieve and restore the FCSR.
1767 cfc1(scratch, FCSR);
1768 ctc1(scratch2, FCSR);
1769 // Check for overflow and NaNs.
1772 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1773 // If we had no exceptions we are done.
1774 Branch(done, eq, scratch, Operand(zero_reg));
1778 void MacroAssembler::TruncateDoubleToI(Register result,
1779 DoubleRegister double_input) {
1782 TryInlineTruncateDoubleToI(result, double_input, &done);
1784 // If we fell through, the inline version didn't succeed; call the stub instead.
1786 Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
1787 sdc1(double_input, MemOperand(sp, 0));
1789 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
1792 Addu(sp, sp, Operand(kDoubleSize));
1799 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1801 DoubleRegister double_scratch = f12;
1802 DCHECK(!result.is(object));
1804 ldc1(double_scratch,
1805 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1806 TryInlineTruncateDoubleToI(result, double_scratch, &done);
1808 // If we fell through, the inline version didn't succeed; call the stub instead.
1810 DoubleToIStub stub(isolate(),
1813 HeapNumber::kValueOffset - kHeapObjectTag,
1823 void MacroAssembler::TruncateNumberToI(Register object,
1825 Register heap_number_map,
1827 Label* not_number) {
1829 DCHECK(!result.is(object));
1831 UntagAndJumpIfSmi(result, object, &done);
1832 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1833 TruncateHeapNumberToI(result, object);
1839 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1841 int num_least_bits) {
1842 Ext(dst, src, kSmiTagSize, num_least_bits);
1846 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1848 int num_least_bits) {
1849 And(dst, src, Operand((1 << num_least_bits) - 1));
1853 // Emulated conditional branches do not emit a nop in the branch delay slot.
1855 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1856 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
1857 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1858 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
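// I.e. an unconditional branch must be written with cc_always, zero_reg and
// Operand(zero_reg), while a conditional branch must compare at least one
// real register.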
1861 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1862 BranchShort(offset, bdslot);
1866 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1868 BranchDelaySlot bdslot) {
1869 BranchShort(offset, cond, rs, rt, bdslot);
1873 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1874 if (L->is_bound()) {
1876 BranchShort(L, bdslot);
1881 if (is_trampoline_emitted()) {
1884 BranchShort(L, bdslot);
1890 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1892 BranchDelaySlot bdslot) {
1893 if (L->is_bound()) {
1895 BranchShort(L, cond, rs, rt, bdslot);
1897 if (cond != cc_always) {
1899 Condition neg_cond = NegateCondition(cond);
1900 BranchShort(&skip, neg_cond, rs, rt);
1908 if (is_trampoline_emitted()) {
1909 if (cond != cc_always) {
1911 Condition neg_cond = NegateCondition(cond);
1912 BranchShort(&skip, neg_cond, rs, rt);
1919 BranchShort(L, cond, rs, rt, bdslot);
1925 void MacroAssembler::Branch(Label* L,
1928 Heap::RootListIndex index,
1929 BranchDelaySlot bdslot) {
1930 LoadRoot(at, index);
1931 Branch(L, cond, rs, Operand(at), bdslot);
1935 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1938 // Emit a nop in the branch delay slot if required.
1939 if (bdslot == PROTECT)
1944 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1946 BranchDelaySlot bdslot) {
1947 BRANCH_ARGS_CHECK(cond, rs, rt);
1948 DCHECK(!rs.is(zero_reg));
1949 Register r2 = no_reg;
1950 Register scratch = at;
1953 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1955 BlockTrampolinePoolScope block_trampoline_pool(this);
1962 beq(rs, r2, offset);
1965 bne(rs, r2, offset);
1967 // Signed comparison.
1969 if (r2.is(zero_reg)) {
1972 slt(scratch, r2, rs);
1973 bne(scratch, zero_reg, offset);
1977 if (r2.is(zero_reg)) {
1980 slt(scratch, rs, r2);
1981 beq(scratch, zero_reg, offset);
1985 if (r2.is(zero_reg)) {
1988 slt(scratch, rs, r2);
1989 bne(scratch, zero_reg, offset);
1993 if (r2.is(zero_reg)) {
1996 slt(scratch, r2, rs);
1997 beq(scratch, zero_reg, offset);
2000 // Unsigned comparison.
2002 if (r2.is(zero_reg)) {
2003 bne(rs, zero_reg, offset);
2005 sltu(scratch, r2, rs);
2006 bne(scratch, zero_reg, offset);
2009 case Ugreater_equal:
2010 if (r2.is(zero_reg)) {
2013 sltu(scratch, rs, r2);
2014 beq(scratch, zero_reg, offset);
2018 if (r2.is(zero_reg)) {
2019 // No code needs to be emitted.
2022 sltu(scratch, rs, r2);
2023 bne(scratch, zero_reg, offset);
2027 if (r2.is(zero_reg)) {
2028 beq(rs, zero_reg, offset);
2030 sltu(scratch, r2, rs);
2031 beq(scratch, zero_reg, offset);
2038 // Be careful to always use shifted_branch_offset only just before the
2039 // branch instruction, as the location will be remembered for patching the
2041 BlockTrampolinePoolScope block_trampoline_pool(this);
2047 if (rt.imm32_ == 0) {
2048 beq(rs, zero_reg, offset);
2050 // We don't want any other register but scratch clobbered.
2051 DCHECK(!scratch.is(rs));
2054 beq(rs, r2, offset);
2058 if (rt.imm32_ == 0) {
2059 bne(rs, zero_reg, offset);
2061 // We don't want any other register but scratch clobbered.
2062 DCHECK(!scratch.is(rs));
2065 bne(rs, r2, offset);
2068 // Signed comparison.
2070 if (rt.imm32_ == 0) {
2075 slt(scratch, r2, rs);
2076 bne(scratch, zero_reg, offset);
2080 if (rt.imm32_ == 0) {
2082 } else if (is_int16(rt.imm32_)) {
2083 slti(scratch, rs, rt.imm32_);
2084 beq(scratch, zero_reg, offset);
2088 slt(scratch, rs, r2);
2089 beq(scratch, zero_reg, offset);
2093 if (rt.imm32_ == 0) {
2095 } else if (is_int16(rt.imm32_)) {
2096 slti(scratch, rs, rt.imm32_);
2097 bne(scratch, zero_reg, offset);
2101 slt(scratch, rs, r2);
2102 bne(scratch, zero_reg, offset);
2106 if (rt.imm32_ == 0) {
2111 slt(scratch, r2, rs);
2112 beq(scratch, zero_reg, offset);
2115 // Unsigned comparison.
2117 if (rt.imm32_ == 0) {
2118 bne(rs, zero_reg, offset);
2122 sltu(scratch, r2, rs);
2123 bne(scratch, zero_reg, offset);
2126 case Ugreater_equal:
2127 if (rt.imm32_ == 0) {
2129 } else if (is_int16(rt.imm32_)) {
2130 sltiu(scratch, rs, rt.imm32_);
2131 beq(scratch, zero_reg, offset);
2135 sltu(scratch, rs, r2);
2136 beq(scratch, zero_reg, offset);
2140 if (rt.imm32_ == 0) {
2141 // No code needs to be emitted.
2143 } else if (is_int16(rt.imm32_)) {
2144 sltiu(scratch, rs, rt.imm32_);
2145 bne(scratch, zero_reg, offset);
2149 sltu(scratch, rs, r2);
2150 bne(scratch, zero_reg, offset);
2154 if (rt.imm32_ == 0) {
2155 beq(rs, zero_reg, offset);
2159 sltu(scratch, r2, rs);
2160 beq(scratch, zero_reg, offset);
2167 // Emit a nop in the branch delay slot if required.
2168 if (bdslot == PROTECT)
2173 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2174 // We use branch_offset as an argument for the branch instructions to be
2175 // sure it is evaluated just before the branch instruction is generated.
2177 b(shifted_branch_offset(L, false));
2179 // Emit a nop in the branch delay slot if required.
2180 if (bdslot == PROTECT)
2185 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
2187 BranchDelaySlot bdslot) {
2188 BRANCH_ARGS_CHECK(cond, rs, rt);
2191 Register r2 = no_reg;
2192 Register scratch = at;
2194 BlockTrampolinePoolScope block_trampoline_pool(this);
2196 // Be careful to always use shifted_branch_offset only just before the
2197 // branch instruction, as the location will be remembered for patching the
2201 offset = shifted_branch_offset(L, false);
2205 offset = shifted_branch_offset(L, false);
2206 beq(rs, r2, offset);
2209 offset = shifted_branch_offset(L, false);
2210 bne(rs, r2, offset);
2212 // Signed comparison.
2214 if (r2.is(zero_reg)) {
2215 offset = shifted_branch_offset(L, false);
2218 slt(scratch, r2, rs);
2219 offset = shifted_branch_offset(L, false);
2220 bne(scratch, zero_reg, offset);
2224 if (r2.is(zero_reg)) {
2225 offset = shifted_branch_offset(L, false);
2228 slt(scratch, rs, r2);
2229 offset = shifted_branch_offset(L, false);
2230 beq(scratch, zero_reg, offset);
2234 if (r2.is(zero_reg)) {
2235 offset = shifted_branch_offset(L, false);
2238 slt(scratch, rs, r2);
2239 offset = shifted_branch_offset(L, false);
2240 bne(scratch, zero_reg, offset);
2244 if (r2.is(zero_reg)) {
2245 offset = shifted_branch_offset(L, false);
2248 slt(scratch, r2, rs);
2249 offset = shifted_branch_offset(L, false);
2250 beq(scratch, zero_reg, offset);
2253 // Unsigned comparison.
2255 if (r2.is(zero_reg)) {
2256 offset = shifted_branch_offset(L, false);
2257 bne(rs, zero_reg, offset);
2259 sltu(scratch, r2, rs);
2260 offset = shifted_branch_offset(L, false);
2261 bne(scratch, zero_reg, offset);
2264 case Ugreater_equal:
2265 if (r2.is(zero_reg)) {
2266 offset = shifted_branch_offset(L, false);
2269 sltu(scratch, rs, r2);
2270 offset = shifted_branch_offset(L, false);
2271 beq(scratch, zero_reg, offset);
2275 if (r2.is(zero_reg)) {
2276 // No code needs to be emitted.
2279 sltu(scratch, rs, r2);
2280 offset = shifted_branch_offset(L, false);
2281 bne(scratch, zero_reg, offset);
2285 if (r2.is(zero_reg)) {
2286 offset = shifted_branch_offset(L, false);
2287 beq(rs, zero_reg, offset);
2289 sltu(scratch, r2, rs);
2290 offset = shifted_branch_offset(L, false);
2291 beq(scratch, zero_reg, offset);
2298 // Be careful to always use shifted_branch_offset only just before the
2299 // branch instruction, as the location will be remembered for patching the
2301 BlockTrampolinePoolScope block_trampoline_pool(this);
2304 offset = shifted_branch_offset(L, false);
2308 if (rt.imm32_ == 0) {
2309 offset = shifted_branch_offset(L, false);
2310 beq(rs, zero_reg, offset);
2312 DCHECK(!scratch.is(rs));
2315 offset = shifted_branch_offset(L, false);
2316 beq(rs, r2, offset);
2320 if (rt.imm32_ == 0) {
2321 offset = shifted_branch_offset(L, false);
2322 bne(rs, zero_reg, offset);
2324 DCHECK(!scratch.is(rs));
2327 offset = shifted_branch_offset(L, false);
2328 bne(rs, r2, offset);
2331 // Signed comparison.
2333 if (rt.imm32_ == 0) {
2334 offset = shifted_branch_offset(L, false);
2337 DCHECK(!scratch.is(rs));
2340 slt(scratch, r2, rs);
2341 offset = shifted_branch_offset(L, false);
2342 bne(scratch, zero_reg, offset);
2346 if (rt.imm32_ == 0) {
2347 offset = shifted_branch_offset(L, false);
2349 } else if (is_int16(rt.imm32_)) {
2350 slti(scratch, rs, rt.imm32_);
2351 offset = shifted_branch_offset(L, false);
2352 beq(scratch, zero_reg, offset);
2354 DCHECK(!scratch.is(rs));
2357 slt(scratch, rs, r2);
2358 offset = shifted_branch_offset(L, false);
2359 beq(scratch, zero_reg, offset);
2363 if (rt.imm32_ == 0) {
2364 offset = shifted_branch_offset(L, false);
2366 } else if (is_int16(rt.imm32_)) {
2367 slti(scratch, rs, rt.imm32_);
2368 offset = shifted_branch_offset(L, false);
2369 bne(scratch, zero_reg, offset);
2371 DCHECK(!scratch.is(rs));
2374 slt(scratch, rs, r2);
2375 offset = shifted_branch_offset(L, false);
2376 bne(scratch, zero_reg, offset);
2380 if (rt.imm32_ == 0) {
2381 offset = shifted_branch_offset(L, false);
2384 DCHECK(!scratch.is(rs));
2387 slt(scratch, r2, rs);
2388 offset = shifted_branch_offset(L, false);
2389 beq(scratch, zero_reg, offset);
2392 // Unsigned comparison.
2394 if (rt.imm32_ == 0) {
2395 offset = shifted_branch_offset(L, false);
2396 bne(rs, zero_reg, offset);
2398 DCHECK(!scratch.is(rs));
2401 sltu(scratch, r2, rs);
2402 offset = shifted_branch_offset(L, false);
2403 bne(scratch, zero_reg, offset);
2406 case Ugreater_equal:
2407 if (rt.imm32_ == 0) {
2408 offset = shifted_branch_offset(L, false);
2410 } else if (is_int16(rt.imm32_)) {
2411 sltiu(scratch, rs, rt.imm32_);
2412 offset = shifted_branch_offset(L, false);
2413 beq(scratch, zero_reg, offset);
2415 DCHECK(!scratch.is(rs));
2418 sltu(scratch, rs, r2);
2419 offset = shifted_branch_offset(L, false);
2420 beq(scratch, zero_reg, offset);
2424 if (rt.imm32_ == 0) {
2425 // No code needs to be emitted.
2427 } else if (is_int16(rt.imm32_)) {
2428 sltiu(scratch, rs, rt.imm32_);
2429 offset = shifted_branch_offset(L, false);
2430 bne(scratch, zero_reg, offset);
2432 DCHECK(!scratch.is(rs));
2435 sltu(scratch, rs, r2);
2436 offset = shifted_branch_offset(L, false);
2437 bne(scratch, zero_reg, offset);
2441 if (rt.imm32_ == 0) {
2442 offset = shifted_branch_offset(L, false);
2443 beq(rs, zero_reg, offset);
2445 DCHECK(!scratch.is(rs));
2448 sltu(scratch, r2, rs);
2449 offset = shifted_branch_offset(L, false);
2450 beq(scratch, zero_reg, offset);
2457 // Check that the offset actually fits in an int16_t.
2458 DCHECK(is_int16(offset));
2459 // Emit a nop in the branch delay slot if required.
2460 if (bdslot == PROTECT)
2465 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2466 BranchAndLinkShort(offset, bdslot);
2470 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2472 BranchDelaySlot bdslot) {
2473 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2477 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2478 if (L->is_bound()) {
2480 BranchAndLinkShort(L, bdslot);
2485 if (is_trampoline_emitted()) {
2488 BranchAndLinkShort(L, bdslot);
2494 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2496 BranchDelaySlot bdslot) {
2497 if (L->is_bound()) {
2499 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2502 Condition neg_cond = NegateCondition(cond);
2503 BranchShort(&skip, neg_cond, rs, rt);
2508 if (is_trampoline_emitted()) {
2510 Condition neg_cond = NegateCondition(cond);
2511 BranchShort(&skip, neg_cond, rs, rt);
2515 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2521 // We need to use a bgezal or bltzal, but they can't be used directly with the
2522 // slt instructions. We could use sub or add instead but we would miss overflow
2523 // cases, so we keep slt and add an intermediate third instruction.
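// Concretely: slt leaves 0 or 1 in scratch, and addiu scratch, scratch, -1
// maps that to -1 or 0, so bgezal (branch and link if >= 0) links exactly
// when the slt condition held, while bltzal covers the negated case.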
2524 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2525 BranchDelaySlot bdslot) {
2528 // Emit a nop in the branch delay slot if required.
2529 if (bdslot == PROTECT)
2534 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2535 Register rs, const Operand& rt,
2536 BranchDelaySlot bdslot) {
2537 BRANCH_ARGS_CHECK(cond, rs, rt);
2538 Register r2 = no_reg;
2539 Register scratch = at;
2543 } else if (cond != cc_always) {
2548 if (!IsMipsArchVariant(kMips32r6)) {
2549 BlockTrampolinePoolScope block_trampoline_pool(this);
2565 // Signed comparison.
2567 slt(scratch, r2, rs);
2568 addiu(scratch, scratch, -1);
2569 bgezal(scratch, offset);
2572 slt(scratch, rs, r2);
2573 addiu(scratch, scratch, -1);
2574 bltzal(scratch, offset);
2577 slt(scratch, rs, r2);
2578 addiu(scratch, scratch, -1);
2579 bgezal(scratch, offset);
2582 slt(scratch, r2, rs);
2583 addiu(scratch, scratch, -1);
2584 bltzal(scratch, offset);
2587 // Unsigned comparison.
2589 sltu(scratch, r2, rs);
2590 addiu(scratch, scratch, -1);
2591 bgezal(scratch, offset);
2593 case Ugreater_equal:
2594 sltu(scratch, rs, r2);
2595 addiu(scratch, scratch, -1);
2596 bltzal(scratch, offset);
2599 sltu(scratch, rs, r2);
2600 addiu(scratch, scratch, -1);
2601 bgezal(scratch, offset);
2604 sltu(scratch, r2, rs);
2605 addiu(scratch, scratch, -1);
2606 bltzal(scratch, offset);
2613 BlockTrampolinePoolScope block_trampoline_pool(this);
2629 // Signed comparison.
2632 slt(scratch, r2, rs);
2633 beq(scratch, zero_reg, 2);
2639 slt(scratch, rs, r2);
2640 bne(scratch, zero_reg, 2);
2646 slt(scratch, rs, r2);
2647 bne(scratch, zero_reg, 2);
2653 slt(scratch, r2, rs);
2654 bne(scratch, zero_reg, 2);
2660 // Unsigned comparison.
2663 sltu(scratch, r2, rs);
2664 beq(scratch, zero_reg, 2);
2668 case Ugreater_equal:
2670 sltu(scratch, rs, r2);
2671 bne(scratch, zero_reg, 2);
2677 sltu(scratch, rs, r2);
2678 bne(scratch, zero_reg, 2);
2684 sltu(scratch, r2, rs);
2685 bne(scratch, zero_reg, 2);
2694 // Emit a nop in the branch delay slot if required.
2695 if (bdslot == PROTECT)
2700 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2701 bal(shifted_branch_offset(L, false));
2703 // Emit a nop in the branch delay slot if required.
2704 if (bdslot == PROTECT)
2709 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2711 BranchDelaySlot bdslot) {
2712 BRANCH_ARGS_CHECK(cond, rs, rt);
2715 Register r2 = no_reg;
2716 Register scratch = at;
2719 } else if (cond != cc_always) {
2724 if (!IsMipsArchVariant(kMips32r6)) {
2725 BlockTrampolinePoolScope block_trampoline_pool(this);
2728 offset = shifted_branch_offset(L, false);
2734 offset = shifted_branch_offset(L, false);
2740 offset = shifted_branch_offset(L, false);
2744 // Signed comparison.
2746 slt(scratch, r2, rs);
2747 addiu(scratch, scratch, -1);
2748 offset = shifted_branch_offset(L, false);
2749 bgezal(scratch, offset);
2752 slt(scratch, rs, r2);
2753 addiu(scratch, scratch, -1);
2754 offset = shifted_branch_offset(L, false);
2755 bltzal(scratch, offset);
2758 slt(scratch, rs, r2);
2759 addiu(scratch, scratch, -1);
2760 offset = shifted_branch_offset(L, false);
2761 bgezal(scratch, offset);
2764 slt(scratch, r2, rs);
2765 addiu(scratch, scratch, -1);
2766 offset = shifted_branch_offset(L, false);
2767 bltzal(scratch, offset);
2770 // Unsigned comparison.
2772 sltu(scratch, r2, rs);
2773 addiu(scratch, scratch, -1);
2774 offset = shifted_branch_offset(L, false);
2775 bgezal(scratch, offset);
2777 case Ugreater_equal:
2778 sltu(scratch, rs, r2);
2779 addiu(scratch, scratch, -1);
2780 offset = shifted_branch_offset(L, false);
2781 bltzal(scratch, offset);
2784 sltu(scratch, rs, r2);
2785 addiu(scratch, scratch, -1);
2786 offset = shifted_branch_offset(L, false);
2787 bgezal(scratch, offset);
2790 sltu(scratch, r2, rs);
2791 addiu(scratch, scratch, -1);
2792 offset = shifted_branch_offset(L, false);
2793 bltzal(scratch, offset);
2800 BlockTrampolinePoolScope block_trampoline_pool(this);
2803 offset = shifted_branch_offset(L, false);
2809 offset = shifted_branch_offset(L, false);
2815 offset = shifted_branch_offset(L, false);
2819 // Signed comparison.
2822 slt(scratch, r2, rs);
2823 beq(scratch, zero_reg, 2);
2825 offset = shifted_branch_offset(L, false);
2830 slt(scratch, rs, r2);
2831 bne(scratch, zero_reg, 2);
2833 offset = shifted_branch_offset(L, false);
2838 slt(scratch, rs, r2);
2839 bne(scratch, zero_reg, 2);
2841 offset = shifted_branch_offset(L, false);
2846 slt(scratch, r2, rs);
2847 bne(scratch, zero_reg, 2);
2849 offset = shifted_branch_offset(L, false);
2854 // Unsigned comparison.
2857 sltu(scratch, r2, rs);
2858 beq(scratch, zero_reg, 2);
2860 offset = shifted_branch_offset(L, false);
2863 case Ugreater_equal:
2865 sltu(scratch, rs, r2);
2866 bne(scratch, zero_reg, 2);
2868 offset = shifted_branch_offset(L, false);
2873 sltu(scratch, rs, r2);
2874 bne(scratch, zero_reg, 2);
2876 offset = shifted_branch_offset(L, false);
2881 sltu(scratch, r2, rs);
2882 bne(scratch, zero_reg, 2);
2884 offset = shifted_branch_offset(L, false);
2893 // Check that the offset actually fits in an int16_t.
2894 DCHECK(is_int16(offset));
2896 // Emit a nop in the branch delay slot if required.
2897 if (bdslot == PROTECT)
2902 void MacroAssembler::Jump(Register target,
2906 BranchDelaySlot bd) {
2907 BlockTrampolinePoolScope block_trampoline_pool(this);
2908 if (cond == cc_always) {
2911 BRANCH_ARGS_CHECK(cond, rs, rt);
2912 Branch(2, NegateCondition(cond), rs, rt);
2915 // Emit a nop in the branch delay slot if required.
2921 void MacroAssembler::Jump(intptr_t target,
2922 RelocInfo::Mode rmode,
2926 BranchDelaySlot bd) {
2928 if (cond != cc_always) {
2929 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2931 // The first instruction of 'li' may be placed in the delay slot.
2932 // This is not an issue; t9 is expected to be clobbered anyway.
2933 li(t9, Operand(target, rmode));
2934 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2939 void MacroAssembler::Jump(Address target,
2940 RelocInfo::Mode rmode,
2941 Condition cond,
2942 Register rs,
2943 const Operand& rt,
2944 BranchDelaySlot bd) {
2945 DCHECK(!RelocInfo::IsCodeTarget(rmode));
2946 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2947 }
2950 void MacroAssembler::Jump(Handle<Code> code,
2951 RelocInfo::Mode rmode,
2952 Condition cond,
2953 Register rs,
2954 const Operand& rt,
2955 BranchDelaySlot bd) {
2956 DCHECK(RelocInfo::IsCodeTarget(rmode));
2957 AllowDeferredHandleDereference embedding_raw_address;
2958 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2959 }
2962 int MacroAssembler::CallSize(Register target,
2963 Condition cond,
2964 Register rs,
2965 const Operand& rt,
2966 BranchDelaySlot bd) {
2967 int size = 0;
2969 if (cond == cc_always) {
2970 size += 1;
2971 } else {
2972 size += 3;
2973 }
2975 if (bd == PROTECT)
2976 size += 1;
2978 return size * kInstrSize;
2979 }
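// In the MIPS o32 PIC ABI a callee computes its own $gp from the address it
// was entered at, and the caller is expected to leave that address in
// t9 ($25). That is why the Call/Jump helpers below route every far call
// through t9.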
2982 // Note: To call gcc-compiled C code on mips, you must call through t9.
2983 void MacroAssembler::Call(Register target,
2984 Condition cond,
2985 Register rs,
2986 const Operand& rt,
2987 BranchDelaySlot bd) {
2988 BlockTrampolinePoolScope block_trampoline_pool(this);
2989 Label start;
2990 bind(&start);
2991 if (cond == cc_always) {
2992 jalr(target);
2993 } else {
2994 BRANCH_ARGS_CHECK(cond, rs, rt);
2995 Branch(2, NegateCondition(cond), rs, rt);
2996 jalr(target);
2997 }
2998 // Emit a nop in the branch delay slot if required.
2999 if (bd == PROTECT)
3000 nop();
3002 DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
3003 SizeOfCodeGeneratedSince(&start));
3004 }
3007 int MacroAssembler::CallSize(Address target,
3008 RelocInfo::Mode rmode,
3009 Condition cond,
3010 Register rs,
3011 const Operand& rt,
3012 BranchDelaySlot bd) {
3013 int size = CallSize(t9, cond, rs, rt, bd);
3014 return size + 2 * kInstrSize;
3015 }
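// The two extra instructions accounted for above are the lui/ori pair that
// li() emits for the 32-bit target address when CONSTANT_SIZE is requested
// (see Call(Address) below).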
3018 void MacroAssembler::Call(Address target,
3019 RelocInfo::Mode rmode,
3020 Condition cond,
3021 Register rs,
3022 const Operand& rt,
3023 BranchDelaySlot bd) {
3024 BlockTrampolinePoolScope block_trampoline_pool(this);
3025 Label start;
3026 bind(&start);
3027 int32_t target_int = reinterpret_cast<int32_t>(target);
3028 // Must record previous source positions before the
3029 // li() generates a new code target.
3030 positions_recorder()->WriteRecordedPositions();
3031 li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
3032 Call(t9, cond, rs, rt, bd);
3033 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3034 SizeOfCodeGeneratedSince(&start));
3035 }
3038 int MacroAssembler::CallSize(Handle<Code> code,
3039 RelocInfo::Mode rmode,
3040 TypeFeedbackId ast_id,
3041 Condition cond,
3042 Register rs,
3043 const Operand& rt,
3044 BranchDelaySlot bd) {
3045 AllowDeferredHandleDereference using_raw_address;
3046 return CallSize(reinterpret_cast<Address>(code.location()),
3047 rmode, cond, rs, rt, bd);
3048 }
3051 void MacroAssembler::Call(Handle<Code> code,
3052 RelocInfo::Mode rmode,
3053 TypeFeedbackId ast_id,
3054 Condition cond,
3055 Register rs,
3056 const Operand& rt,
3057 BranchDelaySlot bd) {
3058 BlockTrampolinePoolScope block_trampoline_pool(this);
3059 Label start;
3060 bind(&start);
3061 DCHECK(RelocInfo::IsCodeTarget(rmode));
3062 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3063 SetRecordedAstId(ast_id);
3064 rmode = RelocInfo::CODE_TARGET_WITH_ID;
3065 }
3066 AllowDeferredHandleDereference embedding_raw_address;
3067 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3068 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3069 SizeOfCodeGeneratedSince(&start));
3070 }
3073 void MacroAssembler::Ret(Condition cond,
3074 Register rs,
3075 const Operand& rt,
3076 BranchDelaySlot bd) {
3077 Jump(ra, cond, rs, rt, bd);
3078 }
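// Reminder on the BranchDelaySlot convention used throughout this file:
// PROTECT makes the emitter place a nop in the delay slot of the jump or
// branch, while USE_DELAY_SLOT lets the next emitted instruction land there,
// so the caller must make sure that instruction is safe to execute
// unconditionally.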
3081 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
3082 BlockTrampolinePoolScope block_trampoline_pool(this);
3084 uint32_t imm28;
3085 imm28 = jump_address(L);
3086 imm28 &= kImm28Mask;
3087 { BlockGrowBufferScope block_buf_growth(this);
3088 // Buffer growth (and relocation) must be blocked for internal references
3089 // until associated instructions are emitted and available to be patched.
3090 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3091 j(imm28);
3092 }
3093 // Emit a nop in the branch delay slot if required.
3094 if (bdslot == PROTECT)
3095 nop();
3096 }
3099 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
3100 BlockTrampolinePoolScope block_trampoline_pool(this);
3102 uint32_t imm32;
3103 imm32 = jump_address(L);
3104 { BlockGrowBufferScope block_buf_growth(this);
3105 // Buffer growth (and relocation) must be blocked for internal references
3106 // until associated instructions are emitted and available to be patched.
3107 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3108 lui(at, (imm32 & kHiMask) >> kLuiShift);
3109 ori(at, at, (imm32 & kImm16Mask));
3110 }
3111 jr(at);
3113 // Emit a nop in the branch delay slot if required.
3114 if (bdslot == PROTECT)
3115 nop();
3116 }
3119 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
3120 BlockTrampolinePoolScope block_trampoline_pool(this);
3122 uint32_t imm32;
3123 imm32 = jump_address(L);
3124 { BlockGrowBufferScope block_buf_growth(this);
3125 // Buffer growth (and relocation) must be blocked for internal references
3126 // until associated instructions are emitted and available to be patched.
3127 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3128 lui(at, (imm32 & kHiMask) >> kLuiShift);
3129 ori(at, at, (imm32 & kImm16Mask));
3130 }
3131 jalr(at);
3133 // Emit a nop in the branch delay slot if required.
3134 if (bdslot == PROTECT)
3135 nop();
3136 }
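// DropAndRet below is a typical USE_DELAY_SLOT client: the addiu that pops
// the stack is emitted right after Ret(USE_DELAY_SLOT), so it executes in
// the jr ra delay slot and the drop costs no extra instruction slot.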
3139 void MacroAssembler::DropAndRet(int drop) {
3140 Ret(USE_DELAY_SLOT);
3141 addiu(sp, sp, drop * kPointerSize);
3142 }
3144 void MacroAssembler::DropAndRet(int drop,
3145 Condition cond,
3146 Register r1,
3147 const Operand& r2) {
3148 // Both Drop and Ret need to be conditional.
3149 Label skip;
3150 if (cond != cc_always) {
3151 Branch(&skip, NegateCondition(cond), r1, r2);
3152 }
3154 Drop(drop);
3155 Ret();
3157 if (cond != cc_always) {
3158 bind(&skip);
3159 }
3160 }
3163 void MacroAssembler::Drop(int count,
3164 Condition cond,
3165 Register reg,
3166 const Operand& op) {
3167 if (count <= 0) {
3168 return;
3169 }
3171 Label skip;
3173 if (cond != al) {
3174 Branch(&skip, NegateCondition(cond), reg, op);
3175 }
3177 addiu(sp, sp, count * kPointerSize);
3179 if (cond != al) {
3180 bind(&skip);
3181 }
3182 }
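// Swap below uses the classic three-XOR trick when no scratch register is
// available: a ^= b; b ^= a; a ^= b exchanges the two values in place. Note
// that this only works for distinct registers; if reg1 and reg2 were the
// same register the sequence would zero it.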
3186 void MacroAssembler::Swap(Register reg1,
3187 Register reg2,
3188 Register scratch) {
3189 if (scratch.is(no_reg)) {
3190 Xor(reg1, reg1, Operand(reg2));
3191 Xor(reg2, reg2, Operand(reg1));
3192 Xor(reg1, reg1, Operand(reg2));
3193 } else {
3194 mov(scratch, reg1);
3195 mov(reg1, reg2);
3196 mov(reg2, scratch);
3197 }
3198 }
3201 void MacroAssembler::Call(Label* target) {
3202 BranchAndLink(target);
3206 void MacroAssembler::Push(Handle<Object> handle) {
3207 li(at, Operand(handle));
3208 push(at);
3209 }
3212 void MacroAssembler::DebugBreak() {
3213 PrepareCEntryArgs(0);
3214 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
3215 CEntryStub ces(isolate(), 1);
3216 DCHECK(AllowThisStubCall(&ces));
3217 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3221 // ---------------------------------------------------------------------------
3222 // Exception handling.
3224 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3225 int handler_index) {
3226 // Adjust this code if not the case.
3227 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3228 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3229 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3230 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3231 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3232 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3234 // For the JSEntry handler, we must preserve a0-a3 and s0.
3235 // t1-t3 are available. We will build up the handler from the bottom by
3236 // pushing on the stack.
3237 // Set up the code object (t1) and the state (t2) for pushing.
3238 unsigned state =
3239 StackHandler::IndexField::encode(handler_index) |
3240 StackHandler::KindField::encode(kind);
3241 li(t1, Operand(CodeObject()), CONSTANT_SIZE);
3242 li(t2, Operand(state));
3244 // Push the frame pointer, context, state, and code object.
3245 if (kind == StackHandler::JS_ENTRY) {
3246 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
3247 // The second zero_reg indicates no context.
3248 // The first zero_reg is the NULL frame pointer.
3249 // The operands are reversed to match the order of MultiPush/Pop.
3250 Push(zero_reg, zero_reg, t2, t1);
3251 } else {
3252 MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
3253 }
3255 // Link the current handler as the next handler.
3256 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3257 lw(t1, MemOperand(t2));
3258 push(t1);
3259 // Set this new handler as the current one.
3260 sw(sp, MemOperand(t2));
3261 }
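// Given the STATIC_ASSERTs above, the handler pushed here is laid out as
// five words on the stack, lowest address first:
//   sp + 0 * kPointerSize : next handler (link word pushed last)
//   sp + 1 * kPointerSize : code object  (t1)
//   sp + 2 * kPointerSize : state        (t2: handler index and kind)
//   sp + 3 * kPointerSize : context      (cp, or zero for JS_ENTRY)
//   sp + 4 * kPointerSize : frame pointer(fp, or zero for JS_ENTRY)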
3264 void MacroAssembler::PopTryHandler() {
3265 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3266 pop(a1);
3267 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
3268 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3269 sw(a1, MemOperand(at));
3270 }
3273 void MacroAssembler::JumpToHandlerEntry() {
3274 // Compute the handler entry address and jump to it. The handler table is
3275 // a fixed array of (smi-tagged) code offsets.
3276 // v0 = exception, a1 = code object, a2 = state.
3277 lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset)); // Handler table.
3278 Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3279 srl(a2, a2, StackHandler::kKindWidth); // Handler index.
3280 sll(a2, a2, kPointerSizeLog2);
3281 Addu(a2, a3, a2);
3282 lw(a2, MemOperand(a2));  // Smi-tagged offset.
3283 Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
3284 sra(t9, a2, kSmiTagSize);
3285 Addu(t9, t9, a1);
3286 Jump(t9);  // Jump.
3287 }
3290 void MacroAssembler::Throw(Register value) {
3291 // Adjust this code if not the case.
3292 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3293 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3294 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3295 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3296 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3297 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3299 // The exception is expected in v0.
3300 Move(v0, value);
3302 // Drop the stack pointer to the top of the top handler.
3303 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
3304 isolate())));
3305 lw(sp, MemOperand(a3));
3307 // Restore the next handler.
3308 pop(a2);
3309 sw(a2, MemOperand(a3));
3311 // Get the code object (a1) and state (a2). Restore the context and frame
3312 // pointer.
3313 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
3315 // If the handler is a JS frame, restore the context to the frame.
3316 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
3317 // or cp.
3318 Label done;
3319 Branch(&done, eq, cp, Operand(zero_reg));
3320 sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3321 bind(&done);
3323 JumpToHandlerEntry();
3324 }
3327 void MacroAssembler::ThrowUncatchable(Register value) {
3328 // Adjust this code if not the case.
3329 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3330 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3331 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3332 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3333 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3334 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3336 // The exception is expected in v0.
3337 if (!value.is(v0)) {
3338 mov(v0, value);
3339 }
3340 // Drop the stack pointer to the top of the top stack handler.
3341 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3342 lw(sp, MemOperand(a3));
3344 // Unwind the handlers until the ENTRY handler is found.
3345 Label fetch_next, check_kind;
3346 jmp(&check_kind);
3347 bind(&fetch_next);
3348 lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
3350 bind(&check_kind);
3351 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3352 lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
3353 And(a2, a2, Operand(StackHandler::KindField::kMask));
3354 Branch(&fetch_next, ne, a2, Operand(zero_reg));
3356 // Set the top handler address to next handler past the top ENTRY handler.
3357 pop(a2);
3358 sw(a2, MemOperand(a3));
3360 // Get the code object (a1) and state (a2). Clear the context and frame
3361 // pointer (0 was saved in the handler).
3362 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
3364 JumpToHandlerEntry();
3365 }
3368 void MacroAssembler::Allocate(int object_size,
3369 Register result,
3370 Register scratch1,
3371 Register scratch2,
3372 Label* gc_required,
3373 AllocationFlags flags) {
3374 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3375 if (!FLAG_inline_new) {
3376 if (emit_debug_code()) {
3377 // Trash the registers to simulate an allocation failure.
3378 li(result, 0x7091);
3379 li(scratch1, 0x7191);
3380 li(scratch2, 0x7291);
3381 }
3382 jmp(gc_required);
3383 return;
3384 }
3386 DCHECK(!result.is(scratch1));
3387 DCHECK(!result.is(scratch2));
3388 DCHECK(!scratch1.is(scratch2));
3389 DCHECK(!scratch1.is(t9));
3390 DCHECK(!scratch2.is(t9));
3391 DCHECK(!result.is(t9));
3393 // Make object size into bytes.
3394 if ((flags & SIZE_IN_WORDS) != 0) {
3395 object_size *= kPointerSize;
3397 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
3399 // Check relative positions of allocation top and limit addresses.
3400 // ARM adds additional checks to make sure the ldm instruction can be
3401 // used. On MIPS we don't have ldm so we don't need additional checks either.
3402 ExternalReference allocation_top =
3403 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3404 ExternalReference allocation_limit =
3405 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3407 intptr_t top =
3408 reinterpret_cast<intptr_t>(allocation_top.address());
3409 intptr_t limit =
3410 reinterpret_cast<intptr_t>(allocation_limit.address());
3411 DCHECK((limit - top) == kPointerSize);
3413 // Set up allocation top address and object size registers.
3414 Register topaddr = scratch1;
3415 li(topaddr, Operand(allocation_top));
3417 // This code stores a temporary value in t9.
3418 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3419 // Load allocation top into result and allocation limit into t9.
3420 lw(result, MemOperand(topaddr));
3421 lw(t9, MemOperand(topaddr, kPointerSize));
3422 } else {
3423 if (emit_debug_code()) {
3424 // Assert that result actually contains top on entry. t9 is used
3425 // immediately below so this use of t9 does not cause difference with
3426 // respect to register content between debug and release mode.
3427 lw(t9, MemOperand(topaddr));
3428 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3430 // Load allocation limit into t9. Result already contains allocation top.
3431 lw(t9, MemOperand(topaddr, limit - top));
3434 if ((flags & DOUBLE_ALIGNMENT) != 0) {
3435 // Align the next allocation. Storing the filler map without checking top is
3436 // safe in new-space because the limit of the heap is aligned there.
3437 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
3438 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
3439 And(scratch2, result, Operand(kDoubleAlignmentMask));
3440 Label aligned;
3441 Branch(&aligned, eq, scratch2, Operand(zero_reg));
3442 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
3443 Branch(gc_required, Ugreater_equal, result, Operand(t9));
3444 }
3445 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3446 sw(scratch2, MemOperand(result));
3447 Addu(result, result, Operand(kDoubleSize / 2));
3448 bind(&aligned);
3449 }
3451 // Calculate new top and bail out if new space is exhausted. Use result
3452 // to calculate the new top.
3453 Addu(scratch2, result, Operand(object_size));
3454 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3455 sw(scratch2, MemOperand(topaddr));
3457 // Tag object if requested.
3458 if ((flags & TAG_OBJECT) != 0) {
3459 Addu(result, result, Operand(kHeapObjectTag));
3460 }
3461 }
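// The fast path above is bump-pointer allocation; roughly, in pseudo-code:
//   result  = *top;
//   new_top = result + object_size;
//   if (new_top > *limit) goto gc_required;
//   *top = new_top;
// The top and limit words are adjacent (checked by the DCHECK on
// limit - top), so a single base register (topaddr) addresses both.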
3464 void MacroAssembler::Allocate(Register object_size,
3465 Register result,
3466 Register scratch1,
3467 Register scratch2,
3468 Label* gc_required,
3469 AllocationFlags flags) {
3470 if (!FLAG_inline_new) {
3471 if (emit_debug_code()) {
3472 // Trash the registers to simulate an allocation failure.
3473 li(result, 0x7091);
3474 li(scratch1, 0x7191);
3475 li(scratch2, 0x7291);
3476 }
3477 jmp(gc_required);
3478 return;
3479 }
3481 DCHECK(!result.is(scratch1));
3482 DCHECK(!result.is(scratch2));
3483 DCHECK(!scratch1.is(scratch2));
3484 DCHECK(!object_size.is(t9));
3485 DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3487 // Check relative positions of allocation top and limit addresses.
3488 // ARM adds additional checks to make sure the ldm instruction can be
3489 // used. On MIPS we don't have ldm so we don't need additional checks either.
3490 ExternalReference allocation_top =
3491 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3492 ExternalReference allocation_limit =
3493 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3494 intptr_t top =
3495 reinterpret_cast<intptr_t>(allocation_top.address());
3496 intptr_t limit =
3497 reinterpret_cast<intptr_t>(allocation_limit.address());
3498 DCHECK((limit - top) == kPointerSize);
3500 // Set up allocation top address and object size registers.
3501 Register topaddr = scratch1;
3502 li(topaddr, Operand(allocation_top));
3504 // This code stores a temporary value in t9.
3505 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3506 // Load allocation top into result and allocation limit into t9.
3507 lw(result, MemOperand(topaddr));
3508 lw(t9, MemOperand(topaddr, kPointerSize));
3509 } else {
3510 if (emit_debug_code()) {
3511 // Assert that result actually contains top on entry. t9 is used
3512 // immediately below so this use of t9 does not cause difference with
3513 // respect to register content between debug and release mode.
3514 lw(t9, MemOperand(topaddr));
3515 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3517 // Load allocation limit into t9. Result already contains allocation top.
3518 lw(t9, MemOperand(topaddr, limit - top));
3521 if ((flags & DOUBLE_ALIGNMENT) != 0) {
3522 // Align the next allocation. Storing the filler map without checking top is
3523 // safe in new-space because the limit of the heap is aligned there.
3524 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
3525 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
3526 And(scratch2, result, Operand(kDoubleAlignmentMask));
3527 Label aligned;
3528 Branch(&aligned, eq, scratch2, Operand(zero_reg));
3529 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
3530 Branch(gc_required, Ugreater_equal, result, Operand(t9));
3531 }
3532 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3533 sw(scratch2, MemOperand(result));
3534 Addu(result, result, Operand(kDoubleSize / 2));
3535 bind(&aligned);
3536 }
3538 // Calculate new top and bail out if new space is exhausted. Use result
3539 // to calculate the new top. Object size may be in words so a shift is
3540 // required to get the number of bytes.
3541 if ((flags & SIZE_IN_WORDS) != 0) {
3542 sll(scratch2, object_size, kPointerSizeLog2);
3543 Addu(scratch2, result, scratch2);
3544 } else {
3545 Addu(scratch2, result, Operand(object_size));
3546 }
3547 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3549 // Update allocation top. result temporarily holds the new top.
3550 if (emit_debug_code()) {
3551 And(t9, scratch2, Operand(kObjectAlignmentMask));
3552 Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3554 sw(scratch2, MemOperand(topaddr));
3556 // Tag object if requested.
3557 if ((flags & TAG_OBJECT) != 0) {
3558 Addu(result, result, Operand(kHeapObjectTag));
3563 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3564 Register scratch) {
3565 ExternalReference new_space_allocation_top =
3566 ExternalReference::new_space_allocation_top_address(isolate());
3566 ExternalReference::new_space_allocation_top_address(isolate());
3568 // Make sure the object has no tag before resetting top.
3569 And(object, object, Operand(~kHeapObjectTagMask));
3571 // Check that the object un-allocated is below the current top.
3572 li(scratch, Operand(new_space_allocation_top));
3573 lw(scratch, MemOperand(scratch));
3574 Check(less, kUndoAllocationOfNonAllocatedMemory,
3575 object, Operand(scratch));
3577 // Write the address of the object to un-allocate as the current top.
3578 li(scratch, Operand(new_space_allocation_top));
3579 sw(object, MemOperand(scratch));
3580 }
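// Note that UndoAllocationInNewSpace above only works for the most recent
// allocation: it simply rewinds the new-space top pointer to the start of
// the object (the Check asserts the object lies below the current top), so
// nothing may have been allocated after the object being un-allocated.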
3583 void MacroAssembler::AllocateTwoByteString(Register result,
3584 Register length,
3585 Register scratch1,
3586 Register scratch2,
3587 Register scratch3,
3588 Label* gc_required) {
3589 // Calculate the number of bytes needed for the characters in the string while
3590 // observing object alignment.
3591 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
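// The size computation below is the usual align-up idiom:
//   size = (length * char_size + header_size + alignment_mask) & ~mask
// which rounds the raw byte count up to the next object-alignment boundary.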
3592 sll(scratch1, length, 1); // Length in bytes, not chars.
3593 addiu(scratch1, scratch1,
3594 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3595 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3597 // Allocate two-byte string in new space.
3598 Allocate(scratch1,
3599 result,
3600 scratch2,
3601 scratch3,
3602 gc_required,
3603 TAG_OBJECT);
3605 // Set the map, length and hash field.
3606 InitializeNewString(result,
3607 length,
3608 Heap::kStringMapRootIndex,
3609 scratch1,
3610 scratch2);
3611 }
3614 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3615 Register scratch1, Register scratch2,
3617 Label* gc_required) {
3618 // Calculate the number of bytes needed for the characters in the string
3619 // while observing object alignment.
3620 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3621 DCHECK(kCharSize == 1);
3622 addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3623 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3625 // Allocate one-byte string in new space.
3633 // Set the map, length and hash field.
3634 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3635 scratch1, scratch2);
3639 void MacroAssembler::AllocateTwoByteConsString(Register result,
3643 Label* gc_required) {
3644 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3646 InitializeNewString(result,
3648 Heap::kConsStringMapRootIndex,
3654 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3657 Label* gc_required) {
3658 Allocate(ConsString::kSize,
3665 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3666 scratch1, scratch2);
3670 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3674 Label* gc_required) {
3675 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3678 InitializeNewString(result,
3680 Heap::kSlicedStringMapRootIndex,
3686 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3690 Label* gc_required) {
3691 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3694 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3695 scratch1, scratch2);
3699 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3700 Label* not_unique_name) {
3701 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3702 Label succeed;
3703 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3704 Branch(&succeed, eq, at, Operand(zero_reg));
3705 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3706 bind(&succeed);
3707 }
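// A unique name is either an internalized string or a symbol. Because
// kInternalizedTag and kStringTag are both zero, a single AND plus branch
// above tests "is an internalized string"; everything else falls through to
// the explicit SYMBOL_TYPE check.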
3711 // Allocates a heap number or jumps to the label if the young space is full and
3712 // a scavenge is needed.
3713 void MacroAssembler::AllocateHeapNumber(Register result,
3714 Register scratch1,
3715 Register scratch2,
3716 Register heap_number_map,
3717 Label* need_gc,
3718 TaggingMode tagging_mode,
3719 MutableMode mode) {
3720 // Allocate an object in the heap for the heap number and tag it as a heap
3721 // object.
3722 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3723 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3725 Heap::RootListIndex map_index = mode == MUTABLE
3726 ? Heap::kMutableHeapNumberMapRootIndex
3727 : Heap::kHeapNumberMapRootIndex;
3728 AssertIsRoot(heap_number_map, map_index);
3730 // Store heap number map in the allocated object.
3731 if (tagging_mode == TAG_RESULT) {
3732 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3733 } else {
3734 sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3735 }
3736 }
3739 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3740 FPURegister value,
3741 Register scratch1,
3742 Register scratch2,
3743 Label* gc_required) {
3744 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3745 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3746 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3747 }
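// A HeapNumber is just a map word followed by an unboxed IEEE-754 double,
// so storing the value is a single sdc1 to HeapNumber::kValueOffset once
// the object has been allocated and its map set.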
3750 // Copies a fixed number of fields of heap objects from src to dst.
3751 void MacroAssembler::CopyFields(Register dst,
3755 DCHECK((temps & dst.bit()) == 0);
3756 DCHECK((temps & src.bit()) == 0);
3757 // Primitive implementation using only one temporary register.
3759 Register tmp = no_reg;
3760 // Find a temp register in temps list.
3761 for (int i = 0; i < kNumRegisters; i++) {
3762 if ((temps & (1 << i)) != 0) {
3767 DCHECK(!tmp.is(no_reg));
3769 for (int i = 0; i < field_count; i++) {
3770 lw(tmp, FieldMemOperand(src, i * kPointerSize));
3771 sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3776 void MacroAssembler::CopyBytes(Register src,
3777 Register dst,
3778 Register length,
3779 Register scratch) {
3780 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3782 // Align src before copying in word size chunks.
3783 Branch(&byte_loop, le, length, Operand(kPointerSize));
3784 bind(&align_loop_1);
3785 And(scratch, src, kPointerSize - 1);
3786 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3787 lbu(scratch, MemOperand(src));
3788 Addu(src, src, 1);
3789 sb(scratch, MemOperand(dst));
3790 Addu(dst, dst, 1);
3791 Subu(length, length, Operand(1));
3792 Branch(&align_loop_1, ne, length, Operand(zero_reg));
3794 // Copy bytes in word size chunks.
3795 bind(&word_loop);
3796 if (emit_debug_code()) {
3797 And(scratch, src, kPointerSize - 1);
3798 Assert(eq, kExpectingAlignmentForCopyBytes,
3799 scratch, Operand(zero_reg));
3801 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3802 lw(scratch, MemOperand(src));
3803 Addu(src, src, kPointerSize);
3805 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3806 // Can't use unaligned access - copy byte by byte.
3807 if (kArchEndian == kLittle) {
3808 sb(scratch, MemOperand(dst, 0));
3809 srl(scratch, scratch, 8);
3810 sb(scratch, MemOperand(dst, 1));
3811 srl(scratch, scratch, 8);
3812 sb(scratch, MemOperand(dst, 2));
3813 srl(scratch, scratch, 8);
3814 sb(scratch, MemOperand(dst, 3));
3815 } else {
3816 sb(scratch, MemOperand(dst, 3));
3817 srl(scratch, scratch, 8);
3818 sb(scratch, MemOperand(dst, 2));
3819 srl(scratch, scratch, 8);
3820 sb(scratch, MemOperand(dst, 1));
3821 srl(scratch, scratch, 8);
3822 sb(scratch, MemOperand(dst, 0));
3823 }
3824 Addu(dst, dst, kPointerSize);
3827 Subu(length, length, Operand(kPointerSize));
3828 Branch(&word_loop);
3830 // Copy the last bytes if any left.
3831 bind(&byte_loop);
3832 Branch(&done, eq, length, Operand(zero_reg));
3833 bind(&byte_loop_1);
3834 lbu(scratch, MemOperand(src));
3835 Addu(src, src, 1);
3836 sb(scratch, MemOperand(dst));
3837 Addu(dst, dst, 1);
3838 Subu(length, length, Operand(1));
3839 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3840 bind(&done);
3841 }
3844 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3845 Register end_offset,
3846 Register filler) {
3847 Label loop, entry;
3848 Branch(&entry);
3849 bind(&loop);
3850 sw(filler, MemOperand(start_offset));
3851 Addu(start_offset, start_offset, kPointerSize);
3852 bind(&entry);
3853 Branch(&loop, lt, start_offset, Operand(end_offset));
3854 }
3857 void MacroAssembler::CheckFastElements(Register map,
3860 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3861 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3862 STATIC_ASSERT(FAST_ELEMENTS == 2);
3863 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3864 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3865 Branch(fail, hi, scratch,
3866 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3870 void MacroAssembler::CheckFastObjectElements(Register map,
3873 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3874 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3875 STATIC_ASSERT(FAST_ELEMENTS == 2);
3876 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3877 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3878 Branch(fail, ls, scratch,
3879 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3880 Branch(fail, hi, scratch,
3881 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3885 void MacroAssembler::CheckFastSmiElements(Register map,
3888 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3889 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3890 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3891 Branch(fail, hi, scratch,
3892 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3896 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3897 Register key_reg,
3898 Register elements_reg,
3899 Register scratch1,
3900 Register scratch2,
3901 Register scratch3,
3902 Label* fail,
3903 int elements_offset) {
3904 Label smi_value, maybe_nan, have_double_value, is_nan, done;
3905 Register mantissa_reg = scratch2;
3906 Register exponent_reg = scratch3;
3908 // Handle smi values specially.
3909 JumpIfSmi(value_reg, &smi_value);
3911 // Ensure that the object is a heap number.
3912 CheckMap(value_reg,
3913 scratch1,
3914 Heap::kHeapNumberMapRootIndex,
3915 fail,
3916 DONT_DO_SMI_CHECK);
3918 // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
3919 // in the exponent.
3920 li(scratch1, Operand(kHoleNanUpper32 & HeapNumber::kExponentMask));
3921 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3922 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3924 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3926 bind(&have_double_value);
3927 sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3928 Addu(scratch1, scratch1, elements_reg);
3929 sw(mantissa_reg,
3930 FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3931 + kHoleNanLower32Offset));
3932 sw(exponent_reg,
3933 FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
3934 + kHoleNanUpper32Offset));
3935 jmp(&done);
3937 bind(&maybe_nan);
3938 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3939 // it's an Infinity, and the non-NaN code path applies.
3939 // it's an Infinity, and the non-NaN code path applies.
3940 Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3941 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3942 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3943 bind(&is_nan);
3944 // Load canonical NaN for storing into the double array.
3945 LoadRoot(at, Heap::kNanValueRootIndex);
3946 lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
3947 lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
3948 jmp(&have_double_value);
3951 Addu(scratch1, elements_reg,
3952 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3954 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3955 Addu(scratch1, scratch1, scratch2);
3956 // scratch1 is now effective address of the double element
3958 Register untagged_value = elements_reg;
3959 SmiUntag(untagged_value, value_reg);
3960 mtc1(untagged_value, f2);
3961 cvt_d_w(f0, f2);
3962 sdc1(f0, MemOperand(scratch1, 0));
3963 bind(&done);
3964 }
3967 void MacroAssembler::CompareMapAndBranch(Register obj,
3970 Label* early_success,
3973 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3974 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3978 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3980 Label* early_success,
3983 Branch(branch_to, cond, obj_map, Operand(map));
3987 void MacroAssembler::CheckMap(Register obj,
3988 Register scratch,
3989 Handle<Map> map,
3990 Label* fail,
3991 SmiCheckType smi_check_type) {
3992 if (smi_check_type == DO_SMI_CHECK) {
3993 JumpIfSmi(obj, fail);
3994 }
3995 Label success;
3996 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3997 bind(&success);
3998 }
4001 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
4002 Register scratch2, Handle<WeakCell> cell,
4003 Handle<Code> success,
4004 SmiCheckType smi_check_type) {
4005 Label fail;
4006 if (smi_check_type == DO_SMI_CHECK) {
4007 JumpIfSmi(obj, &fail);
4008 }
4009 lw(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
4010 GetWeakValue(scratch2, cell);
4011 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
4012 bind(&fail);
4013 }
4016 void MacroAssembler::CheckMap(Register obj,
4018 Heap::RootListIndex index,
4020 SmiCheckType smi_check_type) {
4021 if (smi_check_type == DO_SMI_CHECK) {
4022 JumpIfSmi(obj, fail);
4024 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4025 LoadRoot(at, index);
4026 Branch(fail, ne, scratch, Operand(at));
4030 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
4031 li(value, Operand(cell));
4032 lw(value, FieldMemOperand(value, WeakCell::kValueOffset));
4036 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
4038 GetWeakValue(value, cell);
4039 JumpIfSmi(value, miss);
4043 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
4044 if (IsMipsSoftFloatABI) {
4045 if (kArchEndian == kLittle) {
4046 Move(dst, v0, v1);
4047 } else {
4048 Move(dst, v1, v0);
4049 }
4050 } else {
4051 Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
4052 }
4053 }
4056 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
4057 if (IsMipsSoftFloatABI) {
4058 if (kArchEndian == kLittle) {
4059 Move(dst, a0, a1);
4060 } else {
4061 Move(dst, a1, a0);
4062 }
4063 } else {
4064 Move(dst, f12);  // Reg f12 is o32 ABI FP first argument value.
4065 }
4066 }
4069 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4070 if (!IsMipsSoftFloatABI) {
4071 Move(f12, src);
4072 } else {
4073 if (kArchEndian == kLittle) {
4074 Move(a0, a1, src);
4075 } else {
4076 Move(a1, a0, src);
4077 }
4078 }
4079 }
4082 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4083 if (!IsMipsSoftFloatABI) {
4084 Move(f0, src);
4085 } else {
4086 if (kArchEndian == kLittle) {
4087 Move(v0, v1, src);
4088 } else {
4089 Move(v1, v0, src);
4090 }
4091 }
4092 }
4095 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4096 DoubleRegister src2) {
4097 if (!IsMipsSoftFloatABI) {
4098 if (src2.is(f12)) {
4099 DCHECK(!src1.is(f14));
4100 Move(f14, src2);
4101 Move(f12, src1);
4102 } else {
4103 Move(f12, src1);
4104 Move(f14, src2);
4105 }
4106 } else {
4107 if (kArchEndian == kLittle) {
4108 Move(a0, a1, src1);
4109 Move(a2, a3, src2);
4110 } else {
4111 Move(a1, a0, src1);
4112 Move(a3, a2, src2);
4113 }
4114 }
4115 }
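// Under the o32 hard-float ABI the first two floating-point arguments are
// passed in f12 and f14; under soft-float a double travels in the register
// pairs a0/a1 and a2/a3, with the word order depending on endianness. That
// is exactly what the Mov*Float* helpers above encode.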
4118 // -----------------------------------------------------------------------------
4119 // JavaScript invokes.
4121 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4122 const ParameterCount& actual,
4123 Handle<Code> code_constant,
4126 bool* definitely_mismatches,
4128 const CallWrapper& call_wrapper) {
4129 bool definitely_matches = false;
4130 *definitely_mismatches = false;
4131 Label regular_invoke;
4133 // Check whether the expected and actual arguments count match. If not,
4134 // setup registers according to contract with ArgumentsAdaptorTrampoline:
4135 // a0: actual arguments count
4136 // a1: function (passed through to callee)
4137 // a2: expected arguments count
4139 // The code below is made a lot easier because the calling code already sets
4140 // up actual and expected registers according to the contract if values are
4141 // passed in registers.
4142 DCHECK(actual.is_immediate() || actual.reg().is(a0));
4143 DCHECK(expected.is_immediate() || expected.reg().is(a2));
4144 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
4146 if (expected.is_immediate()) {
4147 DCHECK(actual.is_immediate());
4148 if (expected.immediate() == actual.immediate()) {
4149 definitely_matches = true;
4150 } else {
4151 li(a0, Operand(actual.immediate()));
4152 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4153 if (expected.immediate() == sentinel) {
4154 // Don't worry about adapting arguments for builtins that
4155 // don't want that done. Skip adaption code by making it look
4156 // like we have a match between expected and actual number of
4157 // arguments.
4158 definitely_matches = true;
4159 } else {
4160 *definitely_mismatches = true;
4161 li(a2, Operand(expected.immediate()));
4162 }
4163 }
4164 } else if (actual.is_immediate()) {
4165 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.immediate()));
4166 li(a0, Operand(actual.immediate()));
4168 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.reg()));
4171 if (!definitely_matches) {
4172 if (!code_constant.is_null()) {
4173 li(a3, Operand(code_constant));
4174 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
4177 Handle<Code> adaptor =
4178 isolate()->builtins()->ArgumentsAdaptorTrampoline();
4179 if (flag == CALL_FUNCTION) {
4180 call_wrapper.BeforeCall(CallSize(adaptor));
4181 Call(adaptor, RelocInfo::CODE_TARGET);
4182 call_wrapper.AfterCall();
4183 if (!*definitely_mismatches) {
4184 Branch(done);
4185 }
4186 } else {
4187 Jump(adaptor, RelocInfo::CODE_TARGET);
4188 }
4189 bind(&regular_invoke);
4190 }
4191 }
4194 void MacroAssembler::InvokeCode(Register code,
4195 const ParameterCount& expected,
4196 const ParameterCount& actual,
4198 const CallWrapper& call_wrapper) {
4199 // You can't call a function without a valid frame.
4200 DCHECK(flag == JUMP_FUNCTION || has_frame());
4202 Label done;
4204 bool definitely_mismatches = false;
4205 InvokePrologue(expected, actual, Handle<Code>::null(), code,
4206 &done, &definitely_mismatches, flag,
4208 if (!definitely_mismatches) {
4209 if (flag == CALL_FUNCTION) {
4210 call_wrapper.BeforeCall(CallSize(code));
4211 Call(code);
4212 call_wrapper.AfterCall();
4213 } else {
4214 DCHECK(flag == JUMP_FUNCTION);
4215 Jump(code);
4216 }
4217 // Continue here if InvokePrologue does handle the invocation due to
4218 // mismatched parameter counts.
4219 bind(&done);
4220 }
4221 }
4224 void MacroAssembler::InvokeFunction(Register function,
4225 const ParameterCount& actual,
4227 const CallWrapper& call_wrapper) {
4228 // You can't call a function without a valid frame.
4229 DCHECK(flag == JUMP_FUNCTION || has_frame());
4231 // Contract with called JS functions requires that function is passed in a1.
4232 DCHECK(function.is(a1));
4233 Register expected_reg = a2;
4234 Register code_reg = a3;
4236 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4237 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4239 FieldMemOperand(code_reg,
4240 SharedFunctionInfo::kFormalParameterCountOffset));
4241 sra(expected_reg, expected_reg, kSmiTagSize);
4242 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4244 ParameterCount expected(expected_reg);
4245 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
4249 void MacroAssembler::InvokeFunction(Register function,
4250 const ParameterCount& expected,
4251 const ParameterCount& actual,
4253 const CallWrapper& call_wrapper) {
4254 // You can't call a function without a valid frame.
4255 DCHECK(flag == JUMP_FUNCTION || has_frame());
4257 // Contract with called JS functions requires that function is passed in a1.
4258 DCHECK(function.is(a1));
4260 // Get the function and setup the context.
4261 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4263 // We call indirectly through the code field in the function to
4264 // allow recompilation to take effect without changing any of the
4266 lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4267 InvokeCode(a3, expected, actual, flag, call_wrapper);
4271 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4272 const ParameterCount& expected,
4273 const ParameterCount& actual,
4274 InvokeFlag flag,
4275 const CallWrapper& call_wrapper) {
4276 li(a1, function);
4277 InvokeFunction(a1, expected, actual, flag, call_wrapper);
4278 }
4281 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
4285 lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
4286 IsInstanceJSObjectType(map, scratch, fail);
4290 void MacroAssembler::IsInstanceJSObjectType(Register map,
4293 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
4294 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
4295 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
4299 void MacroAssembler::IsObjectJSStringType(Register object,
4302 DCHECK(kNotStringTag != 0);
4304 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4305 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4306 And(scratch, scratch, Operand(kIsNotStringMask));
4307 Branch(fail, ne, scratch, Operand(zero_reg));
4311 void MacroAssembler::IsObjectNameType(Register object,
4314 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4315 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4316 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4320 // ---------------------------------------------------------------------------
4321 // Support functions.
4324 void MacroAssembler::TryGetFunctionPrototype(Register function,
4325 Register result,
4326 Register scratch,
4327 Label* miss,
4328 bool miss_on_bound_function) {
4329 Label non_instance;
4330 if (miss_on_bound_function) {
4331 // Check that the receiver isn't a smi.
4332 JumpIfSmi(function, miss);
4334 // Check that the function really is a function. Load map into result reg.
4335 GetObjectType(function, result, scratch);
4336 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
4339 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
4341 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
4342 And(scratch, scratch,
4343 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
4344 Branch(miss, ne, scratch, Operand(zero_reg));
4346 // Make sure that the function has an instance prototype.
4347 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
4348 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
4349 Branch(&non_instance, ne, scratch, Operand(zero_reg));
4352 // Get the prototype or initial map from the function.
4354 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4356 // If the prototype or initial map is the hole, don't return it and
4357 // simply miss the cache instead. This will allow us to allocate a
4358 // prototype object on-demand in the runtime system.
4359 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4360 Branch(miss, eq, result, Operand(t8));
4362 // If the function does not have an initial map, we're done.
4363 Label done;
4364 GetObjectType(result, scratch, scratch);
4365 Branch(&done, ne, scratch, Operand(MAP_TYPE));
4367 // Get the prototype from the initial map.
4368 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
4370 if (miss_on_bound_function) {
4371 jmp(&done);
4373 // Non-instance prototype: Fetch prototype from constructor field
4374 // in initial map.
4375 bind(&non_instance);
4376 lw(result, FieldMemOperand(result, Map::kConstructorOffset));
4377 }
4379 // All done.
4380 bind(&done);
4381 }
4384 void MacroAssembler::GetObjectType(Register object,
4386 Register type_reg) {
4387 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
4388 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4392 // -----------------------------------------------------------------------------
4395 void MacroAssembler::CallStub(CodeStub* stub,
4396 TypeFeedbackId ast_id,
4400 BranchDelaySlot bd) {
4401 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
4402 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4407 void MacroAssembler::TailCallStub(CodeStub* stub,
4411 BranchDelaySlot bd) {
4412 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4416 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4417 return has_frame_ || !stub->SometimesSetsUpAFrame();
4421 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4422 // If the hash field contains an array index pick it out. The assert checks
4423 // that the constants for the maximum number of digits for an array index
4424 // cached in the hash field and the number of bits reserved for it does not
4426 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4427 (1 << String::kArrayIndexValueBits));
4428 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4432 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4433 FPURegister result,
4434 Register scratch1,
4435 Register scratch2,
4436 Register heap_number_map,
4437 Label* not_number,
4438 ObjectToDoubleFlags flags) {
4439 Label done;
4440 if ((flags & OBJECT_NOT_SMI) == 0) {
4441 Label not_smi;
4442 JumpIfNotSmi(object, &not_smi);
4443 // Remove smi tag and convert to double.
4444 sra(scratch1, object, kSmiTagSize);
4445 mtc1(scratch1, result);
4446 cvt_d_w(result, result);
4447 Branch(&done);
4448 bind(&not_smi);
4449 }
4450 // Check for heap number and load double value from it.
4451 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4452 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4454 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4455 // If exponent is all ones the number is either a NaN or +/-Infinity.
4456 Register exponent = scratch1;
4457 Register mask_reg = scratch2;
4458 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4459 li(mask_reg, HeapNumber::kExponentMask);
4461 And(exponent, exponent, mask_reg);
4462 Branch(not_number, eq, exponent, Operand(mask_reg));
4463 }
4464 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4465 bind(&done);
4466 }
4469 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4470 FPURegister value,
4471 Register scratch1) {
4472 sra(scratch1, smi, kSmiTagSize);
4473 mtc1(scratch1, value);
4474 cvt_d_w(value, value);
4475 }
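// Smis are stored shifted left by kSmiTagSize with a zero tag bit, so e.g.
// the integer 5 is held as the word 10. The sra above recovers the integer,
// and mtc1/cvt_d_w moves it into an FPU register and converts it to double.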
4478 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4479 const Operand& right,
4480 Register overflow_dst,
4482 if (right.is_reg()) {
4483 AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4486 mov(scratch, left); // Preserve left.
4487 addiu(dst, left, right.immediate()); // Left is overwritten.
4488 xor_(scratch, dst, scratch); // Original left.
4489 // Load right since xori takes uint16 as immediate.
4490 addiu(t9, zero_reg, right.immediate());
4491 xor_(overflow_dst, dst, t9);
4492 and_(overflow_dst, overflow_dst, scratch);
4494 addiu(dst, left, right.immediate());
4495 xor_(overflow_dst, dst, left);
4496 // Load right since xori takes uint16 as immediate.
4497 addiu(t9, zero_reg, right.immediate());
4498 xor_(scratch, dst, t9);
4499 and_(overflow_dst, scratch, overflow_dst);
4505 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4507 Register overflow_dst,
4509 DCHECK(!dst.is(overflow_dst));
4510 DCHECK(!dst.is(scratch));
4511 DCHECK(!overflow_dst.is(scratch));
4512 DCHECK(!overflow_dst.is(left));
4513 DCHECK(!overflow_dst.is(right));
4515 if (left.is(right) && dst.is(left)) {
4516 DCHECK(!dst.is(t9));
4517 DCHECK(!scratch.is(t9));
4518 DCHECK(!left.is(t9));
4519 DCHECK(!right.is(t9));
4520 DCHECK(!overflow_dst.is(t9));
4526 mov(scratch, left); // Preserve left.
4527 addu(dst, left, right); // Left is overwritten.
4528 xor_(scratch, dst, scratch); // Original left.
4529 xor_(overflow_dst, dst, right);
4530 and_(overflow_dst, overflow_dst, scratch);
4531 } else if (dst.is(right)) {
4532 mov(scratch, right); // Preserve right.
4533 addu(dst, left, right); // Right is overwritten.
4534 xor_(scratch, dst, scratch); // Original right.
4535 xor_(overflow_dst, dst, left);
4536 and_(overflow_dst, overflow_dst, scratch);
4538 addu(dst, left, right);
4539 xor_(overflow_dst, dst, left);
4540 xor_(scratch, dst, right);
4541 and_(overflow_dst, scratch, overflow_dst);
4542 }
4543 }
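// The xor/and sequences above compute signed overflow directly into the
// sign bit of overflow_dst: addition overflows exactly when both operands
// have the same sign but the result's sign differs, i.e. bit 31 of
//   (result ^ left) & (result ^ right)
// is set. Callers branch on overflow_dst being negative.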
4546 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4547 const Operand& right,
4548 Register overflow_dst,
4550 if (right.is_reg()) {
4551 SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4554 mov(scratch, left); // Preserve left.
4555 addiu(dst, left, -(right.immediate())); // Left is overwritten.
4556 xor_(overflow_dst, dst, scratch); // scratch is original left.
4557 // Load right since xori takes uint16 as immediate.
4558 addiu(t9, zero_reg, right.immediate());
4559 xor_(scratch, scratch, t9); // scratch is original left.
4560 and_(overflow_dst, scratch, overflow_dst);
4562 addiu(dst, left, -(right.immediate()));
4563 xor_(overflow_dst, dst, left);
4564 // Load right since xori takes uint16 as immediate.
4565 addiu(t9, zero_reg, right.immediate());
4566 xor_(scratch, left, t9);
4567 and_(overflow_dst, scratch, overflow_dst);
4573 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4575 Register overflow_dst,
4577 DCHECK(!dst.is(overflow_dst));
4578 DCHECK(!dst.is(scratch));
4579 DCHECK(!overflow_dst.is(scratch));
4580 DCHECK(!overflow_dst.is(left));
4581 DCHECK(!overflow_dst.is(right));
4582 DCHECK(!scratch.is(left));
4583 DCHECK(!scratch.is(right));
4585 // This happens with some crankshaft code. Since Subu works fine if
4586 // left == right, let's not make that restriction here.
4587 if (left.is(right)) {
4588 mov(dst, zero_reg);
4589 mov(overflow_dst, zero_reg);
4590 return;
4591 }
4593 if (dst.is(left)) {
4594 mov(scratch, left);  // Preserve left.
4595 subu(dst, left, right); // Left is overwritten.
4596 xor_(overflow_dst, dst, scratch); // scratch is original left.
4597 xor_(scratch, scratch, right); // scratch is original left.
4598 and_(overflow_dst, scratch, overflow_dst);
4599 } else if (dst.is(right)) {
4600 mov(scratch, right); // Preserve right.
4601 subu(dst, left, right); // Right is overwritten.
4602 xor_(overflow_dst, dst, left);
4603 xor_(scratch, left, scratch); // Original right.
4604 and_(overflow_dst, scratch, overflow_dst);
4606 subu(dst, left, right);
4607 xor_(overflow_dst, dst, left);
4608 xor_(scratch, left, right);
4609 and_(overflow_dst, scratch, overflow_dst);
4610 }
4611 }
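// For subtraction the overflow rule flips: left - right overflows exactly
// when the operands have different signs and the result's sign differs from
// left's, which is what (result ^ left) & (left ^ right) computes in the
// sign bit.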
4614 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4616 SaveFPRegsMode save_doubles) {
4617 // All parameters are on the stack. v0 has the return value after call.
4619 // If the expected number of arguments of the runtime function is
4620 // constant, we check that the actual number of arguments match the
4622 CHECK(f->nargs < 0 || f->nargs == num_arguments);
4624 // TODO(1236192): Most runtime routines don't need the number of
4625 // arguments passed in because it is constant. At some point we
4626 // should remove this need and make the runtime routine entry code
4628 PrepareCEntryArgs(num_arguments);
4629 PrepareCEntryFunction(ExternalReference(f, isolate()));
4630 CEntryStub stub(isolate(), 1, save_doubles);
4631 CallStub(&stub);
4632 }
4635 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4637 BranchDelaySlot bd) {
4638 PrepareCEntryArgs(num_arguments);
4639 PrepareCEntryFunction(ext);
4641 CEntryStub stub(isolate(), 1);
4642 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4646 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4649 // TODO(1236192): Most runtime routines don't need the number of
4650 // arguments passed in because it is constant. At some point we
4651 // should remove this need and make the runtime routine entry code
4653 PrepareCEntryArgs(num_arguments);
4654 JumpToExternalReference(ext);
4658 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4661 TailCallExternalReference(ExternalReference(fid, isolate()),
4667 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4668 BranchDelaySlot bd) {
4669 PrepareCEntryFunction(builtin);
4670 CEntryStub stub(isolate(), 1);
4671 Jump(stub.GetCode(),
4672 RelocInfo::CODE_TARGET,
4673 al,
4674 zero_reg,
4675 Operand(zero_reg),
4676 bd);
4677 }
4680 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4682 const CallWrapper& call_wrapper) {
4683 // You can't call a builtin without a valid frame.
4684 DCHECK(flag == JUMP_FUNCTION || has_frame());
4686 GetBuiltinEntry(t9, id);
4687 if (flag == CALL_FUNCTION) {
4688 call_wrapper.BeforeCall(CallSize(t9));
4689 Call(t9);
4690 call_wrapper.AfterCall();
4691 } else {
4692 DCHECK(flag == JUMP_FUNCTION);
4693 Jump(t9);
4694 }
4695 }
4698 void MacroAssembler::GetBuiltinFunction(Register target,
4699 Builtins::JavaScript id) {
4700 // Load the builtins object into target register.
4701 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4702 lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4703 // Load the JavaScript builtin function from the builtins object.
4704 lw(target, FieldMemOperand(target,
4705 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4709 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4710 DCHECK(!target.is(a1));
4711 GetBuiltinFunction(a1, id);
4712 // Load the code entry point from the builtins object.
4713 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4717 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4718 Register scratch1, Register scratch2) {
4719 if (FLAG_native_code_counters && counter->Enabled()) {
4720 li(scratch1, Operand(value));
4721 li(scratch2, Operand(ExternalReference(counter)));
4722 sw(scratch1, MemOperand(scratch2));
4727 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4728 Register scratch1, Register scratch2) {
4730 if (FLAG_native_code_counters && counter->Enabled()) {
4731 li(scratch2, Operand(ExternalReference(counter)));
4732 lw(scratch1, MemOperand(scratch2));
4733 Addu(scratch1, scratch1, Operand(value));
4734 sw(scratch1, MemOperand(scratch2));
4739 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4740 Register scratch1, Register scratch2) {
4742 if (FLAG_native_code_counters && counter->Enabled()) {
4743 li(scratch2, Operand(ExternalReference(counter)));
4744 lw(scratch1, MemOperand(scratch2));
4745 Subu(scratch1, scratch1, Operand(value));
4746 sw(scratch1, MemOperand(scratch2));
4751 // -----------------------------------------------------------------------------
4754 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4755 Register rs, Operand rt) {
4756 if (emit_debug_code())
4757 Check(cc, reason, rs, rt);
4761 void MacroAssembler::AssertFastElements(Register elements) {
4762 if (emit_debug_code()) {
4763 DCHECK(!elements.is(at));
4764 Label ok;
4765 push(elements);
4766 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4767 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4768 Branch(&ok, eq, elements, Operand(at));
4769 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4770 Branch(&ok, eq, elements, Operand(at));
4771 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4772 Branch(&ok, eq, elements, Operand(at));
4773 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4774 bind(&ok);
4775 pop(elements);
4776 }
4777 }
4780 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4781 Register rs, Operand rt) {
4782 Label L;
4783 Branch(&L, cc, rs, rt);
4784 Abort(reason);
4785 // Will not return here.
4786 bind(&L);
4787 }
4790 void MacroAssembler::Abort(BailoutReason reason) {
4791 Label abort_start;
4792 bind(&abort_start);
4793 #ifdef DEBUG
4794 const char* msg = GetBailoutReason(reason);
4795 if (msg != NULL) {
4796 RecordComment("Abort message: ");
4797 RecordComment(msg);
4798 }
4800 if (FLAG_trap_on_abort) {
4801 stop(msg);
4802 return;
4803 }
4804 #endif
4806 li(a0, Operand(Smi::FromInt(reason)));
4807 push(a0);
4808 // Disable stub call restrictions to always allow calls to abort.
4809 if (!has_frame_) {
4810 // We don't actually want to generate a pile of code for this, so just
4811 // claim there is a stack frame, without generating one.
4812 FrameScope scope(this, StackFrame::NONE);
4813 CallRuntime(Runtime::kAbort, 1);
4814 } else {
4815 CallRuntime(Runtime::kAbort, 1);
4816 }
4817 // Will not return here.
4818 if (is_trampoline_pool_blocked()) {
4819 // If the calling code cares about the exact number of
4820 // instructions generated, we insert padding here to keep the size
4821 // of the Abort macro constant.
4822 // Currently in debug mode with debug_code enabled the number of
4823 // generated instructions is 10, so we use this as a maximum value.
4824 static const int kExpectedAbortInstructions = 10;
4825 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4826 DCHECK(abort_instructions <= kExpectedAbortInstructions);
4827 while (abort_instructions++ < kExpectedAbortInstructions) {
4828 nop();
4829 }
4830 }
4831 }
4834 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4835 if (context_chain_length > 0) {
4836 // Move up the chain of contexts to the context containing the slot.
4837 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4838 for (int i = 1; i < context_chain_length; i++) {
4839 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4840 }
4841 } else {
4842 // Slot is in the current function context. Move it into the
4843 // destination register in case we store into it (the write barrier
4844 // cannot be allowed to destroy the context in cp).
4845 mov(dst, cp);
4846 }
4847 }
4850 void MacroAssembler::LoadTransitionedArrayMapConditional(
4851 ElementsKind expected_kind,
4852 ElementsKind transitioned_kind,
4853 Register map_in_out,
4855 Label* no_map_match) {
4856 // Load the global or builtins object from the current context.
4858 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4859 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4861 // Check that the function's map is the same as the expected cached map.
4864 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4865 size_t offset = expected_kind * kPointerSize +
4866 FixedArrayBase::kHeaderSize;
4867 lw(at, FieldMemOperand(scratch, offset));
4868 Branch(no_map_match, ne, map_in_out, Operand(at));
4870 // Use the transitioned cached map.
4871 offset = transitioned_kind * kPointerSize +
4872 FixedArrayBase::kHeaderSize;
4873 lw(map_in_out, FieldMemOperand(scratch, offset));
4877 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4878 // Load the global or builtins object from the current context.
4880 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4881 // Load the native context from the global or builtins object.
4882 lw(function, FieldMemOperand(function,
4883 GlobalObject::kNativeContextOffset));
4884 // Load the function from the native context.
4885 lw(function, MemOperand(function, Context::SlotOffset(index)));
4889 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4892 // Load the initial map. The global functions all have initial maps.
4893 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4894 if (emit_debug_code()) {
4895 Label ok, fail;
4896 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4897 Branch(&ok);
4898 bind(&fail);
4899 Abort(kGlobalFunctionsMustHaveInitialMap);
4900 bind(&ok);
4901 }
4902 }
4905 void MacroAssembler::StubPrologue() {
4906 Push(ra, fp, cp);
4907 Push(Smi::FromInt(StackFrame::STUB));
4908 // Adjust FP to point to saved FP.
4909 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4913 void MacroAssembler::Prologue(bool code_pre_aging) {
4914 PredictableCodeSizeScope predictible_code_size_scope(
4915 this, kNoCodeAgeSequenceLength);
4916 // The following three instructions must remain together and unmodified
4917 // for code aging to work properly.
4918 if (code_pre_aging) {
4919 // Pre-age the code.
4920 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4921 nop(Assembler::CODE_AGE_MARKER_NOP);
4922 // Load the stub address to t9 and call it,
4923 // GetCodeAgeAndParity() extracts the stub address from this instruction.
4925 Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
4927 nop(); // Prevent jalr to jal optimization.
4929 nop(); // Branch delay slot nop.
4930 nop(); // Pad the empty space.
4932 Push(ra, fp, cp, a1);
4933 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4934 // Adjust fp to point to caller's fp.
4935 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4940 void MacroAssembler::EnterFrame(StackFrame::Type type,
4941 bool load_constant_pool_pointer_reg) {
4942 // Out-of-line constant pool not implemented on mips.
4947 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4948 addiu(sp, sp, -5 * kPointerSize);
4949 li(t8, Operand(Smi::FromInt(type)));
4950 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4951 sw(ra, MemOperand(sp, 4 * kPointerSize));
4952 sw(fp, MemOperand(sp, 3 * kPointerSize));
4953 sw(cp, MemOperand(sp, 2 * kPointerSize));
4954 sw(t8, MemOperand(sp, 1 * kPointerSize));
4955 sw(t9, MemOperand(sp, 0 * kPointerSize));
4956 // Adjust FP to point to saved FP.
4957 Addu(fp, sp,
4958 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4959 }
4962 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4963 mov(sp, fp);
4964 lw(fp, MemOperand(sp, 0 * kPointerSize));
4965 lw(ra, MemOperand(sp, 1 * kPointerSize));
4966 addiu(sp, sp, 2 * kPointerSize);
void MacroAssembler::EnterExitFrame(bool save_doubles,
                                    int stack_space) {
  // Set up the frame structure on the stack.
  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);

  // This is how the stack will look:
  // fp + 2 (==kCallerSPDisplacement) - old stack's end
  // [fp + 1 (==kCallerPCOffset)] - saved old ra
  // [fp + 0 (==kCallerFPOffset)] - saved old fp
  // [fp - 1 (==kSPOffset)] - sp of the called function
  // [fp - 2 (==kCodeOffset)] - CodeObject
  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
  //   new stack (will contain saved ra)

  // Save registers and reserve room for saved entry sp and code object.
  addiu(sp, sp, -4 * kPointerSize);
  sw(ra, MemOperand(sp, 3 * kPointerSize));
  sw(fp, MemOperand(sp, 2 * kPointerSize));
  addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.

  if (emit_debug_code()) {
    sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }

  // Accessed from ExitFrame::code_slot.
  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  sw(fp, MemOperand(t8));
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  sw(cp, MemOperand(t8));

  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  if (save_doubles) {
    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
    DCHECK(kDoubleSize == frame_alignment);
    if (frame_alignment > 0) {
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      And(sp, sp, Operand(-frame_alignment));  // Align stack.
    }
    int space = FPURegister::kMaxNumRegisters * kDoubleSize;
    Subu(sp, sp, Operand(space));
    // Remember: we only need to save every 2nd double FPU value.
    for (int i = 0; i < FPURegister::kMaxNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      sdc1(reg, MemOperand(sp, i * kDoubleSize));
    }
  }

  // Reserve place for the return address, stack space and an optional slot
  // (used by the DirectCEntryStub to hold the return value if a struct is
  // returned) and align the frame preparing for calling the runtime function.
  DCHECK(stack_space >= 0);
  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
  if (frame_alignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));  // Align stack.
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  addiu(at, sp, kPointerSize);
  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
}


void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool restore_context, bool do_return,
                                    bool argument_count_is_length) {
  // Optionally restore all double registers.
  if (save_doubles) {
    // Remember: we only need to restore every 2nd double FPU value.
    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
    for (int i = 0; i < FPURegister::kMaxNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
    }
  }

  // Clear top frame.
  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  sw(zero_reg, MemOperand(t8));

  // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    lw(cp, MemOperand(t8));
  }
#ifdef DEBUG
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  sw(a3, MemOperand(t8));
#endif

  // Pop the arguments, restore registers, and return.
  mov(sp, fp);  // Respect ABI stack constraint.
  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));

  if (argument_count.is_valid()) {
    if (argument_count_is_length) {
      addu(sp, sp, argument_count);
    } else {
      sll(t8, argument_count, kPointerSizeLog2);
      addu(sp, sp, t8);
    }
  }

  if (do_return) {
    Ret(USE_DELAY_SLOT);
    // If returning, the instruction in the delay slot will be the addiu below.
  }
  addiu(sp, sp, 8);
}


void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  sll(scratch1, length, kSmiTagSize);
  LoadRoot(scratch2, map_index);
  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
  li(scratch1, Operand(String::kEmptyHashField));
  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_MIPS
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one Mips
  // platform for another Mips platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_MIPS
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_MIPS
}


void MacroAssembler::AssertStackIsAligned() {
  if (emit_debug_code()) {
    const int frame_alignment = ActivationFrameAlignment();
    const int frame_alignment_mask = frame_alignment - 1;

    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      andi(at, sp, frame_alignment_mask);
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort re-entering here.
      stop("Unexpected stack alignment");
      bind(&alignment_as_expected);
    }
  }
}


void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg,
    Register scratch,
    Label* not_power_of_two_or_zero) {
  Subu(scratch, reg, Operand(1));
  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
         scratch, Operand(zero_reg));
  and_(at, scratch, reg);  // In the delay slot.
  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
}


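// The check above uses the classic power-of-two identity: for x > 0,
// (x & (x - 1)) == 0 iff x is a power of two, because subtracting one flips
// the lowest set bit and everything below it (e.g. 8 & 7 == 0, but
// 6 & 5 == 4). Zero and negative values are routed out first via the 'lt'
// branch, since x - 1 is then negative.

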
void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
  DCHECK(!reg.is(overflow));
  mov(overflow, reg);  // Save original value.
  SmiTag(reg);
  xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
}


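// The xor trick above works because on 32-bit V8 SmiTag is a left shift by
// one: tagging loses information exactly when the top two bits of the value
// differ, and (value ^ (value << 1)) then has its sign bit set. For example
// 0x40000000 tags to 0x80000000, and 0x40000000 ^ 0x80000000 == 0xC0000000,
// which is negative, signalling overflow.

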
void MacroAssembler::SmiTagCheckOverflow(Register dst,
                                         Register src,
                                         Register overflow) {
  if (dst.is(src)) {
    // Fall back to slower case.
    SmiTagCheckOverflow(dst, overflow);
  } else {
    DCHECK(!dst.is(src));
    DCHECK(!dst.is(overflow));
    DCHECK(!src.is(overflow));
    SmiTag(dst, src);
    xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
  }
}


void MacroAssembler::UntagAndJumpIfSmi(Register dst,
                                       Register src,
                                       Label* smi_case) {
  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
  SmiUntag(dst, src);
}


void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
                                          Register src,
                                          Label* non_smi_case) {
  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
  SmiUntag(dst, src);
}


void MacroAssembler::JumpIfSmi(Register value,
                               Label* smi_label,
                               Register scratch,
                               BranchDelaySlot bd) {
  DCHECK_EQ(0, kSmiTag);
  andi(scratch, value, kSmiTagMask);
  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}


void MacroAssembler::JumpIfNotSmi(Register value,
                                  Label* not_smi_label,
                                  Register scratch,
                                  BranchDelaySlot bd) {
  DCHECK_EQ(0, kSmiTag);
  andi(scratch, value, kSmiTagMask);
  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}


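// Both helpers above rely on the 32-bit smi encoding: a smi carries tag bit
// 0 while a heap object pointer carries tag bit 1, so a single andi with
// kSmiTagMask (== 1) is enough to tell the two apart.

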
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(1, kSmiTagMask);
  or_(at, reg1, reg2);
  JumpIfNotSmi(at, on_not_both_smi);
}


void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(1, kSmiTagMask);
  // Both Smi tags must be 1 (not Smi).
  and_(at, reg1, reg2);
  JumpIfSmi(at, on_either_smi);
}


void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    andi(at, object, kSmiTagMask);
    Check(ne, kOperandIsASmi, at, Operand(zero_reg));
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    andi(at, object, kSmiTagMask);
    Check(eq, kOperandIsASmi, at, Operand(zero_reg));
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, t0);
    Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
    push(object);
    lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
    Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
    pop(object);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, t0);
    Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
    push(object);
    lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
    Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
    pop(object);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
    Branch(&done_checking, eq, object, Operand(scratch));
    push(object);
    lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
    LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
    pop(object);
    bind(&done_checking);
  }
}


void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
  if (emit_debug_code()) {
    DCHECK(!reg.is(at));
    LoadRoot(at, index);
    Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
  }
}


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}


void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
                                             Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
  sra(mask, mask, kSmiTagSize + 1);
  Addu(mask, mask, -1);  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;
  JumpIfSmi(object, &is_smi);
  CheckMap(object,
           scratch1,
           Heap::kHeapNumberMapRootIndex,
           not_found,
           DONT_DO_SMI_CHECK);

  STATIC_ASSERT(8 == kDoubleSize);
  Addu(scratch1,
       object,
       Operand(HeapNumber::kValueOffset - kHeapObjectTag));
  lw(scratch2, MemOperand(scratch1, kPointerSize));
  lw(scratch1, MemOperand(scratch1, 0));
  Xor(scratch1, scratch1, Operand(scratch2));
  And(scratch1, scratch1, Operand(mask));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  sll(scratch1, scratch1, kPointerSizeLog2 + 1);
  Addu(scratch1, number_string_cache, scratch1);

  Register probe = mask;
  lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
  ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
  BranchF(&load_result_from_cache, NULL, eq, f12, f14);
  Branch(not_found);

  bind(&is_smi);
  Register scratch = scratch1;
  sra(scratch, object, 1);  // Shift away the tag.
  And(scratch, mask, Operand(scratch));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  sll(scratch, scratch, kPointerSizeLog2 + 1);
  Addu(scratch, number_string_cache, scratch);

  // Check if the entry is the smi we are looking for.
  lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  Branch(not_found, ne, object, Operand(probe));

  // Get the result from the cache.
  bind(&load_result_from_cache);
  lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));

  IncrementCounter(isolate()->counters()->number_to_string_native(),
                   1,
                   scratch1,
                   scratch2);
}


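// Cache layout sketch: each cache entry occupies two consecutive FixedArray
// slots, cache[2 * i] holding the key (a smi or heap number) and
// cache[2 * i + 1] the cached string, which is why the hash is shifted by
// kPointerSizeLog2 + 1 when forming the entry address above.

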
void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential one-byte strings.
  // Assume that they are non-smis.
  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
                                                 scratch2, failure);
}


void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
                                                           Register second,
                                                           Register scratch1,
                                                           Register scratch2,
                                                           Label* failure) {
  // Check that neither is a smi.
  STATIC_ASSERT(kSmiTag == 0);
  And(scratch1, first, Operand(second));
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
                                               scratch2, failure);
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  DCHECK(kFlatOneByteStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
  andi(scratch1, first, kFlatOneByteStringMask);
  Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
  andi(scratch2, second, kFlatOneByteStringMask);
  Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
                                                              Register scratch,
                                                              Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  And(scratch, type, Operand(kFlatOneByteStringMask));
  Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
}


static const int kRegisterPassedArguments = 4;

int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  num_reg_arguments += 2 * num_double_arguments;

  // Up to four simple arguments are passed in registers a0..a3.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  stack_passed_words += kCArgSlotCount;
  return stack_passed_words;
}


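// Worked example (O32 ABI, kCArgSlotCount == 4): a call with 3 integer and
// 2 double arguments counts as 3 + 2 * 2 == 7 register-sized arguments, so
// 7 - 4 == 3 words spill to the stack; adding the 4 reserved argument slots
// gives 7 stack words in total.

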
void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               Register scratch,
                                               uint32_t encoding_mask) {
  Label is_object;
  SmiTst(string, at);
  Check(ne, kNonObject, at, Operand(zero_reg));

  lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
  lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));

  andi(at, at, kStringRepresentationMask | kStringEncodingMask);
  li(scratch, Operand(encoding_mask));
  Check(eq, kUnexpectedStringType, at, Operand(scratch));

  // The index is assumed to be untagged coming in, tag it to compare with the
  // string length without using a temp register, it is restored at the end of
  // this function.
  Label index_tag_ok, index_tag_bad;
  TrySmiTag(index, scratch, &index_tag_bad);
  Branch(&index_tag_ok);
  bind(&index_tag_bad);
  Abort(kIndexIsTooLarge);
  bind(&index_tag_ok);

  lw(at, FieldMemOperand(string, String::kLengthOffset));
  Check(lt, kIndexIsTooLarge, index, Operand(at));

  DCHECK(Smi::FromInt(0) == 0);
  Check(ge, kIndexIsNegative, index, Operand(zero_reg));

  SmiUntag(index, index);
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // Up to four simple arguments are passed in registers a0..a3.
  // Those four arguments must have reserved argument slots on the stack for
  // mips, even though those argument slots are not normally used.
  // Remaining arguments are pushed on the stack, above (higher address than)
  // the argument slots.
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));
    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  li(t8, Operand(function));
  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
  // The argument slots are presumed to have been set up by
  // PrepareCallCFunction. The C function must be called via t9, for mips ABI.

#if V8_HOST_ARCH_MIPS
  if (emit_debug_code()) {
    int frame_alignment = base::OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      Label alignment_as_expected;
      And(at, sp, Operand(frame_alignment_mask));
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort possibly
      // re-entering here.
      stop("Unexpected alignment in CallCFunction");
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.

  if (!function.is(t9)) {
    mov(t9, function);
    function = t9;
  }

  Call(function);

  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);

  if (base::OS::ActivationFrameAlignment() > kPointerSize) {
    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
  }
}


#undef BRANCH_ARGS_CHECK


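// Illustrative call sequence (the external reference named here is made up;
// any C function with a matching signature would do):
//   PrepareCallCFunction(2, 0, t0);  // Two integer arguments, scratch t0.
//   li(a0, Operand(first_arg));
//   li(a1, Operand(second_arg));
//   CallCFunction(ExternalReference::my_c_function(isolate()), 2, 0);

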
void MacroAssembler::PatchRelocatedValue(Register li_location,
                                         Register scratch,
                                         Register new_value) {
  lw(scratch, MemOperand(li_location));
  // At this point scratch is a lui(at, ...) instruction.
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionToPatchShouldBeALui,
          scratch, Operand(LUI));
    lw(scratch, MemOperand(li_location));
  }
  srl(t9, new_value, kImm16Bits);
  Ins(scratch, t9, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location));

  lw(scratch, MemOperand(li_location, kInstrSize));
  // scratch is now ori(at, ...).
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionToPatchShouldBeAnOri,
          scratch, Operand(ORI));
    lw(scratch, MemOperand(li_location, kInstrSize));
  }
  Ins(scratch, new_value, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location, kInstrSize));

  // Update the I-cache so the new lui and ori can be executed.
  FlushICache(li_location, 2);
}


void MacroAssembler::GetRelocatedValue(Register li_location,
                                       Register value,
                                       Register scratch) {
  lw(value, MemOperand(li_location));
  if (emit_debug_code()) {
    And(value, value, kOpcodeMask);
    Check(eq, kTheInstructionShouldBeALui,
          value, Operand(LUI));
    lw(value, MemOperand(li_location));
  }

  // value now holds a lui instruction. Extract the immediate.
  sll(value, value, kImm16Bits);

  lw(scratch, MemOperand(li_location, kInstrSize));
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionShouldBeAnOri,
          scratch, Operand(ORI));
    lw(scratch, MemOperand(li_location, kInstrSize));
  }
  // "scratch" now holds an ori instruction. Extract the immediate.
  andi(scratch, scratch, kImm16Mask);

  // Merge the results.
  or_(value, value, scratch);
}


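// Both relocation helpers above assume the canonical two-instruction li
// sequence, e.g. materializing 0x12345678 as
//   lui(at, 0x1234)
//   ori(at, at, 0x5678)
// so the lui immediate supplies the upper 16 bits of the value and the ori
// immediate the lower 16 bits.

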
void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
  And(scratch, object, Operand(~Page::kPageAlignmentMask));
  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  And(scratch, scratch, Operand(mask));
  Branch(condition_met, cc, scratch, Operand(zero_reg));
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
}


void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, t9, Operand(mask_scratch));
  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
  // Shift left 1 by adding.
  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
  And(t8, t9, Operand(mask_scratch));
  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
  jmp(&other_color);

  bind(&word_boundary);
  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
  And(t9, t9, Operand(1));
  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
  bind(&other_color);
}


// Detect some, but not all, common pointer-free objects. This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(Register value,
                                      Register scratch,
                                      Label* not_data_object) {
  DCHECK(!AreAliased(value, scratch, t8, no_reg));
  Label is_data_object;
  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  Branch(&is_data_object, eq, t8, Operand(scratch));
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
  Branch(not_data_object, ne, t8, Operand(zero_reg));
  bind(&is_data_object);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  sll(t8, t8, kPointerSizeLog2);
  Addu(bitmap_reg, bitmap_reg, t8);
  li(t8, Operand(1));
  sllv(mask_reg, t8, mask_reg);
}


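// Address decomposition performed above (32-bit): one mark bit covers one
// word, so bits [2, 7) of the address pick the bit within a 32-bit bitmap
// cell (mask_reg) and the bits above them, up to kPageSizeBits, pick the
// cell itself (t8, scaled to a byte offset). bitmap_reg ends up holding
// page start + cell offset.

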
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Register load_scratch,
    Label* value_is_white_and_not_data) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, mask_scratch, load_scratch);
  Branch(&done, ne, t8, Operand(zero_reg));

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    // sll may overflow, making the check conservative.
    sll(t8, mask_scratch, 1);
    And(t8, load_scratch, t8);
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Impossible marking bit pattern");
    bind(&ok);
  }

  // Value is white. We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = load_scratch;  // Holds map while checking type.
  Register length = load_scratch;  // Holds length of object after testing type.
  Label is_data_object;

  // Check for heap-number
  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  {
    Label skip;
    Branch(&skip, ne, t8, Operand(map));
    li(length, HeapNumber::kSize);
    Branch(&is_data_object);
    bind(&skip);
  }

  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  And(t8, instance_type, Operand(kExternalStringTag));
  {
    Label skip;
    Branch(&skip, eq, t8, Operand(zero_reg));
    li(length, ExternalString::kSize);
    Branch(&is_data_object);
    bind(&skip);
  }

  // Sequential string, either Latin1 or UC16.
  // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
  // getting the length multiplied by 2.
  DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
  lw(t9, FieldMemOperand(value, String::kLengthOffset));
  And(t8, instance_type, Operand(kStringEncodingMask));
  {
    Label skip;
    Branch(&skip, eq, t8, Operand(zero_reg));
    srl(t9, t9, 1);
    bind(&skip);
  }
  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
  And(length, length, Operand(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Or(t8, t8, Operand(mask_scratch));
  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Addu(t8, t8, Operand(length));
  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  bind(&done);
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Operand(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  lw(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  lw(dst,
     FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  lw(dst, FieldMemOperand(dst, offset));
}


void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Register empty_fixed_array_value = t2;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
  mov(a2, a0);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  EnumLength(a3, a1);
  Branch(
      call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));

  jmp(&start);

  bind(&next);
  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(a3, a1);
  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));

  bind(&start);

  // Check that there are no elements. Register a2 contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
  Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
  Branch(call_runtime, ne, a2, Operand(at));

  bind(&no_elements);
  lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
  Branch(&next, ne, a2, Operand(null_value));
}


void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  DCHECK(!output_reg.is(input_reg));
  Label done;
  li(output_reg, Operand(255));
  // Normal branch: nop in delay slot.
  Branch(&done, gt, input_reg, Operand(output_reg));
  // Use delay slot in this branch.
  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
  mov(output_reg, zero_reg);  // In delay slot.
  mov(output_reg, input_reg);  // Value is in range 0..255.
  bind(&done);
}


void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DoubleRegister input_reg,
                                        DoubleRegister temp_double_reg) {
  Label above_zero;
  Label done;
  Label in_bounds;

  Move(temp_double_reg, 0.0);
  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);

  // Double value is less than zero, NaN or Inf, return 0.
  mov(result_reg, zero_reg);
  Branch(&done);

  // Double value is >= 255, return 255.
  bind(&above_zero);
  Move(temp_double_reg, 255.0);
  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
  li(result_reg, Operand(255));
  Branch(&done);

  // In 0-255 range, round and truncate.
  bind(&in_bounds);
  cvt_w_d(temp_double_reg, input_reg);
  mfc1(result_reg, temp_double_reg);
  bind(&done);
}


void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found,
    Condition cond,
    Label* allocation_memento_present) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  Addu(scratch_reg, receiver_reg,
       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
  li(at, Operand(new_space_allocation_top));
  lw(at, MemOperand(at));
  Branch(no_memento_found, gt, scratch_reg, Operand(at));
  lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
  if (allocation_memento_present) {
    Branch(allocation_memento_present, cond, scratch_reg,
           Operand(isolate()->factory()->allocation_memento_map()));
  }
}


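// A candidate memento, when present, is allocated immediately after the
// JSArray in new space, which is why scratch_reg is advanced by
// JSArray::kSize + AllocationMemento::kSize before the bounds checks above.

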
Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2,
                                   Register reg3,
                                   Register reg4,
                                   Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // Scratch contained elements pointer.
  Move(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
  lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&loop_again, ne, current, Operand(factory->null_value()));
}


bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}


CodePatcher::CodePatcher(byte* address,
                         int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(NULL, address, size_ + Assembler::kGap),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    CpuFeatures::FlushICache(address_, size_);
  }

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::ChangeBranchCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  DCHECK(Assembler::IsBranch(instr));
  uint32_t opcode = Assembler::GetOpcodeField(instr);
  // Currently only the 'eq' and 'ne' cond values are supported and the simple
  // branch instructions (with opcode being the branch type).
  // There are some special cases (see Assembler::IsBranch()) so extending this
  // would be tricky.
  DCHECK(opcode == BEQ ||
         opcode == BNE ||
         opcode == BLEZ ||
         opcode == BGTZ ||
         opcode == BEQL ||
         opcode == BNEL ||
         opcode == BLEZL ||
         opcode == BGTZL);
  opcode = (cond == eq) ? BEQ : BNE;
  instr = (instr & ~kOpcodeMask) | opcode;
  masm_.emit(instr);
}


void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(at));
  DCHECK(!result.is(at));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  li(at, Operand(mag.multiplier));
  Mulh(result, dividend, Operand(at));
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) {
    Addu(result, result, Operand(dividend));
  }
  if (divisor < 0 && !neg && mag.multiplier > 0) {
    Subu(result, result, Operand(dividend));
  }
  if (mag.shift > 0) sra(result, result, mag.shift);
  srl(at, dividend, 31);
  Addu(result, result, Operand(at));
}


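// Worked example: for divisor == 7 the magic constant is 0x92492493 with
// shift 2 (see Hacker's Delight, table 10-1). The multiplier reads as
// negative, so the dividend is added back after Mulh, the result is shifted
// right by two, and the final srl/Addu pair adds one for negative dividends
// so the arithmetic shift's floor becomes truncation toward zero.

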
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS