1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
9 #if V8_TARGET_ARCH_MIPS64
11 #include "src/base/division-by-constant.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/cpu-profiler.h"
15 #include "src/debug.h"
16 #include "src/isolate-inl.h"
17 #include "src/runtime/runtime.h"
22 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
23 : Assembler(arg_isolate, buffer, size),
24 generating_stub_(false),
26 has_double_zero_reg_set_(false) {
27 if (isolate() != NULL) {
28 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
34 void MacroAssembler::Load(Register dst,
35 const MemOperand& src,
37 DCHECK(!r.IsDouble());
40 } else if (r.IsUInteger8()) {
42 } else if (r.IsInteger16()) {
44 } else if (r.IsUInteger16()) {
46 } else if (r.IsInteger32()) {
54 void MacroAssembler::Store(Register src,
55 const MemOperand& dst,
57 DCHECK(!r.IsDouble());
58 if (r.IsInteger8() || r.IsUInteger8()) {
60 } else if (r.IsInteger16() || r.IsUInteger16()) {
62 } else if (r.IsInteger32()) {
65 if (r.IsHeapObject()) {
67 } else if (r.IsSmi()) {
75 void MacroAssembler::LoadRoot(Register destination,
76 Heap::RootListIndex index) {
77 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
81 void MacroAssembler::LoadRoot(Register destination,
82 Heap::RootListIndex index,
84 Register src1, const Operand& src2) {
85 Branch(2, NegateCondition(cond), src1, src2);
86 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
90 void MacroAssembler::StoreRoot(Register source,
91 Heap::RootListIndex index) {
92 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
93 sd(source, MemOperand(s6, index << kPointerSizeLog2));
97 void MacroAssembler::StoreRoot(Register source,
98 Heap::RootListIndex index,
100 Register src1, const Operand& src2) {
101 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
102 Branch(2, NegateCondition(cond), src1, src2);
103 sd(source, MemOperand(s6, index << kPointerSizeLog2));
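// Illustrative use of the root accessors above (a sketch; assumes the usual
// Heap::RootListIndex constants and s6 as the root register):
//   LoadRoot(at, Heap::kUndefinedValueRootIndex);  // at <- undefined_value
//   StoreRoot(a0, Heap::kStackLimitRootIndex);     // must be a writable root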
107 // Push and pop all registers that can hold pointers.
108 void MacroAssembler::PushSafepointRegisters() {
109 // Safepoints expect a block of kNumSafepointRegisters values on the
110 // stack, so adjust the stack for unsaved registers.
111 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
112 DCHECK(num_unsaved >= 0);
113 if (num_unsaved > 0) {
114 Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
116 MultiPush(kSafepointSavedRegisters);
120 void MacroAssembler::PopSafepointRegisters() {
121 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
122 MultiPop(kSafepointSavedRegisters);
123 if (num_unsaved > 0) {
124 Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
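// Resulting stack layout after PushSafepointRegisters (sketch; lowest address
// first, following the MultiPush ordering described further below):
//   sp + 0 * kPointerSize : saved register with the lowest code
//   ...                   : ascending register codes
//   sp + (kNumSafepointSavedRegisters - 1) * kPointerSize : highest code
//   above that            : num_unsaved * kPointerSize of padding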
129 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
130 sd(src, SafepointRegisterSlot(dst));
134 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
135 ld(dst, SafepointRegisterSlot(src));
139 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
140 // The registers are pushed starting with the highest encoding,
141 // which means that the lowest encodings are closest to the stack pointer.
142 return kSafepointRegisterStackIndexMap[reg_code];
146 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
147 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
151 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
152 UNIMPLEMENTED_MIPS();
153 // General purpose registers are pushed last on the stack.
154 int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
155 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
156 return MemOperand(sp, doubles_size + register_offset);
160 void MacroAssembler::InNewSpace(Register object,
164 DCHECK(cc == eq || cc == ne);
165 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
166 Branch(branch, cc, scratch,
167 Operand(ExternalReference::new_space_start(isolate())));
171 void MacroAssembler::RecordWriteField(
177 SaveFPRegsMode save_fp,
178 RememberedSetAction remembered_set_action,
180 PointersToHereCheck pointers_to_here_check_for_value) {
181 DCHECK(!AreAliased(value, dst, t8, object));
182 // First, check if a write barrier is even needed. The tests below
183 // catch stores of Smis.
186 // Skip barrier if writing a smi.
187 if (smi_check == INLINE_SMI_CHECK) {
188 JumpIfSmi(value, &done);
191 // Although the object register is tagged, the offset is relative to the start
192 // of the object, so the offset must be a multiple of kPointerSize.
193 DCHECK(IsAligned(offset, kPointerSize));
195 Daddu(dst, object, Operand(offset - kHeapObjectTag));
196 if (emit_debug_code()) {
198 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
199 Branch(&ok, eq, t8, Operand(zero_reg));
200 stop("Unaligned cell in write barrier");
209 remembered_set_action,
211 pointers_to_here_check_for_value);
215 // When the debug-code flag is on, deliberately clobber the input registers
216 // that this function is documented to clobber, to provoke errors.
217 if (emit_debug_code()) {
218 li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
219 li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
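// Illustrative call shape for RecordWriteField (a sketch; kFooOffset is a
// hypothetical field offset, and the trailing parameters take their defaults):
//   sd(value, FieldMemOperand(object, kFooOffset));
//   RecordWriteField(object, kFooOffset, value, scratch,
//                    kRAHasNotBeenSaved, kDontSaveFPRegs);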
224 // Will clobber 4 registers: object, map, dst, ip. The
225 // register 'object' contains a heap object pointer.
226 void MacroAssembler::RecordWriteForMap(Register object,
230 SaveFPRegsMode fp_mode) {
231 if (emit_debug_code()) {
233 ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
235 kWrongAddressOrValuePassedToRecordWrite,
237 Operand(isolate()->factory()->meta_map()));
240 if (!FLAG_incremental_marking) {
244 if (emit_debug_code()) {
245 ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
247 kWrongAddressOrValuePassedToRecordWrite,
254 // A single check of the map page's interesting flag suffices, since that
255 // flag is only set during incremental collection, and then it is also
256 // guaranteed that the from-object page's interesting flag is set. This
257 // optimization relies on the fact that maps can never be in new space.
259 map, // Used as scratch.
260 MemoryChunk::kPointersToHereAreInterestingMask,
264 Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
265 if (emit_debug_code()) {
267 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
268 Branch(&ok, eq, at, Operand(zero_reg));
269 stop("Unaligned cell in write barrier");
273 // Record the actual write.
274 if (ra_status == kRAHasNotBeenSaved) {
277 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
280 if (ra_status == kRAHasNotBeenSaved) {
286 // Count number of write barriers in generated code.
287 isolate()->counters()->write_barriers_static()->Increment();
288 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
290 // When the debug-code flag is on, deliberately clobber the registers that
291 // this function is documented to clobber, to provoke errors.
292 if (emit_debug_code()) {
293 li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
294 li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
299 // Will clobber 4 registers: object, address, scratch, ip. The
300 // register 'object' contains a heap object pointer. The heap object
301 // tag is shifted away.
302 void MacroAssembler::RecordWrite(
307 SaveFPRegsMode fp_mode,
308 RememberedSetAction remembered_set_action,
310 PointersToHereCheck pointers_to_here_check_for_value) {
311 DCHECK(!AreAliased(object, address, value, t8));
312 DCHECK(!AreAliased(object, address, value, t9));
314 if (emit_debug_code()) {
315 ld(at, MemOperand(address));
317 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
320 if (remembered_set_action == OMIT_REMEMBERED_SET &&
321 !FLAG_incremental_marking) {
325 // First, check if a write barrier is even needed. The tests below
326 // catch stores of smis and stores into the young generation.
329 if (smi_check == INLINE_SMI_CHECK) {
330 DCHECK_EQ(0, kSmiTag);
331 JumpIfSmi(value, &done);
334 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
336 value, // Used as scratch.
337 MemoryChunk::kPointersToHereAreInterestingMask,
341 CheckPageFlag(object,
342 value, // Used as scratch.
343 MemoryChunk::kPointersFromHereAreInterestingMask,
347 // Record the actual write.
348 if (ra_status == kRAHasNotBeenSaved) {
351 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
354 if (ra_status == kRAHasNotBeenSaved) {
360 // Count number of write barriers in generated code.
361 isolate()->counters()->write_barriers_static()->Increment();
362 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
365 // When the debug-code flag is on, deliberately clobber the registers that
366 // this function is documented to clobber, to provoke errors.
367 if (emit_debug_code()) {
368 li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
369 li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
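// Fast paths above, in summary: smi stores (with INLINE_SMI_CHECK) and stores
// whose page flags show nothing interesting skip the stub call entirely, so
// only genuinely interesting old-to-new or marking cases reach RecordWriteStub.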
374 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
377 SaveFPRegsMode fp_mode,
378 RememberedSetFinalAction and_then) {
380 if (emit_debug_code()) {
382 JumpIfNotInNewSpace(object, scratch, &ok);
383 stop("Remembered set pointer is in new space");
386 // Load store buffer top.
387 ExternalReference store_buffer =
388 ExternalReference::store_buffer_top(isolate());
389 li(t8, Operand(store_buffer));
390 ld(scratch, MemOperand(t8));
391 // Store pointer to buffer and increment buffer top.
392 sd(address, MemOperand(scratch));
393 Daddu(scratch, scratch, kPointerSize);
394 // Write back new top of buffer.
395 sd(scratch, MemOperand(t8));
396 // Check for end of buffer, and call the overflow stub if it was reached.
398 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
399 DCHECK(!scratch.is(t8));
400 if (and_then == kFallThroughAtEnd) {
401 Branch(&done, eq, t8, Operand(zero_reg));
403 DCHECK(and_then == kReturnAtEnd);
404 Ret(eq, t8, Operand(zero_reg));
407 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
408 CallStub(&store_buffer_overflow);
411 if (and_then == kReturnAtEnd) {
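// In effect, the helper above performs (pseudo-C sketch, not the exact
// calling code):
//   *store_buffer_top++ = address;
//   if ((uintptr_t)store_buffer_top & StoreBuffer::kStoreBufferOverflowBit)
//     StoreBufferOverflowStub(fp_mode);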
417 // -----------------------------------------------------------------------------
418 // Allocation support.
421 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
426 DCHECK(!holder_reg.is(scratch));
427 DCHECK(!holder_reg.is(at));
428 DCHECK(!scratch.is(at));
430 // Load current lexical context from the stack frame.
431 ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
432 // In debug mode, make sure the lexical context is set.
434 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
435 scratch, Operand(zero_reg));
438 // Load the native context of the current context.
440 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
441 ld(scratch, FieldMemOperand(scratch, offset));
442 ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
444 // Check the context is a native context.
445 if (emit_debug_code()) {
446 push(holder_reg); // Temporarily save holder on the stack.
447 // Read the first word and compare to the native_context_map.
448 ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
449 LoadRoot(at, Heap::kNativeContextMapRootIndex);
450 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
451 holder_reg, Operand(at));
452 pop(holder_reg); // Restore holder.
455 // Check if both contexts are the same.
456 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
457 Branch(&same_contexts, eq, scratch, Operand(at));
459 // Check the context is a native context.
460 if (emit_debug_code()) {
461 push(holder_reg); // Temporarily save holder on the stack.
462 mov(holder_reg, at); // Move at to its holding place.
463 LoadRoot(at, Heap::kNullValueRootIndex);
464 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
465 holder_reg, Operand(at));
467 ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
468 LoadRoot(at, Heap::kNativeContextMapRootIndex);
469 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
470 holder_reg, Operand(at));
471 // Restoring 'at' is not needed; 'at' is reloaded below.
472 pop(holder_reg); // Restore holder.
473 // Restore at to holder's context.
474 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
477 // Check that the security token in the calling global object is
478 // compatible with the security token in the receiving global object.
480 int token_offset = Context::kHeaderSize +
481 Context::SECURITY_TOKEN_INDEX * kPointerSize;
483 ld(scratch, FieldMemOperand(scratch, token_offset));
484 ld(at, FieldMemOperand(at, token_offset));
485 Branch(miss, ne, scratch, Operand(at));
487 bind(&same_contexts);
491 // Compute the hash code from the untagged key. This must be kept in sync with
492 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
493 // code-stubs-hydrogen.cc
494 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
495 // First of all we assign the hash seed to scratch.
496 LoadRoot(scratch, Heap::kHashSeedRootIndex);
499 // Xor original key with a seed.
500 xor_(reg0, reg0, scratch);
502 // Compute the hash code from the untagged key. This must be kept in sync
503 // with ComputeIntegerHash in utils.h.
505 // hash = ~hash + (hash << 15);
506 // The algorithm uses 32-bit integer values.
507 nor(scratch, reg0, zero_reg);
509 addu(reg0, scratch, at);
511 // hash = hash ^ (hash >> 12);
513 xor_(reg0, reg0, at);
515 // hash = hash + (hash << 2);
517 addu(reg0, reg0, at);
519 // hash = hash ^ (hash >> 4);
521 xor_(reg0, reg0, at);
523 // hash = hash * 2057;
524 sll(scratch, reg0, 11);
526 addu(reg0, reg0, at);
527 addu(reg0, reg0, scratch);
529 // hash = hash ^ (hash >> 16);
531 xor_(reg0, reg0, at);
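// C equivalent of the hash sequence above (illustrative; 32-bit arithmetic):
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;  // i.e. hash + (hash << 3) + (hash << 11)
//   hash = hash ^ (hash >> 16);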
535 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
544 // elements - holds the slow-case elements of the receiver on entry.
545 // Unchanged unless 'result' is the same register.
547 // key - holds the smi key on entry.
548 // Unchanged unless 'result' is the same register.
551 // result - holds the result on exit if the load succeeded.
552 // Allowed to be the same as 'key' or 'elements'.
553 // Unchanged on bailout so 'key' or 'result' can be used
554 // in further computation.
556 // Scratch registers:
558 // reg0 - holds the untagged key on entry and holds the hash once computed.
560 // reg1 - Used to hold the capacity mask of the dictionary.
562 // reg2 - Used for the index into the dictionary.
563 // at - Temporary (avoid MacroAssembler instructions also using 'at').
566 GetNumberHash(reg0, reg1);
568 // Compute the capacity mask.
569 ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
570 SmiUntag(reg1, reg1);
571 Dsubu(reg1, reg1, Operand(1));
573 // Generate an unrolled loop that performs a few probes before giving up.
574 for (int i = 0; i < kNumberDictionaryProbes; i++) {
575 // Use reg2 for index calculations and keep the hash intact in reg0.
577 // Compute the masked index: (hash + i + i * i) & mask.
579 Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
581 and_(reg2, reg2, reg1);
583 // Scale the index by multiplying by the element size.
584 DCHECK(SeededNumberDictionary::kEntrySize == 3);
585 dsll(at, reg2, 1); // 2x.
586 daddu(reg2, reg2, at); // reg2 = reg2 * 3.
588 // Check if the key is identical to the name.
589 dsll(at, reg2, kPointerSizeLog2);
590 daddu(reg2, elements, at);
592 ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
593 if (i != kNumberDictionaryProbes - 1) {
594 Branch(&done, eq, key, Operand(at));
596 Branch(miss, ne, key, Operand(at));
601 // Check that the value is a field property.
602 // reg2: elements + (index * kPointerSize).
603 const int kDetailsOffset =
604 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
605 ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
607 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
608 Branch(miss, ne, at, Operand(zero_reg));
610 // Get the value at the masked, scaled index and return.
611 const int kValueOffset =
612 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
613 ld(result, FieldMemOperand(reg2, kValueOffset));
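// Dictionary entry layout assumed above (kEntrySize == 3, see the DCHECK):
//   entry + 0 * kPointerSize : key
//   entry + 1 * kPointerSize : value    (kValueOffset)
//   entry + 2 * kPointerSize : details  (kDetailsOffset)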
617 // ---------------------------------------------------------------------------
618 // Instruction macros.
620 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
622 addu(rd, rs, rt.rm());
624 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
625 addiu(rd, rs, rt.imm64_);
627 // li handles the relocation.
636 void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
638 daddu(rd, rs, rt.rm());
640 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
641 daddiu(rd, rs, rt.imm64_);
643 // li handles the relocation.
652 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
654 subu(rd, rs, rt.rm());
656 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
657 addiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm).
659 // li handles the relocation.
668 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
670 dsubu(rd, rs, rt.rm());
672 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
673 daddiu(rd, rs, -rt.imm64_); // No dsubiu instr, use daddiu(x, y, -imm).
675 // li handles the relocation.
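// All of the two-operand arithmetic macros here follow the same shape
// (sketch of the pattern above and below):
//   if (rt.is_reg())                      use the register-register form;
//   else if (immediate fits && no reloc)  use the immediate form;
//   else                                  li(at, rt) and use 'at' as rt.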
684 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
686 mul(rd, rs, rt.rm());
688 // li handles the relocation.
696 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
698 if (kArchVariant != kMips64r6) {
702 muh(rd, rs, rt.rm());
705 // li handles the relocation.
708 if (kArchVariant != kMips64r6) {
718 void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
720 if (kArchVariant != kMips64r6) {
724 muhu(rd, rs, rt.rm());
727 // li handles the relocation.
730 if (kArchVariant != kMips64r6) {
740 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
742 if (kArchVariant == kMips64r6) {
743 dmul(rd, rs, rt.rm());
749 // li handles the relocation.
752 if (kArchVariant == kMips64r6) {
762 void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
764 if (kArchVariant == kMips64r6) {
765 dmuh(rd, rs, rt.rm());
771 // li handles the relocation.
774 if (kArchVariant == kMips64r6) {
784 void MacroAssembler::Mult(Register rs, const Operand& rt) {
788 // li handles the relocation.
796 void MacroAssembler::Dmult(Register rs, const Operand& rt) {
800 // li handles the relocation.
808 void MacroAssembler::Multu(Register rs, const Operand& rt) {
812 // li handles the relocation.
820 void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
824 // li handles the relocation.
832 void MacroAssembler::Div(Register rs, const Operand& rt) {
836 // li handles the relocation.
844 void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
846 if (kArchVariant != kMips64r6) {
850 div(res, rs, rt.rm());
853 // li handles the relocation.
856 if (kArchVariant != kMips64r6) {
866 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
868 if (kArchVariant != kMips64r6) {
872 mod(rd, rs, rt.rm());
875 // li handles the relocation.
878 if (kArchVariant != kMips64r6) {
888 void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
890 if (kArchVariant != kMips64r6) {
894 modu(rd, rs, rt.rm());
897 // li handles the relocation.
900 if (kArchVariant != kMips64r6) {
910 void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
914 // li handles the relocation.
922 void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
923 if (kArchVariant != kMips64r6) {
928 // li handles the relocation.
936 ddiv(rd, rs, rt.rm());
938 // li handles the relocation.
947 void MacroAssembler::Divu(Register rs, const Operand& rt) {
951 // li handles the relocation.
959 void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
961 if (kArchVariant != kMips64r6) {
965 divu(res, rs, rt.rm());
968 // li handles the relocation.
971 if (kArchVariant != kMips64r6) {
981 void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
985 // li handles the relocation.
993 void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
995 if (kArchVariant != kMips64r6) {
999 ddivu(res, rs, rt.rm());
1002 // li handles the relocation.
1005 if (kArchVariant != kMips64r6) {
1015 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
1016 if (kArchVariant != kMips64r6) {
1021 // li handles the relocation.
1029 dmod(rd, rs, rt.rm());
1031 // li handles the relocation.
1040 void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
1041 if (kArchVariant != kMips64r6) {
1046 // li handles the relocation.
1054 dmodu(rd, rs, rt.rm());
1056 // li handles the relocation.
1065 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
1067 and_(rd, rs, rt.rm());
1069 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1070 andi(rd, rs, rt.imm64_);
1072 // li handles the relocation.
1081 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
1083 or_(rd, rs, rt.rm());
1085 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1086 ori(rd, rs, rt.imm64_);
1088 // li handles the relocation.
1097 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
1099 xor_(rd, rs, rt.rm());
1101 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1102 xori(rd, rs, rt.imm64_);
1104 // li handles the relocation.
1113 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
1115 nor(rd, rs, rt.rm());
1117 // li handles the relocation.
1125 void MacroAssembler::Neg(Register rs, const Operand& rt) {
1126 DCHECK(rt.is_reg());
1128 DCHECK(!at.is(rt.rm()));
1130 xor_(rs, rt.rm(), at);
1134 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
1136 slt(rd, rs, rt.rm());
1138 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1139 slti(rd, rs, rt.imm64_);
1141 // li handles the relocation.
1150 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1152 sltu(rd, rs, rt.rm());
1154 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1155 sltiu(rd, rs, rt.imm64_);
1157 // li handles the relocation.
1166 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1168 rotrv(rd, rs, rt.rm());
1170 rotr(rd, rs, rt.imm64_);
1175 void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
1177 drotrv(rd, rs, rt.rm());
1179 drotr(rd, rs, rt.imm64_);
1184 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1189 // ------------Pseudo-instructions-------------
1191 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1193 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1197 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1199 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1203 // Do a 64-bit load from an unaligned address. Note this only handles the
1204 // specific case of an address that is 32-bit aligned but not 64-bit aligned.
1205 void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
1206 // Asserts (fails) if the offset from the start of the object IS actually
1207 // aligned. ONLY use with known misalignment, since there is a performance cost.
1208 DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1209 // TODO(plind): endian dependency.
1211 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1212 dsll32(scratch, scratch, 0);
1213 Daddu(rd, rd, scratch);
1217 // Do a 64-bit store to an unaligned address. Note this only handles the
1218 // specific case of an address that is 32-bit aligned but not 64-bit aligned.
1219 void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
1220 // Asserts (fails) if the offset from the start of the object IS actually
1221 // aligned. ONLY use with known misalignment, since there is a performance cost.
1222 DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1223 // TODO(plind): endian dependency.
1225 dsrl32(scratch, rd, 0);
1226 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
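// Illustrative pairing of the unaligned accessors (kOddOffset is a
// hypothetical offset that is 32-bit but not 64-bit aligned):
//   Uld(t0, MemOperand(a0, kOddOffset), t1);  // unaligned 64-bit load
//   Usd(t0, MemOperand(a0, kOddOffset), t1);  // unaligned 64-bit store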
1230 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1231 AllowDeferredHandleDereference smi_check;
1232 if (value->IsSmi()) {
1233 li(dst, Operand(value), mode);
1235 DCHECK(value->IsHeapObject());
1236 if (isolate()->heap()->InNewSpace(*value)) {
1237 Handle<Cell> cell = isolate()->factory()->NewCell(value);
1238 li(dst, Operand(cell));
1239 ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
1241 li(dst, Operand(value));
1247 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1248 DCHECK(!j.is_reg());
1249 BlockTrampolinePoolScope block_trampoline_pool(this);
1250 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1251 // Normal load of an immediate value which does not need Relocation Info.
1252 if (is_int32(j.imm64_)) {
1253 if (is_int16(j.imm64_)) {
1254 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
1255 } else if (!(j.imm64_ & kHiMask)) {
1256 ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
1257 } else if (!(j.imm64_ & kImm16Mask)) {
1258 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1260 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1261 ori(rd, rd, (j.imm64_ & kImm16Mask));
1264 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1265 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1267 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1269 ori(rd, rd, j.imm64_ & kImm16Mask);
1271 } else if (MustUseReg(j.rmode_)) {
1272 RecordRelocInfo(j.rmode_, j.imm64_);
1273 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1274 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1276 ori(rd, rd, j.imm64_ & kImm16Mask);
1277 } else if (mode == ADDRESS_LOAD) {
1278 // We always need the same number of instructions, as we may need to patch
1279 // this code later to load another value, which may need all 4 instructions.
1280 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1281 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1283 ori(rd, rd, j.imm64_ & kImm16Mask);
1285 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1286 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1288 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1290 ori(rd, rd, j.imm64_ & kImm16Mask);
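// Example: with OPTIMIZE_SIZE and no relocation, li(t9, 0x12345678) takes
// the is_int32 path above and emits just two instructions:
//   lui(t9, 0x1234);
//   ori(t9, t9, 0x5678);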
1295 void MacroAssembler::MultiPush(RegList regs) {
1296 int16_t num_to_push = NumberOfBitsSet(regs);
1297 int16_t stack_offset = num_to_push * kPointerSize;
1299 Dsubu(sp, sp, Operand(stack_offset));
1300 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1301 if ((regs & (1 << i)) != 0) {
1302 stack_offset -= kPointerSize;
1303 sd(ToRegister(i), MemOperand(sp, stack_offset));
1309 void MacroAssembler::MultiPushReversed(RegList regs) {
1310 int16_t num_to_push = NumberOfBitsSet(regs);
1311 int16_t stack_offset = num_to_push * kPointerSize;
1313 Dsubu(sp, sp, Operand(stack_offset));
1314 for (int16_t i = 0; i < kNumRegisters; i++) {
1315 if ((regs & (1 << i)) != 0) {
1316 stack_offset -= kPointerSize;
1317 sd(ToRegister(i), MemOperand(sp, stack_offset));
1323 void MacroAssembler::MultiPop(RegList regs) {
1324 int16_t stack_offset = 0;
1326 for (int16_t i = 0; i < kNumRegisters; i++) {
1327 if ((regs & (1 << i)) != 0) {
1328 ld(ToRegister(i), MemOperand(sp, stack_offset));
1329 stack_offset += kPointerSize;
1332 daddiu(sp, sp, stack_offset);
1336 void MacroAssembler::MultiPopReversed(RegList regs) {
1337 int16_t stack_offset = 0;
1339 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1340 if ((regs & (1 << i)) != 0) {
1341 ld(ToRegister(i), MemOperand(sp, stack_offset));
1342 stack_offset += kPointerSize;
1345 daddiu(sp, sp, stack_offset);
1349 void MacroAssembler::MultiPushFPU(RegList regs) {
1350 int16_t num_to_push = NumberOfBitsSet(regs);
1351 int16_t stack_offset = num_to_push * kDoubleSize;
1353 Dsubu(sp, sp, Operand(stack_offset));
1354 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1355 if ((regs & (1 << i)) != 0) {
1356 stack_offset -= kDoubleSize;
1357 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1363 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1364 int16_t num_to_push = NumberOfBitsSet(regs);
1365 int16_t stack_offset = num_to_push * kDoubleSize;
1367 Dsubu(sp, sp, Operand(stack_offset));
1368 for (int16_t i = 0; i < kNumRegisters; i++) {
1369 if ((regs & (1 << i)) != 0) {
1370 stack_offset -= kDoubleSize;
1371 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1377 void MacroAssembler::MultiPopFPU(RegList regs) {
1378 int16_t stack_offset = 0;
1380 for (int16_t i = 0; i < kNumRegisters; i++) {
1381 if ((regs & (1 << i)) != 0) {
1382 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1383 stack_offset += kDoubleSize;
1386 daddiu(sp, sp, stack_offset);
1390 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1391 int16_t stack_offset = 0;
1393 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1394 if ((regs & (1 << i)) != 0) {
1395 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1396 stack_offset += kDoubleSize;
1399 daddiu(sp, sp, stack_offset);
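// Pairing note: MultiPush stores descending register codes toward sp and
// MultiPop reloads ascending codes, so they are exact inverses; the
// *Reversed and FPU variants pair up the same way.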
1403 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
1404 RegList saved_regs = kJSCallerSaved | ra.bit();
1405 MultiPush(saved_regs);
1406 AllowExternalCallThatCantCauseGC scope(this);
1408 // Save the address to a0, in case address == a4.
1410 PrepareCallCFunction(2, a4);
1412 li(a1, instructions * kInstrSize);
1413 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
1414 MultiPop(saved_regs);
1418 void MacroAssembler::Ext(Register rt,
1423 DCHECK(pos + size < 33);
1424 ext_(rt, rs, pos, size);
1428 void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
1431 DCHECK(pos + size < 33);
1432 dext_(rt, rs, pos, size);
1436 void MacroAssembler::Ins(Register rt,
1441 DCHECK(pos + size <= 32);
1443 ins_(rt, rs, pos, size);
1447 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1449 FPURegister scratch) {
1450 // Move the data from fs to t8.
1452 Cvt_d_uw(fd, t8, scratch);
1456 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1458 FPURegister scratch) {
1459 // Convert rs to a FP value in fd (and fd + 1).
1460 // We do this by converting rs minus the MSB to avoid sign conversion,
1461 // then adding 2^31 to the result (if needed).
1463 DCHECK(!fd.is(scratch));
1467 // Save rs's MSB to t9.
1471 // Move the result to fd.
1473 mthc1(zero_reg, fd);
1475 // Convert fd to a real FP value.
1478 Label conversion_done;
1480 // If rs's MSB was 0, we are done.
1481 // Otherwise we need to add 2^31 to the FP register.
1482 Branch(&conversion_done, eq, t9, Operand(zero_reg));
1484 // Load 2^31 into scratch as its float representation.
1486 mtc1(zero_reg, scratch);
1489 add_d(fd, fd, scratch);
1491 bind(&conversion_done);
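// Worked example: rs = 0x80000003. The MSB is set, so 0x00000003 is converted
// to 3.0 and 2^31 is added, giving 2147483651.0, the correct unsigned
// interpretation of rs.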
1495 void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
1500 void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
1505 void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
1510 void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
1515 void MacroAssembler::Trunc_l_ud(FPURegister fd,
1517 FPURegister scratch) {
1521 li(at, 0x7fffffffffffffff);
1528 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1530 FPURegister scratch) {
1531 Trunc_uw_d(fs, t8, scratch);
1536 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1541 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1546 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1551 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1556 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1558 FPURegister scratch) {
1559 DCHECK(!fd.is(scratch));
1562 // Load 2^31 into scratch as its float representation.
1564 mtc1(zero_reg, scratch);
1566 // Test if scratch > fd.
1567 // If fd < 2^31 we can convert it normally.
1568 Label simple_convert;
1569 BranchF(&simple_convert, NULL, lt, fd, scratch);
1571 // First we subtract 2^31 from fd, then trunc it to rs
1572 // and add 2^31 to rs.
1573 sub_d(scratch, fd, scratch);
1574 trunc_w_d(scratch, scratch);
1576 Or(rs, rs, 1 << 31);
1580 // Simple conversion.
1581 bind(&simple_convert);
1582 trunc_w_d(scratch, fd);
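// Worked example: fd = 3000000000.0 (>= 2^31). The slow path computes
// trunc(fd - 2^31) = 852516352 and ORs in bit 31, producing the unsigned
// result 3000000000.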
1589 void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
1590 FPURegister ft, FPURegister scratch) {
1591 if (0) { // TODO(plind): find reasonable arch-variant symbol names.
1592 madd_d(fd, fr, fs, ft);
1594 // Must not change the source registers' values.
1595 DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
1596 mul_d(scratch, fs, ft);
1597 add_d(fd, fr, scratch);
1602 void MacroAssembler::BranchF(Label* target,
1607 BranchDelaySlot bd) {
1608 BlockTrampolinePoolScope block_trampoline_pool(this);
1614 DCHECK(nan || target);
1615 // Check for unordered (NaN) cases.
1617 if (kArchVariant != kMips64r6) {
1618 c(UN, D, cmp1, cmp2);
1621 // Use f31 for the comparison result. It has to be unavailable to the
1622 // Lithium register allocator.
1623 DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
1624 cmp(UN, L, f31, cmp1, cmp2);
1629 if (kArchVariant != kMips64r6) {
1631 // Here NaN cases were either handled by this function or are assumed to
1632 // have been handled by the caller.
1635 c(OLT, D, cmp1, cmp2);
1639 c(ULE, D, cmp1, cmp2);
1643 c(ULT, D, cmp1, cmp2);
1647 c(OLE, D, cmp1, cmp2);
1651 c(EQ, D, cmp1, cmp2);
1655 c(UEQ, D, cmp1, cmp2);
1659 c(EQ, D, cmp1, cmp2);
1663 c(UEQ, D, cmp1, cmp2);
1672 // Here NaN cases were either handled by this function or are assumed to
1673 // have been handled by the caller.
1674 // Unsigned conditions are treated as their signed counterparts.
1675 // Use f31 for the comparison result; it is valid in fp64 (FR = 1) mode.
1676 DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
1679 cmp(OLT, L, f31, cmp1, cmp2);
1680 bc1nez(target, f31);
1683 cmp(ULE, L, f31, cmp1, cmp2);
1684 bc1eqz(target, f31);
1687 cmp(ULT, L, f31, cmp1, cmp2);
1688 bc1eqz(target, f31);
1691 cmp(OLE, L, f31, cmp1, cmp2);
1692 bc1nez(target, f31);
1695 cmp(EQ, L, f31, cmp1, cmp2);
1696 bc1nez(target, f31);
1699 cmp(UEQ, L, f31, cmp1, cmp2);
1700 bc1nez(target, f31);
1703 cmp(EQ, L, f31, cmp1, cmp2);
1704 bc1eqz(target, f31);
1707 cmp(UEQ, L, f31, cmp1, cmp2);
1708 bc1eqz(target, f31);
1716 if (bd == PROTECT) {
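// Illustrative use of BranchF (matching the call in Trunc_uw_d above):
//   BranchF(&target, &is_nan, lt, f2, f4);  // branch if f2 < f4; NaN -> is_nan
// Passing NULL for either label is allowed, as long as at least one is set
// (see the DCHECK(nan || target) above).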
1722 void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
1723 DCHECK(!src_low.is(at));
1730 void MacroAssembler::Move(FPURegister dst, float imm) {
1731 li(at, Operand(bit_cast<int32_t>(imm)));
1736 void MacroAssembler::Move(FPURegister dst, double imm) {
1737 static const DoubleRepresentation minus_zero(-0.0);
1738 static const DoubleRepresentation zero(0.0);
1739 DoubleRepresentation value_rep(imm);
1740 // Handle special values first.
1741 if (value_rep == zero && has_double_zero_reg_set_) {
1742 mov_d(dst, kDoubleRegZero);
1743 } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
1744 neg_d(dst, kDoubleRegZero);
1747 DoubleAsTwoUInt32(imm, &lo, &hi);
1748 // Move the low part of the double into the lower bits of the corresponding
1751 li(at, Operand(lo));
1754 mtc1(zero_reg, dst);
1756 // Move the high part of the double into the high bits of the corresponding
1759 li(at, Operand(hi));
1762 mthc1(zero_reg, dst);
1764 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
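// Note on the caching above: once kDoubleRegZero has been materialized, later
// Move(dst, 0.0) / Move(dst, -0.0) reuse it via mov_d / neg_d instead of
// reloading the bit pattern.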
1769 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1770 if (kArchVariant == kMips64r6) {
1772 Branch(&done, ne, rt, Operand(zero_reg));
1781 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1782 if (kArchVariant == kMips64r6) {
1784 Branch(&done, eq, rt, Operand(zero_reg));
1793 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1798 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1803 void MacroAssembler::Clz(Register rd, Register rs) {
1808 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1810 DoubleRegister double_input,
1812 DoubleRegister double_scratch,
1813 Register except_flag,
1814 CheckForInexactConversion check_inexact) {
1815 DCHECK(!result.is(scratch));
1816 DCHECK(!double_input.is(double_scratch));
1817 DCHECK(!except_flag.is(scratch));
1821 // Clear the except flag (0 = no exception)
1822 mov(except_flag, zero_reg);
1824 // Test for values that can be exactly represented as a signed 32-bit integer.
1825 cvt_w_d(double_scratch, double_input);
1826 mfc1(result, double_scratch);
1827 cvt_d_w(double_scratch, double_scratch);
1828 BranchF(&done, NULL, eq, double_input, double_scratch);
1830 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1832 if (check_inexact == kDontCheckForInexactConversion) {
1833 // Ignore inexact exceptions.
1834 except_mask &= ~kFCSRInexactFlagMask;
1838 cfc1(scratch, FCSR);
1839 // Disable FPU exceptions.
1840 ctc1(zero_reg, FCSR);
1842 // Do operation based on rounding mode.
1843 switch (rounding_mode) {
1844 case kRoundToNearest:
1845 Round_w_d(double_scratch, double_input);
1848 Trunc_w_d(double_scratch, double_input);
1850 case kRoundToPlusInf:
1851 Ceil_w_d(double_scratch, double_input);
1853 case kRoundToMinusInf:
1854 Floor_w_d(double_scratch, double_input);
1856 } // End of switch-statement.
1859 cfc1(except_flag, FCSR);
1861 ctc1(scratch, FCSR);
1862 // Move the converted value into the result register.
1863 mfc1(result, double_scratch);
1865 // Check for fpu exceptions.
1866 And(except_flag, except_flag, Operand(except_mask));
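// Typical use of EmitFPUTruncate (an illustrative sketch; kRoundToZero is the
// truncating mode handled by the elided switch case above):
//   EmitFPUTruncate(kRoundToZero, result, double_input, scratch,
//                   double_scratch, except_flag);
//   Branch(&bail_out, ne, except_flag, Operand(zero_reg));  // any exception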
1872 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1873 DoubleRegister double_input,
1875 DoubleRegister single_scratch = kLithiumScratchDouble.low();
1876 Register scratch = at;
1877 Register scratch2 = t9;
1879 // Clear cumulative exception flags and save the FCSR.
1880 cfc1(scratch2, FCSR);
1881 ctc1(zero_reg, FCSR);
1882 // Try a conversion to a signed integer.
1883 trunc_w_d(single_scratch, double_input);
1884 mfc1(result, single_scratch);
1885 // Retrieve and restore the FCSR.
1886 cfc1(scratch, FCSR);
1887 ctc1(scratch2, FCSR);
1888 // Check for overflow and NaNs.
1891 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1892 // If we had no exceptions we are done.
1893 Branch(done, eq, scratch, Operand(zero_reg));
1897 void MacroAssembler::TruncateDoubleToI(Register result,
1898 DoubleRegister double_input) {
1901 TryInlineTruncateDoubleToI(result, double_input, &done);
1903 // If we fell through, the inline version didn't succeed; call the stub instead.
1905 Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
1906 sdc1(double_input, MemOperand(sp, 0));
1908 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
1911 Daddu(sp, sp, Operand(kDoubleSize));
1918 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1920 DoubleRegister double_scratch = f12;
1921 DCHECK(!result.is(object));
1923 ldc1(double_scratch,
1924 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1925 TryInlineTruncateDoubleToI(result, double_scratch, &done);
1927 // If we fell through, the inline version didn't succeed; call the stub instead.
1929 DoubleToIStub stub(isolate(),
1932 HeapNumber::kValueOffset - kHeapObjectTag,
1942 void MacroAssembler::TruncateNumberToI(Register object,
1944 Register heap_number_map,
1946 Label* not_number) {
1948 DCHECK(!result.is(object));
1950 UntagAndJumpIfSmi(result, object, &done);
1951 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1952 TruncateHeapNumberToI(result, object);
1958 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1960 int num_least_bits) {
1961 // Ext(dst, src, kSmiTagSize, num_least_bits);
1963 And(dst, dst, Operand((1 << num_least_bits) - 1));
1967 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1969 int num_least_bits) {
1970 DCHECK(!src.is(dst));
1971 And(dst, src, Operand((1 << num_least_bits) - 1));
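// e.g. GetLeastBitsFromInt32(t0, t1, 2) emits t0 = t1 & 0x3, keeping the two
// least-significant bits.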
1975 // Emulated conditional branches do not emit a nop in the branch delay slot.
1977 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1978 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
1979 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1980 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
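// In words: an unconditional branch (cc_always) must pass the dummy
// zero_reg/zero_reg operands, while a real condition must supply at least one
// operand that is not zero_reg.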
1983 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1984 BranchShort(offset, bdslot);
1988 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1990 BranchDelaySlot bdslot) {
1991 BranchShort(offset, cond, rs, rt, bdslot);
1995 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1996 if (L->is_bound()) {
1998 BranchShort(L, bdslot);
2003 if (is_trampoline_emitted()) {
2006 BranchShort(L, bdslot);
2012 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
2014 BranchDelaySlot bdslot) {
2015 if (L->is_bound()) {
2017 BranchShort(L, cond, rs, rt, bdslot);
2019 if (cond != cc_always) {
2021 Condition neg_cond = NegateCondition(cond);
2022 BranchShort(&skip, neg_cond, rs, rt);
2030 if (is_trampoline_emitted()) {
2031 if (cond != cc_always) {
2033 Condition neg_cond = NegateCondition(cond);
2034 BranchShort(&skip, neg_cond, rs, rt);
2041 BranchShort(L, cond, rs, rt, bdslot);
2047 void MacroAssembler::Branch(Label* L,
2050 Heap::RootListIndex index,
2051 BranchDelaySlot bdslot) {
2052 LoadRoot(at, index);
2053 Branch(L, cond, rs, Operand(at), bdslot);
2057 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
2060 // Emit a nop in the branch delay slot if required.
2061 if (bdslot == PROTECT)
2066 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
2068 BranchDelaySlot bdslot) {
2069 BRANCH_ARGS_CHECK(cond, rs, rt);
2070 DCHECK(!rs.is(zero_reg));
2071 Register r2 = no_reg;
2072 Register scratch = at;
2075 // NOTE: 'at' can be clobbered by Branch, but it is legal to use it as rs or rt.
2077 BlockTrampolinePoolScope block_trampoline_pool(this);
2084 beq(rs, r2, offset);
2087 bne(rs, r2, offset);
2089 // Signed comparison.
2091 if (r2.is(zero_reg)) {
2094 slt(scratch, r2, rs);
2095 bne(scratch, zero_reg, offset);
2099 if (r2.is(zero_reg)) {
2102 slt(scratch, rs, r2);
2103 beq(scratch, zero_reg, offset);
2107 if (r2.is(zero_reg)) {
2110 slt(scratch, rs, r2);
2111 bne(scratch, zero_reg, offset);
2115 if (r2.is(zero_reg)) {
2118 slt(scratch, r2, rs);
2119 beq(scratch, zero_reg, offset);
2122 // Unsigned comparison.
2124 if (r2.is(zero_reg)) {
2125 bne(rs, zero_reg, offset);
2127 sltu(scratch, r2, rs);
2128 bne(scratch, zero_reg, offset);
2131 case Ugreater_equal:
2132 if (r2.is(zero_reg)) {
2135 sltu(scratch, rs, r2);
2136 beq(scratch, zero_reg, offset);
2140 if (r2.is(zero_reg)) {
2141 // No code needs to be emitted.
2144 sltu(scratch, rs, r2);
2145 bne(scratch, zero_reg, offset);
2149 if (r2.is(zero_reg)) {
2150 beq(rs, zero_reg, offset);
2152 sltu(scratch, r2, rs);
2153 beq(scratch, zero_reg, offset);
2160 // Be careful to always use shifted_branch_offset only just before the branch
2161 // instruction, as the location will be remembered for patching the target.
2163 BlockTrampolinePoolScope block_trampoline_pool(this);
2169 if (rt.imm64_ == 0) {
2170 beq(rs, zero_reg, offset);
2172 // We don't want any other register but scratch clobbered.
2173 DCHECK(!scratch.is(rs));
2176 beq(rs, r2, offset);
2180 if (rt.imm64_ == 0) {
2181 bne(rs, zero_reg, offset);
2183 // We don't want any other register but scratch clobbered.
2184 DCHECK(!scratch.is(rs));
2187 bne(rs, r2, offset);
2190 // Signed comparison.
2192 if (rt.imm64_ == 0) {
2197 slt(scratch, r2, rs);
2198 bne(scratch, zero_reg, offset);
2202 if (rt.imm64_ == 0) {
2204 } else if (is_int16(rt.imm64_)) {
2205 slti(scratch, rs, rt.imm64_);
2206 beq(scratch, zero_reg, offset);
2210 slt(scratch, rs, r2);
2211 beq(scratch, zero_reg, offset);
2215 if (rt.imm64_ == 0) {
2217 } else if (is_int16(rt.imm64_)) {
2218 slti(scratch, rs, rt.imm64_);
2219 bne(scratch, zero_reg, offset);
2223 slt(scratch, rs, r2);
2224 bne(scratch, zero_reg, offset);
2228 if (rt.imm64_ == 0) {
2233 slt(scratch, r2, rs);
2234 beq(scratch, zero_reg, offset);
2237 // Unsigned comparison.
2239 if (rt.imm64_ == 0) {
2240 bne(rs, zero_reg, offset);
2244 sltu(scratch, r2, rs);
2245 bne(scratch, zero_reg, offset);
2248 case Ugreater_equal:
2249 if (rt.imm64_ == 0) {
2251 } else if (is_int16(rt.imm64_)) {
2252 sltiu(scratch, rs, rt.imm64_);
2253 beq(scratch, zero_reg, offset);
2257 sltu(scratch, rs, r2);
2258 beq(scratch, zero_reg, offset);
2262 if (rt.imm64_ == 0) {
2263 // No code needs to be emitted.
2265 } else if (is_int16(rt.imm64_)) {
2266 sltiu(scratch, rs, rt.imm64_);
2267 bne(scratch, zero_reg, offset);
2271 sltu(scratch, rs, r2);
2272 bne(scratch, zero_reg, offset);
2276 if (rt.imm64_ == 0) {
2277 beq(rs, zero_reg, offset);
2281 sltu(scratch, r2, rs);
2282 beq(scratch, zero_reg, offset);
2289 // Emit a nop in the branch delay slot if required.
2290 if (bdslot == PROTECT)
2295 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2296 // We use branch_offset as an argument to the branch instructions so that it
2297 // is computed just before the branch instruction is generated, as needed.
2299 b(shifted_branch_offset(L, false));
2301 // Emit a nop in the branch delay slot if required.
2302 if (bdslot == PROTECT)
2307 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
2309 BranchDelaySlot bdslot) {
2310 BRANCH_ARGS_CHECK(cond, rs, rt);
2313 Register r2 = no_reg;
2314 Register scratch = at;
2316 BlockTrampolinePoolScope block_trampoline_pool(this);
2318 // Be careful to always use shifted_branch_offset only just before the branch
2319 // instruction, as the location will be remembered for patching the target.
2323 offset = shifted_branch_offset(L, false);
2327 offset = shifted_branch_offset(L, false);
2328 beq(rs, r2, offset);
2331 offset = shifted_branch_offset(L, false);
2332 bne(rs, r2, offset);
2334 // Signed comparison.
2336 if (r2.is(zero_reg)) {
2337 offset = shifted_branch_offset(L, false);
2340 slt(scratch, r2, rs);
2341 offset = shifted_branch_offset(L, false);
2342 bne(scratch, zero_reg, offset);
2346 if (r2.is(zero_reg)) {
2347 offset = shifted_branch_offset(L, false);
2350 slt(scratch, rs, r2);
2351 offset = shifted_branch_offset(L, false);
2352 beq(scratch, zero_reg, offset);
2356 if (r2.is(zero_reg)) {
2357 offset = shifted_branch_offset(L, false);
2360 slt(scratch, rs, r2);
2361 offset = shifted_branch_offset(L, false);
2362 bne(scratch, zero_reg, offset);
2366 if (r2.is(zero_reg)) {
2367 offset = shifted_branch_offset(L, false);
2370 slt(scratch, r2, rs);
2371 offset = shifted_branch_offset(L, false);
2372 beq(scratch, zero_reg, offset);
2375 // Unsigned comparison.
2377 if (r2.is(zero_reg)) {
2378 offset = shifted_branch_offset(L, false);
2379 bne(rs, zero_reg, offset);
2381 sltu(scratch, r2, rs);
2382 offset = shifted_branch_offset(L, false);
2383 bne(scratch, zero_reg, offset);
2386 case Ugreater_equal:
2387 if (r2.is(zero_reg)) {
2388 offset = shifted_branch_offset(L, false);
2391 sltu(scratch, rs, r2);
2392 offset = shifted_branch_offset(L, false);
2393 beq(scratch, zero_reg, offset);
2397 if (r2.is(zero_reg)) {
2398 // No code needs to be emitted.
2401 sltu(scratch, rs, r2);
2402 offset = shifted_branch_offset(L, false);
2403 bne(scratch, zero_reg, offset);
2407 if (r2.is(zero_reg)) {
2408 offset = shifted_branch_offset(L, false);
2409 beq(rs, zero_reg, offset);
2411 sltu(scratch, r2, rs);
2412 offset = shifted_branch_offset(L, false);
2413 beq(scratch, zero_reg, offset);
2420 // Be careful to always use shifted_branch_offset only just before the branch
2421 // instruction, as the location will be remembered for patching the target.
2423 BlockTrampolinePoolScope block_trampoline_pool(this);
2426 offset = shifted_branch_offset(L, false);
2430 if (rt.imm64_ == 0) {
2431 offset = shifted_branch_offset(L, false);
2432 beq(rs, zero_reg, offset);
2434 DCHECK(!scratch.is(rs));
2437 offset = shifted_branch_offset(L, false);
2438 beq(rs, r2, offset);
2442 if (rt.imm64_ == 0) {
2443 offset = shifted_branch_offset(L, false);
2444 bne(rs, zero_reg, offset);
2446 DCHECK(!scratch.is(rs));
2449 offset = shifted_branch_offset(L, false);
2450 bne(rs, r2, offset);
2453 // Signed comparison.
2455 if (rt.imm64_ == 0) {
2456 offset = shifted_branch_offset(L, false);
2459 DCHECK(!scratch.is(rs));
2462 slt(scratch, r2, rs);
2463 offset = shifted_branch_offset(L, false);
2464 bne(scratch, zero_reg, offset);
2468 if (rt.imm64_ == 0) {
2469 offset = shifted_branch_offset(L, false);
2471 } else if (is_int16(rt.imm64_)) {
2472 slti(scratch, rs, rt.imm64_);
2473 offset = shifted_branch_offset(L, false);
2474 beq(scratch, zero_reg, offset);
2476 DCHECK(!scratch.is(rs));
2479 slt(scratch, rs, r2);
2480 offset = shifted_branch_offset(L, false);
2481 beq(scratch, zero_reg, offset);
2485 if (rt.imm64_ == 0) {
2486 offset = shifted_branch_offset(L, false);
2488 } else if (is_int16(rt.imm64_)) {
2489 slti(scratch, rs, rt.imm64_);
2490 offset = shifted_branch_offset(L, false);
2491 bne(scratch, zero_reg, offset);
2493 DCHECK(!scratch.is(rs));
2496 slt(scratch, rs, r2);
2497 offset = shifted_branch_offset(L, false);
2498 bne(scratch, zero_reg, offset);
2502 if (rt.imm64_ == 0) {
2503 offset = shifted_branch_offset(L, false);
2506 DCHECK(!scratch.is(rs));
2509 slt(scratch, r2, rs);
2510 offset = shifted_branch_offset(L, false);
2511 beq(scratch, zero_reg, offset);
2514 // Unsigned comparison.
2516 if (rt.imm64_ == 0) {
2517 offset = shifted_branch_offset(L, false);
2518 bne(rs, zero_reg, offset);
2520 DCHECK(!scratch.is(rs));
2523 sltu(scratch, r2, rs);
2524 offset = shifted_branch_offset(L, false);
2525 bne(scratch, zero_reg, offset);
2528 case Ugreater_equal:
2529 if (rt.imm64_ == 0) {
2530 offset = shifted_branch_offset(L, false);
2532 } else if (is_int16(rt.imm64_)) {
2533 sltiu(scratch, rs, rt.imm64_);
2534 offset = shifted_branch_offset(L, false);
2535 beq(scratch, zero_reg, offset);
2537 DCHECK(!scratch.is(rs));
2540 sltu(scratch, rs, r2);
2541 offset = shifted_branch_offset(L, false);
2542 beq(scratch, zero_reg, offset);
2546 if (rt.imm64_ == 0) {
2547 // No code needs to be emitted.
2549 } else if (is_int16(rt.imm64_)) {
2550 sltiu(scratch, rs, rt.imm64_);
2551 offset = shifted_branch_offset(L, false);
2552 bne(scratch, zero_reg, offset);
2554 DCHECK(!scratch.is(rs));
2557 sltu(scratch, rs, r2);
2558 offset = shifted_branch_offset(L, false);
2559 bne(scratch, zero_reg, offset);
2563 if (rt.imm64_ == 0) {
2564 offset = shifted_branch_offset(L, false);
2565 beq(rs, zero_reg, offset);
2567 DCHECK(!scratch.is(rs));
2570 sltu(scratch, r2, rs);
2571 offset = shifted_branch_offset(L, false);
2572 beq(scratch, zero_reg, offset);
2579 // Check that the offset actually fits in an int16_t.
2580 DCHECK(is_int16(offset));
2581 // Emit a nop in the branch delay slot if required.
2582 if (bdslot == PROTECT)
2587 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2588 BranchAndLinkShort(offset, bdslot);
2592 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2594 BranchDelaySlot bdslot) {
2595 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2599 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2600 if (L->is_bound()) {
2602 BranchAndLinkShort(L, bdslot);
2607 if (is_trampoline_emitted()) {
2610 BranchAndLinkShort(L, bdslot);
2616 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2618 BranchDelaySlot bdslot) {
2619 if (L->is_bound()) {
2621 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2624 Condition neg_cond = NegateCondition(cond);
2625 BranchShort(&skip, neg_cond, rs, rt);
2630 if (is_trampoline_emitted()) {
2632 Condition neg_cond = NegateCondition(cond);
2633 BranchShort(&skip, neg_cond, rs, rt);
2637 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2643 // We need to use a bgezal or bltzal, but they can't be used directly with the
2644 // slt instructions. We could use sub or add instead but we would miss overflow
2645 // cases, so we keep slt and add an intermediate third instruction.
2646 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2647 BranchDelaySlot bdslot) {
2650 // Emit a nop in the branch delay slot if required.
2651 if (bdslot == PROTECT)
2656 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2657 Register rs, const Operand& rt,
2658 BranchDelaySlot bdslot) {
2659 BRANCH_ARGS_CHECK(cond, rs, rt);
2660 Register r2 = no_reg;
2661 Register scratch = at;
2665 } else if (cond != cc_always) {
2671 BlockTrampolinePoolScope block_trampoline_pool(this);
2687 // Signed comparison.
2690 slt(scratch, r2, rs);
2691 beq(scratch, zero_reg, 2);
2697 slt(scratch, rs, r2);
2698 bne(scratch, zero_reg, 2);
2704 slt(scratch, rs, r2);
2705 bne(scratch, zero_reg, 2);
2711 slt(scratch, r2, rs);
2712 bne(scratch, zero_reg, 2);
2718 // Unsigned comparison.
2721 sltu(scratch, r2, rs);
2722 beq(scratch, zero_reg, 2);
2726 case Ugreater_equal:
2728 sltu(scratch, rs, r2);
2729 bne(scratch, zero_reg, 2);
2735 sltu(scratch, rs, r2);
2736 bne(scratch, zero_reg, 2);
2742 sltu(scratch, r2, rs);
2743 bne(scratch, zero_reg, 2);
2751 // Emit a nop in the branch delay slot if required.
2752 if (bdslot == PROTECT)
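// The conditional cases above all share one pattern (sketch for 'greater'):
//   slt(scratch, r2, rs);       // scratch = (rs > r2) ? 1 : 0
//   beq(scratch, zero_reg, 2);  // hop over the linking branch if it fails
// with the bal itself following in the skipped-over slot.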
2757 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2758 bal(shifted_branch_offset(L, false));
2760 // Emit a nop in the branch delay slot if required.
2761 if (bdslot == PROTECT)
2766 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2768 BranchDelaySlot bdslot) {
2769 BRANCH_ARGS_CHECK(cond, rs, rt);
2772 Register r2 = no_reg;
2773 Register scratch = at;
2776 } else if (cond != cc_always) {
2782 BlockTrampolinePoolScope block_trampoline_pool(this);
2785 offset = shifted_branch_offset(L, false);
2791 offset = shifted_branch_offset(L, false);
2797 offset = shifted_branch_offset(L, false);
2801 // Signed comparison.
2804 slt(scratch, r2, rs);
2805 beq(scratch, zero_reg, 2);
2807 offset = shifted_branch_offset(L, false);
2812 slt(scratch, rs, r2);
2813 bne(scratch, zero_reg, 2);
2815 offset = shifted_branch_offset(L, false);
2820 slt(scratch, rs, r2);
2821 bne(scratch, zero_reg, 2);
2823 offset = shifted_branch_offset(L, false);
2828 slt(scratch, r2, rs);
2829 bne(scratch, zero_reg, 2);
2831 offset = shifted_branch_offset(L, false);
2836 // Unsigned comparison.
2839 sltu(scratch, r2, rs);
2840 beq(scratch, zero_reg, 2);
2842 offset = shifted_branch_offset(L, false);
2845 case Ugreater_equal:
2847 sltu(scratch, rs, r2);
2848 bne(scratch, zero_reg, 2);
2850 offset = shifted_branch_offset(L, false);
2855 sltu(scratch, rs, r2);
2856 bne(scratch, zero_reg, 2);
2858 offset = shifted_branch_offset(L, false);
2863 sltu(scratch, r2, rs);
2864 bne(scratch, zero_reg, 2);
2866 offset = shifted_branch_offset(L, false);
2874 // Check that the offset actually fits in an int16_t.
2875 DCHECK(is_int16(offset));
2877 // Emit a nop in the branch delay slot if required.
2878 if (bdslot == PROTECT)
2883 void MacroAssembler::Jump(Register target,
2887 BranchDelaySlot bd) {
2888 BlockTrampolinePoolScope block_trampoline_pool(this);
2889 if (cond == cc_always) {
2892 BRANCH_ARGS_CHECK(cond, rs, rt);
2893 Branch(2, NegateCondition(cond), rs, rt);
2896 // Emit a nop in the branch delay slot if required.
2902 void MacroAssembler::Jump(intptr_t target,
2903 RelocInfo::Mode rmode,
2907 BranchDelaySlot bd) {
2909 if (cond != cc_always) {
2910 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2912 // The first instruction of 'li' may be placed in the delay slot.
2913 // This is not an issue; t9 is expected to be clobbered anyway.
2914 li(t9, Operand(target, rmode));
2915 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2920 void MacroAssembler::Jump(Address target,
2921 RelocInfo::Mode rmode,
2925 BranchDelaySlot bd) {
2926 DCHECK(!RelocInfo::IsCodeTarget(rmode));
2927 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2931 void MacroAssembler::Jump(Handle<Code> code,
2932 RelocInfo::Mode rmode,
2936 BranchDelaySlot bd) {
2937 DCHECK(RelocInfo::IsCodeTarget(rmode));
2938 AllowDeferredHandleDereference embedding_raw_address;
2939 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2943 int MacroAssembler::CallSize(Register target,
2947 BranchDelaySlot bd) {
2950 if (cond == cc_always) {
2959 return size * kInstrSize;
2963 // Note: To call gcc-compiled C code on mips, you must call through t9.
2964 void MacroAssembler::Call(Register target,
2968 BranchDelaySlot bd) {
2969 BlockTrampolinePoolScope block_trampoline_pool(this);
2972 if (cond == cc_always) {
2975 BRANCH_ARGS_CHECK(cond, rs, rt);
2976 Branch(2, NegateCondition(cond), rs, rt);
2979 // Emit a nop in the branch delay slot if required.
2983 DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
2984 SizeOfCodeGeneratedSince(&start));
2988 int MacroAssembler::CallSize(Address target,
2989 RelocInfo::Mode rmode,
2993 BranchDelaySlot bd) {
2994 int size = CallSize(t9, cond, rs, rt, bd);
2995 return size + 4 * kInstrSize;
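// The extra 4 * kInstrSize accounts for the fixed four-instruction
// li(t9, ..., ADDRESS_LOAD) sequence used by Call(Address) below.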
2999 void MacroAssembler::Call(Address target,
3000 RelocInfo::Mode rmode,
3004 BranchDelaySlot bd) {
3005 BlockTrampolinePoolScope block_trampoline_pool(this);
3008 int64_t target_int = reinterpret_cast<int64_t>(target);
3009 // Must record previous source positions before the
3010 // li() generates a new code target.
3011 positions_recorder()->WriteRecordedPositions();
3012 li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
3013 Call(t9, cond, rs, rt, bd);
3014 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3015 SizeOfCodeGeneratedSince(&start));
3019 int MacroAssembler::CallSize(Handle<Code> code,
3020 RelocInfo::Mode rmode,
3021 TypeFeedbackId ast_id,
3025 BranchDelaySlot bd) {
3026 AllowDeferredHandleDereference using_raw_address;
3027 return CallSize(reinterpret_cast<Address>(code.location()),
3028 rmode, cond, rs, rt, bd);
3032 void MacroAssembler::Call(Handle<Code> code,
3033 RelocInfo::Mode rmode,
3034 TypeFeedbackId ast_id,
3038 BranchDelaySlot bd) {
3039 BlockTrampolinePoolScope block_trampoline_pool(this);
3042 DCHECK(RelocInfo::IsCodeTarget(rmode));
3043 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3044 SetRecordedAstId(ast_id);
3045 rmode = RelocInfo::CODE_TARGET_WITH_ID;
3047 AllowDeferredHandleDereference embedding_raw_address;
3048 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3049 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3050 SizeOfCodeGeneratedSince(&start));
3054 void MacroAssembler::Ret(Condition cond,
3057 BranchDelaySlot bd) {
3058 Jump(ra, cond, rs, rt, bd);
3062 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
3063 BlockTrampolinePoolScope block_trampoline_pool(this);
3066 imm28 = jump_address(L);
3067 imm28 &= kImm28Mask;
3068 { BlockGrowBufferScope block_buf_growth(this);
3069 // Buffer growth (and relocation) must be blocked for internal references
3070 // until associated instructions are emitted and available to be patched.
3071 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3074 // Emit a nop in the branch delay slot if required.
3075 if (bdslot == PROTECT)
3080 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
3081 BlockTrampolinePoolScope block_trampoline_pool(this);
3084 imm64 = jump_address(L);
3085 { BlockGrowBufferScope block_buf_growth(this);
3086 // Buffer growth (and relocation) must be blocked for internal references
3087 // until associated instructions are emitted and available to be patched.
3088 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3089 li(at, Operand(imm64), ADDRESS_LOAD);
3093 // Emit a nop in the branch delay slot if required.
3094 if (bdslot == PROTECT)
3099 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
3100 BlockTrampolinePoolScope block_trampoline_pool(this);
3103 imm64 = jump_address(L);
3104 { BlockGrowBufferScope block_buf_growth(this);
3105 // Buffer growth (and relocation) must be blocked for internal references
3106 // until associated instructions are emitted and available to be patched.
3107 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3108 li(at, Operand(imm64), ADDRESS_LOAD);
3112 // Emit a nop in the branch delay slot if required.
3113 if (bdslot == PROTECT)
3118 void MacroAssembler::DropAndRet(int drop) {
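// The stack adjustment is emitted in the delay slot of the return
// jump, so it executes before control leaves this frame.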
3119 Ret(USE_DELAY_SLOT);
3120 daddiu(sp, sp, drop * kPointerSize);
3123 void MacroAssembler::DropAndRet(int drop,
3126 const Operand& r2) {
3127 // Both Drop and Ret need to be conditional.
3129 if (cond != cc_always) {
3130 Branch(&skip, NegateCondition(cond), r1, r2);
3136 if (cond != cc_always) {
3142 void MacroAssembler::Drop(int count,
3145 const Operand& op) {
3153 Branch(&skip, NegateCondition(cond), reg, op);
3156 daddiu(sp, sp, count * kPointerSize);
3165 void MacroAssembler::Swap(Register reg1,
3168 if (scratch.is(no_reg)) {
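// No scratch register available: swap in place with the classic
// three-XOR trick (a ^= b; b ^= a; a ^= b).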
3169 Xor(reg1, reg1, Operand(reg2));
3170 Xor(reg2, reg2, Operand(reg1));
3171 Xor(reg1, reg1, Operand(reg2));
3180 void MacroAssembler::Call(Label* target) {
3181 BranchAndLink(target);
3185 void MacroAssembler::Push(Handle<Object> handle) {
3186 li(at, Operand(handle));
3191 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
3192 DCHECK(!src.is(scratch));
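// Split the 64-bit value into two halves, each re-tagged as a Smi
// (payload in the upper 32 bits), so that both stack slots look like
// valid tagged values while they live on the stack.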
3194 dsrl32(src, src, 0);
3195 dsll32(src, src, 0);
3197 dsll32(scratch, scratch, 0);
3202 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
3203 DCHECK(!dst.is(scratch));
3205 dsrl32(scratch, scratch, 0);
3207 dsrl32(dst, dst, 0);
3208 dsll32(dst, dst, 0);
3209 or_(dst, dst, scratch);
3213 void MacroAssembler::DebugBreak() {
3214 PrepareCEntryArgs(0);
3215 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
3216 CEntryStub ces(isolate(), 1);
3217 DCHECK(AllowThisStubCall(&ces));
3218 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3222 // ---------------------------------------------------------------------------
3223 // Exception handling.
3225 void MacroAssembler::PushStackHandler() {
3226 // Adjust this code if not the case.
3227 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3228 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3230 // Link the current handler as the next handler.
3231 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3232 ld(a5, MemOperand(a6));
3235 // Set this new handler as the current one.
3236 sd(sp, MemOperand(a6));
3240 void MacroAssembler::PopStackHandler() {
3241 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3243 Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
3245 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3246 sd(a1, MemOperand(at));
3250 void MacroAssembler::Allocate(int object_size,
3255 AllocationFlags flags) {
3256 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3257 if (!FLAG_inline_new) {
3258 if (emit_debug_code()) {
3259 // Trash the registers to simulate an allocation failure.
3261 li(scratch1, 0x7191);
3262 li(scratch2, 0x7291);
3268 DCHECK(!result.is(scratch1));
3269 DCHECK(!result.is(scratch2));
3270 DCHECK(!scratch1.is(scratch2));
3271 DCHECK(!scratch1.is(t9));
3272 DCHECK(!scratch2.is(t9));
3273 DCHECK(!result.is(t9));
3275 // Make object size into bytes.
3276 if ((flags & SIZE_IN_WORDS) != 0) {
3277 object_size *= kPointerSize;
3279 DCHECK(0 == (object_size & kObjectAlignmentMask));
3281 // Check relative positions of allocation top and limit addresses.
3282 // ARM adds additional checks to make sure the ldm instruction can be
3283 // used. On MIPS we don't have ldm so we don't need additional checks either.
3284 ExternalReference allocation_top =
3285 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3286 ExternalReference allocation_limit =
3287 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3290 reinterpret_cast<intptr_t>(allocation_top.address());
3292 reinterpret_cast<intptr_t>(allocation_limit.address());
3293 DCHECK((limit - top) == kPointerSize);
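// Top and limit are adjacent words, so a single base register can
// reach both; the limit is loaded below at offset (limit - top).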
3295 // Set up allocation top address and object size registers.
3296 Register topaddr = scratch1;
3297 li(topaddr, Operand(allocation_top));
3299 // This code stores a temporary value in t9.
3300 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3301 // Load allocation top into result and allocation limit into t9.
3302 ld(result, MemOperand(topaddr));
3303 ld(t9, MemOperand(topaddr, kPointerSize));
3305 if (emit_debug_code()) {
3306 // Assert that result actually contains top on entry. t9 is used
3307 // immediately below, so clobbering it here does not make register
3308 // contents differ between debug and release builds.
3309 ld(t9, MemOperand(topaddr));
3310 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3312 // Load allocation limit into t9. Result already contains allocation top.
3313 ld(t9, MemOperand(topaddr, limit - top));
3316 DCHECK(kPointerSize == kDoubleSize);
3317 if (emit_debug_code()) {
3318 And(at, result, Operand(kDoubleAlignmentMask));
3319 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3322 // Calculate new top and bail out if new space is exhausted. Use result
3323 // to calculate the new top.
3324 Daddu(scratch2, result, Operand(object_size));
3325 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3326 sd(scratch2, MemOperand(topaddr));
3328 // Tag object if requested.
3329 if ((flags & TAG_OBJECT) != 0) {
3330 Daddu(result, result, Operand(kHeapObjectTag));
3335 void MacroAssembler::Allocate(Register object_size,
3340 AllocationFlags flags) {
3341 if (!FLAG_inline_new) {
3342 if (emit_debug_code()) {
3343 // Trash the registers to simulate an allocation failure.
3345 li(scratch1, 0x7191);
3346 li(scratch2, 0x7291);
3352 DCHECK(!result.is(scratch1));
3353 DCHECK(!result.is(scratch2));
3354 DCHECK(!scratch1.is(scratch2));
3355 DCHECK(!object_size.is(t9));
3356 DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3358 // Check relative positions of allocation top and limit addresses.
3359 // ARM adds additional checks to make sure the ldm instruction can be
3360 // used. On MIPS we don't have ldm so we don't need additional checks either.
3361 ExternalReference allocation_top =
3362 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3363 ExternalReference allocation_limit =
3364 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3366 reinterpret_cast<intptr_t>(allocation_top.address());
3368 reinterpret_cast<intptr_t>(allocation_limit.address());
3369 DCHECK((limit - top) == kPointerSize);
3371 // Set up allocation top address and object size registers.
3372 Register topaddr = scratch1;
3373 li(topaddr, Operand(allocation_top));
3375 // This code stores a temporary value in t9.
3376 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3377 // Load allocation top into result and allocation limit into t9.
3378 ld(result, MemOperand(topaddr));
3379 ld(t9, MemOperand(topaddr, kPointerSize));
3381 if (emit_debug_code()) {
3382 // Assert that result actually contains top on entry. t9 is used
3383 // immediately below, so clobbering it here does not make register
3384 // contents differ between debug and release builds.
3385 ld(t9, MemOperand(topaddr));
3386 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3388 // Load allocation limit into t9. Result already contains allocation top.
3389 ld(t9, MemOperand(topaddr, limit - top));
3392 DCHECK(kPointerSize == kDoubleSize);
3393 if (emit_debug_code()) {
3394 And(at, result, Operand(kDoubleAlignmentMask));
3395 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3398 // Calculate new top and bail out if new space is exhausted. Use result
3399 // to calculate the new top. Object size may be in words so a shift is
3400 // required to get the number of bytes.
3401 if ((flags & SIZE_IN_WORDS) != 0) {
3402 dsll(scratch2, object_size, kPointerSizeLog2);
3403 Daddu(scratch2, result, scratch2);
3405 Daddu(scratch2, result, Operand(object_size));
3407 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3409 // Update allocation top. scratch2 temporarily holds the new top.
3410 if (emit_debug_code()) {
3411 And(t9, scratch2, Operand(kObjectAlignmentMask));
3412 Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3414 sd(scratch2, MemOperand(topaddr));
3416 // Tag object if requested.
3417 if ((flags & TAG_OBJECT) != 0) {
3418 Daddu(result, result, Operand(kHeapObjectTag));
3423 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3425 ExternalReference new_space_allocation_top =
3426 ExternalReference::new_space_allocation_top_address(isolate());
3428 // Make sure the object has no tag before resetting top.
3429 And(object, object, Operand(~kHeapObjectTagMask));
3431 // Check that the object being un-allocated is below the current top.
3432 li(scratch, Operand(new_space_allocation_top));
3433 ld(scratch, MemOperand(scratch));
3434 Check(less, kUndoAllocationOfNonAllocatedMemory,
3435 object, Operand(scratch));
3437 // Write the address of the object to un-allocate as the current top.
3438 li(scratch, Operand(new_space_allocation_top));
3439 sd(object, MemOperand(scratch));
3443 void MacroAssembler::AllocateTwoByteString(Register result,
3448 Label* gc_required) {
3449 // Calculate the number of bytes needed for the characters in the string while
3450 // observing object alignment.
3451 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3452 dsll(scratch1, length, 1); // Length in bytes, not chars.
3453 daddiu(scratch1, scratch1,
3454 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3455 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3457 // Allocate two-byte string in new space.
3465 // Set the map, length and hash field.
3466 InitializeNewString(result,
3468 Heap::kStringMapRootIndex,
3474 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3475 Register scratch1, Register scratch2,
3477 Label* gc_required) {
3478 // Calculate the number of bytes needed for the characters in the string
3479 // while observing object alignment.
3480 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3481 DCHECK(kCharSize == 1);
3482 daddiu(scratch1, length,
3483 kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3484 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3486 // Allocate one-byte string in new space.
3494 // Set the map, length and hash field.
3495 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3496 scratch1, scratch2);
3500 void MacroAssembler::AllocateTwoByteConsString(Register result,
3504 Label* gc_required) {
3505 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3507 InitializeNewString(result,
3509 Heap::kConsStringMapRootIndex,
3515 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3518 Label* gc_required) {
3519 Allocate(ConsString::kSize,
3526 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3527 scratch1, scratch2);
3531 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3535 Label* gc_required) {
3536 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3539 InitializeNewString(result,
3541 Heap::kSlicedStringMapRootIndex,
3547 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3551 Label* gc_required) {
3552 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3555 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3556 scratch1, scratch2);
3560 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3561 Label* not_unique_name) {
3562 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3564 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3565 Branch(&succeed, eq, at, Operand(zero_reg));
3566 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3572 // Allocates a heap number or jumps to the label if the young space is full and
3573 // a scavenge is needed.
3574 void MacroAssembler::AllocateHeapNumber(Register result,
3577 Register heap_number_map,
3579 TaggingMode tagging_mode,
3581 // Allocate an object in the heap for the heap number and tag it as a heap
3583 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3584 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3586 Heap::RootListIndex map_index = mode == MUTABLE
3587 ? Heap::kMutableHeapNumberMapRootIndex
3588 : Heap::kHeapNumberMapRootIndex;
3589 AssertIsRoot(heap_number_map, map_index);
3591 // Store heap number map in the allocated object.
3592 if (tagging_mode == TAG_RESULT) {
3593 sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3595 sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3600 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3604 Label* gc_required) {
3605 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3606 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3607 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3611 // Copies a fixed number of fields of heap objects from src to dst.
3612 void MacroAssembler::CopyFields(Register dst,
3616 DCHECK((temps & dst.bit()) == 0);
3617 DCHECK((temps & src.bit()) == 0);
3618 // Primitive implementation using only one temporary register.
3620 Register tmp = no_reg;
3621 // Find a temp register in temps list.
3622 for (int i = 0; i < kNumRegisters; i++) {
3623 if ((temps & (1 << i)) != 0) {
3628 DCHECK(!tmp.is(no_reg));
3630 for (int i = 0; i < field_count; i++) {
3631 ld(tmp, FieldMemOperand(src, i * kPointerSize));
3632 sd(tmp, FieldMemOperand(dst, i * kPointerSize));
3637 void MacroAssembler::CopyBytes(Register src,
3641 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3643 // Align src before copying in word size chunks.
3644 Branch(&byte_loop, le, length, Operand(kPointerSize));
3645 bind(&align_loop_1);
3646 And(scratch, src, kPointerSize - 1);
3647 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3648 lbu(scratch, MemOperand(src));
3650 sb(scratch, MemOperand(dst));
3652 Dsubu(length, length, Operand(1));
3653 Branch(&align_loop_1, ne, length, Operand(zero_reg));
3655 // Copy bytes in word size chunks.
3657 if (emit_debug_code()) {
3658 And(scratch, src, kPointerSize - 1);
3659 Assert(eq, kExpectingAlignmentForCopyBytes,
3660 scratch, Operand(zero_reg));
3662 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3663 ld(scratch, MemOperand(src));
3664 Daddu(src, src, kPointerSize);
3666 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3667 // Can't use unaligned access - copy byte by byte.
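// Storing the least-significant byte first reproduces memory order
// only on a little-endian target; a big-endian build would need the
// bytes written in the opposite order.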
3668 sb(scratch, MemOperand(dst, 0));
3669 dsrl(scratch, scratch, 8);
3670 sb(scratch, MemOperand(dst, 1));
3671 dsrl(scratch, scratch, 8);
3672 sb(scratch, MemOperand(dst, 2));
3673 dsrl(scratch, scratch, 8);
3674 sb(scratch, MemOperand(dst, 3));
3675 dsrl(scratch, scratch, 8);
3676 sb(scratch, MemOperand(dst, 4));
3677 dsrl(scratch, scratch, 8);
3678 sb(scratch, MemOperand(dst, 5));
3679 dsrl(scratch, scratch, 8);
3680 sb(scratch, MemOperand(dst, 6));
3681 dsrl(scratch, scratch, 8);
3682 sb(scratch, MemOperand(dst, 7));
3685 Dsubu(length, length, Operand(kPointerSize));
3688 // Copy the last bytes, if any are left.
3690 Branch(&done, eq, length, Operand(zero_reg));
3692 lbu(scratch, MemOperand(src));
3694 sb(scratch, MemOperand(dst));
3696 Dsubu(length, length, Operand(1));
3697 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3702 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3703 Register end_offset,
3708 sd(filler, MemOperand(start_offset));
3709 Daddu(start_offset, start_offset, kPointerSize);
3711 Branch(&loop, lt, start_offset, Operand(end_offset));
3715 void MacroAssembler::CheckFastElements(Register map,
3718 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3719 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3720 STATIC_ASSERT(FAST_ELEMENTS == 2);
3721 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3722 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3723 Branch(fail, hi, scratch,
3724 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3728 void MacroAssembler::CheckFastObjectElements(Register map,
3731 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3732 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3733 STATIC_ASSERT(FAST_ELEMENTS == 2);
3734 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3735 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3736 Branch(fail, ls, scratch,
3737 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3738 Branch(fail, hi, scratch,
3739 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3743 void MacroAssembler::CheckFastSmiElements(Register map,
3746 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3747 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3748 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3749 Branch(fail, hi, scratch,
3750 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3754 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3756 Register elements_reg,
3760 int elements_offset) {
3761 Label smi_value, done;
3763 // Handle smi values specially.
3764 JumpIfSmi(value_reg, &smi_value);
3766 // Ensure that the object is a heap number.
3769 Heap::kHeapNumberMapRootIndex,
3773 // Double value, turn potential sNaN into qNaN.
3774 DoubleRegister double_result = f0;
3775 DoubleRegister double_scratch = f2;
3777 ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
3778 Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
3779 FPUCanonicalizeNaN(double_result, double_result);
3782 // scratch1 is now the effective address of the double element.
3783 // Untag and transfer.
3784 dsrl32(at, value_reg, 0);
3785 mtc1(at, double_scratch);
3786 cvt_d_w(double_result, double_scratch);
3789 Daddu(scratch1, elements_reg,
3790 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3792 dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
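// The dsra above untags the Smi key (arithmetic shift right by 32)
// and scales it by kDoubleSize (shift left by kDoubleSizeLog2) in a
// single combined shift.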
3793 Daddu(scratch1, scratch1, scratch2);
3794 sdc1(double_result, MemOperand(scratch1, 0));
3798 void MacroAssembler::CompareMapAndBranch(Register obj,
3801 Label* early_success,
3804 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3805 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3809 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3811 Label* early_success,
3814 Branch(branch_to, cond, obj_map, Operand(map));
3818 void MacroAssembler::CheckMap(Register obj,
3822 SmiCheckType smi_check_type) {
3823 if (smi_check_type == DO_SMI_CHECK) {
3824 JumpIfSmi(obj, fail);
3827 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3832 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3833 Register scratch2, Handle<WeakCell> cell,
3834 Handle<Code> success,
3835 SmiCheckType smi_check_type) {
3837 if (smi_check_type == DO_SMI_CHECK) {
3838 JumpIfSmi(obj, &fail);
3840 ld(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3841 GetWeakValue(scratch2, cell);
3842 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
3847 void MacroAssembler::CheckMap(Register obj,
3849 Heap::RootListIndex index,
3851 SmiCheckType smi_check_type) {
3852 if (smi_check_type == DO_SMI_CHECK) {
3853 JumpIfSmi(obj, fail);
3855 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3856 LoadRoot(at, index);
3857 Branch(fail, ne, scratch, Operand(at));
3861 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3862 li(value, Operand(cell));
3863 ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
3866 void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
3867 const DoubleRegister src) {
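// Subtracting zero leaves ordinary values (including -0) unchanged
// but quiets a signaling NaN, which is the canonicalization wanted here.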
3868 sub_d(dst, src, kDoubleRegZero);
3871 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3873 GetWeakValue(value, cell);
3874 JumpIfSmi(value, miss);
3878 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
3879 if (IsMipsSoftFloatABI) {
3882 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3887 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
3888 if (IsMipsSoftFloatABI) {
3891 Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
3896 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
3897 if (!IsMipsSoftFloatABI) {
3905 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
3906 if (!IsMipsSoftFloatABI) {
3914 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3915 DoubleRegister src2) {
3916 if (!IsMipsSoftFloatABI) {
3917 const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
3919 DCHECK(!src1.is(fparg2));
3933 // -----------------------------------------------------------------------------
3934 // JavaScript invokes.
3936 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3937 const ParameterCount& actual,
3938 Handle<Code> code_constant,
3941 bool* definitely_mismatches,
3943 const CallWrapper& call_wrapper) {
3944 bool definitely_matches = false;
3945 *definitely_mismatches = false;
3946 Label regular_invoke;
3948 // Check whether the expected and actual arguments count match. If not,
3949 // setup registers according to contract with ArgumentsAdaptorTrampoline:
3950 // a0: actual arguments count
3951 // a1: function (passed through to callee)
3952 // a2: expected arguments count
3954 // The code below is made a lot easier because the calling code already sets
3955 // up actual and expected registers according to the contract if values are
3956 // passed in registers.
3957 DCHECK(actual.is_immediate() || actual.reg().is(a0));
3958 DCHECK(expected.is_immediate() || expected.reg().is(a2));
3959 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3961 if (expected.is_immediate()) {
3962 DCHECK(actual.is_immediate());
3963 if (expected.immediate() == actual.immediate()) {
3964 definitely_matches = true;
3966 li(a0, Operand(actual.immediate()));
3967 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3968 if (expected.immediate() == sentinel) {
3969 // Don't worry about adapting arguments for builtins that
3970 // don't want that done. Skip adaptation code by making it look
3971 // like we have a match between expected and actual number of
3973 definitely_matches = true;
3975 *definitely_mismatches = true;
3976 li(a2, Operand(expected.immediate()));
3979 } else if (actual.is_immediate()) {
3980 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3981 li(a0, Operand(actual.immediate()));
3983 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
3986 if (!definitely_matches) {
3987 if (!code_constant.is_null()) {
3988 li(a3, Operand(code_constant));
3989 daddiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3992 Handle<Code> adaptor =
3993 isolate()->builtins()->ArgumentsAdaptorTrampoline();
3994 if (flag == CALL_FUNCTION) {
3995 call_wrapper.BeforeCall(CallSize(adaptor));
3997 call_wrapper.AfterCall();
3998 if (!*definitely_mismatches) {
4002 Jump(adaptor, RelocInfo::CODE_TARGET);
4004 bind(&regular_invoke);
4009 void MacroAssembler::InvokeCode(Register code,
4010 const ParameterCount& expected,
4011 const ParameterCount& actual,
4013 const CallWrapper& call_wrapper) {
4014 // You can't call a function without a valid frame.
4015 DCHECK(flag == JUMP_FUNCTION || has_frame());
4019 bool definitely_mismatches = false;
4020 InvokePrologue(expected, actual, Handle<Code>::null(), code,
4021 &done, &definitely_mismatches, flag,
4023 if (!definitely_mismatches) {
4024 if (flag == CALL_FUNCTION) {
4025 call_wrapper.BeforeCall(CallSize(code));
4027 call_wrapper.AfterCall();
4029 DCHECK(flag == JUMP_FUNCTION);
4032 // Continue here if InvokePrologue does handle the invocation due to
4033 // mismatched parameter counts.
4039 void MacroAssembler::InvokeFunction(Register function,
4040 const ParameterCount& actual,
4042 const CallWrapper& call_wrapper) {
4043 // You can't call a function without a valid frame.
4044 DCHECK(flag == JUMP_FUNCTION || has_frame());
4046 // Contract with called JS functions requires that function is passed in a1.
4047 DCHECK(function.is(a1));
4048 Register expected_reg = a2;
4049 Register code_reg = a3;
4050 ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4051 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4052 // The argument count is stored as int32_t on 64-bit platforms.
4053 // TODO(plind): Smi on 32-bit platforms.
4055 FieldMemOperand(code_reg,
4056 SharedFunctionInfo::kFormalParameterCountOffset));
4057 ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4058 ParameterCount expected(expected_reg);
4059 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
4063 void MacroAssembler::InvokeFunction(Register function,
4064 const ParameterCount& expected,
4065 const ParameterCount& actual,
4067 const CallWrapper& call_wrapper) {
4068 // You can't call a function without a valid frame.
4069 DCHECK(flag == JUMP_FUNCTION || has_frame());
4071 // Contract with called JS functions requires that function is passed in a1.
4072 DCHECK(function.is(a1));
4074 // Get the function and setup the context.
4075 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4077 // We call indirectly through the code field in the function to
4078 // allow recompilation to take effect without changing any of the
4080 ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4081 InvokeCode(a3, expected, actual, flag, call_wrapper);
4085 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4086 const ParameterCount& expected,
4087 const ParameterCount& actual,
4089 const CallWrapper& call_wrapper) {
4091 InvokeFunction(a1, expected, actual, flag, call_wrapper);
4095 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
4099 ld(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
4100 IsInstanceJSObjectType(map, scratch, fail);
4104 void MacroAssembler::IsInstanceJSObjectType(Register map,
4107 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
4108 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
4109 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
4113 void MacroAssembler::IsObjectJSStringType(Register object,
4116 DCHECK(kNotStringTag != 0);
4118 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4119 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4120 And(scratch, scratch, Operand(kIsNotStringMask));
4121 Branch(fail, ne, scratch, Operand(zero_reg));
4125 void MacroAssembler::IsObjectNameType(Register object,
4128 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4129 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4130 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4134 // ---------------------------------------------------------------------------
4135 // Support functions.
4138 void MacroAssembler::GetMapConstructor(Register result, Register map,
4139 Register temp, Register temp2) {
4141 ld(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
4143 JumpIfSmi(result, &done);
4144 GetObjectType(result, temp, temp2);
4145 Branch(&done, ne, temp2, Operand(MAP_TYPE));
4146 ld(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
4152 void MacroAssembler::TryGetFunctionPrototype(Register function,
4156 bool miss_on_bound_function) {
4158 if (miss_on_bound_function) {
4159 // Check that the receiver isn't a smi.
4160 JumpIfSmi(function, miss);
4162 // Check that the function really is a function. Load map into result reg.
4163 GetObjectType(function, result, scratch);
4164 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
4167 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
4169 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
4170 And(scratch, scratch,
4171 Operand(1 << SharedFunctionInfo::kBoundFunction));
4172 Branch(miss, ne, scratch, Operand(zero_reg));
4174 // Make sure that the function has an instance prototype.
4175 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
4176 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
4177 Branch(&non_instance, ne, scratch, Operand(zero_reg));
4180 // Get the prototype or initial map from the function.
4182 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4184 // If the prototype or initial map is the hole, don't return it and
4185 // simply miss the cache instead. This will allow us to allocate a
4186 // prototype object on-demand in the runtime system.
4187 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4188 Branch(miss, eq, result, Operand(t8));
4190 // If the function does not have an initial map, we're done.
4192 GetObjectType(result, scratch, scratch);
4193 Branch(&done, ne, scratch, Operand(MAP_TYPE));
4195 // Get the prototype from the initial map.
4196 ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
4198 if (miss_on_bound_function) {
4201 // Non-instance prototype: Fetch prototype from constructor field
4203 bind(&non_instance);
4204 GetMapConstructor(result, result, scratch, scratch);
4212 void MacroAssembler::GetObjectType(Register object,
4214 Register type_reg) {
4215 ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
4216 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4220 // -----------------------------------------------------------------------------
4223 void MacroAssembler::CallStub(CodeStub* stub,
4224 TypeFeedbackId ast_id,
4228 BranchDelaySlot bd) {
4229 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
4230 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4235 void MacroAssembler::TailCallStub(CodeStub* stub,
4239 BranchDelaySlot bd) {
4240 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4244 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4245 return has_frame_ || !stub->SometimesSetsUpAFrame();
4249 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4250 // If the hash field contains an array index, pick it out. The assert checks
4251 // that the constants for the maximum number of digits for an array index
4252 // cached in the hash field and the number of bits reserved for it do not
4254 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4255 (1 << String::kArrayIndexValueBits));
4256 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4260 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4264 Register heap_number_map,
4266 ObjectToDoubleFlags flags) {
4268 if ((flags & OBJECT_NOT_SMI) == 0) {
4270 JumpIfNotSmi(object, ¬_smi);
4271 // Remove smi tag and convert to double.
4272 // dsra(scratch1, object, kSmiTagSize);
4273 dsra32(scratch1, object, 0);
4274 mtc1(scratch1, result);
4275 cvt_d_w(result, result);
4279 // Check for heap number and load double value from it.
4280 ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4281 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4283 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4284 // If the exponent is all ones, the number is either a NaN or +/-Infinity.
4285 Register exponent = scratch1;
4286 Register mask_reg = scratch2;
4287 lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4288 li(mask_reg, HeapNumber::kExponentMask);
4290 And(exponent, exponent, mask_reg);
4291 Branch(not_number, eq, exponent, Operand(mask_reg));
4293 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4298 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4300 Register scratch1) {
4301 // dsra(scratch1, smi, kSmiTagSize);
4302 dsra32(scratch1, smi, 0);
4303 mtc1(scratch1, value);
4304 cvt_d_w(value, value);
4308 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4309 const Operand& right,
4310 Register overflow_dst,
4312 if (right.is_reg()) {
4313 AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4316 mov(scratch, left); // Preserve left.
4317 daddiu(dst, left, right.immediate()); // Left is overwritten.
4318 xor_(scratch, dst, scratch); // Original left.
4319 // Load right since xori takes uint16 as immediate.
4320 daddiu(t9, zero_reg, right.immediate());
4321 xor_(overflow_dst, dst, t9);
4322 and_(overflow_dst, overflow_dst, scratch);
4324 daddiu(dst, left, right.immediate());
4325 xor_(overflow_dst, dst, left);
4326 // Load right since xori takes uint16 as immediate.
4327 daddiu(t9, zero_reg, right.immediate());
4328 xor_(scratch, dst, t9);
4329 and_(overflow_dst, scratch, overflow_dst);
4335 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4338 Register overflow_dst,
4340 DCHECK(!dst.is(overflow_dst));
4341 DCHECK(!dst.is(scratch));
4342 DCHECK(!overflow_dst.is(scratch));
4343 DCHECK(!overflow_dst.is(left));
4344 DCHECK(!overflow_dst.is(right));
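// Two's-complement rule: addition overflows iff both operands have
// the same sign and the result's sign differs, i.e. exactly when
// (dst ^ left) & (dst ^ right) has its sign bit set.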
4346 if (left.is(right) && dst.is(left)) {
4347 DCHECK(!dst.is(t9));
4348 DCHECK(!scratch.is(t9));
4349 DCHECK(!left.is(t9));
4350 DCHECK(!right.is(t9));
4351 DCHECK(!overflow_dst.is(t9));
4357 mov(scratch, left); // Preserve left.
4358 daddu(dst, left, right); // Left is overwritten.
4359 xor_(scratch, dst, scratch); // Original left.
4360 xor_(overflow_dst, dst, right);
4361 and_(overflow_dst, overflow_dst, scratch);
4362 } else if (dst.is(right)) {
4363 mov(scratch, right); // Preserve right.
4364 daddu(dst, left, right); // Right is overwritten.
4365 xor_(scratch, dst, scratch); // Original right.
4366 xor_(overflow_dst, dst, left);
4367 and_(overflow_dst, overflow_dst, scratch);
4369 daddu(dst, left, right);
4370 xor_(overflow_dst, dst, left);
4371 xor_(scratch, dst, right);
4372 and_(overflow_dst, scratch, overflow_dst);
4377 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4378 const Operand& right,
4379 Register overflow_dst,
4381 if (right.is_reg()) {
4382 SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4385 mov(scratch, left); // Preserve left.
4386 daddiu(dst, left, -(right.immediate())); // Left is overwritten.
4387 xor_(overflow_dst, dst, scratch); // scratch is original left.
4388 // Load right since xori takes uint16 as immediate.
4389 daddiu(t9, zero_reg, right.immediate());
4390 xor_(scratch, scratch, t9); // scratch is original left.
4391 and_(overflow_dst, scratch, overflow_dst);
4393 daddiu(dst, left, -(right.immediate()));
4394 xor_(overflow_dst, dst, left);
4395 // Load right since xori takes uint16 as immediate.
4396 daddiu(t9, zero_reg, right.immediate());
4397 xor_(scratch, left, t9);
4398 and_(overflow_dst, scratch, overflow_dst);
4404 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4407 Register overflow_dst,
4409 DCHECK(!dst.is(overflow_dst));
4410 DCHECK(!dst.is(scratch));
4411 DCHECK(!overflow_dst.is(scratch));
4412 DCHECK(!overflow_dst.is(left));
4413 DCHECK(!overflow_dst.is(right));
4414 DCHECK(!scratch.is(left));
4415 DCHECK(!scratch.is(right));
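// For subtraction the rule is: overflow iff the operands have
// different signs and the result's sign differs from 'left', i.e.
// (dst ^ left) & (left ^ right) has its sign bit set.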
4417 // This happens with some crankshaft code. Since Subu works fine if
4418 // left == right, let's not make that restriction here.
4419 if (left.is(right)) {
4421 mov(overflow_dst, zero_reg);
4426 mov(scratch, left); // Preserve left.
4427 dsubu(dst, left, right); // Left is overwritten.
4428 xor_(overflow_dst, dst, scratch); // scratch is original left.
4429 xor_(scratch, scratch, right); // scratch is original left.
4430 and_(overflow_dst, scratch, overflow_dst);
4431 } else if (dst.is(right)) {
4432 mov(scratch, right); // Preserve right.
4433 dsubu(dst, left, right); // Right is overwritten.
4434 xor_(overflow_dst, dst, left);
4435 xor_(scratch, left, scratch); // Original right.
4436 and_(overflow_dst, scratch, overflow_dst);
4438 dsubu(dst, left, right);
4439 xor_(overflow_dst, dst, left);
4440 xor_(scratch, left, right);
4441 and_(overflow_dst, scratch, overflow_dst);
4446 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4448 SaveFPRegsMode save_doubles) {
4449 // All parameters are on the stack. v0 has the return value after call.
4451 // If the expected number of arguments of the runtime function is
4452 // constant, we check that the actual number of arguments match the
4454 CHECK(f->nargs < 0 || f->nargs == num_arguments);
4456 // TODO(1236192): Most runtime routines don't need the number of
4457 // arguments passed in because it is constant. At some point we
4458 // should remove this need and make the runtime routine entry code
4460 PrepareCEntryArgs(num_arguments);
4461 PrepareCEntryFunction(ExternalReference(f, isolate()));
4462 CEntryStub stub(isolate(), 1, save_doubles);
4467 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4469 BranchDelaySlot bd) {
4470 PrepareCEntryArgs(num_arguments);
4471 PrepareCEntryFunction(ext);
4473 CEntryStub stub(isolate(), 1);
4474 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4478 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4481 // TODO(1236192): Most runtime routines don't need the number of
4482 // arguments passed in because it is constant. At some point we
4483 // should remove this need and make the runtime routine entry code
4485 PrepareCEntryArgs(num_arguments);
4486 JumpToExternalReference(ext);
4490 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4493 TailCallExternalReference(ExternalReference(fid, isolate()),
4499 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4500 BranchDelaySlot bd) {
4501 PrepareCEntryFunction(builtin);
4502 CEntryStub stub(isolate(), 1);
4503 Jump(stub.GetCode(),
4504 RelocInfo::CODE_TARGET,
4512 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4514 const CallWrapper& call_wrapper) {
4515 // You can't call a builtin without a valid frame.
4516 DCHECK(flag == JUMP_FUNCTION || has_frame());
4518 GetBuiltinEntry(t9, id);
4519 if (flag == CALL_FUNCTION) {
4520 call_wrapper.BeforeCall(CallSize(t9));
4522 call_wrapper.AfterCall();
4524 DCHECK(flag == JUMP_FUNCTION);
4530 void MacroAssembler::GetBuiltinFunction(Register target,
4531 Builtins::JavaScript id) {
4532 // Load the builtins object into target register.
4533 ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4534 ld(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4535 // Load the JavaScript builtin function from the builtins object.
4536 ld(target, FieldMemOperand(target,
4537 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4541 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4542 DCHECK(!target.is(a1));
4543 GetBuiltinFunction(a1, id);
4544 // Load the code entry point from the builtins object.
4545 ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4549 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4550 Register scratch1, Register scratch2) {
4551 if (FLAG_native_code_counters && counter->Enabled()) {
4552 li(scratch1, Operand(value));
4553 li(scratch2, Operand(ExternalReference(counter)));
4554 sd(scratch1, MemOperand(scratch2));
4559 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4560 Register scratch1, Register scratch2) {
4562 if (FLAG_native_code_counters && counter->Enabled()) {
4563 li(scratch2, Operand(ExternalReference(counter)));
4564 ld(scratch1, MemOperand(scratch2));
4565 Daddu(scratch1, scratch1, Operand(value));
4566 sd(scratch1, MemOperand(scratch2));
4571 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4572 Register scratch1, Register scratch2) {
4574 if (FLAG_native_code_counters && counter->Enabled()) {
4575 li(scratch2, Operand(ExternalReference(counter)));
4576 ld(scratch1, MemOperand(scratch2));
4577 Dsubu(scratch1, scratch1, Operand(value));
4578 sd(scratch1, MemOperand(scratch2));
4583 // -----------------------------------------------------------------------------
4586 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4587 Register rs, Operand rt) {
4588 if (emit_debug_code())
4589 Check(cc, reason, rs, rt);
4593 void MacroAssembler::AssertFastElements(Register elements) {
4594 if (emit_debug_code()) {
4595 DCHECK(!elements.is(at));
4598 ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4599 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4600 Branch(&ok, eq, elements, Operand(at));
4601 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4602 Branch(&ok, eq, elements, Operand(at));
4603 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4604 Branch(&ok, eq, elements, Operand(at));
4605 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4612 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4613 Register rs, Operand rt) {
4615 Branch(&L, cc, rs, rt);
4617 // Will not return here.
4622 void MacroAssembler::Abort(BailoutReason reason) {
4626 const char* msg = GetBailoutReason(reason);
4628 RecordComment("Abort message: ");
4632 if (FLAG_trap_on_abort) {
4638 li(a0, Operand(Smi::FromInt(reason)));
4640 // Disable stub call restrictions to always allow calls to abort.
4642 // We don't actually want to generate a pile of code for this, so just
4643 // claim there is a stack frame, without generating one.
4644 FrameScope scope(this, StackFrame::NONE);
4645 CallRuntime(Runtime::kAbort, 1);
4647 CallRuntime(Runtime::kAbort, 1);
4649 // Will not return here.
4650 if (is_trampoline_pool_blocked()) {
4651 // If the calling code cares about the exact number of
4652 // instructions generated, we insert padding here to keep the size
4653 // of the Abort macro constant.
4654 // In debug mode with debug_code enabled, the number of generated
4655 // instructions is currently 10, so we use that as the maximum value.
4656 static const int kExpectedAbortInstructions = 10;
4657 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4658 DCHECK(abort_instructions <= kExpectedAbortInstructions);
4659 while (abort_instructions++ < kExpectedAbortInstructions) {
4666 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4667 if (context_chain_length > 0) {
4668 // Move up the chain of contexts to the context containing the slot.
4669 ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4670 for (int i = 1; i < context_chain_length; i++) {
4671 ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4674 // Slot is in the current function context. Move it into the
4675 // destination register in case we store into it (the write barrier
4676 // cannot be allowed to destroy the context in cp).
4682 void MacroAssembler::LoadTransitionedArrayMapConditional(
4683 ElementsKind expected_kind,
4684 ElementsKind transitioned_kind,
4685 Register map_in_out,
4687 Label* no_map_match) {
4688 // Load the global or builtins object from the current context.
4690 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4691 ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4693 // Check that the function's map is the same as the expected cached map.
4696 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4697 size_t offset = expected_kind * kPointerSize +
4698 FixedArrayBase::kHeaderSize;
4699 ld(at, FieldMemOperand(scratch, offset));
4700 Branch(no_map_match, ne, map_in_out, Operand(at));
4702 // Use the transitioned cached map.
4703 offset = transitioned_kind * kPointerSize +
4704 FixedArrayBase::kHeaderSize;
4705 ld(map_in_out, FieldMemOperand(scratch, offset));
4709 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4710 // Load the global or builtins object from the current context.
4712 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4713 // Load the native context from the global or builtins object.
4714 ld(function, FieldMemOperand(function,
4715 GlobalObject::kNativeContextOffset));
4716 // Load the function from the native context.
4717 ld(function, MemOperand(function, Context::SlotOffset(index)));
4721 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4724 // Load the initial map. The global functions all have initial maps.
4725 ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4726 if (emit_debug_code()) {
4728 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4731 Abort(kGlobalFunctionsMustHaveInitialMap);
4737 void MacroAssembler::StubPrologue() {
4739 Push(Smi::FromInt(StackFrame::STUB));
4740 // Adjust FP to point to saved FP.
4741 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4745 void MacroAssembler::Prologue(bool code_pre_aging) {
4746 PredictableCodeSizeScope predictible_code_size_scope(
4747 this, kNoCodeAgeSequenceLength);
4748 // The following three instructions must remain together and unmodified
4749 // for code aging to work properly.
4750 if (code_pre_aging) {
4751 // Pre-age the code.
4752 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4753 nop(Assembler::CODE_AGE_MARKER_NOP);
4754 // Load the stub address to t9 and call it,
4755 // GetCodeAgeAndParity() extracts the stub address from this instruction.
4757 Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
4759 nop(); // Prevent jalr to jal optimization.
4761 nop(); // Branch delay slot nop.
4762 nop(); // Pad the empty space.
4764 Push(ra, fp, cp, a1);
4765 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4766 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4767 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4768 // Adjust fp to point to caller's fp.
4769 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4774 void MacroAssembler::EnterFrame(StackFrame::Type type,
4775 bool load_constant_pool_pointer_reg) {
4776 // Out-of-line constant pool not implemented on mips64.
4781 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4782 daddiu(sp, sp, -5 * kPointerSize);
4783 li(t8, Operand(Smi::FromInt(type)));
4784 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4785 sd(ra, MemOperand(sp, 4 * kPointerSize));
4786 sd(fp, MemOperand(sp, 3 * kPointerSize));
4787 sd(cp, MemOperand(sp, 2 * kPointerSize));
4788 sd(t8, MemOperand(sp, 1 * kPointerSize));
4789 sd(t9, MemOperand(sp, 0 * kPointerSize));
4790 // Adjust FP to point to saved FP.
4792 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4796 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4798 ld(fp, MemOperand(sp, 0 * kPointerSize));
4799 ld(ra, MemOperand(sp, 1 * kPointerSize));
4800 daddiu(sp, sp, 2 * kPointerSize);
4804 void MacroAssembler::EnterExitFrame(bool save_doubles,
4806 // Set up the frame structure on the stack.
4807 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4808 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4809 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4811 // This is how the stack will look:
4812 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4813 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4814 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4815 // [fp - 1 (==kSPOffset)] - sp of the called function
4816 // [fp - 2 (==kCodeOffset)] - CodeObject
4817 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4818 // new stack (will contain saved ra)
4821 daddiu(sp, sp, -4 * kPointerSize);
4822 sd(ra, MemOperand(sp, 3 * kPointerSize));
4823 sd(fp, MemOperand(sp, 2 * kPointerSize));
4824 daddiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4826 if (emit_debug_code()) {
4827 sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4830 // Accessed from ExitFrame::code_slot.
4831 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4832 sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4834 // Save the frame pointer and the context in top.
4835 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4836 sd(fp, MemOperand(t8));
4837 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4838 sd(cp, MemOperand(t8));
4840 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4842 // The stack is already aligned to 0 modulo 8 for stores with sdc1.
4843 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
4844 int space = kNumOfSavedRegisters * kDoubleSize;
4845 Dsubu(sp, sp, Operand(space));
4846 // Remember: we only need to save every 2nd double FPU value.
4847 for (int i = 0; i < kNumOfSavedRegisters; i++) {
4848 FPURegister reg = FPURegister::from_code(2 * i);
4849 sdc1(reg, MemOperand(sp, i * kDoubleSize));
4853 // Reserve space for the return address, stack space and an optional slot
4854 // (used by the DirectCEntryStub to hold the return value if a struct is
4855 // returned) and align the frame preparing for calling the runtime function.
4856 DCHECK(stack_space >= 0);
4857 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4858 if (frame_alignment > 0) {
4859 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4860 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4863 // Set the exit frame sp value to point just before the return address
4865 daddiu(at, sp, kPointerSize);
4866 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4870 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
4871 bool restore_context, bool do_return,
4872 bool argument_count_is_length) {
4873 // Optionally restore all double registers.
4875 // Remember: we only need to restore every 2nd double FPU value.
4876 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
4877 Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize +
4878 kNumOfSavedRegisters * kDoubleSize));
4879 for (int i = 0; i < kNumOfSavedRegisters; i++) {
4880 FPURegister reg = FPURegister::from_code(2 * i);
4881 ldc1(reg, MemOperand(t8, i * kDoubleSize));
4886 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4887 sd(zero_reg, MemOperand(t8));
4889 // Restore current context from top and clear it in debug mode.
4890 if (restore_context) {
4891 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4892 ld(cp, MemOperand(t8));
4895 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4896 sd(a3, MemOperand(t8));
4899 // Pop the arguments, restore registers, and return.
4900 mov(sp, fp); // Respect ABI stack constraint.
4901 ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4902 ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4904 if (argument_count.is_valid()) {
4905 if (argument_count_is_length) {
4906 daddu(sp, sp, argument_count);
4908 dsll(t8, argument_count, kPointerSizeLog2);
4914 Ret(USE_DELAY_SLOT);
4915 // If returning, the instruction in the delay slot will be the addiu below.
4917 daddiu(sp, sp, 2 * kPointerSize);
4921 void MacroAssembler::InitializeNewString(Register string,
4923 Heap::RootListIndex map_index,
4925 Register scratch2) {
4926 // dsll(scratch1, length, kSmiTagSize);
4927 dsll32(scratch1, length, 0);
4928 LoadRoot(scratch2, map_index);
4929 sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
4930 li(scratch1, Operand(String::kEmptyHashField));
4931 sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4932 sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4936 int MacroAssembler::ActivationFrameAlignment() {
4937 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
4938 // Running on the real platform. Use the alignment as mandated by the local
4940 // Note: This will break if we ever start generating snapshots on one Mips
4941 // platform for another Mips platform with a different alignment.
4942 return base::OS::ActivationFrameAlignment();
4943 #else // V8_HOST_ARCH_MIPS
4944 // If we are using the simulator then we should always align to the expected
4945 // alignment. As the simulator is used to generate snapshots we do not know
4946 // if the target platform will need alignment, so this is controlled from a
4948 return FLAG_sim_stack_alignment;
4949 #endif // V8_HOST_ARCH_MIPS
4953 void MacroAssembler::AssertStackIsAligned() {
4954 if (emit_debug_code()) {
4955 const int frame_alignment = ActivationFrameAlignment();
4956 const int frame_alignment_mask = frame_alignment - 1;
4958 if (frame_alignment > kPointerSize) {
4959 Label alignment_as_expected;
4960 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4961 andi(at, sp, frame_alignment_mask);
4962 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4963 // Don't use Check here, as it will call Runtime_Abort, re-entering here.
4964 stop("Unexpected stack alignment");
4965 bind(&alignment_as_expected);
4971 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4974 Label* not_power_of_two_or_zero) {
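// A power of two has exactly one bit set, so reg & (reg - 1) == 0;
// the first branch filters out zero and negative values, for which
// reg - 1 is negative.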
4975 Dsubu(scratch, reg, Operand(1));
4976 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4977 scratch, Operand(zero_reg));
4978 and_(at, scratch, reg); // In the delay slot.
4979 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4983 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4984 DCHECK(!reg.is(overflow));
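// Tagging shifts the payload into place; XOR-ing the tagged result
// with the saved original exposes a sign-bit change, so a negative
// 'overflow' means the value did not fit in a Smi.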
4985 mov(overflow, reg); // Save original value.
4987 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
4991 void MacroAssembler::SmiTagCheckOverflow(Register dst,
4993 Register overflow) {
4995 // Fall back to slower case.
4996 SmiTagCheckOverflow(dst, overflow);
4998 DCHECK(!dst.is(src));
4999 DCHECK(!dst.is(overflow));
5000 DCHECK(!src.is(overflow));
5002 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
5007 void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
5008 if (SmiValuesAre32Bits()) {
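// With 32-bit Smis the payload occupies the upper word, so a 32-bit
// load through UntagSmiMemOperand (which offsets to the payload half)
// loads and untags in a single instruction.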
5009 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5017 void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
5018 if (SmiValuesAre32Bits()) {
5019 // TODO(plind): not clear if lw or ld is faster here; needs a micro-benchmark.
5020 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5021 dsll(dst, dst, scale);
5024 DCHECK(scale >= kSmiTagSize);
5025 sll(dst, dst, scale - kSmiTagSize);


// Returns 2 values: the Smi and a scaled version of the int within the Smi.
void MacroAssembler::SmiLoadWithScale(Register d_smi,
                                      Register d_scaled,
                                      MemOperand src,
                                      int scale) {
  if (SmiValuesAre32Bits()) {
    ld(d_smi, src);
    dsra(d_scaled, d_smi, kSmiShift - scale);
  } else {
    lw(d_smi, src);
    DCHECK(scale >= kSmiTagSize);
    sll(d_scaled, d_smi, scale - kSmiTagSize);
  }
}


// Returns 2 values: the untagged Smi (int32) and a scaled version of that
// int.
void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
                                           Register d_scaled,
                                           MemOperand src,
                                           int scale) {
  if (SmiValuesAre32Bits()) {
    lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
    dsll(d_scaled, d_int, scale);
  } else {
    lw(d_int, src);
    // Need both the int and the scaled int, so use two instructions.
    SmiUntag(d_int);
    sll(d_scaled, d_int, scale);
  }
}


void MacroAssembler::UntagAndJumpIfSmi(Register dst,
                                       Register src,
                                       Label* smi_case) {
  // DCHECK(!dst.is(src));
  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
  SmiUntag(dst, src);  // In the delay slot.
}


void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
                                          Register src,
                                          Label* non_smi_case) {
  // DCHECK(!dst.is(src));
  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
  SmiUntag(dst, src);  // In the delay slot.
}


void MacroAssembler::JumpIfSmi(Register value,
                               Label* smi_label,
                               Register scratch,
                               BranchDelaySlot bd) {
  DCHECK_EQ(0, kSmiTag);
  andi(scratch, value, kSmiTagMask);
  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}


void MacroAssembler::JumpIfNotSmi(Register value,
                                  Label* not_smi_label,
                                  Register scratch,
                                  BranchDelaySlot bd) {
  DCHECK_EQ(0, kSmiTag);
  andi(scratch, value, kSmiTagMask);
  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}


void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  // TODO(plind): Find some better way to fix this assert issue.
#if defined(__APPLE__)
  DCHECK_EQ(1, kSmiTagMask);
#else
  DCHECK_EQ((int64_t)1, kSmiTagMask);
#endif
  or_(at, reg1, reg2);
  JumpIfNotSmi(at, on_not_both_smi);
}


void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  // TODO(plind): Find some better way to fix this assert issue.
#if defined(__APPLE__)
  DCHECK_EQ(1, kSmiTagMask);
#else
  DCHECK_EQ((int64_t)1, kSmiTagMask);
#endif
  // The AND of both tag bits is 1 only if neither operand is a Smi.
  and_(at, reg1, reg2);
  JumpIfSmi(at, on_either_smi);
}


void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    andi(at, object, kSmiTagMask);
    Check(ne, kOperandIsASmi, at, Operand(zero_reg));
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    andi(at, object, kSmiTagMask);
    Check(eq, kOperandIsNotASmi, at, Operand(zero_reg));
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, a4);
    Check(ne, kOperandIsASmiAndNotAString, a4, Operand(zero_reg));
    push(object);
    ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
    Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
    pop(object);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, a4);
    Check(ne, kOperandIsASmiAndNotAName, a4, Operand(zero_reg));
    push(object);
    ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
    Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
    pop(object);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
    Branch(&done_checking, eq, object, Operand(scratch));
    push(object);
    ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
    LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
    pop(object);
    bind(&done_checking);
  }
}


void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
  if (emit_debug_code()) {
    DCHECK(!reg.is(at));
    LoadRoot(at, index);
    Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
  }
}


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}


void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
                                             Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  ld(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
  // dsra(mask, mask, kSmiTagSize + 1);
  dsra32(mask, mask, 1);
  Daddu(mask, mask, -1);  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;
  JumpIfSmi(object, &is_smi);
  CheckMap(object,
           scratch1,
           Heap::kHeapNumberMapRootIndex,
           not_found,
           DONT_DO_SMI_CHECK);

  STATIC_ASSERT(8 == kDoubleSize);
  Daddu(scratch1,
        object,
        Operand(HeapNumber::kValueOffset - kHeapObjectTag));
  ld(scratch2, MemOperand(scratch1, kPointerSize));
  ld(scratch1, MemOperand(scratch1, 0));
  Xor(scratch1, scratch1, Operand(scratch2));
  And(scratch1, scratch1, Operand(mask));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  dsll(scratch1, scratch1, kPointerSizeLog2 + 1);
  Daddu(scratch1, number_string_cache, scratch1);

  Register probe = mask;
  ld(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
  ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
  BranchF(&load_result_from_cache, NULL, eq, f12, f14);
  Branch(not_found);

  bind(&is_smi);
  Register scratch = scratch1;
  // dsra(scratch, object, 1);  // Shift away the tag.
  dsra32(scratch, object, 0);
  And(scratch, mask, Operand(scratch));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  dsll(scratch, scratch, kPointerSizeLog2 + 1);
  Daddu(scratch, number_string_cache, scratch);

  // Check if the entry is the smi we are looking for.
  ld(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  Branch(not_found, ne, object, Operand(probe));

  // Get the result from the cache.
  bind(&load_result_from_cache);
  ld(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));

  IncrementCounter(isolate()->counters()->number_to_string_native(),
                   1,
                   scratch1,
                   scratch2);
}
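
// Worked example (illustrative): if the cache FixedArray has length 128, the
// smi length untags to 128, halving gives 64 entries and mask == 63. For the
// smi 42 the entry index is 42 & 63 == 42, so the probe reads the pair at
// FixedArray::kHeaderSize + 42 * 2 * kPointerSize.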


void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential one-byte strings.
  // Assume that they are non-smis.
  ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
                                                 scratch2, failure);
}


void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
                                                           Register second,
                                                           Register scratch1,
                                                           Register scratch2,
                                                           Label* failure) {
  // Check that neither is a smi.
  STATIC_ASSERT(kSmiTag == 0);
  And(scratch1, first, Operand(second));
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
                                               scratch2, failure);
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  DCHECK(kFlatOneByteStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
  andi(scratch1, first, kFlatOneByteStringMask);
  Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
  andi(scratch2, second, kFlatOneByteStringMask);
  Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
                                                              Register scratch,
                                                              Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  And(scratch, type, Operand(kFlatOneByteStringMask));
  Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
}


static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;

int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  num_reg_arguments += 2 * num_double_arguments;

  // O32: Up to four simple arguments are passed in registers a0..a3.
  // N64: Up to eight simple arguments are passed in registers a0..a7.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  stack_passed_words += kCArgSlotCount;
  return stack_passed_words;
}
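
// Worked example (illustrative): on N64, three integer arguments and three
// double arguments count as 3 + 2 * 3 = 9 register arguments, one more than
// the eight argument registers, so one word goes on the stack (plus
// kCArgSlotCount, which is zero on N64).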


void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               Register scratch,
                                               uint32_t encoding_mask) {
  SmiTst(string, at);
  Check(ne, kNonObject, at, Operand(zero_reg));

  ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
  lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));

  andi(at, at, kStringRepresentationMask | kStringEncodingMask);
  li(scratch, Operand(encoding_mask));
  Check(eq, kUnexpectedStringType, at, Operand(scratch));

  // TODO(plind): requires Smi size check code for mips32.

  ld(at, FieldMemOperand(string, String::kLengthOffset));
  Check(lt, kIndexIsTooLarge, index, Operand(at));

  DCHECK(Smi::FromInt(0) == 0);
  Check(ge, kIndexIsNegative, index, Operand(zero_reg));
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // n64: Up to eight simple arguments in a0..a3, a4..a7, no argument slots.
  // O32: Up to four simple arguments are passed in registers a0..a3.
  // Those four arguments must have reserved argument slots on the stack for
  // mips, even though those argument slots are not normally used.
  // Both ABIs: Remaining arguments are pushed on the stack, above (higher
  // address than) the (O32) argument slots. (arg slot calculation handled by
  // CalculateStackPassedWords()).
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for the stack-passed
    // arguments and the original value of sp.
    mov(scratch, sp);
    Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));
    sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  li(t8, Operand(function));
  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
  // The argument slots are presumed to have been set up by
  // PrepareCallCFunction. The C function must be called via t9, for mips ABI.

#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
  if (emit_debug_code()) {
    int frame_alignment = base::OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      Label alignment_as_expected;
      And(at, sp, Operand(frame_alignment_mask));
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort possibly
      // re-entering here.
      stop("Unexpected alignment in CallCFunction");
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.

  if (!function.is(t9)) {
    mov(t9, function);
    function = t9;
  }

  Call(function);

  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);

  if (base::OS::ActivationFrameAlignment() > kPointerSize) {
    ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}
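
// Usage sketch (illustrative only; the external reference and the argument
// setup are assumptions for the example): a call passing two integer
// arguments would look like
//
//   PrepareCallCFunction(2, 0, t0);  // t0 used as a scratch register here.
//   // ... move the arguments into a0 and a1 per the C calling convention ...
//   CallCFunction(ExternalReference::some_c_function(isolate()), 2);
//
// PrepareCallCFunction aligns sp and reserves the stack words; CallCFunction
// restores sp afterwards.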


#undef BRANCH_ARGS_CHECK


void MacroAssembler::PatchRelocatedValue(Register li_location,
                                         Register scratch,
                                         Register new_value) {
  lwu(scratch, MemOperand(li_location));
  // At this point scratch is a lui(at, ...) instruction.
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionToPatchShouldBeALui,
        scratch, Operand(LUI));
    lwu(scratch, MemOperand(li_location));
  }
  dsrl32(t9, new_value, 0);
  Ins(scratch, t9, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location));

  lwu(scratch, MemOperand(li_location, kInstrSize));
  // scratch is now ori(at, ...).
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionToPatchShouldBeAnOri,
        scratch, Operand(ORI));
    lwu(scratch, MemOperand(li_location, kInstrSize));
  }
  dsrl(t9, new_value, kImm16Bits);
  Ins(scratch, t9, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location, kInstrSize));

  lwu(scratch, MemOperand(li_location, kInstrSize * 3));
  // scratch is now ori(at, ...).
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionToPatchShouldBeAnOri,
        scratch, Operand(ORI));
    lwu(scratch, MemOperand(li_location, kInstrSize * 3));
  }

  Ins(scratch, new_value, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location, kInstrSize * 3));

  // Update the I-cache so the new lui and ori can be executed.
  FlushICache(li_location, 4);
}
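
// For reference, the shifts and offsets above imply a four-instruction li
// sequence holding a 48-bit value:
//
//   lui(at, bits[47:32])      // word 0, patched
//   ori(at, at, bits[31:16])  // word 1, patched
//   dsll(at, at, 16)          // word 2, untouched
//   ori(at, at, bits[15:0])   // word 3, patched
//
// which is why the code skips word 2 and patches words 0, 1 and 3.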


void MacroAssembler::GetRelocatedValue(Register li_location,
                                       Register value,
                                       Register scratch) {
  lwu(value, MemOperand(li_location));
  if (emit_debug_code()) {
    And(value, value, kOpcodeMask);
    Check(eq, kTheInstructionShouldBeALui,
        value, Operand(LUI));
    lwu(value, MemOperand(li_location));
  }

  // value now holds a lui instruction. Extract the immediate.
  andi(value, value, kImm16Mask);
  dsll32(value, value, kImm16Bits);

  lwu(scratch, MemOperand(li_location, kInstrSize));
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionShouldBeAnOri,
        scratch, Operand(ORI));
    lwu(scratch, MemOperand(li_location, kInstrSize));
  }
  // "scratch" now holds an ori instruction. Extract the immediate.
  andi(scratch, scratch, kImm16Mask);
  dsll32(scratch, scratch, 0);

  or_(value, value, scratch);

  lwu(scratch, MemOperand(li_location, kInstrSize * 3));
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionShouldBeAnOri,
        scratch, Operand(ORI));
    lwu(scratch, MemOperand(li_location, kInstrSize * 3));
  }
  // "scratch" now holds an ori instruction. Extract the immediate.
  andi(scratch, scratch, kImm16Mask);
  dsll(scratch, scratch, kImm16Bits);

  or_(value, value, scratch);
  // Sign extend extracted address.
  dsra(value, value, kImm16Bits);
}


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
  And(scratch, object, Operand(~Page::kPageAlignmentMask));
  ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  And(scratch, scratch, Operand(mask));
  Branch(condition_met, cc, scratch, Operand(zero_reg));
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
}


void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color;
  // Note that we are using a 4-byte aligned 8-byte load.
  Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, t9, Operand(mask_scratch));
  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
  // Shift left 1 by adding.
  Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
  And(t8, t9, Operand(mask_scratch));
  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));

  bind(&other_color);
}
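
// Example (illustrative): JumpIfBlack above asks for (first_bit, second_bit)
// == (1, 0), matching Marking::kBlackBitPattern "10". The first And tests the
// object's low mark bit; if it is clear the object cannot be black and
// control goes to other_color. Doubling mask_scratch moves the mask to the
// second mark bit, which must be clear for black.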


// Detect some, but not all, common pointer-free objects. This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(Register value,
                                      Register scratch,
                                      Label* not_data_object) {
  DCHECK(!AreAliased(value, scratch, t8, no_reg));
  Label is_data_object;
  ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  Branch(&is_data_object, eq, t8, Operand(scratch));
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
  Branch(not_data_object, ne, t8, Operand(zero_reg));
  bind(&is_data_object);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
  // addr_reg is divided into fields:
  // |63     page base     20|19    high    8|7   shift   3|2  0|
  // 'high' gives the index of the cell holding color bits for the object.
  // 'shift' gives the offset in the cell for this object's color.
  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  dsll(t8, t8, Bitmap::kBytesPerCellLog2);
  Daddu(bitmap_reg, bitmap_reg, t8);
  li(t8, Operand(1));
  dsllv(mask_reg, t8, mask_reg);
}
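
// Worked example (illustrative, assuming 8-byte pointers and 32-bit bitmap
// cells): for an address whose low 20 bits are 0x00458, bits [7:3] give
// in-cell bit index 11 and bits [19:8] give cell index 4, so bitmap_reg ends
// up pointing at byte 4 * 4 of the page's bitmap and mask_reg becomes
// 1 << 11.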


void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Register load_scratch,
    Label* value_is_white_and_not_data) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  // Note that we are using a 4-byte aligned 8-byte load.
  Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, mask_scratch, load_scratch);
  Branch(&done, ne, t8, Operand(zero_reg));

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    // sll may overflow, making the check conservative.
    dsll(t8, mask_scratch, 1);
    And(t8, load_scratch, t8);
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Impossible marking bit pattern");
    bind(&ok);
  }

  // Value is white. We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = load_scratch;  // Holds map while checking type.
  Register length = load_scratch;  // Holds length of object after type test.
  Label is_data_object;

  // Check for heap-number.
  ld(map, FieldMemOperand(value, HeapObject::kMapOffset));
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  {
    Label skip;
    Branch(&skip, ne, t8, Operand(map));
    li(length, HeapNumber::kSize);
    Branch(&is_data_object);
    bind(&skip);
  }

  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  And(t8, instance_type, Operand(kExternalStringTag));
  {
    Label skip;
    Branch(&skip, eq, t8, Operand(zero_reg));
    li(length, ExternalString::kSize);
    Branch(&is_data_object);
    bind(&skip);
  }

  // Sequential string, either Latin1 or UC16. The length is loaded already
  // untagged; for UC16 (char-size of 2) it is then doubled below.
  DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
  lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset));
  And(t8, instance_type, Operand(kStringEncodingMask));
  {
    Label skip;
    Branch(&skip, ne, t8, Operand(zero_reg));
    // Adjust length for UC16.
    dsll(t9, t9, 1);
    bind(&skip);
  }
  Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
  DCHECK(!length.is(t8));
  And(length, length, Operand(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Or(t8, t8, Operand(mask_scratch));
  Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
  Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Daddu(t8, t8, Operand(length));
  Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  bind(&done);
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Operand(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  ld(dst,
     FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  ld(dst, FieldMemOperand(dst, offset));
}


void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Register empty_fixed_array_value = a6;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
  mov(a2, a0);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  EnumLength(a3, a1);
  Branch(
      call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));

  jmp(&start);

  bind(&next);
  ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(a3, a1);
  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));

  bind(&start);

  // Check that there are no elements. Register a2 contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
  Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
  Branch(call_runtime, ne, a2, Operand(at));

  bind(&no_elements);
  ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
  Branch(&next, ne, a2, Operand(null_value));
}


void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  DCHECK(!output_reg.is(input_reg));
  Label done;
  li(output_reg, Operand(255));
  // Normal branch: nop in delay slot.
  Branch(&done, gt, input_reg, Operand(output_reg));
  // Use delay slot in this branch.
  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
  mov(output_reg, zero_reg);  // In delay slot.
  mov(output_reg, input_reg);  // Value is in range 0..255.
  bind(&done);
}
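
// Example behaviour (illustrative): an input of 300 takes the first branch
// with output_reg already holding 255; an input of -5 takes the second branch
// and the delay slot zeroes output_reg; an input of 100 falls through and is
// copied unchanged.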


void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DoubleRegister input_reg,
                                        DoubleRegister temp_double_reg) {
  Label above_zero;
  Label done;
  Label in_bounds;

  Move(temp_double_reg, 0.0);
  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);

  // Double value is less than zero or NaN, return 0.
  mov(result_reg, zero_reg);
  Branch(&done);

  // Double value is >= 255, return 255.
  bind(&above_zero);
  Move(temp_double_reg, 255.0);
  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
  li(result_reg, Operand(255));
  Branch(&done);

  // In 0-255 range, round and truncate.
  bind(&in_bounds);
  cvt_w_d(temp_double_reg, input_reg);
  mfc1(result_reg, temp_double_reg);
  bind(&done);
}
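
// Note (illustrative): cvt_w_d converts according to the current FCSR
// rounding mode, round-to-nearest by default, so e.g. 254.6 clamps to 255
// and 0.4 to 0 on this path.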


void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found,
    Condition cond,
    Label* allocation_memento_present) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  Daddu(scratch_reg, receiver_reg,
        Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
  li(at, Operand(new_space_allocation_top));
  ld(at, MemOperand(at));
  Branch(no_memento_found, gt, scratch_reg, Operand(at));
  ld(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
  if (allocation_memento_present) {
    Branch(allocation_memento_present, cond, scratch_reg,
           Operand(isolate()->factory()->allocation_memento_map()));
  }
}


Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2,
                                   Register reg3,
                                   Register reg4,
                                   Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // Scratch contained elements pointer.
  Move(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
  ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&loop_again, ne, current, Operand(factory->null_value()));
}


bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6,
                Register reg7,
                Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
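
// Example (illustrative): AreAliased(a0, a1, a0) counts three valid registers
// but only two distinct bits in the RegList, so it returns true; trailing
// no_reg arguments are ignored by both counts.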


CodePatcher::CodePatcher(byte* address,
                         int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(NULL, address, size_ + Assembler::kGap),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    CpuFeatures::FlushICache(address_, size_);
  }
  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  // masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::ChangeBranchCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  DCHECK(Assembler::IsBranch(instr));
  uint32_t opcode = Assembler::GetOpcodeField(instr);
  // Currently only the 'eq' and 'ne' cond values are supported and the simple
  // branch instructions (with opcode being the branch type).
  // There are some special cases (see Assembler::IsBranch()) so extending this
  // would be tricky.
  DCHECK(opcode == BEQ ||
         opcode == BNE ||
         opcode == BLEZ ||
         opcode == BGTZ ||
         opcode == BEQL ||
         opcode == BNEL ||
         opcode == BLEZL ||
         opcode == BGTZL);
  opcode = (cond == eq) ? BEQ : BNE;
  instr = (instr & ~kOpcodeMask) | opcode;
  masm_.emit(instr);
}


void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(at));
  DCHECK(!result.is(at));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  li(at, Operand(static_cast<int32_t>(mag.multiplier)));
  Mulh(result, dividend, Operand(at));
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) {
    Addu(result, result, Operand(dividend));
  }
  if (divisor < 0 && !neg && mag.multiplier > 0) {
    Subu(result, result, Operand(dividend));
  }
  if (mag.shift > 0) sra(result, result, mag.shift);
  srl(at, dividend, 31);
  Addu(result, result, Operand(at));
}
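
// Worked example (illustrative): for divisor == 3 the magic multiplier is
// 0x55555556 with shift 0. For dividend == 7, the high 32 bits of
// 7 * 0x55555556 are 2, and adding the sign bit (0) leaves 7 / 3 == 2. For
// dividend == -7 the high word is -3, and adding the sign bit (1) yields the
// truncated quotient -2.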


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS64