1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
7 #if V8_TARGET_ARCH_MIPS64
9 #include "src/base/division-by-constant.h"
10 #include "src/bootstrapper.h"
11 #include "src/codegen.h"
12 #include "src/cpu-profiler.h"
13 #include "src/debug/debug.h"
14 #include "src/mips64/macro-assembler-mips64.h"
15 #include "src/runtime/runtime.h"
20 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
21 : Assembler(arg_isolate, buffer, size),
22 generating_stub_(false),
24 has_double_zero_reg_set_(false) {
25 if (isolate() != NULL) {
26 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
32 void MacroAssembler::Load(Register dst,
33 const MemOperand& src,
35 DCHECK(!r.IsDouble());
38 } else if (r.IsUInteger8()) {
40 } else if (r.IsInteger16()) {
42 } else if (r.IsUInteger16()) {
44 } else if (r.IsInteger32()) {
52 void MacroAssembler::Store(Register src,
53 const MemOperand& dst,
55 DCHECK(!r.IsDouble());
56 if (r.IsInteger8() || r.IsUInteger8()) {
58 } else if (r.IsInteger16() || r.IsUInteger16()) {
60 } else if (r.IsInteger32()) {
63 if (r.IsHeapObject()) {
65 } else if (r.IsSmi()) {
73 void MacroAssembler::LoadRoot(Register destination,
74 Heap::RootListIndex index) {
75 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
79 void MacroAssembler::LoadRoot(Register destination,
80 Heap::RootListIndex index,
82 Register src1, const Operand& src2) {
83 Branch(2, NegateCondition(cond), src1, src2);
84 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
88 void MacroAssembler::StoreRoot(Register source,
89 Heap::RootListIndex index) {
90 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
91 sd(source, MemOperand(s6, index << kPointerSizeLog2));
95 void MacroAssembler::StoreRoot(Register source,
96 Heap::RootListIndex index,
98 Register src1, const Operand& src2) {
99 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
100 Branch(2, NegateCondition(cond), src1, src2);
101 sd(source, MemOperand(s6, index << kPointerSizeLog2));
105 // Push and pop all registers that can hold pointers.
106 void MacroAssembler::PushSafepointRegisters() {
107 // Safepoints expect a block of kNumSafepointRegisters values on the
108 // stack, so adjust the stack for unsaved registers.
109 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
110 DCHECK(num_unsaved >= 0);
111 if (num_unsaved > 0) {
112 Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
114 MultiPush(kSafepointSavedRegisters);
118 void MacroAssembler::PopSafepointRegisters() {
119 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
120 MultiPop(kSafepointSavedRegisters);
121 if (num_unsaved > 0) {
122 Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
127 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
128 sd(src, SafepointRegisterSlot(dst));
132 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
133 ld(dst, SafepointRegisterSlot(src));
137 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
138 // The registers are pushed starting with the highest encoding,
139 // which means that lowest encodings are closest to the stack pointer.
140 return kSafepointRegisterStackIndexMap[reg_code];
144 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
145 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
149 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
150 UNIMPLEMENTED_MIPS();
151 // General purpose registers are pushed last on the stack.
152 int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
153 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
154 return MemOperand(sp, doubles_size + register_offset);
158 void MacroAssembler::InNewSpace(Register object,
162 DCHECK(cc == eq || cc == ne);
163 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
164 Branch(branch, cc, scratch,
165 Operand(ExternalReference::new_space_start(isolate())));
169 // Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
170 // The register 'object' contains a heap object pointer. The heap object
171 // tag is shifted away.
172 void MacroAssembler::RecordWriteField(
178 SaveFPRegsMode save_fp,
179 RememberedSetAction remembered_set_action,
181 PointersToHereCheck pointers_to_here_check_for_value) {
182 DCHECK(!AreAliased(value, dst, t8, object));
183 // First, check if a write barrier is even needed. The tests below
184 // catch stores of Smis.
187 // Skip barrier if writing a smi.
188 if (smi_check == INLINE_SMI_CHECK) {
189 JumpIfSmi(value, &done);
192 // Although the object register is tagged, the offset is relative to the start
193 // of the object, so the offset must be a multiple of kPointerSize.
194 DCHECK(IsAligned(offset, kPointerSize));
196 Daddu(dst, object, Operand(offset - kHeapObjectTag));
197 if (emit_debug_code()) {
199 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
200 Branch(&ok, eq, t8, Operand(zero_reg));
201 stop("Unaligned cell in write barrier");
210 remembered_set_action,
212 pointers_to_here_check_for_value);
216 // Clobber clobbered input registers when running with the debug-code flag
217 // turned on to provoke errors.
218 if (emit_debug_code()) {
219 li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
220 li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
225 // Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
226 void MacroAssembler::RecordWriteForMap(Register object,
230 SaveFPRegsMode fp_mode) {
231 if (emit_debug_code()) {
233 ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
235 kWrongAddressOrValuePassedToRecordWrite,
237 Operand(isolate()->factory()->meta_map()));
240 if (!FLAG_incremental_marking) {
244 if (emit_debug_code()) {
245 ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
247 kWrongAddressOrValuePassedToRecordWrite,
254 // A single check of the map's page's interesting flag suffices, since it is
255 // only set during incremental collection, and then it's also guaranteed that
256 // the from object's page's interesting flag is also set. This optimization
257 // relies on the fact that maps can never be in new space.
259 map, // Used as scratch.
260 MemoryChunk::kPointersToHereAreInterestingMask,
264 Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
265 if (emit_debug_code()) {
267 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
268 Branch(&ok, eq, at, Operand(zero_reg));
269 stop("Unaligned cell in write barrier");
273 // Record the actual write.
274 if (ra_status == kRAHasNotBeenSaved) {
277 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
280 if (ra_status == kRAHasNotBeenSaved) {
286 // Count number of write barriers in generated code.
287 isolate()->counters()->write_barriers_static()->Increment();
288 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
290 // Clobber clobbered registers when running with the debug-code flag
291 // turned on to provoke errors.
292 if (emit_debug_code()) {
293 li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
294 li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
299 // Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
300 // The register 'object' contains a heap object pointer. The heap object
301 // tag is shifted away.
302 void MacroAssembler::RecordWrite(
307 SaveFPRegsMode fp_mode,
308 RememberedSetAction remembered_set_action,
310 PointersToHereCheck pointers_to_here_check_for_value) {
311 DCHECK(!AreAliased(object, address, value, t8));
312 DCHECK(!AreAliased(object, address, value, t9));
314 if (emit_debug_code()) {
315 ld(at, MemOperand(address));
317 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
320 if (remembered_set_action == OMIT_REMEMBERED_SET &&
321 !FLAG_incremental_marking) {
325 // First, check if a write barrier is even needed. The tests below
326 // catch stores of smis and stores into the young generation.
329 if (smi_check == INLINE_SMI_CHECK) {
330 DCHECK_EQ(0, kSmiTag);
331 JumpIfSmi(value, &done);
334 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
336 value, // Used as scratch.
337 MemoryChunk::kPointersToHereAreInterestingMask,
341 CheckPageFlag(object,
342 value, // Used as scratch.
343 MemoryChunk::kPointersFromHereAreInterestingMask,
347 // Record the actual write.
348 if (ra_status == kRAHasNotBeenSaved) {
351 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
354 if (ra_status == kRAHasNotBeenSaved) {
360 // Count number of write barriers in generated code.
361 isolate()->counters()->write_barriers_static()->Increment();
362 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
365 // Clobber clobbered registers when running with the debug-code flag
366 // turned on to provoke errors.
367 if (emit_debug_code()) {
368 li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
369 li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
374 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
377 SaveFPRegsMode fp_mode,
378 RememberedSetFinalAction and_then) {
380 if (emit_debug_code()) {
382 JumpIfNotInNewSpace(object, scratch, &ok);
383 stop("Remembered set pointer is in new space");
386 // Load store buffer top.
387 ExternalReference store_buffer =
388 ExternalReference::store_buffer_top(isolate());
389 li(t8, Operand(store_buffer));
390 ld(scratch, MemOperand(t8));
391 // Store pointer to buffer and increment buffer top.
392 sd(address, MemOperand(scratch));
393 Daddu(scratch, scratch, kPointerSize);
394 // Write back new top of buffer.
395 sd(scratch, MemOperand(t8));
396 // Check for end of buffer.
397 // The overflow stub is called below when the end of the buffer is reached.
398 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
399 DCHECK(!scratch.is(t8));
400 if (and_then == kFallThroughAtEnd) {
401 Branch(&done, eq, t8, Operand(zero_reg));
403 DCHECK(and_then == kReturnAtEnd);
404 Ret(eq, t8, Operand(zero_reg));
407 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
408 CallStub(&store_buffer_overflow);
411 if (and_then == kReturnAtEnd) {
417 // -----------------------------------------------------------------------------
418 // Allocation support.
421 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
426 DCHECK(!holder_reg.is(scratch));
427 DCHECK(!holder_reg.is(at));
428 DCHECK(!scratch.is(at));
430 // Load current lexical context from the stack frame.
431 ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
432 // In debug mode, make sure the lexical context is set.
434 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
435 scratch, Operand(zero_reg));
438 // Load the native context of the current context.
440 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
441 ld(scratch, FieldMemOperand(scratch, offset));
442 ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
444 // Check the context is a native context.
445 if (emit_debug_code()) {
446 push(holder_reg); // Temporarily save holder on the stack.
447 // Read the first word and compare to the native_context_map.
448 ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
449 LoadRoot(at, Heap::kNativeContextMapRootIndex);
450 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
451 holder_reg, Operand(at));
452 pop(holder_reg); // Restore holder.
455 // Check if both contexts are the same.
456 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
457 Branch(&same_contexts, eq, scratch, Operand(at));
459 // Check the context is a native context.
460 if (emit_debug_code()) {
461 push(holder_reg); // Temporarily save holder on the stack.
462 mov(holder_reg, at); // Move at to its holding place.
463 LoadRoot(at, Heap::kNullValueRootIndex);
464 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
465 holder_reg, Operand(at));
467 ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
468 LoadRoot(at, Heap::kNativeContextMapRootIndex);
469 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
470 holder_reg, Operand(at));
471 // Restoring 'at' is not needed; 'at' is reloaded below.
472 pop(holder_reg); // Restore holder.
473 // Restore at to holder's context.
474 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
477 // Check that the security token in the calling global object is
478 // compatible with the security token in the receiving global object.
480 int token_offset = Context::kHeaderSize +
481 Context::SECURITY_TOKEN_INDEX * kPointerSize;
483 ld(scratch, FieldMemOperand(scratch, token_offset));
484 ld(at, FieldMemOperand(at, token_offset));
485 Branch(miss, ne, scratch, Operand(at));
487 bind(&same_contexts);
491 // Compute the hash code from the untagged key. This must be kept in sync with
492 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
493 // code-stubs-hydrogen.cc
494 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
495 // First of all we assign the hash seed to scratch.
496 LoadRoot(scratch, Heap::kHashSeedRootIndex);
499 // Xor original key with a seed.
500 xor_(reg0, reg0, scratch);
502 // Compute the hash code from the untagged key. This must be kept in sync
503 // with ComputeIntegerHash in utils.h.
505 // hash = ~hash + (hash << 15);
506 // The algorithm uses 32-bit integer values.
507 nor(scratch, reg0, zero_reg);
509 addu(reg0, scratch, at);
511 // hash = hash ^ (hash >> 12);
513 xor_(reg0, reg0, at);
515 // hash = hash + (hash << 2);
517 addu(reg0, reg0, at);
519 // hash = hash ^ (hash >> 4);
521 xor_(reg0, reg0, at);
523 // hash = hash * 2057;
524 sll(scratch, reg0, 11);
526 addu(reg0, reg0, at);
527 addu(reg0, reg0, scratch);
529 // hash = hash ^ (hash >> 16);
531 xor_(reg0, reg0, at);
532 And(reg0, reg0, Operand(0x3fffffff));
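
// A minimal host-side sketch (not part of the original file) of the hash
// computed by the instruction sequence above; it can serve as a cross-check
// against ComputeIntegerHash in utils.h. The seed handling (a plain XOR)
// mirrors the assembly above; the function name is illustrative only.
static inline uint32_t IntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;   // Xor original key with a seed.
  hash = ~hash + (hash << 15);  // hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);   // hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);    // hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);    // hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // i.e. hash + (hash << 3) + (hash << 11).
  hash = hash ^ (hash >> 16);   // hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;     // Keep the low 30 bits, as the code above does.
}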
536 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
545 // elements - holds the slow-case elements of the receiver on entry.
546 // Unchanged unless 'result' is the same register.
548 // key - holds the smi key on entry.
549 // Unchanged unless 'result' is the same register.
552 // result - holds the result on exit if the load succeeded.
553 // Allowed to be the same as 'elements' or 'key'.
554 // Unchanged on bailout so 'elements' or 'key' can be used
555 // in further computation.
557 // Scratch registers:
559 // reg0 - holds the untagged key on entry and holds the hash once computed.
561 // reg1 - Used to hold the capacity mask of the dictionary.
563 // reg2 - Used for the index into the dictionary.
564 // at - Temporary (avoid MacroAssembler instructions also using 'at').
567 GetNumberHash(reg0, reg1);
569 // Compute the capacity mask.
570 ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
571 SmiUntag(reg1, reg1);
572 Dsubu(reg1, reg1, Operand(1));
574 // Generate an unrolled loop that performs a few probes before giving up.
575 for (int i = 0; i < kNumberDictionaryProbes; i++) {
576 // Use reg2 for index calculations and keep the hash intact in reg0.
578 // Compute the masked index: (hash + i + i * i) & mask.
580 Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
582 and_(reg2, reg2, reg1);
584 // Scale the index by multiplying by the element size.
585 DCHECK(SeededNumberDictionary::kEntrySize == 3);
586 dsll(at, reg2, 1); // 2x.
587 daddu(reg2, reg2, at); // reg2 = reg2 * 3.
589 // Check if the key is identical to the name.
590 dsll(at, reg2, kPointerSizeLog2);
591 daddu(reg2, elements, at);
593 ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
594 if (i != kNumberDictionaryProbes - 1) {
595 Branch(&done, eq, key, Operand(at));
597 Branch(miss, ne, key, Operand(at));
602 // Check that the value is a field property.
603 // reg2: elements + (index * kPointerSize).
604 const int kDetailsOffset =
605 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
606 ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
608 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
609 Branch(miss, ne, at, Operand(zero_reg));
611 // Get the value at the masked, scaled index and return.
612 const int kValueOffset =
613 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
614 ld(result, FieldMemOperand(reg2, kValueOffset));
618 // ---------------------------------------------------------------------------
619 // Instruction macros.
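// The arithmetic and logic macros below share one pattern: if the right
// operand is a register, emit the register form directly; if it is an
// immediate that fits the 16-bit instruction field and needs no relocation,
// emit the immediate form; otherwise li materializes the immediate into a
// scratch register (the "li handles the relocation" comments below) and the
// register form is used.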
621 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
623 addu(rd, rs, rt.rm());
625 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
626 addiu(rd, rs, static_cast<int32_t>(rt.imm64_));
628 // li handles the relocation.
637 void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
639 daddu(rd, rs, rt.rm());
641 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
642 daddiu(rd, rs, static_cast<int32_t>(rt.imm64_));
644 // li handles the relocation.
653 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
655 subu(rd, rs, rt.rm());
657 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
658 addiu(rd, rs, static_cast<int32_t>(
659 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
661 // li handles the relocation.
670 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
672 dsubu(rd, rs, rt.rm());
674 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
676 static_cast<int32_t>(
677 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
679 // li handles the relocation.
688 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
690 mul(rd, rs, rt.rm());
692 // li handles the relocation.
700 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
702 if (kArchVariant != kMips64r6) {
706 muh(rd, rs, rt.rm());
709 // li handles the relocation.
712 if (kArchVariant != kMips64r6) {
722 void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
724 if (kArchVariant != kMips64r6) {
728 muhu(rd, rs, rt.rm());
731 // li handles the relocation.
734 if (kArchVariant != kMips64r6) {
744 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
746 if (kArchVariant == kMips64r6) {
747 dmul(rd, rs, rt.rm());
753 // li handles the relocation.
756 if (kArchVariant == kMips64r6) {
766 void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
768 if (kArchVariant == kMips64r6) {
769 dmuh(rd, rs, rt.rm());
775 // li handles the relocation.
778 if (kArchVariant == kMips64r6) {
788 void MacroAssembler::Mult(Register rs, const Operand& rt) {
792 // li handles the relocation.
800 void MacroAssembler::Dmult(Register rs, const Operand& rt) {
804 // li handles the relocation.
812 void MacroAssembler::Multu(Register rs, const Operand& rt) {
816 // li handles the relocation.
824 void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
828 // li handles the relocation.
836 void MacroAssembler::Div(Register rs, const Operand& rt) {
840 // li handles the relocation.
848 void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
850 if (kArchVariant != kMips64r6) {
854 div(res, rs, rt.rm());
857 // li handles the relocation.
860 if (kArchVariant != kMips64r6) {
870 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
872 if (kArchVariant != kMips64r6) {
876 mod(rd, rs, rt.rm());
879 // li handles the relocation.
882 if (kArchVariant != kMips64r6) {
892 void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
894 if (kArchVariant != kMips64r6) {
898 modu(rd, rs, rt.rm());
901 // li handles the relocation.
904 if (kArchVariant != kMips64r6) {
914 void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
918 // li handles the relocation.
926 void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
927 if (kArchVariant != kMips64r6) {
932 // li handles the relocation.
940 ddiv(rd, rs, rt.rm());
942 // li handles the relocation.
951 void MacroAssembler::Divu(Register rs, const Operand& rt) {
955 // li handles the relocation.
963 void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
965 if (kArchVariant != kMips64r6) {
969 divu(res, rs, rt.rm());
972 // li handles the relocation.
975 if (kArchVariant != kMips64r6) {
985 void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
989 // li handles the relocation.
997 void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
999 if (kArchVariant != kMips64r6) {
1003 ddivu(res, rs, rt.rm());
1006 // li handles the relocation.
1009 if (kArchVariant != kMips64r6) {
1019 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
1020 if (kArchVariant != kMips64r6) {
1025 // li handles the relocation.
1033 dmod(rd, rs, rt.rm());
1035 // li handles the relocation.
1044 void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
1045 if (kArchVariant != kMips64r6) {
1050 // li handles the relocation.
1058 dmodu(rd, rs, rt.rm());
1060 // li handles the relocation.
1069 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
1071 and_(rd, rs, rt.rm());
1073 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1074 andi(rd, rs, static_cast<int32_t>(rt.imm64_));
1076 // li handles the relocation.
1085 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
1087 or_(rd, rs, rt.rm());
1089 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1090 ori(rd, rs, static_cast<int32_t>(rt.imm64_));
1092 // li handles the relocation.
1101 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
1103 xor_(rd, rs, rt.rm());
1105 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1106 xori(rd, rs, static_cast<int32_t>(rt.imm64_));
1108 // li handles the relocation.
1117 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
1119 nor(rd, rs, rt.rm());
1121 // li handles the relocation.
1129 void MacroAssembler::Neg(Register rs, const Operand& rt) {
1130 DCHECK(rt.is_reg());
1132 DCHECK(!at.is(rt.rm()));
1134 xor_(rs, rt.rm(), at);
1138 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
1140 slt(rd, rs, rt.rm());
1142 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1143 slti(rd, rs, static_cast<int32_t>(rt.imm64_));
1145 // li handles the relocation.
1154 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1156 sltu(rd, rs, rt.rm());
1158 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1159 sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
1161 // li handles the relocation.
1170 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1172 rotrv(rd, rs, rt.rm());
1174 rotr(rd, rs, rt.imm64_);
1179 void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
1181 drotrv(rd, rs, rt.rm());
1183 drotr(rd, rs, rt.imm64_);
1188 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1193 // ------------Pseudo-instructions-------------
1195 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1197 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1201 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1203 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1207 // Do a 64-bit load from an unaligned address. Note this only handles the
1208 // specific case of an address that is 32-bit aligned but not 64-bit aligned.
1209 void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
1210 // The DCHECK below fails if the offset from the start of the object IS aligned.
1211 // ONLY use with known misalignment, since there is a performance cost.
1212 DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1213 // TODO(plind): endian dependency.
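// Load the word at offset + 4, shift it into the upper 32 bits with dsll32,
// and merge it into rd with Daddu.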
1215 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1216 dsll32(scratch, scratch, 0);
1217 Daddu(rd, rd, scratch);
1221 // Do a 64-bit store to an unaligned address. Note this only handles the
1222 // specific case of an address that is 32-bit aligned but not 64-bit aligned.
1223 void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
1224 // The DCHECK below fails if the offset from the start of the object IS aligned.
1225 // ONLY use with known misalignment, since there is a performance cost.
1226 DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1227 // TODO(plind): endian dependency.
1229 dsrl32(scratch, rd, 0);
1230 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1234 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1235 AllowDeferredHandleDereference smi_check;
1236 if (value->IsSmi()) {
1237 li(dst, Operand(value), mode);
1239 DCHECK(value->IsHeapObject());
1240 if (isolate()->heap()->InNewSpace(*value)) {
1241 Handle<Cell> cell = isolate()->factory()->NewCell(value);
1242 li(dst, Operand(cell));
1243 ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
1245 li(dst, Operand(value));
1251 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1252 DCHECK(!j.is_reg());
1253 BlockTrampolinePoolScope block_trampoline_pool(this);
1254 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1255 // Normal load of an immediate value which does not need Relocation Info.
1256 if (is_int32(j.imm64_)) {
1257 if (is_int16(j.imm64_)) {
1258 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
1259 } else if (!(j.imm64_ & kHiMask)) {
1260 ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
1261 } else if (!(j.imm64_ & kImm16Mask)) {
1262 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1264 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1265 ori(rd, rd, (j.imm64_ & kImm16Mask));
1268 if (is_int48(j.imm64_)) {
1269 if ((j.imm64_ >> 32) & kImm16Mask) {
1270 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1271 if ((j.imm64_ >> 16) & kImm16Mask) {
1272 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1275 ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
1278 if (j.imm64_ & kImm16Mask) {
1279 ori(rd, rd, j.imm64_ & kImm16Mask);
1282 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1283 if ((j.imm64_ >> 32) & kImm16Mask) {
1284 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1286 if ((j.imm64_ >> 16) & kImm16Mask) {
1288 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1289 if (j.imm64_ & kImm16Mask) {
1291 ori(rd, rd, j.imm64_ & kImm16Mask);
1296 if (j.imm64_ & kImm16Mask) {
1298 ori(rd, rd, j.imm64_ & kImm16Mask);
1305 } else if (MustUseReg(j.rmode_)) {
1306 RecordRelocInfo(j.rmode_, j.imm64_);
1307 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1308 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1310 ori(rd, rd, j.imm64_ & kImm16Mask);
1311 } else if (mode == ADDRESS_LOAD) {
1312 // Always emit the same number of instructions, since this code may later be
1313 // patched to load another value that needs all 4 instructions.
1314 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1315 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1317 ori(rd, rd, j.imm64_ & kImm16Mask);
1319 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1320 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1322 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1324 ori(rd, rd, j.imm64_ & kImm16Mask);
1329 void MacroAssembler::MultiPush(RegList regs) {
1330 int16_t num_to_push = NumberOfBitsSet(regs);
1331 int16_t stack_offset = num_to_push * kPointerSize;
1333 Dsubu(sp, sp, Operand(stack_offset));
1334 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1335 if ((regs & (1 << i)) != 0) {
1336 stack_offset -= kPointerSize;
1337 sd(ToRegister(i), MemOperand(sp, stack_offset));
1343 void MacroAssembler::MultiPushReversed(RegList regs) {
1344 int16_t num_to_push = NumberOfBitsSet(regs);
1345 int16_t stack_offset = num_to_push * kPointerSize;
1347 Dsubu(sp, sp, Operand(stack_offset));
1348 for (int16_t i = 0; i < kNumRegisters; i++) {
1349 if ((regs & (1 << i)) != 0) {
1350 stack_offset -= kPointerSize;
1351 sd(ToRegister(i), MemOperand(sp, stack_offset));
1357 void MacroAssembler::MultiPop(RegList regs) {
1358 int16_t stack_offset = 0;
1360 for (int16_t i = 0; i < kNumRegisters; i++) {
1361 if ((regs & (1 << i)) != 0) {
1362 ld(ToRegister(i), MemOperand(sp, stack_offset));
1363 stack_offset += kPointerSize;
1366 daddiu(sp, sp, stack_offset);
1370 void MacroAssembler::MultiPopReversed(RegList regs) {
1371 int16_t stack_offset = 0;
1373 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1374 if ((regs & (1 << i)) != 0) {
1375 ld(ToRegister(i), MemOperand(sp, stack_offset));
1376 stack_offset += kPointerSize;
1379 daddiu(sp, sp, stack_offset);
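
// Illustrative pairing (not from the original file): a RegList is a bit mask
// of register codes, so a push and its matching pop must use the same mask,
// e.g.
//   RegList regs = a0.bit() | a1.bit() | ra.bit();
//   MultiPush(regs);
//   ...
//   MultiPop(regs);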
1383 void MacroAssembler::MultiPushFPU(RegList regs) {
1384 int16_t num_to_push = NumberOfBitsSet(regs);
1385 int16_t stack_offset = num_to_push * kDoubleSize;
1387 Dsubu(sp, sp, Operand(stack_offset));
1388 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1389 if ((regs & (1 << i)) != 0) {
1390 stack_offset -= kDoubleSize;
1391 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1397 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1398 int16_t num_to_push = NumberOfBitsSet(regs);
1399 int16_t stack_offset = num_to_push * kDoubleSize;
1401 Dsubu(sp, sp, Operand(stack_offset));
1402 for (int16_t i = 0; i < kNumRegisters; i++) {
1403 if ((regs & (1 << i)) != 0) {
1404 stack_offset -= kDoubleSize;
1405 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1411 void MacroAssembler::MultiPopFPU(RegList regs) {
1412 int16_t stack_offset = 0;
1414 for (int16_t i = 0; i < kNumRegisters; i++) {
1415 if ((regs & (1 << i)) != 0) {
1416 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1417 stack_offset += kDoubleSize;
1420 daddiu(sp, sp, stack_offset);
1424 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1425 int16_t stack_offset = 0;
1427 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1428 if ((regs & (1 << i)) != 0) {
1429 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1430 stack_offset += kDoubleSize;
1433 daddiu(sp, sp, stack_offset);
1437 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
1438 RegList saved_regs = kJSCallerSaved | ra.bit();
1439 MultiPush(saved_regs);
1440 AllowExternalCallThatCantCauseGC scope(this);
1442 // Save to a0 in case address == a4.
1444 PrepareCallCFunction(2, a4);
1446 li(a1, instructions * kInstrSize);
1447 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
1448 MultiPop(saved_regs);
1452 void MacroAssembler::Ext(Register rt,
1457 DCHECK(pos + size < 33);
1458 ext_(rt, rs, pos, size);
1462 void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
1465 DCHECK(pos + size < 33);
1466 dext_(rt, rs, pos, size);
1470 void MacroAssembler::Ins(Register rt,
1475 DCHECK(pos + size <= 32);
1477 ins_(rt, rs, pos, size);
1481 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1483 FPURegister scratch) {
1484 // Move the data from fs to t8.
1486 Cvt_d_uw(fd, t8, scratch);
1490 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1492 FPURegister scratch) {
1493 // Convert rs to a FP value in fd (and fd + 1).
1494 // We do this by converting rs minus the MSB to avoid sign conversion,
1495 // then adding 2^31 to the result (if needed).
1497 DCHECK(!fd.is(scratch));
1501 // Save rs's MSB to t9.
1505 // Move the result to fd.
1507 mthc1(zero_reg, fd);
1509 // Convert fd to a real FP value.
1512 Label conversion_done;
1514 // If rs's MSB was 0, it's done.
1515 // Otherwise we need to add that to the FP register.
1516 Branch(&conversion_done, eq, t9, Operand(zero_reg));
1518 // Load 2^31 into scratch as its float representation.
1520 mtc1(zero_reg, scratch);
1523 add_d(fd, fd, scratch);
1525 bind(&conversion_done);
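
// Host-side sketch of the conversion trick above (an illustration, not part
// of the original file):
//   double Cvt_d_uw(uint32_t u) {
//     double d = static_cast<double>(static_cast<int32_t>(u & 0x7fffffff));
//     if (u & 0x80000000u) d += 2147483648.0;  // Add 2^31 back if the MSB was set.
//     return d;
//   }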
1529 void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
1534 void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
1539 void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
1544 void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
1549 void MacroAssembler::Trunc_l_ud(FPURegister fd,
1551 FPURegister scratch) {
1555 li(at, 0x7fffffffffffffff);
1562 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1564 FPURegister scratch) {
1565 Trunc_uw_d(fs, t8, scratch);
1570 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1575 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1580 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1585 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1590 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1592 FPURegister scratch) {
1593 DCHECK(!fd.is(scratch));
1596 // Load 2^31 into scratch as its float representation.
1598 mtc1(zero_reg, scratch);
1600 // Test if scratch > fd.
1601 // If fd < 2^31 we can convert it normally.
1602 Label simple_convert;
1603 BranchF(&simple_convert, NULL, lt, fd, scratch);
1605 // First we subtract 2^31 from fd, then trunc it to rs
1606 // and add 2^31 to rs.
1607 sub_d(scratch, fd, scratch);
1608 trunc_w_d(scratch, scratch);
1610 Or(rs, rs, 1 << 31);
1614 // Simple conversion.
1615 bind(&simple_convert);
1616 trunc_w_d(scratch, fd);
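
// Host-side sketch of the truncation trick above (an illustration, not part
// of the original file):
//   uint32_t Trunc_uw_d(double d) {
//     if (d < 2147483648.0) return static_cast<uint32_t>(static_cast<int32_t>(d));
//     return static_cast<uint32_t>(static_cast<int32_t>(d - 2147483648.0)) | (1u << 31);
//   }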
1623 void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
1624 FPURegister ft, FPURegister scratch) {
1625 if (0) { // TODO(plind): find reasonable arch-variant symbol names.
1626 madd_d(fd, fr, fs, ft);
1628 // Must not change the source registers' values.
1629 DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
1630 mul_d(scratch, fs, ft);
1631 add_d(fd, fr, scratch);
1636 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
1637 Label* nan, Condition cond, FPURegister cmp1,
1638 FPURegister cmp2, BranchDelaySlot bd) {
1639 BlockTrampolinePoolScope block_trampoline_pool(this);
1645 if (kArchVariant == kMips64r6) {
1646 sizeField = sizeField == D ? L : W;
1649 DCHECK(nan || target);
1650 // Check for unordered (NaN) cases.
1652 bool long_branch = nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
1653 if (kArchVariant != kMips64r6) {
1656 c(UN, D, cmp1, cmp2);
1662 c(UN, D, cmp1, cmp2);
1664 if (bd == PROTECT) {
1669 // Use kDoubleCompareReg for comparison result. It has to be unavailable
1671 // to the register allocator.
1672 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1675 cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
1676 bc1eqz(&skip, kDoubleCompareReg);
1681 cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
1682 bc1nez(nan, kDoubleCompareReg);
1683 if (bd == PROTECT) {
1692 target->is_bound() ? is_near(target) : is_trampoline_emitted();
1695 Condition neg_cond = NegateFpuCondition(cond);
1696 BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
1700 BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
1706 void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
1707 Condition cc, FPURegister cmp1,
1708 FPURegister cmp2, BranchDelaySlot bd) {
1709 if (kArchVariant != kMips64r6) {
1710 BlockTrampolinePoolScope block_trampoline_pool(this);
1712 // Here NaN cases were either handled by this function or are assumed to
1713 // have been handled by the caller.
1716 c(OLT, sizeField, cmp1, cmp2);
1720 c(ULT, sizeField, cmp1, cmp2);
1724 c(ULE, sizeField, cmp1, cmp2);
1728 c(OLE, sizeField, cmp1, cmp2);
1732 c(ULT, sizeField, cmp1, cmp2);
1736 c(OLT, sizeField, cmp1, cmp2);
1740 c(OLE, sizeField, cmp1, cmp2);
1744 c(ULE, sizeField, cmp1, cmp2);
1748 c(EQ, sizeField, cmp1, cmp2);
1752 c(UEQ, sizeField, cmp1, cmp2);
1755 case ne: // Unordered or not equal.
1756 c(EQ, sizeField, cmp1, cmp2);
1760 c(UEQ, sizeField, cmp1, cmp2);
1768 BlockTrampolinePoolScope block_trampoline_pool(this);
1770 // Here NaN cases were either handled by this function or are assumed to
1771 // have been handled by the caller.
1772 // Unsigned conditions are treated as their signed counterpart.
1773 // Use kDoubleCompareReg for comparison result; it is valid in fp64 (FR = 1) mode.
1775 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1778 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1779 bc1nez(target, kDoubleCompareReg);
1782 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1783 bc1nez(target, kDoubleCompareReg);
1786 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1787 bc1eqz(target, kDoubleCompareReg);
1790 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1791 bc1eqz(target, kDoubleCompareReg);
1794 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1795 bc1eqz(target, kDoubleCompareReg);
1798 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
1799 bc1eqz(target, kDoubleCompareReg);
1802 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1803 bc1nez(target, kDoubleCompareReg);
1806 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
1807 bc1nez(target, kDoubleCompareReg);
1810 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1811 bc1nez(target, kDoubleCompareReg);
1814 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1815 bc1nez(target, kDoubleCompareReg);
1818 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1819 bc1eqz(target, kDoubleCompareReg);
1822 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
1823 bc1eqz(target, kDoubleCompareReg);
1831 if (bd == PROTECT) {
1837 void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
1838 DCHECK(!src_low.is(at));
1845 void MacroAssembler::Move(FPURegister dst, float imm) {
1846 li(at, Operand(bit_cast<int32_t>(imm)));
1851 void MacroAssembler::Move(FPURegister dst, double imm) {
1852 static const DoubleRepresentation minus_zero(-0.0);
1853 static const DoubleRepresentation zero(0.0);
1854 DoubleRepresentation value_rep(imm);
1855 // Handle special values first.
1856 if (value_rep == zero && has_double_zero_reg_set_) {
1857 mov_d(dst, kDoubleRegZero);
1858 } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
1859 neg_d(dst, kDoubleRegZero);
1862 DoubleAsTwoUInt32(imm, &lo, &hi);
1863 // Move the low part of the double into the lower bits of the corresponding FPU register.
1866 if (!(lo & kImm16Mask)) {
1867 lui(at, (lo >> kLuiShift) & kImm16Mask);
1869 } else if (!(lo & kHiMask)) {
1870 ori(at, zero_reg, lo & kImm16Mask);
1873 lui(at, (lo >> kLuiShift) & kImm16Mask);
1874 ori(at, at, lo & kImm16Mask);
1878 mtc1(zero_reg, dst);
1880 // Move the high part of the double into the high bits of the corresponding FPU register.
1883 if (!(hi & kImm16Mask)) {
1884 lui(at, (hi >> kLuiShift) & kImm16Mask);
1886 } else if (!(hi & kHiMask)) {
1887 ori(at, zero_reg, hi & kImm16Mask);
1890 lui(at, (hi >> kLuiShift) & kImm16Mask);
1891 ori(at, at, hi & kImm16Mask);
1895 mthc1(zero_reg, dst);
1897 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
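// Note: once kDoubleRegZero has been materialized this way, later
// Move(dst, 0.0) and Move(dst, -0.0) calls take the fast paths above
// (mov_d / neg_d from kDoubleRegZero) instead of rebuilding the constant.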
1902 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1903 if (kArchVariant == kMips64r6) {
1905 Branch(&done, ne, rt, Operand(zero_reg));
1914 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1915 if (kArchVariant == kMips64r6) {
1917 Branch(&done, eq, rt, Operand(zero_reg));
1926 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1931 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1936 void MacroAssembler::Clz(Register rd, Register rs) {
1941 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1943 DoubleRegister double_input,
1945 DoubleRegister double_scratch,
1946 Register except_flag,
1947 CheckForInexactConversion check_inexact) {
1948 DCHECK(!result.is(scratch));
1949 DCHECK(!double_input.is(double_scratch));
1950 DCHECK(!except_flag.is(scratch));
1954 // Clear the except flag (0 = no exception)
1955 mov(except_flag, zero_reg);
1957 // Test for values that can be exactly represented as a signed 32-bit integer.
1958 cvt_w_d(double_scratch, double_input);
1959 mfc1(result, double_scratch);
1960 cvt_d_w(double_scratch, double_scratch);
1961 BranchF(&done, NULL, eq, double_input, double_scratch);
1963 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1965 if (check_inexact == kDontCheckForInexactConversion) {
1966 // Ignore inexact exceptions.
1967 except_mask &= ~kFCSRInexactFlagMask;
1971 cfc1(scratch, FCSR);
1972 // Disable FPU exceptions.
1973 ctc1(zero_reg, FCSR);
1975 // Do operation based on rounding mode.
1976 switch (rounding_mode) {
1977 case kRoundToNearest:
1978 Round_w_d(double_scratch, double_input);
1981 Trunc_w_d(double_scratch, double_input);
1983 case kRoundToPlusInf:
1984 Ceil_w_d(double_scratch, double_input);
1986 case kRoundToMinusInf:
1987 Floor_w_d(double_scratch, double_input);
1989 } // End of switch-statement.
1992 cfc1(except_flag, FCSR);
1994 ctc1(scratch, FCSR);
1995 // Move the converted value into the result register.
1996 mfc1(result, double_scratch);
1998 // Check for fpu exceptions.
1999 And(except_flag, except_flag, Operand(except_mask));
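
// Typical use at a call site (an illustration, not from the original file):
// the caller branches on a non-zero 'except_flag' to detect a failed or
// inexact conversion, e.g.
//   Branch(&not_int32, ne, except_flag, Operand(zero_reg));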
2005 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2006 DoubleRegister double_input,
2008 DoubleRegister single_scratch = kLithiumScratchDouble.low();
2009 Register scratch = at;
2010 Register scratch2 = t9;
2012 // Clear cumulative exception flags and save the FCSR.
2013 cfc1(scratch2, FCSR);
2014 ctc1(zero_reg, FCSR);
2015 // Try a conversion to a signed integer.
2016 trunc_w_d(single_scratch, double_input);
2017 mfc1(result, single_scratch);
2018 // Retrieve and restore the FCSR.
2019 cfc1(scratch, FCSR);
2020 ctc1(scratch2, FCSR);
2021 // Check for overflow and NaNs.
2024 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2025 // If we had no exceptions we are done.
2026 Branch(done, eq, scratch, Operand(zero_reg));
2030 void MacroAssembler::TruncateDoubleToI(Register result,
2031 DoubleRegister double_input) {
2034 TryInlineTruncateDoubleToI(result, double_input, &done);
2036 // If we fell through, the inline version didn't succeed, so call the stub instead.
2038 Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2039 sdc1(double_input, MemOperand(sp, 0));
2041 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2044 Daddu(sp, sp, Operand(kDoubleSize));
2051 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2053 DoubleRegister double_scratch = f12;
2054 DCHECK(!result.is(object));
2056 ldc1(double_scratch,
2057 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2058 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2060 // If we fell through, the inline version didn't succeed, so call the stub instead.
2062 DoubleToIStub stub(isolate(),
2065 HeapNumber::kValueOffset - kHeapObjectTag,
2075 void MacroAssembler::TruncateNumberToI(Register object,
2077 Register heap_number_map,
2079 Label* not_number) {
2081 DCHECK(!result.is(object));
2083 UntagAndJumpIfSmi(result, object, &done);
2084 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
2085 TruncateHeapNumberToI(result, object);
2091 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2093 int num_least_bits) {
2094 // Ext(dst, src, kSmiTagSize, num_least_bits);
2096 And(dst, dst, Operand((1 << num_least_bits) - 1));
2100 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2102 int num_least_bits) {
2103 DCHECK(!src.is(dst));
2104 And(dst, src, Operand((1 << num_least_bits) - 1));
2108 // Emulated conditional branches do not emit a nop in the branch delay slot.
2110 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2111 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
2112 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
2113 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
2116 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
2117 BranchShort(offset, bdslot);
2121 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
2123 BranchDelaySlot bdslot) {
2124 BranchShort(offset, cond, rs, rt, bdslot);
2128 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2129 if (L->is_bound()) {
2131 BranchShort(L, bdslot);
2136 if (is_trampoline_emitted()) {
2139 BranchShort(L, bdslot);
2145 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
2147 BranchDelaySlot bdslot) {
2148 if (L->is_bound()) {
2150 BranchShort(L, cond, rs, rt, bdslot);
2152 if (cond != cc_always) {
2154 Condition neg_cond = NegateCondition(cond);
2155 BranchShort(&skip, neg_cond, rs, rt);
2163 if (is_trampoline_emitted()) {
2164 if (cond != cc_always) {
2166 Condition neg_cond = NegateCondition(cond);
2167 BranchShort(&skip, neg_cond, rs, rt);
2174 BranchShort(L, cond, rs, rt, bdslot);
2180 void MacroAssembler::Branch(Label* L,
2183 Heap::RootListIndex index,
2184 BranchDelaySlot bdslot) {
2185 LoadRoot(at, index);
2186 Branch(L, cond, rs, Operand(at), bdslot);
2190 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
2193 // Emit a nop in the branch delay slot if required.
2194 if (bdslot == PROTECT)
2199 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
2201 BranchDelaySlot bdslot) {
2202 BRANCH_ARGS_CHECK(cond, rs, rt);
2203 DCHECK(!rs.is(zero_reg));
2204 Register r2 = no_reg;
2205 Register scratch = at;
2208 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or rt.
2210 BlockTrampolinePoolScope block_trampoline_pool(this);
2217 beq(rs, r2, offset);
2220 bne(rs, r2, offset);
2222 // Signed comparison.
2224 if (r2.is(zero_reg)) {
2227 slt(scratch, r2, rs);
2228 bne(scratch, zero_reg, offset);
2232 if (r2.is(zero_reg)) {
2235 slt(scratch, rs, r2);
2236 beq(scratch, zero_reg, offset);
2240 if (r2.is(zero_reg)) {
2243 slt(scratch, rs, r2);
2244 bne(scratch, zero_reg, offset);
2248 if (r2.is(zero_reg)) {
2251 slt(scratch, r2, rs);
2252 beq(scratch, zero_reg, offset);
2255 // Unsigned comparison.
2257 if (r2.is(zero_reg)) {
2258 bne(rs, zero_reg, offset);
2260 sltu(scratch, r2, rs);
2261 bne(scratch, zero_reg, offset);
2264 case Ugreater_equal:
2265 if (r2.is(zero_reg)) {
2268 sltu(scratch, rs, r2);
2269 beq(scratch, zero_reg, offset);
2273 if (r2.is(zero_reg)) {
2274 // No code needs to be emitted.
2277 sltu(scratch, rs, r2);
2278 bne(scratch, zero_reg, offset);
2282 if (r2.is(zero_reg)) {
2283 beq(rs, zero_reg, offset);
2285 sltu(scratch, r2, rs);
2286 beq(scratch, zero_reg, offset);
2293 // Be careful to always use shifted_branch_offset only just before the
2294 // branch instruction, as the location will be remembered for patching the branch.
2296 BlockTrampolinePoolScope block_trampoline_pool(this);
2302 if (rt.imm64_ == 0) {
2303 beq(rs, zero_reg, offset);
2305 // We don't want any other register but scratch clobbered.
2306 DCHECK(!scratch.is(rs));
2309 beq(rs, r2, offset);
2313 if (rt.imm64_ == 0) {
2314 bne(rs, zero_reg, offset);
2316 // We don't want any other register but scratch clobbered.
2317 DCHECK(!scratch.is(rs));
2320 bne(rs, r2, offset);
2323 // Signed comparison.
2325 if (rt.imm64_ == 0) {
2330 slt(scratch, r2, rs);
2331 bne(scratch, zero_reg, offset);
2335 if (rt.imm64_ == 0) {
2337 } else if (is_int16(rt.imm64_)) {
2338 slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
2339 beq(scratch, zero_reg, offset);
2343 slt(scratch, rs, r2);
2344 beq(scratch, zero_reg, offset);
2348 if (rt.imm64_ == 0) {
2350 } else if (is_int16(rt.imm64_)) {
2351 slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
2352 bne(scratch, zero_reg, offset);
2356 slt(scratch, rs, r2);
2357 bne(scratch, zero_reg, offset);
2361 if (rt.imm64_ == 0) {
2366 slt(scratch, r2, rs);
2367 beq(scratch, zero_reg, offset);
2370 // Unsigned comparison.
2372 if (rt.imm64_ == 0) {
2373 bne(rs, zero_reg, offset);
2377 sltu(scratch, r2, rs);
2378 bne(scratch, zero_reg, offset);
2381 case Ugreater_equal:
2382 if (rt.imm64_ == 0) {
2384 } else if (is_int16(rt.imm64_)) {
2385 sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
2386 beq(scratch, zero_reg, offset);
2390 sltu(scratch, rs, r2);
2391 beq(scratch, zero_reg, offset);
2395 if (rt.imm64_ == 0) {
2396 // No code needs to be emitted.
2398 } else if (is_int16(rt.imm64_)) {
2399 sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
2400 bne(scratch, zero_reg, offset);
2404 sltu(scratch, rs, r2);
2405 bne(scratch, zero_reg, offset);
2409 if (rt.imm64_ == 0) {
2410 beq(rs, zero_reg, offset);
2414 sltu(scratch, r2, rs);
2415 beq(scratch, zero_reg, offset);
2422 // Emit a nop in the branch delay slot if required.
2423 if (bdslot == PROTECT)
2428 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2429 // We use shifted_branch_offset as an argument for the branch instructions to be
2430 // sure it is evaluated just before generating the branch instruction, as needed.
2432 b(shifted_branch_offset(L, false));
2434 // Emit a nop in the branch delay slot if required.
2435 if (bdslot == PROTECT)
2440 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
2442 BranchDelaySlot bdslot) {
2443 BRANCH_ARGS_CHECK(cond, rs, rt);
2446 Register r2 = no_reg;
2447 Register scratch = at;
2449 BlockTrampolinePoolScope block_trampoline_pool(this);
2451 // Be careful to always use shifted_branch_offset only just before the
2452 // branch instruction, as the location will be remembered for patching the branch.
2456 offset = shifted_branch_offset(L, false);
2460 offset = shifted_branch_offset(L, false);
2461 beq(rs, r2, offset);
2464 offset = shifted_branch_offset(L, false);
2465 bne(rs, r2, offset);
2467 // Signed comparison.
2469 if (r2.is(zero_reg)) {
2470 offset = shifted_branch_offset(L, false);
2473 slt(scratch, r2, rs);
2474 offset = shifted_branch_offset(L, false);
2475 bne(scratch, zero_reg, offset);
2479 if (r2.is(zero_reg)) {
2480 offset = shifted_branch_offset(L, false);
2483 slt(scratch, rs, r2);
2484 offset = shifted_branch_offset(L, false);
2485 beq(scratch, zero_reg, offset);
2489 if (r2.is(zero_reg)) {
2490 offset = shifted_branch_offset(L, false);
2493 slt(scratch, rs, r2);
2494 offset = shifted_branch_offset(L, false);
2495 bne(scratch, zero_reg, offset);
2499 if (r2.is(zero_reg)) {
2500 offset = shifted_branch_offset(L, false);
2503 slt(scratch, r2, rs);
2504 offset = shifted_branch_offset(L, false);
2505 beq(scratch, zero_reg, offset);
2508 // Unsigned comparison.
2510 if (r2.is(zero_reg)) {
2511 offset = shifted_branch_offset(L, false);
2512 bne(rs, zero_reg, offset);
2514 sltu(scratch, r2, rs);
2515 offset = shifted_branch_offset(L, false);
2516 bne(scratch, zero_reg, offset);
2519 case Ugreater_equal:
2520 if (r2.is(zero_reg)) {
2521 offset = shifted_branch_offset(L, false);
2524 sltu(scratch, rs, r2);
2525 offset = shifted_branch_offset(L, false);
2526 beq(scratch, zero_reg, offset);
2530 if (r2.is(zero_reg)) {
2531 // No code needs to be emitted.
2534 sltu(scratch, rs, r2);
2535 offset = shifted_branch_offset(L, false);
2536 bne(scratch, zero_reg, offset);
2540 if (r2.is(zero_reg)) {
2541 offset = shifted_branch_offset(L, false);
2542 beq(rs, zero_reg, offset);
2544 sltu(scratch, r2, rs);
2545 offset = shifted_branch_offset(L, false);
2546 beq(scratch, zero_reg, offset);
2553 // Be careful to always use shifted_branch_offset only just before the
2554 // branch instruction, as the location will be remembered for patching the branch.
2556 BlockTrampolinePoolScope block_trampoline_pool(this);
2559 offset = shifted_branch_offset(L, false);
2563 if (rt.imm64_ == 0) {
2564 offset = shifted_branch_offset(L, false);
2565 beq(rs, zero_reg, offset);
2567 DCHECK(!scratch.is(rs));
2570 offset = shifted_branch_offset(L, false);
2571 beq(rs, r2, offset);
2575 if (rt.imm64_ == 0) {
2576 offset = shifted_branch_offset(L, false);
2577 bne(rs, zero_reg, offset);
2579 DCHECK(!scratch.is(rs));
2582 offset = shifted_branch_offset(L, false);
2583 bne(rs, r2, offset);
2586 // Signed comparison.
2588 if (rt.imm64_ == 0) {
2589 offset = shifted_branch_offset(L, false);
2592 DCHECK(!scratch.is(rs));
2595 slt(scratch, r2, rs);
2596 offset = shifted_branch_offset(L, false);
2597 bne(scratch, zero_reg, offset);
2601 if (rt.imm64_ == 0) {
2602 offset = shifted_branch_offset(L, false);
2604 } else if (is_int16(rt.imm64_)) {
2605 slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
2606 offset = shifted_branch_offset(L, false);
2607 beq(scratch, zero_reg, offset);
2609 DCHECK(!scratch.is(rs));
2612 slt(scratch, rs, r2);
2613 offset = shifted_branch_offset(L, false);
2614 beq(scratch, zero_reg, offset);
2618 if (rt.imm64_ == 0) {
2619 offset = shifted_branch_offset(L, false);
2621 } else if (is_int16(rt.imm64_)) {
2622 slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
2623 offset = shifted_branch_offset(L, false);
2624 bne(scratch, zero_reg, offset);
2626 DCHECK(!scratch.is(rs));
2629 slt(scratch, rs, r2);
2630 offset = shifted_branch_offset(L, false);
2631 bne(scratch, zero_reg, offset);
2635 if (rt.imm64_ == 0) {
2636 offset = shifted_branch_offset(L, false);
2639 DCHECK(!scratch.is(rs));
2642 slt(scratch, r2, rs);
2643 offset = shifted_branch_offset(L, false);
2644 beq(scratch, zero_reg, offset);
2647 // Unsigned comparison.
2649 if (rt.imm64_ == 0) {
2650 offset = shifted_branch_offset(L, false);
2651 bne(rs, zero_reg, offset);
2653 DCHECK(!scratch.is(rs));
2656 sltu(scratch, r2, rs);
2657 offset = shifted_branch_offset(L, false);
2658 bne(scratch, zero_reg, offset);
2661 case Ugreater_equal:
2662 if (rt.imm64_ == 0) {
2663 offset = shifted_branch_offset(L, false);
2665 } else if (is_int16(rt.imm64_)) {
2666 sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
2667 offset = shifted_branch_offset(L, false);
2668 beq(scratch, zero_reg, offset);
2670 DCHECK(!scratch.is(rs));
2673 sltu(scratch, rs, r2);
2674 offset = shifted_branch_offset(L, false);
2675 beq(scratch, zero_reg, offset);
2679 if (rt.imm64_ == 0) {
2680 // No code needs to be emitted.
2682 } else if (is_int16(rt.imm64_)) {
2683 sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
2684 offset = shifted_branch_offset(L, false);
2685 bne(scratch, zero_reg, offset);
2687 DCHECK(!scratch.is(rs));
2690 sltu(scratch, rs, r2);
2691 offset = shifted_branch_offset(L, false);
2692 bne(scratch, zero_reg, offset);
2696 if (rt.imm64_ == 0) {
2697 offset = shifted_branch_offset(L, false);
2698 beq(rs, zero_reg, offset);
2700 DCHECK(!scratch.is(rs));
2703 sltu(scratch, r2, rs);
2704 offset = shifted_branch_offset(L, false);
2705 beq(scratch, zero_reg, offset);
2712 // Check that the offset actually fits in an int16_t.
2713 DCHECK(is_int16(offset));
2714 // Emit a nop in the branch delay slot if required.
2715 if (bdslot == PROTECT)
2720 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2721 BranchAndLinkShort(offset, bdslot);
2725 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2727 BranchDelaySlot bdslot) {
2728 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2732 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2733 if (L->is_bound()) {
2735 BranchAndLinkShort(L, bdslot);
2740 if (is_trampoline_emitted()) {
2743 BranchAndLinkShort(L, bdslot);
2749 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2751 BranchDelaySlot bdslot) {
2752 if (L->is_bound()) {
2754 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2757 Condition neg_cond = NegateCondition(cond);
2758 BranchShort(&skip, neg_cond, rs, rt);
2763 if (is_trampoline_emitted()) {
2765 Condition neg_cond = NegateCondition(cond);
2766 BranchShort(&skip, neg_cond, rs, rt);
2770 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2776 // We need to use a bgezal or bltzal, but they can't be used directly with the
2777 // slt instructions. We could use sub or add instead but we would miss overflow
2778 // cases, so we keep slt and add an intermediate third instruction.
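// For example, for the 'greater' condition the emitted pattern is
// (illustrative; the delay slot is shown explicitly):
//   slt(scratch, r2, rs);        // scratch = 1 if rs > r2.
//   beq(scratch, zero_reg, 2);   // Skip the bal when the condition fails.
//   nop();                       // Branch delay slot.
//   bal(offset);                 // Branch and link when the condition holds.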
2779 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2780 BranchDelaySlot bdslot) {
2783 // Emit a nop in the branch delay slot if required.
2784 if (bdslot == PROTECT)
2789 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2790 Register rs, const Operand& rt,
2791 BranchDelaySlot bdslot) {
2792 BRANCH_ARGS_CHECK(cond, rs, rt);
2793 Register r2 = no_reg;
2794 Register scratch = at;
2798 } else if (cond != cc_always) {
2804 BlockTrampolinePoolScope block_trampoline_pool(this);
2820 // Signed comparison.
2823 slt(scratch, r2, rs);
2824 beq(scratch, zero_reg, 2);
2830 slt(scratch, rs, r2);
2831 bne(scratch, zero_reg, 2);
2837 slt(scratch, rs, r2);
2838 bne(scratch, zero_reg, 2);
2844 slt(scratch, r2, rs);
2845 bne(scratch, zero_reg, 2);
2851 // Unsigned comparison.
2854 sltu(scratch, r2, rs);
2855 beq(scratch, zero_reg, 2);
2859 case Ugreater_equal:
2861 sltu(scratch, rs, r2);
2862 bne(scratch, zero_reg, 2);
2868 sltu(scratch, rs, r2);
2869 bne(scratch, zero_reg, 2);
2875 sltu(scratch, r2, rs);
2876 bne(scratch, zero_reg, 2);
2884 // Emit a nop in the branch delay slot if required.
2885 if (bdslot == PROTECT)
2890 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2891 bal(shifted_branch_offset(L, false));
2893 // Emit a nop in the branch delay slot if required.
2894 if (bdslot == PROTECT)
2899 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2901 BranchDelaySlot bdslot) {
2902 BRANCH_ARGS_CHECK(cond, rs, rt);
2905 Register r2 = no_reg;
2906 Register scratch = at;
2909 } else if (cond != cc_always) {
2915 BlockTrampolinePoolScope block_trampoline_pool(this);
2918 offset = shifted_branch_offset(L, false);
2924 offset = shifted_branch_offset(L, false);
2930 offset = shifted_branch_offset(L, false);
2934 // Signed comparison.
2937 slt(scratch, r2, rs);
2938 beq(scratch, zero_reg, 2);
2940 offset = shifted_branch_offset(L, false);
2945 slt(scratch, rs, r2);
2946 bne(scratch, zero_reg, 2);
2948 offset = shifted_branch_offset(L, false);
2953 slt(scratch, rs, r2);
2954 bne(scratch, zero_reg, 2);
2956 offset = shifted_branch_offset(L, false);
2961 slt(scratch, r2, rs);
2962 bne(scratch, zero_reg, 2);
2964 offset = shifted_branch_offset(L, false);
2969 // Unsigned comparison.
2972 sltu(scratch, r2, rs);
2973 beq(scratch, zero_reg, 2);
2975 offset = shifted_branch_offset(L, false);
2978 case Ugreater_equal:
2980 sltu(scratch, rs, r2);
2981 bne(scratch, zero_reg, 2);
2983 offset = shifted_branch_offset(L, false);
2988 sltu(scratch, rs, r2);
2989 bne(scratch, zero_reg, 2);
2991 offset = shifted_branch_offset(L, false);
2996 sltu(scratch, r2, rs);
2997 bne(scratch, zero_reg, 2);
2999 offset = shifted_branch_offset(L, false);
3007 // Check that the offset fits in an int16_t.
3008 DCHECK(is_int16(offset));
3010 // Emit a nop in the branch delay slot if required.
3011 if (bdslot == PROTECT) nop();
3016 void MacroAssembler::Jump(Register target,
3020 BranchDelaySlot bd) {
3021 BlockTrampolinePoolScope block_trampoline_pool(this);
3022 if (cond == cc_always) {
3025 BRANCH_ARGS_CHECK(cond, rs, rt);
3026 Branch(2, NegateCondition(cond), rs, rt);
3029 // Emit a nop in the branch delay slot if required.
3035 void MacroAssembler::Jump(intptr_t target,
3036 RelocInfo::Mode rmode,
3040 BranchDelaySlot bd) {
3042 if (cond != cc_always) {
3043 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3045 // The first instruction of 'li' may be placed in the delay slot.
3046 // This is not an issue, t9 is expected to be clobbered anyway.
3047 li(t9, Operand(target, rmode));
3048 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
3053 void MacroAssembler::Jump(Address target,
3054 RelocInfo::Mode rmode,
3058 BranchDelaySlot bd) {
3059 DCHECK(!RelocInfo::IsCodeTarget(rmode));
3060 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3064 void MacroAssembler::Jump(Handle<Code> code,
3065 RelocInfo::Mode rmode,
3069 BranchDelaySlot bd) {
3070 DCHECK(RelocInfo::IsCodeTarget(rmode));
3071 AllowDeferredHandleDereference embedding_raw_address;
3072 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
3076 int MacroAssembler::CallSize(Register target,
3080 BranchDelaySlot bd) {
3083 if (cond == cc_always) {
3092 return size * kInstrSize;
3096 // Note: To call gcc-compiled C code on mips, you must call through t9.
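// Background note: in the MIPS position-independent calling convention the
// callee expects its own entry address in t9 and rebuilds gp from it, so
// calling C code through any other register would leave the callee with a
// stale gp.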
3097 void MacroAssembler::Call(Register target,
3101 BranchDelaySlot bd) {
3102 BlockTrampolinePoolScope block_trampoline_pool(this);
3105 if (cond == cc_always) {
3108 BRANCH_ARGS_CHECK(cond, rs, rt);
3109 Branch(2, NegateCondition(cond), rs, rt);
3112 // Emit a nop in the branch delay slot if required.
3116 DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
3117 SizeOfCodeGeneratedSince(&start));
3121 int MacroAssembler::CallSize(Address target,
3122 RelocInfo::Mode rmode,
3126 BranchDelaySlot bd) {
3127 int size = CallSize(t9, cond, rs, rt, bd);
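// The extra 4 * kInstrSize below accounts for the li() sequence that
// materializes the 64-bit call target into t9 before the register call
// counted by CallSize(t9, ...) above.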
3128 return size + 4 * kInstrSize;
3132 void MacroAssembler::Call(Address target,
3133 RelocInfo::Mode rmode,
3137 BranchDelaySlot bd) {
3138 BlockTrampolinePoolScope block_trampoline_pool(this);
3141 int64_t target_int = reinterpret_cast<int64_t>(target);
3142 // Must record previous source positions before the
3143 // li() generates a new code target.
3144 positions_recorder()->WriteRecordedPositions();
3145 li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
3146 Call(t9, cond, rs, rt, bd);
3147 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3148 SizeOfCodeGeneratedSince(&start));
3152 int MacroAssembler::CallSize(Handle<Code> code,
3153 RelocInfo::Mode rmode,
3154 TypeFeedbackId ast_id,
3158 BranchDelaySlot bd) {
3159 AllowDeferredHandleDereference using_raw_address;
3160 return CallSize(reinterpret_cast<Address>(code.location()),
3161 rmode, cond, rs, rt, bd);
3165 void MacroAssembler::Call(Handle<Code> code,
3166 RelocInfo::Mode rmode,
3167 TypeFeedbackId ast_id,
3171 BranchDelaySlot bd) {
3172 BlockTrampolinePoolScope block_trampoline_pool(this);
3175 DCHECK(RelocInfo::IsCodeTarget(rmode));
3176 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3177 SetRecordedAstId(ast_id);
3178 rmode = RelocInfo::CODE_TARGET_WITH_ID;
3180 AllowDeferredHandleDereference embedding_raw_address;
3181 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3182 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3183 SizeOfCodeGeneratedSince(&start));
3187 void MacroAssembler::Ret(Condition cond,
3190 BranchDelaySlot bd) {
3191 Jump(ra, cond, rs, rt, bd);
3195 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
3196 BlockTrampolinePoolScope block_trampoline_pool(this);
3198 BlockGrowBufferScope block_buf_growth(this);
3199 // Buffer growth (and relocation) must be blocked for internal references
3200 // until associated instructions are emitted and available to be patched.
3201 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3204 // Emit a nop in the branch delay slot if required.
3205 if (bdslot == PROTECT) nop();
3209 void MacroAssembler::Jal(Label* L, BranchDelaySlot bdslot) {
3210 BlockTrampolinePoolScope block_trampoline_pool(this);
3212 BlockGrowBufferScope block_buf_growth(this);
3213 // Buffer growth (and relocation) must be blocked for internal references
3214 // until associated instructions are emitted and available to be patched.
3215 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3218 // Emit a nop in the branch delay slot if required.
3219 if (bdslot == PROTECT) nop();
3223 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
3224 BlockTrampolinePoolScope block_trampoline_pool(this);
3227 imm64 = jump_address(L);
3228 { BlockGrowBufferScope block_buf_growth(this);
3229 // Buffer growth (and relocation) must be blocked for internal references
3230 // until associated instructions are emitted and available to be patched.
3231 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3232 li(at, Operand(imm64), ADDRESS_LOAD);
3236 // Emit a nop in the branch delay slot if required.
3237 if (bdslot == PROTECT) nop();
3242 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
3243 BlockTrampolinePoolScope block_trampoline_pool(this);
3246 imm64 = jump_address(L);
3247 { BlockGrowBufferScope block_buf_growth(this);
3248 // Buffer growth (and relocation) must be blocked for internal references
3249 // until associated instructions are emitted and available to be patched.
3250 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3251 li(at, Operand(imm64), ADDRESS_LOAD);
3255 // Emit a nop in the branch delay slot if required.
3256 if (bdslot == PROTECT) nop();
3261 void MacroAssembler::DropAndRet(int drop) {
3262 DCHECK(is_int16(drop * kPointerSize));
3263 Ret(USE_DELAY_SLOT);
3264 daddiu(sp, sp, drop * kPointerSize);
3267 void MacroAssembler::DropAndRet(int drop,
3270 const Operand& r2) {
3271 // Both Drop and Ret need to be conditional.
3273 if (cond != cc_always) {
3274 Branch(&skip, NegateCondition(cond), r1, r2);
3280 if (cond != cc_always) {
3286 void MacroAssembler::Drop(int count,
3289 const Operand& op) {
3297 Branch(&skip, NegateCondition(cond), reg, op);
3300 Daddu(sp, sp, Operand(count * kPointerSize));
3309 void MacroAssembler::Swap(Register reg1,
3312 if (scratch.is(no_reg)) {
3313 Xor(reg1, reg1, Operand(reg2));
3314 Xor(reg2, reg2, Operand(reg1));
3315 Xor(reg1, reg1, Operand(reg2));
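// The three XORs above swap reg1 and reg2 in place without using a scratch
// register: e.g. with reg1 = 5 and reg2 = 3 the register pair goes 6/3, then
// 6/5, then 3/5, so each register ends up with the other's original contents.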
3324 void MacroAssembler::Call(Label* target) {
3325 BranchAndLink(target);
3329 void MacroAssembler::Push(Handle<Object> handle) {
3330 li(at, Operand(handle));
3335 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
3336 DCHECK(!src.is(scratch));
3338 dsrl32(src, src, 0);
3339 dsll32(src, src, 0);
3341 dsll32(scratch, scratch, 0);
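// On mips64 a Smi keeps its 32-bit payload in the upper word of the register,
// so the shifts above split the 64-bit value in src into two valid Smis (high
// half and low half) that can be pushed separately; PopRegisterAsTwoSmis below
// reverses the transformation with or_().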
3346 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
3347 DCHECK(!dst.is(scratch));
3349 dsrl32(scratch, scratch, 0);
3351 dsrl32(dst, dst, 0);
3352 dsll32(dst, dst, 0);
3353 or_(dst, dst, scratch);
3357 void MacroAssembler::DebugBreak() {
3358 PrepareCEntryArgs(0);
3359 PrepareCEntryFunction(
3360 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
3361 CEntryStub ces(isolate(), 1);
3362 DCHECK(AllowThisStubCall(&ces));
3363 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
3367 // ---------------------------------------------------------------------------
3368 // Exception handling.
3370 void MacroAssembler::PushStackHandler() {
3371 // Adjust this code if not the case.
3372 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3373 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3375 // Link the current handler as the next handler.
3376 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3377 ld(a5, MemOperand(a6));
3380 // Set this new handler as the current one.
3381 sd(sp, MemOperand(a6));
3385 void MacroAssembler::PopStackHandler() {
3386 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3388 Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
3390 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3391 sd(a1, MemOperand(at));
3395 void MacroAssembler::Allocate(int object_size,
3400 AllocationFlags flags) {
3401 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3402 if (!FLAG_inline_new) {
3403 if (emit_debug_code()) {
3404 // Trash the registers to simulate an allocation failure.
3406 li(scratch1, 0x7191);
3407 li(scratch2, 0x7291);
3413 DCHECK(!result.is(scratch1));
3414 DCHECK(!result.is(scratch2));
3415 DCHECK(!scratch1.is(scratch2));
3416 DCHECK(!scratch1.is(t9));
3417 DCHECK(!scratch2.is(t9));
3418 DCHECK(!result.is(t9));
3420 // Make object size into bytes.
3421 if ((flags & SIZE_IN_WORDS) != 0) {
3422 object_size *= kPointerSize;
3424 DCHECK(0 == (object_size & kObjectAlignmentMask));
3426 // Check relative positions of allocation top and limit addresses.
3427 // ARM adds additional checks to make sure the ldm instruction can be
3428 // used. On MIPS we don't have ldm so we don't need additional checks either.
3429 ExternalReference allocation_top =
3430 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3431 ExternalReference allocation_limit =
3432 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3435 reinterpret_cast<intptr_t>(allocation_top.address());
3437 reinterpret_cast<intptr_t>(allocation_limit.address());
3438 DCHECK((limit - top) == kPointerSize);
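// Since top and limit are adjacent words (checked above), a single base
// register can address both: the loads below read the top at offset 0 and the
// limit at offset kPointerSize from topaddr.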
3440 // Set up allocation top address and object size registers.
3441 Register topaddr = scratch1;
3442 li(topaddr, Operand(allocation_top));
3444 // This code stores a temporary value in t9.
3445 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3446 // Load allocation top into result and allocation limit into t9.
3447 ld(result, MemOperand(topaddr));
3448 ld(t9, MemOperand(topaddr, kPointerSize));
3450 if (emit_debug_code()) {
3451 // Assert that result actually contains top on entry. t9 is used
3452 // immediately below, so this use of t9 does not cause the register
3453 // contents to differ between debug and release mode.
3454 ld(t9, MemOperand(topaddr));
3455 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3457 // Load allocation limit into t9. Result already contains allocation top.
3458 ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
3461 DCHECK(kPointerSize == kDoubleSize);
3462 if (emit_debug_code()) {
3463 And(at, result, Operand(kDoubleAlignmentMask));
3464 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3467 // Calculate new top and bail out if new space is exhausted. Use result
3468 // to calculate the new top.
3469 Daddu(scratch2, result, Operand(object_size));
3470 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3471 sd(scratch2, MemOperand(topaddr));
3473 // Tag object if requested.
3474 if ((flags & TAG_OBJECT) != 0) {
3475 Daddu(result, result, Operand(kHeapObjectTag));
3480 void MacroAssembler::Allocate(Register object_size,
3485 AllocationFlags flags) {
3486 if (!FLAG_inline_new) {
3487 if (emit_debug_code()) {
3488 // Trash the registers to simulate an allocation failure.
3490 li(scratch1, 0x7191);
3491 li(scratch2, 0x7291);
3497 DCHECK(!result.is(scratch1));
3498 DCHECK(!result.is(scratch2));
3499 DCHECK(!scratch1.is(scratch2));
3500 DCHECK(!object_size.is(t9));
3501 DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3503 // Check relative positions of allocation top and limit addresses.
3504 // ARM adds additional checks to make sure the ldm instruction can be
3505 // used. On MIPS we don't have ldm so we don't need additional checks either.
3506 ExternalReference allocation_top =
3507 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3508 ExternalReference allocation_limit =
3509 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3511 reinterpret_cast<intptr_t>(allocation_top.address());
3513 reinterpret_cast<intptr_t>(allocation_limit.address());
3514 DCHECK((limit - top) == kPointerSize);
3516 // Set up allocation top address and object size registers.
3517 Register topaddr = scratch1;
3518 li(topaddr, Operand(allocation_top));
3520 // This code stores a temporary value in t9.
3521 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3522 // Load allocation top into result and allocation limit into t9.
3523 ld(result, MemOperand(topaddr));
3524 ld(t9, MemOperand(topaddr, kPointerSize));
3526 if (emit_debug_code()) {
3527 // Assert that result actually contains top on entry. t9 is used
3528 // immediately below, so this use of t9 does not cause the register
3529 // contents to differ between debug and release mode.
3530 ld(t9, MemOperand(topaddr));
3531 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3533 // Load allocation limit into t9. Result already contains allocation top.
3534 ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
3537 DCHECK(kPointerSize == kDoubleSize);
3538 if (emit_debug_code()) {
3539 And(at, result, Operand(kDoubleAlignmentMask));
3540 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3543 // Calculate new top and bail out if new space is exhausted. Use result
3544 // to calculate the new top. Object size may be in words so a shift is
3545 // required to get the number of bytes.
3546 if ((flags & SIZE_IN_WORDS) != 0) {
3547 dsll(scratch2, object_size, kPointerSizeLog2);
3548 Daddu(scratch2, result, scratch2);
3550 Daddu(scratch2, result, Operand(object_size));
3552 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3554 // Update allocation top. scratch2 temporarily holds the new top.
3555 if (emit_debug_code()) {
3556 And(t9, scratch2, Operand(kObjectAlignmentMask));
3557 Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3559 sd(scratch2, MemOperand(topaddr));
3561 // Tag object if requested.
3562 if ((flags & TAG_OBJECT) != 0) {
3563 Daddu(result, result, Operand(kHeapObjectTag));
3568 void MacroAssembler::AllocateTwoByteString(Register result,
3573 Label* gc_required) {
3574 // Calculate the number of bytes needed for the characters in the string while
3575 // observing object alignment.
3576 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3577 dsll(scratch1, length, 1); // Length in bytes, not chars.
3578 daddiu(scratch1, scratch1,
3579 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3580 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
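// The three instructions above compute
//   size = (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
//          & ~kObjectAlignmentMask,
// i.e. the raw string size rounded up to the next object-alignment boundary
// (8 bytes on this target, assuming the usual kObjectAlignment).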
3582 // Allocate two-byte string in new space.
3590 // Set the map, length and hash field.
3591 InitializeNewString(result,
3593 Heap::kStringMapRootIndex,
3599 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3600 Register scratch1, Register scratch2,
3602 Label* gc_required) {
3603 // Calculate the number of bytes needed for the characters in the string
3604 // while observing object alignment.
3605 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3606 DCHECK(kCharSize == 1);
3607 daddiu(scratch1, length,
3608 kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3609 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3611 // Allocate one-byte string in new space.
3619 // Set the map, length and hash field.
3620 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3621 scratch1, scratch2);
3625 void MacroAssembler::AllocateTwoByteConsString(Register result,
3629 Label* gc_required) {
3630 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3632 InitializeNewString(result,
3634 Heap::kConsStringMapRootIndex,
3640 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3643 Label* gc_required) {
3644 Allocate(ConsString::kSize,
3651 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3652 scratch1, scratch2);
3656 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3660 Label* gc_required) {
3661 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3664 InitializeNewString(result,
3666 Heap::kSlicedStringMapRootIndex,
3672 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3676 Label* gc_required) {
3677 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3680 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3681 scratch1, scratch2);
3685 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3686 Label* not_unique_name) {
3687 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3689 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3690 Branch(&succeed, eq, at, Operand(zero_reg));
3691 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3697 // Allocates a heap number or jumps to the label if the young space is full and
3698 // a scavenge is needed.
3699 void MacroAssembler::AllocateHeapNumber(Register result,
3702 Register heap_number_map,
3704 TaggingMode tagging_mode,
3706 // Allocate an object in the heap for the heap number and tag it as a heap object.
3708 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3709 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3711 Heap::RootListIndex map_index = mode == MUTABLE
3712 ? Heap::kMutableHeapNumberMapRootIndex
3713 : Heap::kHeapNumberMapRootIndex;
3714 AssertIsRoot(heap_number_map, map_index);
3716 // Store heap number map in the allocated object.
3717 if (tagging_mode == TAG_RESULT) {
3718 sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3720 sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3725 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3729 Label* gc_required) {
3730 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3731 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3732 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3736 // Copies a fixed number of fields of heap objects from src to dst.
3737 void MacroAssembler::CopyFields(Register dst,
3741 DCHECK((temps & dst.bit()) == 0);
3742 DCHECK((temps & src.bit()) == 0);
3743 // Primitive implementation using only one temporary register.
3745 Register tmp = no_reg;
3746 // Find a temp register in temps list.
3747 for (int i = 0; i < kNumRegisters; i++) {
3748 if ((temps & (1 << i)) != 0) {
3753 DCHECK(!tmp.is(no_reg));
3755 for (int i = 0; i < field_count; i++) {
3756 ld(tmp, FieldMemOperand(src, i * kPointerSize));
3757 sd(tmp, FieldMemOperand(dst, i * kPointerSize));
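// Illustrative use (registers chosen arbitrarily): CopyFields(t0, t1,
// a5.bit(), 3) copies three pointer-sized fields from the object in t1 to the
// object in t0, using a5 as the single temporary taken from the temps list.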
3762 void MacroAssembler::CopyBytes(Register src,
3766 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3768 // Align src before copying in word size chunks.
3769 Branch(&byte_loop, le, length, Operand(kPointerSize));
3770 bind(&align_loop_1);
3771 And(scratch, src, kPointerSize - 1);
3772 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3773 lbu(scratch, MemOperand(src));
3775 sb(scratch, MemOperand(dst));
3777 Dsubu(length, length, Operand(1));
3778 Branch(&align_loop_1, ne, length, Operand(zero_reg));
3780 // Copy bytes in word size chunks.
3782 if (emit_debug_code()) {
3783 And(scratch, src, kPointerSize - 1);
3784 Assert(eq, kExpectingAlignmentForCopyBytes,
3785 scratch, Operand(zero_reg));
3787 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3788 ld(scratch, MemOperand(src));
3789 Daddu(src, src, kPointerSize);
3791 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3792 // Can't use unaligned access - copy byte by byte.
3793 sb(scratch, MemOperand(dst, 0));
3794 dsrl(scratch, scratch, 8);
3795 sb(scratch, MemOperand(dst, 1));
3796 dsrl(scratch, scratch, 8);
3797 sb(scratch, MemOperand(dst, 2));
3798 dsrl(scratch, scratch, 8);
3799 sb(scratch, MemOperand(dst, 3));
3800 dsrl(scratch, scratch, 8);
3801 sb(scratch, MemOperand(dst, 4));
3802 dsrl(scratch, scratch, 8);
3803 sb(scratch, MemOperand(dst, 5));
3804 dsrl(scratch, scratch, 8);
3805 sb(scratch, MemOperand(dst, 6));
3806 dsrl(scratch, scratch, 8);
3807 sb(scratch, MemOperand(dst, 7));
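// The sb/dsrl pairs above spill the word loaded from the aligned source into
// the possibly unaligned destination one byte at a time, least-significant
// byte first, which preserves the original byte order on a little-endian
// target.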
3810 Dsubu(length, length, Operand(kPointerSize));
3813 // Copy the last bytes if any left.
3815 Branch(&done, eq, length, Operand(zero_reg));
3817 lbu(scratch, MemOperand(src));
3819 sb(scratch, MemOperand(dst));
3821 Dsubu(length, length, Operand(1));
3822 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3827 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3828 Register end_offset,
3833 sd(filler, MemOperand(start_offset));
3834 Daddu(start_offset, start_offset, kPointerSize);
3836 Branch(&loop, ult, start_offset, Operand(end_offset));
3840 void MacroAssembler::CheckFastElements(Register map,
3843 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3844 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3845 STATIC_ASSERT(FAST_ELEMENTS == 2);
3846 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3847 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3848 Branch(fail, hi, scratch,
3849 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3853 void MacroAssembler::CheckFastObjectElements(Register map,
3856 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3857 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3858 STATIC_ASSERT(FAST_ELEMENTS == 2);
3859 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3860 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3861 Branch(fail, ls, scratch,
3862 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3863 Branch(fail, hi, scratch,
3864 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3868 void MacroAssembler::CheckFastSmiElements(Register map,
3871 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3872 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3873 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3874 Branch(fail, hi, scratch,
3875 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3879 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3881 Register elements_reg,
3885 int elements_offset) {
3886 Label smi_value, done;
3888 // Handle smi values specially.
3889 JumpIfSmi(value_reg, &smi_value);
3891 // Ensure that the object is a heap number.
3894 Heap::kHeapNumberMapRootIndex,
3898 // Double value, turn potential sNaN into qNaN.
3899 DoubleRegister double_result = f0;
3900 DoubleRegister double_scratch = f2;
3902 ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
3903 Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
3904 FPUCanonicalizeNaN(double_result, double_result);
3907 // scratch1 is now effective address of the double element.
3908 // Untag and transfer.
3909 dsrl32(at, value_reg, 0);
3910 mtc1(at, double_scratch);
3911 cvt_d_w(double_result, double_scratch);
3914 Daddu(scratch1, elements_reg,
3915 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3917 dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
3918 Daddu(scratch1, scratch1, scratch2);
3919 sdc1(double_result, MemOperand(scratch1, 0));
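// In the address computation above, key_reg holds a Smi whose payload sits in
// the upper 32 bits, so the single dsra by (32 - kDoubleSizeLog2) both untags
// the index and scales it by kDoubleSize, producing the byte offset of the
// element that receives the canonicalized double.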
3923 void MacroAssembler::CompareMapAndBranch(Register obj,
3926 Label* early_success,
3929 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3930 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3934 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3936 Label* early_success,
3939 Branch(branch_to, cond, obj_map, Operand(map));
3943 void MacroAssembler::CheckMap(Register obj,
3947 SmiCheckType smi_check_type) {
3948 if (smi_check_type == DO_SMI_CHECK) {
3949 JumpIfSmi(obj, fail);
3952 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3957 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3958 Register scratch2, Handle<WeakCell> cell,
3959 Handle<Code> success,
3960 SmiCheckType smi_check_type) {
3962 if (smi_check_type == DO_SMI_CHECK) {
3963 JumpIfSmi(obj, &fail);
3965 ld(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3966 GetWeakValue(scratch2, cell);
3967 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
3972 void MacroAssembler::CheckMap(Register obj,
3974 Heap::RootListIndex index,
3976 SmiCheckType smi_check_type) {
3977 if (smi_check_type == DO_SMI_CHECK) {
3978 JumpIfSmi(obj, fail);
3980 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3981 LoadRoot(at, index);
3982 Branch(fail, ne, scratch, Operand(at));
3986 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3987 li(value, Operand(cell));
3988 ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
3991 void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
3992 const DoubleRegister src) {
3993 sub_d(dst, src, kDoubleRegZero);
3996 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3998 GetWeakValue(value, cell);
3999 JumpIfSmi(value, miss);
4003 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
4004 if (IsMipsSoftFloatABI) {
4007 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
4012 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
4013 if (IsMipsSoftFloatABI) {
4016 Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
4021 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4022 if (!IsMipsSoftFloatABI) {
4030 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4031 if (!IsMipsSoftFloatABI) {
4039 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4040 DoubleRegister src2) {
4041 if (!IsMipsSoftFloatABI) {
4042 const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
4044 DCHECK(!src1.is(fparg2));
4058 // -----------------------------------------------------------------------------
4059 // JavaScript invokes.
4061 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4062 const ParameterCount& actual,
4063 Handle<Code> code_constant,
4066 bool* definitely_mismatches,
4068 const CallWrapper& call_wrapper) {
4069 bool definitely_matches = false;
4070 *definitely_mismatches = false;
4071 Label regular_invoke;
4073 // Check whether the expected and actual arguments count match. If not,
4074 // setup registers according to contract with ArgumentsAdaptorTrampoline:
4075 // a0: actual arguments count
4076 // a1: function (passed through to callee)
4077 // a2: expected arguments count
4079 // The code below is made a lot easier because the calling code already sets
4080 // up actual and expected registers according to the contract if values are
4081 // passed in registers.
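// Illustrative scenario: if a function declared with two parameters is called
// with three arguments, a0 (actual == 3) and a2 (expected == 2) do not match,
// so the call is routed through the ArgumentsAdaptorTrampoline below unless
// the callee opted out via kDontAdaptArgumentsSentinel.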
4082 DCHECK(actual.is_immediate() || actual.reg().is(a0));
4083 DCHECK(expected.is_immediate() || expected.reg().is(a2));
4084 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
4086 if (expected.is_immediate()) {
4087 DCHECK(actual.is_immediate());
4088 if (expected.immediate() == actual.immediate()) {
4089 definitely_matches = true;
4091 li(a0, Operand(actual.immediate()));
4092 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4093 if (expected.immediate() == sentinel) {
4094 // Don't worry about adapting arguments for builtins that
4095 // don't want that done. Skip adaptation code by making it look
4096 // like we have a match between expected and actual number of arguments.
4098 definitely_matches = true;
4100 *definitely_mismatches = true;
4101 li(a2, Operand(expected.immediate()));
4104 } else if (actual.is_immediate()) {
4105 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.immediate()));
4106 li(a0, Operand(actual.immediate()));
4108 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.reg()));
4111 if (!definitely_matches) {
4112 if (!code_constant.is_null()) {
4113 li(a3, Operand(code_constant));
4114 daddiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
4117 Handle<Code> adaptor =
4118 isolate()->builtins()->ArgumentsAdaptorTrampoline();
4119 if (flag == CALL_FUNCTION) {
4120 call_wrapper.BeforeCall(CallSize(adaptor));
4122 call_wrapper.AfterCall();
4123 if (!*definitely_mismatches) {
4127 Jump(adaptor, RelocInfo::CODE_TARGET);
4129 bind(®ular_invoke);
4134 void MacroAssembler::InvokeCode(Register code,
4135 const ParameterCount& expected,
4136 const ParameterCount& actual,
4138 const CallWrapper& call_wrapper) {
4139 // You can't call a function without a valid frame.
4140 DCHECK(flag == JUMP_FUNCTION || has_frame());
4144 bool definitely_mismatches = false;
4145 InvokePrologue(expected, actual, Handle<Code>::null(), code,
4146 &done, &definitely_mismatches, flag,
4148 if (!definitely_mismatches) {
4149 if (flag == CALL_FUNCTION) {
4150 call_wrapper.BeforeCall(CallSize(code));
4152 call_wrapper.AfterCall();
4154 DCHECK(flag == JUMP_FUNCTION);
4157 // Continue here if InvokePrologue does handle the invocation due to
4158 // mismatched parameter counts.
4164 void MacroAssembler::InvokeFunction(Register function,
4165 const ParameterCount& actual,
4167 const CallWrapper& call_wrapper) {
4168 // You can't call a function without a valid frame.
4169 DCHECK(flag == JUMP_FUNCTION || has_frame());
4171 // Contract with called JS functions requires that function is passed in a1.
4172 DCHECK(function.is(a1));
4173 Register expected_reg = a2;
4174 Register code_reg = a3;
4175 ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4176 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4177 // The argument count is stored as int32_t on 64-bit platforms.
4178 // TODO(plind): Smi on 32-bit platforms.
4180 FieldMemOperand(code_reg,
4181 SharedFunctionInfo::kFormalParameterCountOffset));
4182 ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4183 ParameterCount expected(expected_reg);
4184 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
4188 void MacroAssembler::InvokeFunction(Register function,
4189 const ParameterCount& expected,
4190 const ParameterCount& actual,
4192 const CallWrapper& call_wrapper) {
4193 // You can't call a function without a valid frame.
4194 DCHECK(flag == JUMP_FUNCTION || has_frame());
4196 // Contract with called JS functions requires that function is passed in a1.
4197 DCHECK(function.is(a1));
4199 // Get the function and setup the context.
4200 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4202 // We call indirectly through the code field in the function to
4203 // allow recompilation to take effect without changing any of the call sites.
4205 ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4206 InvokeCode(a3, expected, actual, flag, call_wrapper);
4210 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4211 const ParameterCount& expected,
4212 const ParameterCount& actual,
4214 const CallWrapper& call_wrapper) {
4216 InvokeFunction(a1, expected, actual, flag, call_wrapper);
4220 void MacroAssembler::IsObjectJSStringType(Register object,
4223 DCHECK(kNotStringTag != 0);
4225 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4226 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4227 And(scratch, scratch, Operand(kIsNotStringMask));
4228 Branch(fail, ne, scratch, Operand(zero_reg));
4232 void MacroAssembler::IsObjectNameType(Register object,
4235 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4236 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4237 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4241 // ---------------------------------------------------------------------------
4242 // Support functions.
4245 void MacroAssembler::GetMapConstructor(Register result, Register map,
4246 Register temp, Register temp2) {
4248 ld(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
4250 JumpIfSmi(result, &done);
4251 GetObjectType(result, temp, temp2);
4252 Branch(&done, ne, temp2, Operand(MAP_TYPE));
4253 ld(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
4259 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
4260 Register scratch, Label* miss) {
4261 // Get the prototype or initial map from the function.
4263 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4265 // If the prototype or initial map is the hole, don't return it and
4266 // simply miss the cache instead. This will allow us to allocate a
4267 // prototype object on-demand in the runtime system.
4268 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4269 Branch(miss, eq, result, Operand(t8));
4271 // If the function does not have an initial map, we're done.
4273 GetObjectType(result, scratch, scratch);
4274 Branch(&done, ne, scratch, Operand(MAP_TYPE));
4276 // Get the prototype from the initial map.
4277 ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
4284 void MacroAssembler::GetObjectType(Register object,
4286 Register type_reg) {
4287 ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
4288 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4292 // -----------------------------------------------------------------------------
4295 void MacroAssembler::CallStub(CodeStub* stub,
4296 TypeFeedbackId ast_id,
4300 BranchDelaySlot bd) {
4301 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
4302 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4307 void MacroAssembler::TailCallStub(CodeStub* stub,
4311 BranchDelaySlot bd) {
4312 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4316 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4317 return has_frame_ || !stub->SometimesSetsUpAFrame();
4321 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4322 // If the hash field contains an array index, pick it out. The assert checks
4323 // that the constants for the maximum number of digits for an array index
4324 // cached in the hash field and the number of bits reserved for it do not conflict.
4326 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4327 (1 << String::kArrayIndexValueBits));
4328 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4332 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4336 Register heap_number_map,
4338 ObjectToDoubleFlags flags) {
4340 if ((flags & OBJECT_NOT_SMI) == 0) {
4342 JumpIfNotSmi(object, ¬_smi);
4343 // Remove smi tag and convert to double.
4344 // dsra(scratch1, object, kSmiTagSize);
4345 dsra32(scratch1, object, 0);
4346 mtc1(scratch1, result);
4347 cvt_d_w(result, result);
4351 // Check for heap number and load double value from it.
4352 ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4353 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4355 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4356 // If exponent is all ones the number is either a NaN or +/-Infinity.
4357 Register exponent = scratch1;
4358 Register mask_reg = scratch2;
4359 lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4360 li(mask_reg, HeapNumber::kExponentMask);
4362 And(exponent, exponent, mask_reg);
4363 Branch(not_number, eq, exponent, Operand(mask_reg));
4365 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4370 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4372 Register scratch1) {
4373 dsra32(scratch1, smi, 0);
4374 mtc1(scratch1, value);
4375 cvt_d_w(value, value);
4379 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4380 const Operand& right,
4381 Register overflow_dst,
4383 if (right.is_reg()) {
4384 AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4387 li(t9, right); // Load right.
4388 mov(scratch, left); // Preserve left.
4389 addu(dst, left, t9); // Left is overwritten.
4390 xor_(scratch, dst, scratch); // Original left.
4391 xor_(overflow_dst, dst, t9);
4392 and_(overflow_dst, overflow_dst, scratch);
4395 addu(dst, left, t9);
4396 xor_(overflow_dst, dst, left);
4397 xor_(scratch, dst, t9);
4398 and_(overflow_dst, scratch, overflow_dst);
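// The xor/and sequences above implement the usual sign-bit overflow test:
// signed addition overflows exactly when both operands have the same sign and
// the result's sign differs, i.e. when (dst ^ left) and (dst ^ right) both
// have their sign bits set. Callers therefore branch on overflow_dst being
// negative; e.g. adding 1 to the most positive int32 yields a negative dst
// whose xor with either operand has the sign bit set.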
4404 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4406 Register overflow_dst,
4408 DCHECK(!dst.is(overflow_dst));
4409 DCHECK(!dst.is(scratch));
4410 DCHECK(!overflow_dst.is(scratch));
4411 DCHECK(!overflow_dst.is(left));
4412 DCHECK(!overflow_dst.is(right));
4414 if (left.is(right) && dst.is(left)) {
4415 DCHECK(!dst.is(t9));
4416 DCHECK(!scratch.is(t9));
4417 DCHECK(!left.is(t9));
4418 DCHECK(!right.is(t9));
4419 DCHECK(!overflow_dst.is(t9));
4425 mov(scratch, left); // Preserve left.
4426 addu(dst, left, right); // Left is overwritten.
4427 xor_(scratch, dst, scratch); // Original left.
4428 xor_(overflow_dst, dst, right);
4429 and_(overflow_dst, overflow_dst, scratch);
4430 } else if (dst.is(right)) {
4431 mov(scratch, right); // Preserve right.
4432 addu(dst, left, right); // Right is overwritten.
4433 xor_(scratch, dst, scratch); // Original right.
4434 xor_(overflow_dst, dst, left);
4435 and_(overflow_dst, overflow_dst, scratch);
4437 addu(dst, left, right);
4438 xor_(overflow_dst, dst, left);
4439 xor_(scratch, dst, right);
4440 and_(overflow_dst, scratch, overflow_dst);
4445 void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
4446 const Operand& right,
4447 Register overflow_dst,
4449 if (right.is_reg()) {
4450 DadduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4453 li(t9, right); // Load right.
4454 mov(scratch, left); // Preserve left.
4455 daddu(dst, left, t9); // Left is overwritten.
4456 xor_(scratch, dst, scratch); // Original left.
4457 xor_(overflow_dst, dst, t9);
4458 and_(overflow_dst, overflow_dst, scratch);
4460 li(t9, right); // Load right.
4461 Daddu(dst, left, t9);
4462 xor_(overflow_dst, dst, left);
4463 xor_(scratch, dst, t9);
4464 and_(overflow_dst, scratch, overflow_dst);
4470 void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
4472 Register overflow_dst,
4474 DCHECK(!dst.is(overflow_dst));
4475 DCHECK(!dst.is(scratch));
4476 DCHECK(!overflow_dst.is(scratch));
4477 DCHECK(!overflow_dst.is(left));
4478 DCHECK(!overflow_dst.is(right));
4480 if (left.is(right) && dst.is(left)) {
4481 DCHECK(!dst.is(t9));
4482 DCHECK(!scratch.is(t9));
4483 DCHECK(!left.is(t9));
4484 DCHECK(!right.is(t9));
4485 DCHECK(!overflow_dst.is(t9));
4491 mov(scratch, left); // Preserve left.
4492 daddu(dst, left, right); // Left is overwritten.
4493 xor_(scratch, dst, scratch); // Original left.
4494 xor_(overflow_dst, dst, right);
4495 and_(overflow_dst, overflow_dst, scratch);
4496 } else if (dst.is(right)) {
4497 mov(scratch, right); // Preserve right.
4498 daddu(dst, left, right); // Right is overwritten.
4499 xor_(scratch, dst, scratch); // Original right.
4500 xor_(overflow_dst, dst, left);
4501 and_(overflow_dst, overflow_dst, scratch);
4503 daddu(dst, left, right);
4504 xor_(overflow_dst, dst, left);
4505 xor_(scratch, dst, right);
4506 and_(overflow_dst, scratch, overflow_dst);
4511 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4512 const Operand& right,
4513 Register overflow_dst,
4515 if (right.is_reg()) {
4516 SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4519 li(t9, right); // Load right.
4520 mov(scratch, left); // Preserve left.
4521 Subu(dst, left, t9); // Left is overwritten.
4522 xor_(overflow_dst, dst, scratch); // scratch is original left.
4523 xor_(scratch, scratch, t9); // scratch is original left.
4524 and_(overflow_dst, scratch, overflow_dst);
4527 subu(dst, left, t9);
4528 xor_(overflow_dst, dst, left);
4529 xor_(scratch, left, t9);
4530 and_(overflow_dst, scratch, overflow_dst);
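// For subtraction the test is analogous: overflow occurs exactly when the
// operands have different signs and the result's sign differs from left's,
// i.e. when (dst ^ left) and (left ^ right) both have their sign bits set, so
// a negative overflow_dst again signals overflow.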
4536 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4538 Register overflow_dst,
4540 DCHECK(!dst.is(overflow_dst));
4541 DCHECK(!dst.is(scratch));
4542 DCHECK(!overflow_dst.is(scratch));
4543 DCHECK(!overflow_dst.is(left));
4544 DCHECK(!overflow_dst.is(right));
4545 DCHECK(!scratch.is(left));
4546 DCHECK(!scratch.is(right));
4548 // This happens with some crankshaft code. Since Subu works fine if
4549 // left == right, let's not make that restriction here.
4550 if (left.is(right)) {
4552 mov(overflow_dst, zero_reg);
4557 mov(scratch, left); // Preserve left.
4558 subu(dst, left, right); // Left is overwritten.
4559 xor_(overflow_dst, dst, scratch); // scratch is original left.
4560 xor_(scratch, scratch, right); // scratch is original left.
4561 and_(overflow_dst, scratch, overflow_dst);
4562 } else if (dst.is(right)) {
4563 mov(scratch, right); // Preserve right.
4564 subu(dst, left, right); // Right is overwritten.
4565 xor_(overflow_dst, dst, left);
4566 xor_(scratch, left, scratch); // Original right.
4567 and_(overflow_dst, scratch, overflow_dst);
4569 subu(dst, left, right);
4570 xor_(overflow_dst, dst, left);
4571 xor_(scratch, left, right);
4572 and_(overflow_dst, scratch, overflow_dst);
4577 void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
4578 const Operand& right,
4579 Register overflow_dst,
4581 if (right.is_reg()) {
4582 DsubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4585 li(t9, right); // Load right.
4586 mov(scratch, left); // Preserve left.
4587 dsubu(dst, left, t9); // Left is overwritten.
4588 xor_(overflow_dst, dst, scratch); // scratch is original left.
4589 xor_(scratch, scratch, t9); // scratch is original left.
4590 and_(overflow_dst, scratch, overflow_dst);
4593 dsubu(dst, left, t9);
4594 xor_(overflow_dst, dst, left);
4595 xor_(scratch, left, t9);
4596 and_(overflow_dst, scratch, overflow_dst);
4602 void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
4604 Register overflow_dst,
4606 DCHECK(!dst.is(overflow_dst));
4607 DCHECK(!dst.is(scratch));
4608 DCHECK(!overflow_dst.is(scratch));
4609 DCHECK(!overflow_dst.is(left));
4610 DCHECK(!overflow_dst.is(right));
4611 DCHECK(!scratch.is(left));
4612 DCHECK(!scratch.is(right));
4614 // This happens with some crankshaft code. Since Subu works fine if
4615 // left == right, let's not make that restriction here.
4616 if (left.is(right)) {
4618 mov(overflow_dst, zero_reg);
4623 mov(scratch, left); // Preserve left.
4624 dsubu(dst, left, right); // Left is overwritten.
4625 xor_(overflow_dst, dst, scratch); // scratch is original left.
4626 xor_(scratch, scratch, right); // scratch is original left.
4627 and_(overflow_dst, scratch, overflow_dst);
4628 } else if (dst.is(right)) {
4629 mov(scratch, right); // Preserve right.
4630 dsubu(dst, left, right); // Right is overwritten.
4631 xor_(overflow_dst, dst, left);
4632 xor_(scratch, left, scratch); // Original right.
4633 and_(overflow_dst, scratch, overflow_dst);
4635 dsubu(dst, left, right);
4636 xor_(overflow_dst, dst, left);
4637 xor_(scratch, left, right);
4638 and_(overflow_dst, scratch, overflow_dst);
4642 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
4643 SaveFPRegsMode save_doubles,
4644 BranchDelaySlot bd) {
4645 // All parameters are on the stack. v0 has the return value after call.
4647 // If the expected number of arguments of the runtime function is
4648 // constant, we check that the actual number of arguments match the
4650 CHECK(f->nargs < 0 || f->nargs == num_arguments);
4652 // TODO(1236192): Most runtime routines don't need the number of
4653 // arguments passed in because it is constant. At some point we
4654 // should remove this need and make the runtime routine entry code smarter.
4656 PrepareCEntryArgs(num_arguments);
4657 PrepareCEntryFunction(ExternalReference(f, isolate()));
4658 CEntryStub stub(isolate(), 1, save_doubles);
4659 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4663 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4665 BranchDelaySlot bd) {
4666 PrepareCEntryArgs(num_arguments);
4667 PrepareCEntryFunction(ext);
4669 CEntryStub stub(isolate(), 1);
4670 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4674 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4677 // TODO(1236192): Most runtime routines don't need the number of
4678 // arguments passed in because it is constant. At some point we
4679 // should remove this need and make the runtime routine entry code smarter.
4681 PrepareCEntryArgs(num_arguments);
4682 JumpToExternalReference(ext);
4686 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4689 TailCallExternalReference(ExternalReference(fid, isolate()),
4695 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4696 BranchDelaySlot bd) {
4697 PrepareCEntryFunction(builtin);
4698 CEntryStub stub(isolate(), 1);
4699 Jump(stub.GetCode(),
4700 RelocInfo::CODE_TARGET,
4708 void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
4709 const CallWrapper& call_wrapper) {
4710 // You can't call a builtin without a valid frame.
4711 DCHECK(flag == JUMP_FUNCTION || has_frame());
4713 GetBuiltinEntry(t9, native_context_index);
4714 if (flag == CALL_FUNCTION) {
4715 call_wrapper.BeforeCall(CallSize(t9));
4717 call_wrapper.AfterCall();
4719 DCHECK(flag == JUMP_FUNCTION);
4725 void MacroAssembler::GetBuiltinFunction(Register target,
4726 int native_context_index) {
4727 // Load the builtins object into target register.
4728 ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4729 ld(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
4730 // Load the JavaScript builtin function from the builtins object.
4731 ld(target, ContextOperand(target, native_context_index));
4735 void MacroAssembler::GetBuiltinEntry(Register target,
4736 int native_context_index) {
4737 DCHECK(!target.is(a1));
4738 GetBuiltinFunction(a1, native_context_index);
4739 // Load the code entry point from the builtins object.
4740 ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4744 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4745 Register scratch1, Register scratch2) {
4746 if (FLAG_native_code_counters && counter->Enabled()) {
4747 li(scratch1, Operand(value));
4748 li(scratch2, Operand(ExternalReference(counter)));
4749 sd(scratch1, MemOperand(scratch2));
4754 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4755 Register scratch1, Register scratch2) {
4757 if (FLAG_native_code_counters && counter->Enabled()) {
4758 li(scratch2, Operand(ExternalReference(counter)));
4759 ld(scratch1, MemOperand(scratch2));
4760 Daddu(scratch1, scratch1, Operand(value));
4761 sd(scratch1, MemOperand(scratch2));
4766 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4767 Register scratch1, Register scratch2) {
4769 if (FLAG_native_code_counters && counter->Enabled()) {
4770 li(scratch2, Operand(ExternalReference(counter)));
4771 ld(scratch1, MemOperand(scratch2));
4772 Dsubu(scratch1, scratch1, Operand(value));
4773 sd(scratch1, MemOperand(scratch2));
4778 // -----------------------------------------------------------------------------
4781 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4782 Register rs, Operand rt) {
4783 if (emit_debug_code())
4784 Check(cc, reason, rs, rt);
4788 void MacroAssembler::AssertFastElements(Register elements) {
4789 if (emit_debug_code()) {
4790 DCHECK(!elements.is(at));
4793 ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4794 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4795 Branch(&ok, eq, elements, Operand(at));
4796 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4797 Branch(&ok, eq, elements, Operand(at));
4798 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4799 Branch(&ok, eq, elements, Operand(at));
4800 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4807 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4808 Register rs, Operand rt) {
4810 Branch(&L, cc, rs, rt);
4812 // Will not return here.
4817 void MacroAssembler::Abort(BailoutReason reason) {
4821 const char* msg = GetBailoutReason(reason);
4823 RecordComment("Abort message: ");
4827 if (FLAG_trap_on_abort) {
4833 li(a0, Operand(Smi::FromInt(reason)));
4835 // Disable stub call restrictions to always allow calls to abort.
4837 // We don't actually want to generate a pile of code for this, so just
4838 // claim there is a stack frame, without generating one.
4839 FrameScope scope(this, StackFrame::NONE);
4840 CallRuntime(Runtime::kAbort, 1);
4842 CallRuntime(Runtime::kAbort, 1);
4844 // Will not return here.
4845 if (is_trampoline_pool_blocked()) {
4846 // If the calling code cares about the exact number of
4847 // instructions generated, we insert padding here to keep the size
4848 // of the Abort macro constant.
4849 // Currently in debug mode with debug_code enabled the number of
4850 // generated instructions is 10, so we use this as a maximum value.
4851 static const int kExpectedAbortInstructions = 10;
4852 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4853 DCHECK(abort_instructions <= kExpectedAbortInstructions);
4854 while (abort_instructions++ < kExpectedAbortInstructions) {
4861 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4862 if (context_chain_length > 0) {
4863 // Move up the chain of contexts to the context containing the slot.
4864 ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4865 for (int i = 1; i < context_chain_length; i++) {
4866 ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4869 // Slot is in the current function context. Move it into the
4870 // destination register in case we store into it (the write barrier
4871 // cannot be allowed to destroy the context register, cp).
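// For example, a context_chain_length of 2 follows two PREVIOUS_INDEX links
// starting from cp, while a length of 0 simply copies the current context
// (cp) into the destination register, as described above.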
4877 void MacroAssembler::LoadGlobalProxy(Register dst) {
4878 ld(dst, GlobalObjectOperand());
4879 ld(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
4883 void MacroAssembler::LoadTransitionedArrayMapConditional(
4884 ElementsKind expected_kind,
4885 ElementsKind transitioned_kind,
4886 Register map_in_out,
4888 Label* no_map_match) {
4889 // Load the global or builtins object from the current context.
4891 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4892 ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4894 // Check that the function's map is the same as the expected cached map.
4897 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4898 int offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
4899 ld(at, FieldMemOperand(scratch, offset));
4900 Branch(no_map_match, ne, map_in_out, Operand(at));
4902 // Use the transitioned cached map.
4903 offset = transitioned_kind * kPointerSize +
4904 FixedArrayBase::kHeaderSize;
4905 ld(map_in_out, FieldMemOperand(scratch, offset));
4909 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4910 // Load the global or builtins object from the current context.
4912 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4913 // Load the native context from the global or builtins object.
4914 ld(function, FieldMemOperand(function,
4915 GlobalObject::kNativeContextOffset));
4916 // Load the function from the native context.
4917 ld(function, MemOperand(function, Context::SlotOffset(index)));
4921 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4924 // Load the initial map. The global functions all have initial maps.
4925 ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4926 if (emit_debug_code()) {
4928 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4931 Abort(kGlobalFunctionsMustHaveInitialMap);
4937 void MacroAssembler::StubPrologue() {
4939 Push(Smi::FromInt(StackFrame::STUB));
4940 // Adjust FP to point to saved FP.
4941 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4945 void MacroAssembler::Prologue(bool code_pre_aging) {
4946 PredictableCodeSizeScope predictible_code_size_scope(
4947 this, kNoCodeAgeSequenceLength);
4948 // The following three instructions must remain together and unmodified
4949 // for code aging to work properly.
4950 if (code_pre_aging) {
4951 // Pre-age the code.
4952 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4953 nop(Assembler::CODE_AGE_MARKER_NOP);
4954 // Load the stub address to t9 and call it,
4955 // GetCodeAgeAndParity() extracts the stub address from this instruction.
4957 Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
4959 nop(); // Prevent jalr to jal optimization.
4961 nop(); // Branch delay slot nop.
4962 nop(); // Pad the empty space.
4964 Push(ra, fp, cp, a1);
4965 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4966 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4967 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4968 // Adjust fp to point to caller's fp.
4969 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4974 void MacroAssembler::EnterFrame(StackFrame::Type type,
4975 bool load_constant_pool_pointer_reg) {
4976 // Out-of-line constant pool not implemented on mips64.
4981 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4982 daddiu(sp, sp, -5 * kPointerSize);
4983 li(t8, Operand(Smi::FromInt(type)));
4984 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4985 sd(ra, MemOperand(sp, 4 * kPointerSize));
4986 sd(fp, MemOperand(sp, 3 * kPointerSize));
4987 sd(cp, MemOperand(sp, 2 * kPointerSize));
4988 sd(t8, MemOperand(sp, 1 * kPointerSize));
4989 sd(t9, MemOperand(sp, 0 * kPointerSize));
4990 // Adjust FP to point to saved FP.
4992 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4996 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4998 ld(fp, MemOperand(sp, 0 * kPointerSize));
4999 ld(ra, MemOperand(sp, 1 * kPointerSize));
5000 daddiu(sp, sp, 2 * kPointerSize);
5004 void MacroAssembler::EnterExitFrame(bool save_doubles,
5006 // Set up the frame structure on the stack.
5007 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
5008 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
5009 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
5011 // This is how the stack will look:
5012 // fp + 2 (==kCallerSPDisplacement) - old stack's end
5013 // [fp + 1 (==kCallerPCOffset)] - saved old ra
5014 // [fp + 0 (==kCallerFPOffset)] - saved old fp
5015 // [fp - 1 (==kSPOffset)] - sp of the called function
5016 // [fp - 2 (==kCodeOffset)] - CodeObject
5017 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
5018 // new stack (will contain saved ra)
5021 daddiu(sp, sp, -4 * kPointerSize);
5022 sd(ra, MemOperand(sp, 3 * kPointerSize));
5023 sd(fp, MemOperand(sp, 2 * kPointerSize));
5024 daddiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
5026 if (emit_debug_code()) {
5027 sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
5030 // Accessed from ExitFrame::code_slot.
5031 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
5032 sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
5034 // Save the frame pointer and the context in top.
5035 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5036 sd(fp, MemOperand(t8));
5037 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5038 sd(cp, MemOperand(t8));
5040 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
5042 // The stack is already aligned to 0 modulo 8 for stores with sdc1.
5043 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
5044 int space = kNumOfSavedRegisters * kDoubleSize;
5045 Dsubu(sp, sp, Operand(space));
5046 // Remember: we only need to save every 2nd double FPU value.
5047 for (int i = 0; i < kNumOfSavedRegisters; i++) {
5048 FPURegister reg = FPURegister::from_code(2 * i);
5049 sdc1(reg, MemOperand(sp, i * kDoubleSize));
5053 // Reserve space for the return address, stack space and an optional slot
5054 // (used by the DirectCEntryStub to hold the return value if a struct is
5055 // returned) and align the frame preparing for calling the runtime function.
5056 DCHECK(stack_space >= 0);
5057 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
5058 if (frame_alignment > 0) {
5059 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5060 And(sp, sp, Operand(-frame_alignment)); // Align stack.
5063 // Set the exit frame sp value to point just before the return address location.
5065 daddiu(at, sp, kPointerSize);
5066 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
5070 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
5071 bool restore_context, bool do_return,
5072 bool argument_count_is_length) {
5073 // Optionally restore all double registers.
5075 // Remember: we only need to restore every 2nd double FPU value.
5076 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
5077 Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize +
5078 kNumOfSavedRegisters * kDoubleSize));
5079 for (int i = 0; i < kNumOfSavedRegisters; i++) {
5080 FPURegister reg = FPURegister::from_code(2 * i);
5081 ldc1(reg, MemOperand(t8, i * kDoubleSize));
5086 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5087 sd(zero_reg, MemOperand(t8));
5089 // Restore current context from top and clear it in debug mode.
5090 if (restore_context) {
5091 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5092 ld(cp, MemOperand(t8));
5095 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5096 sd(a3, MemOperand(t8));
5099 // Pop the arguments, restore registers, and return.
5100 mov(sp, fp); // Respect ABI stack constraint.
5101 ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
5102 ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
5104 if (argument_count.is_valid()) {
5105 if (argument_count_is_length) {
5106 daddu(sp, sp, argument_count);
5108 dsll(t8, argument_count, kPointerSizeLog2);
5114 Ret(USE_DELAY_SLOT);
5115 // If returning, the instruction in the delay slot will be the daddiu below.
5117 daddiu(sp, sp, 2 * kPointerSize);
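// Worked illustration of Ret(USE_DELAY_SLOT): the daddiu above is emitted
// into the delay slot of the return jump, so when do_return is true it still
// executes before control leaves the function; when do_return is false it
// runs as ordinary straight-line code. Either way the two exit-frame words
// (saved fp and ra) are popped exactly once.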
5121 void MacroAssembler::InitializeNewString(Register string,
5123 Heap::RootListIndex map_index,
5125 Register scratch2) {
5126 // dsll(scratch1, length, kSmiTagSize);
5127 dsll32(scratch1, length, 0);
5128 LoadRoot(scratch2, map_index);
5129 sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
5130 li(scratch1, Operand(String::kEmptyHashField));
5131 sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
5132 sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
5136 int MacroAssembler::ActivationFrameAlignment() {
5137 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
5138 // Running on the real platform. Use the alignment as mandated by the local environment.
5140 // Note: This will break if we ever start generating snapshots on one Mips
5141 // platform for another Mips platform with a different alignment.
5142 return base::OS::ActivationFrameAlignment();
5143 #else // V8_HOST_ARCH_MIPS
5144 // If we are using the simulator then we should always align to the expected
5145 // alignment. As the simulator is used to generate snapshots we do not know
5146 // if the target platform will need alignment, so this is controlled from a flag.
5148 return FLAG_sim_stack_alignment;
5149 #endif // V8_HOST_ARCH_MIPS
5153 void MacroAssembler::AssertStackIsAligned() {
5154 if (emit_debug_code()) {
5155 const int frame_alignment = ActivationFrameAlignment();
5156 const int frame_alignment_mask = frame_alignment - 1;
5158 if (frame_alignment > kPointerSize) {
5159 Label alignment_as_expected;
5160 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5161 andi(at, sp, frame_alignment_mask);
5162 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5163 // Don't use Check here, as it will call Runtime_Abort re-entering here.
5164 stop("Unexpected stack alignment");
5165 bind(&alignment_as_expected);
5171 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
5174 Label* not_power_of_two_or_zero) {
5175 Dsubu(scratch, reg, Operand(1));
5176 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
5177 scratch, Operand(zero_reg));
5178 and_(at, scratch, reg); // In the delay slot.
5179 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
5183 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
5184 DCHECK(!reg.is(overflow));
5185 mov(overflow, reg); // Save original value.
5187 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
5191 void MacroAssembler::SmiTagCheckOverflow(Register dst,
5193 Register overflow) {
5195 // Fall back to slower case.
5196 SmiTagCheckOverflow(dst, overflow);
5198 DCHECK(!dst.is(src));
5199 DCHECK(!dst.is(overflow));
5200 DCHECK(!src.is(overflow));
5202 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
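// Illustration of the overflow test above, for the 31-bit smi scheme where
// tagging is a left shift by one (value -> 2 * value):
//   value = 0x40000000: 2 * value = 0x80000000, so value ^ (2 * value) has
//   the sign bit set and the tag operation overflowed.
//   value = 0x20000000: 2 * value = 0x40000000, the xor stays non-negative,
//   so no overflow.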
5207 void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
5208 if (SmiValuesAre32Bits()) {
5209 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5217 void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
5218 if (SmiValuesAre32Bits()) {
5219 // TODO(plind): not clear if lw or ld faster here, need micro-benchmark.
5220 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5221 dsll(dst, dst, scale);
5224 DCHECK(scale >= kSmiTagSize);
5225 sll(dst, dst, scale - kSmiTagSize);
5230 // Returns 2 values: the Smi and a scaled version of the int within the Smi.
5231 void MacroAssembler::SmiLoadWithScale(Register d_smi,
5235 if (SmiValuesAre32Bits()) {
5237 dsra(d_scaled, d_smi, kSmiShift - scale);
5240 DCHECK(scale >= kSmiTagSize);
5241 sll(d_scaled, d_smi, scale - kSmiTagSize);
5246 // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
5247 void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
5251 if (SmiValuesAre32Bits()) {
5252 lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
5253 dsll(d_scaled, d_int, scale);
5256 // Need both the int and the scaled int, so use two instructions.
5258 sll(d_scaled, d_int, scale);
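// Illustration of the shift arithmetic in the Smi load helpers above,
// assuming 32-bit smi values (kSmiShift == 32): a tagged smi holds
// value << 32, so dsra(dst, smi, kSmiShift - scale) untags and scales in a
// single shift, e.g. value 5 with scale 3 gives (5 << 32) >> 29 == 40.
// With 31-bit smis (kSmiTagSize == 1) the equivalent is
// sll(dst, smi, scale - kSmiTagSize).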
5263 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5266 // DCHECK(!dst.is(src));
5267 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5272 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5274 Label* non_smi_case) {
5275 // DCHECK(!dst.is(src));
5276 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
5280 void MacroAssembler::JumpIfSmi(Register value,
5283 BranchDelaySlot bd) {
5284 DCHECK_EQ(0, kSmiTag);
5285 andi(scratch, value, kSmiTagMask);
5286 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5289 void MacroAssembler::JumpIfNotSmi(Register value,
5290 Label* not_smi_label,
5292 BranchDelaySlot bd) {
5293 DCHECK_EQ(0, kSmiTag);
5294 andi(scratch, value, kSmiTagMask);
5295 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5299 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5301 Label* on_not_both_smi) {
5302 STATIC_ASSERT(kSmiTag == 0);
5303 // TODO(plind): Find a better way to fix this assert issue.
5304 #if defined(__APPLE__)
5305 DCHECK_EQ(1, kSmiTagMask);
5307 DCHECK_EQ((int64_t)1, kSmiTagMask);
5309 or_(at, reg1, reg2);
5310 JumpIfNotSmi(at, on_not_both_smi);
5314 void MacroAssembler::JumpIfEitherSmi(Register reg1,
5316 Label* on_either_smi) {
5317 STATIC_ASSERT(kSmiTag == 0);
5318 // TODO(plind): Find a better way to fix this assert issue.
5319 #if defined(__APPLE__)
5320 DCHECK_EQ(1, kSmiTagMask);
5322 DCHECK_EQ((int64_t)1, kSmiTagMask);
5324 // Both Smi tag bits must be 1 for the AND below to look like a non-Smi.
5325 and_(at, reg1, reg2);
5326 JumpIfSmi(at, on_either_smi);
5330 void MacroAssembler::AssertNotSmi(Register object) {
5331 if (emit_debug_code()) {
5332 STATIC_ASSERT(kSmiTag == 0);
5333 andi(at, object, kSmiTagMask);
5334 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
5339 void MacroAssembler::AssertSmi(Register object) {
5340 if (emit_debug_code()) {
5341 STATIC_ASSERT(kSmiTag == 0);
5342 andi(at, object, kSmiTagMask);
5343 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
5348 void MacroAssembler::AssertString(Register object) {
5349 if (emit_debug_code()) {
5350 STATIC_ASSERT(kSmiTag == 0);
5352 Check(ne, kOperandIsASmiAndNotAString, a4, Operand(zero_reg));
5354 ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
5355 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5356 Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
5362 void MacroAssembler::AssertName(Register object) {
5363 if (emit_debug_code()) {
5364 STATIC_ASSERT(kSmiTag == 0);
5366 Check(ne, kOperandIsASmiAndNotAName, a4, Operand(zero_reg));
5368 ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
5369 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5370 Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
5376 void MacroAssembler::AssertFunction(Register object) {
5377 if (emit_debug_code()) {
5378 STATIC_ASSERT(kSmiTag == 0);
5380 Check(ne, kOperandIsASmiAndNotAFunction, t0, Operand(zero_reg));
5382 GetObjectType(object, object, object);
5384 Check(ne, kOperandIsNotAFunction, object, Operand(JS_FUNCTION_TYPE));
5389 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5391 if (emit_debug_code()) {
5392 Label done_checking;
5393 AssertNotSmi(object);
5394 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5395 Branch(&done_checking, eq, object, Operand(scratch));
5397 ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
5398 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
5399 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
5401 bind(&done_checking);
5406 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
5407 if (emit_debug_code()) {
5408 DCHECK(!reg.is(at));
5409 LoadRoot(at, index);
5410 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
5415 void MacroAssembler::JumpIfNotHeapNumber(Register object,
5416 Register heap_number_map,
5418 Label* on_not_heap_number) {
5419 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5420 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
5421 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
5425 void MacroAssembler::LookupNumberStringCache(Register object,
5431 // Use of registers. Register result is used as a temporary.
5432 Register number_string_cache = result;
5433 Register mask = scratch3;
5435 // Load the number string cache.
5436 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
5438 // Make the hash mask from the length of the number string cache. It
5439 // contains two elements (number and string) for each cache entry.
5440 ld(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
5441 // Divide length by two (length is a smi).
5442 // dsra(mask, mask, kSmiTagSize + 1);
5443 dsra32(mask, mask, 1);
5444 Daddu(mask, mask, -1); // Make mask.
5446 // Calculate the entry in the number string cache. The hash value in the
5447 // number string cache for smis is just the smi value, and the hash for
5448 // doubles is the xor of the upper and lower words. See
5449 // Heap::GetNumberStringCache.
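// Worked example (illustration): with a 128-element cache (64 number/string
// pairs) the mask computed above is 63. For the heap number 1.0, whose raw
// bits are 0x3FF0000000000000, the hash is 0x3FF00000 ^ 0x00000000 and
// (hash & mask) picks the entry; each entry is two pointers, hence the
// shift by kPointerSizeLog2 + 1 further down.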
5451 Label load_result_from_cache;
5452 JumpIfSmi(object, &is_smi);
5455 Heap::kHeapNumberMapRootIndex,
5459 STATIC_ASSERT(8 == kDoubleSize);
5462 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
5463 ld(scratch2, MemOperand(scratch1, kPointerSize));
5464 ld(scratch1, MemOperand(scratch1, 0));
5465 Xor(scratch1, scratch1, Operand(scratch2));
5466 And(scratch1, scratch1, Operand(mask));
5468 // Calculate address of entry in string cache: each entry consists
5469 // of two pointer sized fields.
5470 dsll(scratch1, scratch1, kPointerSizeLog2 + 1);
5471 Daddu(scratch1, number_string_cache, scratch1);
5473 Register probe = mask;
5474 ld(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
5475 JumpIfSmi(probe, not_found);
5476 ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
5477 ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
5478 BranchF(&load_result_from_cache, NULL, eq, f12, f14);
5482 Register scratch = scratch1;
5483 // dsra(scratch, object, 1); // Shift away the tag.
5484 dsra32(scratch, object, 0);
5485 And(scratch, mask, Operand(scratch));
5487 // Calculate address of entry in string cache: each entry consists
5488 // of two pointer sized fields.
5489 dsll(scratch, scratch, kPointerSizeLog2 + 1);
5490 Daddu(scratch, number_string_cache, scratch);
5492 // Check if the entry is the smi we are looking for.
5493 ld(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
5494 Branch(not_found, ne, object, Operand(probe));
5496 // Get the result from the cache.
5497 bind(&load_result_from_cache);
5498 ld(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
5500 IncrementCounter(isolate()->counters()->number_to_string_native(),
5507 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
5508 Register first, Register second, Register scratch1, Register scratch2,
5510 // Test that both first and second are sequential one-byte strings.
5511 // Assume that they are non-smis.
5512 ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
5513 ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
5514 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
5515 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
5517 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
5522 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
5527 // Check that neither is a smi.
5528 STATIC_ASSERT(kSmiTag == 0);
5529 And(scratch1, first, Operand(second));
5530 JumpIfSmi(scratch1, failure);
5531 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
5536 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
5537 Register first, Register second, Register scratch1, Register scratch2,
5539 const int kFlatOneByteStringMask =
5540 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5541 const int kFlatOneByteStringTag =
5542 kStringTag | kOneByteStringTag | kSeqStringTag;
5543 DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
5544 andi(scratch1, first, kFlatOneByteStringMask);
5545 Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
5546 andi(scratch2, second, kFlatOneByteStringMask);
5547 Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
5551 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
5554 const int kFlatOneByteStringMask =
5555 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5556 const int kFlatOneByteStringTag =
5557 kStringTag | kOneByteStringTag | kSeqStringTag;
5558 And(scratch, type, Operand(kFlatOneByteStringMask));
5559 Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
5563 static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
5565 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5566 int num_double_arguments) {
5567 int stack_passed_words = 0;
5568 num_reg_arguments += 2 * num_double_arguments;
5570 // O32: Up to four simple arguments are passed in registers a0..a3.
5571 // N64: Up to eight simple arguments are passed in registers a0..a7.
5572 if (num_reg_arguments > kRegisterPassedArguments) {
5573 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5575 stack_passed_words += kCArgSlotCount;
5576 return stack_passed_words;
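// Worked example (illustration), assuming kCArgSlotCount is 0 for N64 and 4
// for O32: on N64 (8 register args) a call with 5 integer and 2 double
// arguments counts 5 + 2 * 2 = 9 slots, so 1 word is passed on the stack;
// on O32 (4 register args) 3 integer and 1 double argument give 3 + 2 = 5,
// i.e. 1 stack word plus the 4 reserved argument slots.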
5580 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5584 uint32_t encoding_mask) {
5587 Check(ne, kNonObject, at, Operand(zero_reg));
5589 ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
5590 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5592 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5593 li(scratch, Operand(encoding_mask));
5594 Check(eq, kUnexpectedStringType, at, Operand(scratch));
5596 // TODO(plind): requires Smi size check code for mips32.
5598 ld(at, FieldMemOperand(string, String::kLengthOffset));
5599 Check(lt, kIndexIsTooLarge, index, Operand(at));
5601 DCHECK(Smi::FromInt(0) == 0);
5602 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5606 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5607 int num_double_arguments,
5609 int frame_alignment = ActivationFrameAlignment();
5611 // n64: Up to eight simple arguments in a0..a3, a4..a7. No argument slots.
5612 // O32: Up to four simple arguments are passed in registers a0..a3.
5613 // Those four arguments must have reserved argument slots on the stack for
5614 // mips, even though those argument slots are not normally used.
5615 // Both ABIs: Remaining arguments are pushed on the stack, above (higher
5616 // address than) the (O32) argument slots. (arg slot calculation handled by
5617 // CalculateStackPassedWords()).
5618 int stack_passed_arguments = CalculateStackPassedWords(
5619 num_reg_arguments, num_double_arguments);
5620 if (frame_alignment > kPointerSize) {
5621 // Make stack end at alignment and make room for num_arguments - 4 words
5622 // and the original value of sp.
5624 Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5625 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5626 And(sp, sp, Operand(-frame_alignment));
5627 sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5629 Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5634 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5636 PrepareCallCFunction(num_reg_arguments, 0, scratch);
5640 void MacroAssembler::CallCFunction(ExternalReference function,
5641 int num_reg_arguments,
5642 int num_double_arguments) {
5643 li(t8, Operand(function));
5644 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5648 void MacroAssembler::CallCFunction(Register function,
5649 int num_reg_arguments,
5650 int num_double_arguments) {
5651 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5655 void MacroAssembler::CallCFunction(ExternalReference function,
5656 int num_arguments) {
5657 CallCFunction(function, num_arguments, 0);
5661 void MacroAssembler::CallCFunction(Register function,
5662 int num_arguments) {
5663 CallCFunction(function, num_arguments, 0);
5667 void MacroAssembler::CallCFunctionHelper(Register function,
5668 int num_reg_arguments,
5669 int num_double_arguments) {
5670 DCHECK(has_frame());
5671 // Make sure that the stack is aligned before calling a C function unless
5672 // running in the simulator. The simulator has its own alignment check which
5673 // provides more information.
5674 // The argument slots are presumed to have been set up by
5675 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5677 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
5678 if (emit_debug_code()) {
5679 int frame_alignment = base::OS::ActivationFrameAlignment();
5680 int frame_alignment_mask = frame_alignment - 1;
5681 if (frame_alignment > kPointerSize) {
5682 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5683 Label alignment_as_expected;
5684 And(at, sp, Operand(frame_alignment_mask));
5685 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5686 // Don't use Check here, as it will call Runtime_Abort possibly
5687 // re-entering here.
5688 stop("Unexpected alignment in CallCFunction");
5689 bind(&alignment_as_expected);
5692 #endif // V8_HOST_ARCH_MIPS
5694 // Just call directly. The function called cannot cause a GC, or
5695 // allow preemption, so the return address in the link register stays correct.
5698 if (!function.is(t9)) {
5705 int stack_passed_arguments = CalculateStackPassedWords(
5706 num_reg_arguments, num_double_arguments);
5708 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
5709 ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5711 Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5716 #undef BRANCH_ARGS_CHECK
5719 void MacroAssembler::CheckPageFlag(
5724 Label* condition_met) {
5725 And(scratch, object, Operand(~Page::kPageAlignmentMask));
5726 ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5727 And(scratch, scratch, Operand(mask));
5728 Branch(condition_met, cc, scratch, Operand(zero_reg));
5732 void MacroAssembler::JumpIfBlack(Register object,
5736 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5737 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5741 void MacroAssembler::HasColor(Register object,
5742 Register bitmap_scratch,
5743 Register mask_scratch,
5747 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5748 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5750 GetMarkBits(object, bitmap_scratch, mask_scratch);
5753 // Note that we are using a 4-byte aligned 8-byte load.
5754 Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5755 And(t8, t9, Operand(mask_scratch));
5756 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5757 // Shift left 1 by adding.
5758 Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
5759 And(t8, t9, Operand(mask_scratch));
5760 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5766 // Detect some, but not all, common pointer-free objects. This is used by the
5767 // incremental write barrier which doesn't care about oddballs (they are always
5768 // marked black immediately so this code is not hit).
5769 void MacroAssembler::JumpIfDataObject(Register value,
5771 Label* not_data_object) {
5772 DCHECK(!AreAliased(value, scratch, t8, no_reg));
5773 Label is_data_object;
5774 ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5775 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5776 Branch(&is_data_object, eq, t8, Operand(scratch));
5777 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5778 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5779 // If it's a string and it's not a cons string then it's an object containing no GC pointers.
5781 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5782 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5783 Branch(not_data_object, ne, t8, Operand(zero_reg));
5784 bind(&is_data_object);
5788 void MacroAssembler::GetMarkBits(Register addr_reg,
5789 Register bitmap_reg,
5790 Register mask_reg) {
5791 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5792 // addr_reg is divided into fields:
5793 // |63 page base 20|19 high 8|7 shift 3|2 0|
5794 // 'high' gives the index of the cell holding color bits for the object.
5795 // 'shift' gives the offset in the cell for this object's color.
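// Worked example (illustration): an object at page offset 0x128 is word
// 0x128 >> kPointerSizeLog2 == 37 of the page, so 'high' is 37 / 32 == 1
// (cell 1, byte offset 4 in the bitmap) and 'shift' is 37 % 32 == 5,
// making mask_reg 1 << 5 below.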
5796 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5797 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5798 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5799 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5800 dsll(t8, t8, Bitmap::kBytesPerCellLog2);
5801 Daddu(bitmap_reg, bitmap_reg, t8);
5803 dsllv(mask_reg, t8, mask_reg);
5807 void MacroAssembler::EnsureNotWhite(
5809 Register bitmap_scratch,
5810 Register mask_scratch,
5811 Register load_scratch,
5812 Label* value_is_white_and_not_data) {
5813 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5814 GetMarkBits(value, bitmap_scratch, mask_scratch);
5816 // If the value is black or grey we don't need to do anything.
5817 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5818 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5819 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5820 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5824 // Since both black and grey have a 1 in the first position and white does
5825 // not have a 1 there we only need to check one bit.
5826 // Note that we are using a 4-byte aligned 8-byte load.
5827 Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5828 And(t8, mask_scratch, load_scratch);
5829 Branch(&done, ne, t8, Operand(zero_reg));
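// Illustration: with the bit patterns asserted above (first bit listed
// first), the AND of mask_scratch with the bitmap word is non-zero exactly
// for black (10) and grey (11) objects, so the branch above exits early for
// anything that is already marked.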
5831 if (emit_debug_code()) {
5832 // Check for impossible bit pattern.
5834 // sll may overflow, making the check conservative.
5835 dsll(t8, mask_scratch, 1);
5836 And(t8, load_scratch, t8);
5837 Branch(&ok, eq, t8, Operand(zero_reg));
5838 stop("Impossible marking bit pattern");
5842 // Value is white. We check whether it is data that doesn't need scanning.
5843 // Currently only checks for HeapNumber and non-cons strings.
5844 Register map = load_scratch; // Holds map while checking type.
5845 Register length = load_scratch; // Holds length of object after testing type.
5846 Label is_data_object;
5848 // Check for heap-number
5849 ld(map, FieldMemOperand(value, HeapObject::kMapOffset));
5850 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5853 Branch(&skip, ne, t8, Operand(map));
5854 li(length, HeapNumber::kSize);
5855 Branch(&is_data_object);
5859 // Check for strings.
5860 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5861 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5862 // If it's a string and it's not a cons string then it's an object containing no GC pointers.
5864 Register instance_type = load_scratch;
5865 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5866 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5867 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5868 // It's a non-indirect (non-cons and non-slice) string.
5869 // If it's external, the length is just ExternalString::kSize.
5870 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5871 // External strings are the only ones with the kExternalStringTag bit set.
5873 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
5874 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
5875 And(t8, instance_type, Operand(kExternalStringTag));
5878 Branch(&skip, eq, t8, Operand(zero_reg));
5879 li(length, ExternalString::kSize);
5880 Branch(&is_data_object);
5884 // Sequential string, either Latin1 or UC16.
5885 // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
5886 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5887 // getting the length multiplied by 2.
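// Illustration of the size computed below: a 5-character string occupies
// 5 payload bytes as Latin1 or 10 as UC16; SeqString::kHeaderSize is added
// and the sum is rounded up to kObjectAlignment (assumed to be 8 bytes on
// this 64-bit target) before being credited to the live-byte count.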
5888 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5889 DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
5890 lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset));
5891 And(t8, instance_type, Operand(kStringEncodingMask));
5894 Branch(&skip, ne, t8, Operand(zero_reg));
5895 // Adjust length for UC16.
5899 Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5900 DCHECK(!length.is(t8));
5901 And(length, length, Operand(~kObjectAlignmentMask));
5903 bind(&is_data_object);
5904 // Value is a data object, and it is white. Mark it black. Since we know
5905 // that the object is white we can make it black by flipping one bit.
5906 Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5907 Or(t8, t8, Operand(mask_scratch));
5908 Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5910 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5911 Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5912 Daddu(t8, t8, Operand(length));
5913 Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5919 void MacroAssembler::LoadInstanceDescriptors(Register map,
5920 Register descriptors) {
5921 ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5925 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5926 ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
5927 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5931 void MacroAssembler::EnumLength(Register dst, Register map) {
5932 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5933 ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
5934 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
5939 void MacroAssembler::LoadAccessor(Register dst, Register holder,
5941 AccessorComponent accessor) {
5942 ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
5943 LoadInstanceDescriptors(dst, dst);
5945 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
5946 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
5947 : AccessorPair::kSetterOffset;
5948 ld(dst, FieldMemOperand(dst, offset));
5952 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5953 Register empty_fixed_array_value = a6;
5954 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5958 // Check if the enum length field is properly initialized, indicating that
5959 // there is an enum cache.
5960 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5964 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5969 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5971 // For all objects but the receiver, check that the cache is empty.
5973 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5977 // Check that there are no elements. Register a2 contains the current JS
5978 // object we've reached through the prototype chain.
5980 ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5981 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5983 // Second chance, the object may be using the empty slow element dictionary.
5984 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5985 Branch(call_runtime, ne, a2, Operand(at));
5988 ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5989 Branch(&next, ne, a2, Operand(null_value));
5993 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5994 DCHECK(!output_reg.is(input_reg));
5996 li(output_reg, Operand(255));
5997 // Normal branch: nop in delay slot.
5998 Branch(&done, gt, input_reg, Operand(output_reg));
5999 // Use delay slot in this branch.
6000 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
6001 mov(output_reg, zero_reg); // In delay slot.
6002 mov(output_reg, input_reg); // Value is in range 0..255.
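// Illustration of the delay-slot trick above: mov(output_reg, zero_reg)
// sits in the delay slot of the 'lt' branch and therefore always executes.
// If input_reg < 0 the branch is taken with 0 already in place; otherwise
// the following mov(output_reg, input_reg) overwrites it. E.g. inputs -3,
// 100 and 300 clamp to 0, 100 and 255 respectively.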
6007 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
6008 DoubleRegister input_reg,
6009 DoubleRegister temp_double_reg) {
6014 Move(temp_double_reg, 0.0);
6015 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
6017 // Double value is less than zero, NaN or Inf, return 0.
6018 mov(result_reg, zero_reg);
6021 // Double value is >= 255, return 255.
6023 Move(temp_double_reg, 255.0);
6024 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
6025 li(result_reg, Operand(255));
6028 // In 0-255 range, round and truncate.
6030 cvt_w_d(temp_double_reg, input_reg);
6031 mfc1(result_reg, temp_double_reg);
6036 void MacroAssembler::TestJSArrayForAllocationMemento(
6037 Register receiver_reg,
6038 Register scratch_reg,
6039 Label* no_memento_found,
6041 Label* allocation_memento_present) {
6042 ExternalReference new_space_start =
6043 ExternalReference::new_space_start(isolate());
6044 ExternalReference new_space_allocation_top =
6045 ExternalReference::new_space_allocation_top_address(isolate());
6046 Daddu(scratch_reg, receiver_reg,
6047 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
6048 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
6049 li(at, Operand(new_space_allocation_top));
6050 ld(at, MemOperand(at));
6051 Branch(no_memento_found, gt, scratch_reg, Operand(at));
6052 ld(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
6053 if (allocation_memento_present) {
6054 Branch(allocation_memento_present, cond, scratch_reg,
6055 Operand(isolate()->factory()->allocation_memento_map()));
6060 Register GetRegisterThatIsNotOneOf(Register reg1,
6067 if (reg1.is_valid()) regs |= reg1.bit();
6068 if (reg2.is_valid()) regs |= reg2.bit();
6069 if (reg3.is_valid()) regs |= reg3.bit();
6070 if (reg4.is_valid()) regs |= reg4.bit();
6071 if (reg5.is_valid()) regs |= reg5.bit();
6072 if (reg6.is_valid()) regs |= reg6.bit();
6074 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
6075 Register candidate = Register::FromAllocationIndex(i);
6076 if (regs & candidate.bit()) continue;
6084 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
6089 DCHECK(!scratch1.is(scratch0));
6090 Factory* factory = isolate()->factory();
6091 Register current = scratch0;
6092 Label loop_again, end;
6094 // 'current' starts at the object and then walks up the prototype chain.
6095 Move(current, object);
6096 ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
6097 ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
6098 Branch(&end, eq, current, Operand(factory->null_value()));
6100 // Loop based on the map going up the prototype chain.
6102 ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
6103 lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
6104 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
6105 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
6106 Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
6107 lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
6108 DecodeField<Map::ElementsKindBits>(scratch1);
6109 Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
6110 ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
6111 Branch(&loop_again, ne, current, Operand(factory->null_value()));
6117 bool AreAliased(Register reg1,
6125 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
6126 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
6127 reg7.is_valid() + reg8.is_valid();
6130 if (reg1.is_valid()) regs |= reg1.bit();
6131 if (reg2.is_valid()) regs |= reg2.bit();
6132 if (reg3.is_valid()) regs |= reg3.bit();
6133 if (reg4.is_valid()) regs |= reg4.bit();
6134 if (reg5.is_valid()) regs |= reg5.bit();
6135 if (reg6.is_valid()) regs |= reg6.bit();
6136 if (reg7.is_valid()) regs |= reg7.bit();
6137 if (reg8.is_valid()) regs |= reg8.bit();
6138 int n_of_non_aliasing_regs = NumRegs(regs);
6140 return n_of_valid_regs != n_of_non_aliasing_regs;
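// Illustration: AreAliased(a0, a1, a0) (remaining parameters left as no_reg)
// counts 3 valid registers but only 2 distinct bits in 'regs', so it returns
// true; with all-distinct registers the two counts match and it returns
// false.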
6144 CodePatcher::CodePatcher(byte* address,
6146 FlushICache flush_cache)
6147 : address_(address),
6148 size_(instructions * Assembler::kInstrSize),
6149 masm_(NULL, address, size_ + Assembler::kGap),
6150 flush_cache_(flush_cache) {
6151 // Create a new macro assembler pointing to the address of the code to patch.
6152 // The size is adjusted with kGap in order for the assembler to generate size
6153 // bytes of instructions without failing with buffer size constraints.
6154 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6158 CodePatcher::~CodePatcher() {
6159 // Indicate that code has changed.
6160 if (flush_cache_ == FLUSH) {
6161 CpuFeatures::FlushICache(address_, size_);
6163 // Check that the code was patched as expected.
6164 DCHECK(masm_.pc_ == address_ + size_);
6165 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6169 void CodePatcher::Emit(Instr instr) {
6170 masm()->emit(instr);
6174 void CodePatcher::Emit(Address addr) {
6175 // masm()->emit(reinterpret_cast<Instr>(addr));
6179 void CodePatcher::ChangeBranchCondition(Condition cond) {
6180 Instr instr = Assembler::instr_at(masm_.pc_);
6181 DCHECK(Assembler::IsBranch(instr));
6182 uint32_t opcode = Assembler::GetOpcodeField(instr);
6183 // Currently only the 'eq' and 'ne' cond values are supported and the simple
6184 // branch instructions (with opcode being the branch type).
6185 // There are some special cases (see Assembler::IsBranch()) so extending this would be tricky.
6187 DCHECK(opcode == BEQ ||
6195 opcode = (cond == eq) ? BEQ : BNE;
6196 instr = (instr & ~kOpcodeMask) | opcode;
6201 void MacroAssembler::TruncatingDiv(Register result,
6204 DCHECK(!dividend.is(result));
6205 DCHECK(!dividend.is(at));
6206 DCHECK(!result.is(at));
6207 base::MagicNumbersForDivision<uint32_t> mag =
6208 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
6209 li(at, Operand(static_cast<int32_t>(mag.multiplier)));
6210 Mulh(result, dividend, Operand(at));
6211 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
6212 if (divisor > 0 && neg) {
6213 Addu(result, result, Operand(dividend));
6215 if (divisor < 0 && !neg && mag.multiplier > 0) {
6216 Subu(result, result, Operand(dividend));
6218 if (mag.shift > 0) sra(result, result, mag.shift);
6219 srl(at, dividend, 31);
6220 Addu(result, result, Operand(at));
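// Worked example (illustration): for divisor 3 the computed magic constant
// is 0x55555556 with shift 0. For dividend 9, Mulh yields 3 and the final
// 'dividend >> 31' correction adds 0; for dividend -9, Mulh yields -4 and
// the correction adds 1, giving -3, both results matching C-style
// truncating division.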
6224 } // namespace internal
6227 #endif // V8_TARGET_ARCH_MIPS64