1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
9 #if V8_TARGET_ARCH_MIPS64
11 #include "src/base/division-by-constant.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/cpu-profiler.h"
15 #include "src/debug.h"
16 #include "src/isolate-inl.h"
17 #include "src/runtime.h"
22 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
23 : Assembler(arg_isolate, buffer, size),
24 generating_stub_(false),
26 if (isolate() != NULL) {
27 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
33 void MacroAssembler::Load(Register dst,
34 const MemOperand& src,
36 DCHECK(!r.IsDouble());
39 } else if (r.IsUInteger8()) {
41 } else if (r.IsInteger16()) {
43 } else if (r.IsUInteger16()) {
45 } else if (r.IsInteger32()) {
53 void MacroAssembler::Store(Register src,
54 const MemOperand& dst,
56 DCHECK(!r.IsDouble());
57 if (r.IsInteger8() || r.IsUInteger8()) {
59 } else if (r.IsInteger16() || r.IsUInteger16()) {
61 } else if (r.IsInteger32()) {
64 if (r.IsHeapObject()) {
66 } else if (r.IsSmi()) {
74 void MacroAssembler::LoadRoot(Register destination,
75 Heap::RootListIndex index) {
76 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
80 void MacroAssembler::LoadRoot(Register destination,
81 Heap::RootListIndex index,
83 Register src1, const Operand& src2) {
84 Branch(2, NegateCondition(cond), src1, src2);
85 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
89 void MacroAssembler::StoreRoot(Register source,
90 Heap::RootListIndex index) {
91 sd(source, MemOperand(s6, index << kPointerSizeLog2));
95 void MacroAssembler::StoreRoot(Register source,
96 Heap::RootListIndex index,
98 Register src1, const Operand& src2) {
99 Branch(2, NegateCondition(cond), src1, src2);
100 sd(source, MemOperand(s6, index << kPointerSizeLog2));
104 // Push and pop all registers that can hold pointers.
105 void MacroAssembler::PushSafepointRegisters() {
106 // Safepoints expect a block of kNumSafepointRegisters values on the
107 // stack, so adjust the stack for unsaved registers.
108 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
109 DCHECK(num_unsaved >= 0);
110 if (num_unsaved > 0) {
111 Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
113 MultiPush(kSafepointSavedRegisters);
117 void MacroAssembler::PopSafepointRegisters() {
118 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
119 MultiPop(kSafepointSavedRegisters);
120 if (num_unsaved > 0) {
121 Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
126 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
127 sd(src, SafepointRegisterSlot(dst));
131 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
132 ld(dst, SafepointRegisterSlot(src));
136 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
137 // The registers are pushed starting with the highest encoding,
138 // which means that lowest encodings are closest to the stack pointer.
139 return kSafepointRegisterStackIndexMap[reg_code];
143 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
144 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
148 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
149 UNIMPLEMENTED_MIPS();
150 // General purpose registers are pushed last on the stack.
151 int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
152 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
153 return MemOperand(sp, doubles_size + register_offset);
157 void MacroAssembler::InNewSpace(Register object,
161 DCHECK(cc == eq || cc == ne);
162 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
163 Branch(branch, cc, scratch,
164 Operand(ExternalReference::new_space_start(isolate())));
168 void MacroAssembler::RecordWriteField(
174 SaveFPRegsMode save_fp,
175 RememberedSetAction remembered_set_action,
177 PointersToHereCheck pointers_to_here_check_for_value) {
178 DCHECK(!AreAliased(value, dst, t8, object));
179 // First, check if a write barrier is even needed. The tests below
180 // catch stores of Smis.
183 // Skip barrier if writing a smi.
184 if (smi_check == INLINE_SMI_CHECK) {
185 JumpIfSmi(value, &done);
188 // Although the object register is tagged, the offset is relative to the start
189 // of the object, so so offset must be a multiple of kPointerSize.
190 DCHECK(IsAligned(offset, kPointerSize));
192 Daddu(dst, object, Operand(offset - kHeapObjectTag));
193 if (emit_debug_code()) {
195 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
196 Branch(&ok, eq, t8, Operand(zero_reg));
197 stop("Unaligned cell in write barrier");
206 remembered_set_action,
208 pointers_to_here_check_for_value);
212 // Clobber clobbered input registers when running with the debug-code flag
213 // turned on to provoke errors.
214 if (emit_debug_code()) {
215 li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
216 li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
221 // Will clobber 4 registers: object, map, dst, ip. The
222 // register 'object' contains a heap object pointer.
223 void MacroAssembler::RecordWriteForMap(Register object,
227 SaveFPRegsMode fp_mode) {
228 if (emit_debug_code()) {
230 ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
232 kWrongAddressOrValuePassedToRecordWrite,
234 Operand(isolate()->factory()->meta_map()));
237 if (!FLAG_incremental_marking) {
241 if (emit_debug_code()) {
242 ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
244 kWrongAddressOrValuePassedToRecordWrite,
251 // A single check of the map's pages interesting flag suffices, since it is
252 // only set during incremental collection, and then it's also guaranteed that
253 // the from object's page's interesting flag is also set. This optimization
254 // relies on the fact that maps can never be in new space.
256 map, // Used as scratch.
257 MemoryChunk::kPointersToHereAreInterestingMask,
261 Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
262 if (emit_debug_code()) {
264 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
265 Branch(&ok, eq, at, Operand(zero_reg));
266 stop("Unaligned cell in write barrier");
270 // Record the actual write.
271 if (ra_status == kRAHasNotBeenSaved) {
274 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
277 if (ra_status == kRAHasNotBeenSaved) {
283 // Count number of write barriers in generated code.
284 isolate()->counters()->write_barriers_static()->Increment();
285 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
287 // Clobber clobbered registers when running with the debug-code flag
288 // turned on to provoke errors.
289 if (emit_debug_code()) {
290 li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
291 li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
296 // Will clobber 4 registers: object, address, scratch, ip. The
297 // register 'object' contains a heap object pointer. The heap object
298 // tag is shifted away.
299 void MacroAssembler::RecordWrite(
304 SaveFPRegsMode fp_mode,
305 RememberedSetAction remembered_set_action,
307 PointersToHereCheck pointers_to_here_check_for_value) {
308 DCHECK(!AreAliased(object, address, value, t8));
309 DCHECK(!AreAliased(object, address, value, t9));
311 if (emit_debug_code()) {
312 ld(at, MemOperand(address));
314 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
317 if (remembered_set_action == OMIT_REMEMBERED_SET &&
318 !FLAG_incremental_marking) {
322 // First, check if a write barrier is even needed. The tests below
323 // catch stores of smis and stores into the young generation.
326 if (smi_check == INLINE_SMI_CHECK) {
327 DCHECK_EQ(0, kSmiTag);
328 JumpIfSmi(value, &done);
331 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
333 value, // Used as scratch.
334 MemoryChunk::kPointersToHereAreInterestingMask,
338 CheckPageFlag(object,
339 value, // Used as scratch.
340 MemoryChunk::kPointersFromHereAreInterestingMask,
344 // Record the actual write.
345 if (ra_status == kRAHasNotBeenSaved) {
348 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
351 if (ra_status == kRAHasNotBeenSaved) {
357 // Count number of write barriers in generated code.
358 isolate()->counters()->write_barriers_static()->Increment();
359 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
362 // Clobber clobbered registers when running with the debug-code flag
363 // turned on to provoke errors.
364 if (emit_debug_code()) {
365 li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
366 li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
371 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
374 SaveFPRegsMode fp_mode,
375 RememberedSetFinalAction and_then) {
377 if (emit_debug_code()) {
379 JumpIfNotInNewSpace(object, scratch, &ok);
380 stop("Remembered set pointer is in new space");
383 // Load store buffer top.
384 ExternalReference store_buffer =
385 ExternalReference::store_buffer_top(isolate());
386 li(t8, Operand(store_buffer));
387 ld(scratch, MemOperand(t8));
388 // Store pointer to buffer and increment buffer top.
389 sd(address, MemOperand(scratch));
390 Daddu(scratch, scratch, kPointerSize);
391 // Write back new top of buffer.
392 sd(scratch, MemOperand(t8));
393 // Call stub on end of buffer.
394 // Check for end of buffer.
395 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
396 DCHECK(!scratch.is(t8));
397 if (and_then == kFallThroughAtEnd) {
398 Branch(&done, eq, t8, Operand(zero_reg));
400 DCHECK(and_then == kReturnAtEnd);
401 Ret(eq, t8, Operand(zero_reg));
404 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
405 CallStub(&store_buffer_overflow);
408 if (and_then == kReturnAtEnd) {
414 // -----------------------------------------------------------------------------
415 // Allocation support.
418 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
423 DCHECK(!holder_reg.is(scratch));
424 DCHECK(!holder_reg.is(at));
425 DCHECK(!scratch.is(at));
427 // Load current lexical context from the stack frame.
428 ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
429 // In debug mode, make sure the lexical context is set.
431 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
432 scratch, Operand(zero_reg));
435 // Load the native context of the current context.
437 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
438 ld(scratch, FieldMemOperand(scratch, offset));
439 ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
441 // Check the context is a native context.
442 if (emit_debug_code()) {
443 push(holder_reg); // Temporarily save holder on the stack.
444 // Read the first word and compare to the native_context_map.
445 ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
446 LoadRoot(at, Heap::kNativeContextMapRootIndex);
447 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
448 holder_reg, Operand(at));
449 pop(holder_reg); // Restore holder.
452 // Check if both contexts are the same.
453 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
454 Branch(&same_contexts, eq, scratch, Operand(at));
456 // Check the context is a native context.
457 if (emit_debug_code()) {
458 push(holder_reg); // Temporarily save holder on the stack.
459 mov(holder_reg, at); // Move at to its holding place.
460 LoadRoot(at, Heap::kNullValueRootIndex);
461 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
462 holder_reg, Operand(at));
464 ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
465 LoadRoot(at, Heap::kNativeContextMapRootIndex);
466 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
467 holder_reg, Operand(at));
468 // Restore at is not needed. at is reloaded below.
469 pop(holder_reg); // Restore holder.
470 // Restore at to holder's context.
471 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
474 // Check that the security token in the calling global object is
475 // compatible with the security token in the receiving global
477 int token_offset = Context::kHeaderSize +
478 Context::SECURITY_TOKEN_INDEX * kPointerSize;
480 ld(scratch, FieldMemOperand(scratch, token_offset));
481 ld(at, FieldMemOperand(at, token_offset));
482 Branch(miss, ne, scratch, Operand(at));
484 bind(&same_contexts);
488 // Compute the hash code from the untagged key. This must be kept in sync with
489 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
490 // code-stub-hydrogen.cc
491 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
492 // First of all we assign the hash seed to scratch.
493 LoadRoot(scratch, Heap::kHashSeedRootIndex);
496 // Xor original key with a seed.
497 xor_(reg0, reg0, scratch);
499 // Compute the hash code from the untagged key. This must be kept in sync
500 // with ComputeIntegerHash in utils.h.
502 // hash = ~hash + (hash << 15);
503 // The algorithm uses 32-bit integer values.
504 nor(scratch, reg0, zero_reg);
506 addu(reg0, scratch, at);
508 // hash = hash ^ (hash >> 12);
510 xor_(reg0, reg0, at);
512 // hash = hash + (hash << 2);
514 addu(reg0, reg0, at);
516 // hash = hash ^ (hash >> 4);
518 xor_(reg0, reg0, at);
520 // hash = hash * 2057;
521 sll(scratch, reg0, 11);
523 addu(reg0, reg0, at);
524 addu(reg0, reg0, scratch);
526 // hash = hash ^ (hash >> 16);
528 xor_(reg0, reg0, at);
532 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
541 // elements - holds the slow-case elements of the receiver on entry.
542 // Unchanged unless 'result' is the same register.
544 // key - holds the smi key on entry.
545 // Unchanged unless 'result' is the same register.
548 // result - holds the result on exit if the load succeeded.
549 // Allowed to be the same as 'key' or 'result'.
550 // Unchanged on bailout so 'key' or 'result' can be used
551 // in further computation.
553 // Scratch registers:
555 // reg0 - holds the untagged key on entry and holds the hash once computed.
557 // reg1 - Used to hold the capacity mask of the dictionary.
559 // reg2 - Used for the index into the dictionary.
560 // at - Temporary (avoid MacroAssembler instructions also using 'at').
563 GetNumberHash(reg0, reg1);
565 // Compute the capacity mask.
566 ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
567 SmiUntag(reg1, reg1);
568 Dsubu(reg1, reg1, Operand(1));
570 // Generate an unrolled loop that performs a few probes before giving up.
571 for (int i = 0; i < kNumberDictionaryProbes; i++) {
572 // Use reg2 for index calculations and keep the hash intact in reg0.
574 // Compute the masked index: (hash + i + i * i) & mask.
576 Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
578 and_(reg2, reg2, reg1);
580 // Scale the index by multiplying by the element size.
581 DCHECK(SeededNumberDictionary::kEntrySize == 3);
582 dsll(at, reg2, 1); // 2x.
583 daddu(reg2, reg2, at); // reg2 = reg2 * 3.
585 // Check if the key is identical to the name.
586 dsll(at, reg2, kPointerSizeLog2);
587 daddu(reg2, elements, at);
589 ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
590 if (i != kNumberDictionaryProbes - 1) {
591 Branch(&done, eq, key, Operand(at));
593 Branch(miss, ne, key, Operand(at));
598 // Check that the value is a normal property.
599 // reg2: elements + (index * kPointerSize).
600 const int kDetailsOffset =
601 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
602 ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
603 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
604 Branch(miss, ne, at, Operand(zero_reg));
606 // Get the value at the masked, scaled index and return.
607 const int kValueOffset =
608 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
609 ld(result, FieldMemOperand(reg2, kValueOffset));
613 // ---------------------------------------------------------------------------
614 // Instruction macros.
616 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
618 addu(rd, rs, rt.rm());
620 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
621 addiu(rd, rs, rt.imm64_);
623 // li handles the relocation.
632 void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
634 daddu(rd, rs, rt.rm());
636 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
637 daddiu(rd, rs, rt.imm64_);
639 // li handles the relocation.
648 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
650 subu(rd, rs, rt.rm());
652 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
653 addiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm).
655 // li handles the relocation.
664 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
666 dsubu(rd, rs, rt.rm());
668 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
669 daddiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm).
671 // li handles the relocation.
680 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
682 mul(rd, rs, rt.rm());
684 // li handles the relocation.
692 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
694 if (kArchVariant != kMips64r6) {
698 muh(rd, rs, rt.rm());
701 // li handles the relocation.
704 if (kArchVariant != kMips64r6) {
714 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
716 if (kArchVariant == kMips64r6) {
717 dmul(rd, rs, rt.rm());
723 // li handles the relocation.
726 if (kArchVariant == kMips64r6) {
736 void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
738 if (kArchVariant == kMips64r6) {
739 dmuh(rd, rs, rt.rm());
745 // li handles the relocation.
748 if (kArchVariant == kMips64r6) {
758 void MacroAssembler::Mult(Register rs, const Operand& rt) {
762 // li handles the relocation.
770 void MacroAssembler::Dmult(Register rs, const Operand& rt) {
774 // li handles the relocation.
782 void MacroAssembler::Multu(Register rs, const Operand& rt) {
786 // li handles the relocation.
794 void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
798 // li handles the relocation.
806 void MacroAssembler::Div(Register rs, const Operand& rt) {
810 // li handles the relocation.
818 void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
822 // li handles the relocation.
830 void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
831 if (kArchVariant != kMips64r6) {
836 // li handles the relocation.
844 ddiv(rd, rs, rt.rm());
846 // li handles the relocation.
855 void MacroAssembler::Divu(Register rs, const Operand& rt) {
859 // li handles the relocation.
867 void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
871 // li handles the relocation.
879 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
880 if (kArchVariant != kMips64r6) {
885 // li handles the relocation.
893 dmod(rd, rs, rt.rm());
895 // li handles the relocation.
904 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
906 and_(rd, rs, rt.rm());
908 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
909 andi(rd, rs, rt.imm64_);
911 // li handles the relocation.
920 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
922 or_(rd, rs, rt.rm());
924 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
925 ori(rd, rs, rt.imm64_);
927 // li handles the relocation.
936 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
938 xor_(rd, rs, rt.rm());
940 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
941 xori(rd, rs, rt.imm64_);
943 // li handles the relocation.
952 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
954 nor(rd, rs, rt.rm());
956 // li handles the relocation.
964 void MacroAssembler::Neg(Register rs, const Operand& rt) {
967 DCHECK(!at.is(rt.rm()));
969 xor_(rs, rt.rm(), at);
973 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
975 slt(rd, rs, rt.rm());
977 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
978 slti(rd, rs, rt.imm64_);
980 // li handles the relocation.
989 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
991 sltu(rd, rs, rt.rm());
993 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
994 sltiu(rd, rs, rt.imm64_);
996 // li handles the relocation.
1005 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1006 if (kArchVariant == kMips64r2) {
1008 rotrv(rd, rs, rt.rm());
1010 rotr(rd, rs, rt.imm64_);
1014 subu(at, zero_reg, rt.rm());
1016 srlv(rd, rs, rt.rm());
1019 if (rt.imm64_ == 0) {
1022 srl(at, rs, rt.imm64_);
1023 sll(rd, rs, (0x20 - rt.imm64_) & 0x1f);
1031 void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
1033 drotrv(rd, rs, rt.rm());
1035 drotr(rd, rs, rt.imm64_);
1040 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1045 // ------------Pseudo-instructions-------------
1047 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1049 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1053 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1055 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1059 // Do 64-bit load from unaligned address. Note this only handles
1060 // the specific case of 32-bit aligned, but not 64-bit aligned.
1061 void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
1062 // Assert fail if the offset from start of object IS actually aligned.
1063 // ONLY use with known misalignment, since there is performance cost.
1064 DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1065 // TODO(plind): endian dependency.
1067 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1068 dsll32(scratch, scratch, 0);
1069 Daddu(rd, rd, scratch);
1073 // Do 64-bit store to unaligned address. Note this only handles
1074 // the specific case of 32-bit aligned, but not 64-bit aligned.
1075 void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
1076 // Assert fail if the offset from start of object IS actually aligned.
1077 // ONLY use with known misalignment, since there is performance cost.
1078 DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1079 // TODO(plind): endian dependency.
1081 dsrl32(scratch, rd, 0);
1082 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1086 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1087 AllowDeferredHandleDereference smi_check;
1088 if (value->IsSmi()) {
1089 li(dst, Operand(value), mode);
1091 DCHECK(value->IsHeapObject());
1092 if (isolate()->heap()->InNewSpace(*value)) {
1093 Handle<Cell> cell = isolate()->factory()->NewCell(value);
1094 li(dst, Operand(cell));
1095 ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
1097 li(dst, Operand(value));
1103 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1104 DCHECK(!j.is_reg());
1105 BlockTrampolinePoolScope block_trampoline_pool(this);
1106 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1107 // Normal load of an immediate value which does not need Relocation Info.
1108 if (is_int32(j.imm64_)) {
1109 if (is_int16(j.imm64_)) {
1110 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
1111 } else if (!(j.imm64_ & kHiMask)) {
1112 ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
1113 } else if (!(j.imm64_ & kImm16Mask)) {
1114 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1116 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1117 ori(rd, rd, (j.imm64_ & kImm16Mask));
1120 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1121 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1123 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1125 ori(rd, rd, j.imm64_ & kImm16Mask);
1127 } else if (MustUseReg(j.rmode_)) {
1128 RecordRelocInfo(j.rmode_, j.imm64_);
1129 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1130 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1132 ori(rd, rd, j.imm64_ & kImm16Mask);
1133 } else if (mode == ADDRESS_LOAD) {
1134 // We always need the same number of instructions as we may need to patch
1135 // this code to load another value which may need all 4 instructions.
1136 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1137 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1139 ori(rd, rd, j.imm64_ & kImm16Mask);
1141 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1142 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1144 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1146 ori(rd, rd, j.imm64_ & kImm16Mask);
1151 void MacroAssembler::MultiPush(RegList regs) {
1152 int16_t num_to_push = NumberOfBitsSet(regs);
1153 int16_t stack_offset = num_to_push * kPointerSize;
1155 Dsubu(sp, sp, Operand(stack_offset));
1156 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1157 if ((regs & (1 << i)) != 0) {
1158 stack_offset -= kPointerSize;
1159 sd(ToRegister(i), MemOperand(sp, stack_offset));
1165 void MacroAssembler::MultiPushReversed(RegList regs) {
1166 int16_t num_to_push = NumberOfBitsSet(regs);
1167 int16_t stack_offset = num_to_push * kPointerSize;
1169 Dsubu(sp, sp, Operand(stack_offset));
1170 for (int16_t i = 0; i < kNumRegisters; i++) {
1171 if ((regs & (1 << i)) != 0) {
1172 stack_offset -= kPointerSize;
1173 sd(ToRegister(i), MemOperand(sp, stack_offset));
1179 void MacroAssembler::MultiPop(RegList regs) {
1180 int16_t stack_offset = 0;
1182 for (int16_t i = 0; i < kNumRegisters; i++) {
1183 if ((regs & (1 << i)) != 0) {
1184 ld(ToRegister(i), MemOperand(sp, stack_offset));
1185 stack_offset += kPointerSize;
1188 daddiu(sp, sp, stack_offset);
1192 void MacroAssembler::MultiPopReversed(RegList regs) {
1193 int16_t stack_offset = 0;
1195 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1196 if ((regs & (1 << i)) != 0) {
1197 ld(ToRegister(i), MemOperand(sp, stack_offset));
1198 stack_offset += kPointerSize;
1201 daddiu(sp, sp, stack_offset);
1205 void MacroAssembler::MultiPushFPU(RegList regs) {
1206 int16_t num_to_push = NumberOfBitsSet(regs);
1207 int16_t stack_offset = num_to_push * kDoubleSize;
1209 Dsubu(sp, sp, Operand(stack_offset));
1210 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1211 if ((regs & (1 << i)) != 0) {
1212 stack_offset -= kDoubleSize;
1213 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1219 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1220 int16_t num_to_push = NumberOfBitsSet(regs);
1221 int16_t stack_offset = num_to_push * kDoubleSize;
1223 Dsubu(sp, sp, Operand(stack_offset));
1224 for (int16_t i = 0; i < kNumRegisters; i++) {
1225 if ((regs & (1 << i)) != 0) {
1226 stack_offset -= kDoubleSize;
1227 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1233 void MacroAssembler::MultiPopFPU(RegList regs) {
1234 int16_t stack_offset = 0;
1236 for (int16_t i = 0; i < kNumRegisters; i++) {
1237 if ((regs & (1 << i)) != 0) {
1238 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1239 stack_offset += kDoubleSize;
1242 daddiu(sp, sp, stack_offset);
1246 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1247 int16_t stack_offset = 0;
1249 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1250 if ((regs & (1 << i)) != 0) {
1251 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1252 stack_offset += kDoubleSize;
1255 daddiu(sp, sp, stack_offset);
1259 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
1260 RegList saved_regs = kJSCallerSaved | ra.bit();
1261 MultiPush(saved_regs);
1262 AllowExternalCallThatCantCauseGC scope(this);
1264 // Save to a0 in case address == a4.
1266 PrepareCallCFunction(2, a4);
1268 li(a1, instructions * kInstrSize);
1269 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
1270 MultiPop(saved_regs);
1274 void MacroAssembler::Ext(Register rt,
1279 DCHECK(pos + size < 33);
1280 ext_(rt, rs, pos, size);
1284 void MacroAssembler::Ins(Register rt,
1289 DCHECK(pos + size <= 32);
1291 ins_(rt, rs, pos, size);
1295 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1297 FPURegister scratch) {
1298 // Move the data from fs to t8.
1300 Cvt_d_uw(fd, t8, scratch);
1304 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1306 FPURegister scratch) {
1307 // Convert rs to a FP value in fd (and fd + 1).
1308 // We do this by converting rs minus the MSB to avoid sign conversion,
1309 // then adding 2^31 to the result (if needed).
1311 DCHECK(!fd.is(scratch));
1315 // Save rs's MSB to t9.
1319 // Move the result to fd.
1321 mthc1(zero_reg, fd);
1323 // Convert fd to a real FP value.
1326 Label conversion_done;
1328 // If rs's MSB was 0, it's done.
1329 // Otherwise we need to add that to the FP register.
1330 Branch(&conversion_done, eq, t9, Operand(zero_reg));
1332 // Load 2^31 into f20 as its float representation.
1334 mtc1(zero_reg, scratch);
1337 add_d(fd, fd, scratch);
1339 bind(&conversion_done);
1343 void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
1348 void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
1353 void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
1358 void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
1363 void MacroAssembler::Trunc_l_ud(FPURegister fd,
1365 FPURegister scratch) {
1369 li(at, 0x7fffffffffffffff);
1376 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1378 FPURegister scratch) {
1379 Trunc_uw_d(fs, t8, scratch);
1384 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1389 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1394 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1399 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1404 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1406 FPURegister scratch) {
1407 DCHECK(!fd.is(scratch));
1410 // Load 2^31 into scratch as its float representation.
1412 mtc1(zero_reg, scratch);
1414 // Test if scratch > fd.
1415 // If fd < 2^31 we can convert it normally.
1416 Label simple_convert;
1417 BranchF(&simple_convert, NULL, lt, fd, scratch);
1419 // First we subtract 2^31 from fd, then trunc it to rs
1420 // and add 2^31 to rs.
1421 sub_d(scratch, fd, scratch);
1422 trunc_w_d(scratch, scratch);
1424 Or(rs, rs, 1 << 31);
1428 // Simple conversion.
1429 bind(&simple_convert);
1430 trunc_w_d(scratch, fd);
1437 void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
1438 FPURegister ft, FPURegister scratch) {
1439 if (0) { // TODO(plind): find reasonable arch-variant symbol names.
1440 madd_d(fd, fr, fs, ft);
1442 // Can not change source regs's value.
1443 DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
1444 mul_d(scratch, fs, ft);
1445 add_d(fd, fr, scratch);
1450 void MacroAssembler::BranchF(Label* target,
1455 BranchDelaySlot bd) {
1456 BlockTrampolinePoolScope block_trampoline_pool(this);
1462 DCHECK(nan || target);
1463 // Check for unordered (NaN) cases.
1465 if (kArchVariant != kMips64r6) {
1466 c(UN, D, cmp1, cmp2);
1469 // Use f31 for comparison result. It has to be unavailable to lithium
1470 // register allocator.
1471 DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
1472 cmp(UN, L, f31, cmp1, cmp2);
1477 if (kArchVariant != kMips64r6) {
1479 // Here NaN cases were either handled by this function or are assumed to
1480 // have been handled by the caller.
1483 c(OLT, D, cmp1, cmp2);
1487 c(ULE, D, cmp1, cmp2);
1491 c(ULT, D, cmp1, cmp2);
1495 c(OLE, D, cmp1, cmp2);
1499 c(EQ, D, cmp1, cmp2);
1503 c(UEQ, D, cmp1, cmp2);
1507 c(EQ, D, cmp1, cmp2);
1511 c(UEQ, D, cmp1, cmp2);
1520 // Here NaN cases were either handled by this function or are assumed to
1521 // have been handled by the caller.
1522 // Unsigned conditions are treated as their signed counterpart.
1523 // Use f31 for comparison result, it is valid in fp64 (FR = 1) mode.
1524 DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
1527 cmp(OLT, L, f31, cmp1, cmp2);
1528 bc1nez(target, f31);
1531 cmp(ULE, L, f31, cmp1, cmp2);
1532 bc1eqz(target, f31);
1535 cmp(ULT, L, f31, cmp1, cmp2);
1536 bc1eqz(target, f31);
1539 cmp(OLE, L, f31, cmp1, cmp2);
1540 bc1nez(target, f31);
1543 cmp(EQ, L, f31, cmp1, cmp2);
1544 bc1nez(target, f31);
1547 cmp(UEQ, L, f31, cmp1, cmp2);
1548 bc1nez(target, f31);
1551 cmp(EQ, L, f31, cmp1, cmp2);
1552 bc1eqz(target, f31);
1555 cmp(UEQ, L, f31, cmp1, cmp2);
1556 bc1eqz(target, f31);
1564 if (bd == PROTECT) {
1570 void MacroAssembler::Move(FPURegister dst, double imm) {
1571 static const DoubleRepresentation minus_zero(-0.0);
1572 static const DoubleRepresentation zero(0.0);
1573 DoubleRepresentation value_rep(imm);
1574 // Handle special values first.
1575 bool force_load = dst.is(kDoubleRegZero);
1576 if (value_rep == zero && !force_load) {
1577 mov_d(dst, kDoubleRegZero);
1578 } else if (value_rep == minus_zero && !force_load) {
1579 neg_d(dst, kDoubleRegZero);
1582 DoubleAsTwoUInt32(imm, &lo, &hi);
1583 // Move the low part of the double into the lower bits of the corresponding
1586 li(at, Operand(lo));
1589 mtc1(zero_reg, dst);
1591 // Move the high part of the double into the high bits of the corresponding
1594 li(at, Operand(hi));
1597 mthc1(zero_reg, dst);
1603 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1604 if (kArchVariant == kMips64r6) {
1606 Branch(&done, ne, rt, Operand(zero_reg));
1615 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1616 if (kArchVariant == kMips64r6) {
1618 Branch(&done, eq, rt, Operand(zero_reg));
1627 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1632 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1637 void MacroAssembler::Clz(Register rd, Register rs) {
1642 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1644 DoubleRegister double_input,
1646 DoubleRegister double_scratch,
1647 Register except_flag,
1648 CheckForInexactConversion check_inexact) {
1649 DCHECK(!result.is(scratch));
1650 DCHECK(!double_input.is(double_scratch));
1651 DCHECK(!except_flag.is(scratch));
1655 // Clear the except flag (0 = no exception)
1656 mov(except_flag, zero_reg);
1658 // Test for values that can be exactly represented as a signed 32-bit integer.
1659 cvt_w_d(double_scratch, double_input);
1660 mfc1(result, double_scratch);
1661 cvt_d_w(double_scratch, double_scratch);
1662 BranchF(&done, NULL, eq, double_input, double_scratch);
1664 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1666 if (check_inexact == kDontCheckForInexactConversion) {
1667 // Ignore inexact exceptions.
1668 except_mask &= ~kFCSRInexactFlagMask;
1672 cfc1(scratch, FCSR);
1673 // Disable FPU exceptions.
1674 ctc1(zero_reg, FCSR);
1676 // Do operation based on rounding mode.
1677 switch (rounding_mode) {
1678 case kRoundToNearest:
1679 Round_w_d(double_scratch, double_input);
1682 Trunc_w_d(double_scratch, double_input);
1684 case kRoundToPlusInf:
1685 Ceil_w_d(double_scratch, double_input);
1687 case kRoundToMinusInf:
1688 Floor_w_d(double_scratch, double_input);
1690 } // End of switch-statement.
1693 cfc1(except_flag, FCSR);
1695 ctc1(scratch, FCSR);
1696 // Move the converted value into the result register.
1697 mfc1(result, double_scratch);
1699 // Check for fpu exceptions.
1700 And(except_flag, except_flag, Operand(except_mask));
1706 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1707 DoubleRegister double_input,
1709 DoubleRegister single_scratch = kLithiumScratchDouble.low();
1710 Register scratch = at;
1711 Register scratch2 = t9;
1713 // Clear cumulative exception flags and save the FCSR.
1714 cfc1(scratch2, FCSR);
1715 ctc1(zero_reg, FCSR);
1716 // Try a conversion to a signed integer.
1717 trunc_w_d(single_scratch, double_input);
1718 mfc1(result, single_scratch);
1719 // Retrieve and restore the FCSR.
1720 cfc1(scratch, FCSR);
1721 ctc1(scratch2, FCSR);
1722 // Check for overflow and NaNs.
1725 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1726 // If we had no exceptions we are done.
1727 Branch(done, eq, scratch, Operand(zero_reg));
1731 void MacroAssembler::TruncateDoubleToI(Register result,
1732 DoubleRegister double_input) {
1735 TryInlineTruncateDoubleToI(result, double_input, &done);
1737 // If we fell through then inline version didn't succeed - call stub instead.
1739 Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
1740 sdc1(double_input, MemOperand(sp, 0));
1742 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
1745 Daddu(sp, sp, Operand(kDoubleSize));
1752 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1754 DoubleRegister double_scratch = f12;
1755 DCHECK(!result.is(object));
1757 ldc1(double_scratch,
1758 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1759 TryInlineTruncateDoubleToI(result, double_scratch, &done);
1761 // If we fell through then inline version didn't succeed - call stub instead.
1763 DoubleToIStub stub(isolate(),
1766 HeapNumber::kValueOffset - kHeapObjectTag,
1776 void MacroAssembler::TruncateNumberToI(Register object,
1778 Register heap_number_map,
1780 Label* not_number) {
1782 DCHECK(!result.is(object));
1784 UntagAndJumpIfSmi(result, object, &done);
1785 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1786 TruncateHeapNumberToI(result, object);
1792 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1794 int num_least_bits) {
1795 // Ext(dst, src, kSmiTagSize, num_least_bits);
1797 And(dst, dst, Operand((1 << num_least_bits) - 1));
1801 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1803 int num_least_bits) {
1804 DCHECK(!src.is(dst));
1805 And(dst, src, Operand((1 << num_least_bits) - 1));
1809 // Emulated condtional branches do not emit a nop in the branch delay slot.
1811 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1812 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
1813 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1814 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1817 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1818 BranchShort(offset, bdslot);
1822 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1824 BranchDelaySlot bdslot) {
1825 BranchShort(offset, cond, rs, rt, bdslot);
1829 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1830 if (L->is_bound()) {
1832 BranchShort(L, bdslot);
1837 if (is_trampoline_emitted()) {
1840 BranchShort(L, bdslot);
1846 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1848 BranchDelaySlot bdslot) {
1849 if (L->is_bound()) {
1851 BranchShort(L, cond, rs, rt, bdslot);
1853 if (cond != cc_always) {
1855 Condition neg_cond = NegateCondition(cond);
1856 BranchShort(&skip, neg_cond, rs, rt);
1864 if (is_trampoline_emitted()) {
1865 if (cond != cc_always) {
1867 Condition neg_cond = NegateCondition(cond);
1868 BranchShort(&skip, neg_cond, rs, rt);
1875 BranchShort(L, cond, rs, rt, bdslot);
1881 void MacroAssembler::Branch(Label* L,
1884 Heap::RootListIndex index,
1885 BranchDelaySlot bdslot) {
1886 LoadRoot(at, index);
1887 Branch(L, cond, rs, Operand(at), bdslot);
1891 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1894 // Emit a nop in the branch delay slot if required.
1895 if (bdslot == PROTECT)
1900 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1902 BranchDelaySlot bdslot) {
1903 BRANCH_ARGS_CHECK(cond, rs, rt);
1904 DCHECK(!rs.is(zero_reg));
1905 Register r2 = no_reg;
1906 Register scratch = at;
1909 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1911 BlockTrampolinePoolScope block_trampoline_pool(this);
1918 beq(rs, r2, offset);
1921 bne(rs, r2, offset);
1923 // Signed comparison.
1925 if (r2.is(zero_reg)) {
1928 slt(scratch, r2, rs);
1929 bne(scratch, zero_reg, offset);
1933 if (r2.is(zero_reg)) {
1936 slt(scratch, rs, r2);
1937 beq(scratch, zero_reg, offset);
1941 if (r2.is(zero_reg)) {
1944 slt(scratch, rs, r2);
1945 bne(scratch, zero_reg, offset);
1949 if (r2.is(zero_reg)) {
1952 slt(scratch, r2, rs);
1953 beq(scratch, zero_reg, offset);
1956 // Unsigned comparison.
1958 if (r2.is(zero_reg)) {
1961 sltu(scratch, r2, rs);
1962 bne(scratch, zero_reg, offset);
1965 case Ugreater_equal:
1966 if (r2.is(zero_reg)) {
1969 sltu(scratch, rs, r2);
1970 beq(scratch, zero_reg, offset);
1974 if (r2.is(zero_reg)) {
1975 // No code needs to be emitted.
1978 sltu(scratch, rs, r2);
1979 bne(scratch, zero_reg, offset);
1983 if (r2.is(zero_reg)) {
1986 sltu(scratch, r2, rs);
1987 beq(scratch, zero_reg, offset);
1994 // Be careful to always use shifted_branch_offset only just before the
1995 // branch instruction, as the location will be remember for patching the
1997 BlockTrampolinePoolScope block_trampoline_pool(this);
2003 // We don't want any other register but scratch clobbered.
2004 DCHECK(!scratch.is(rs));
2007 beq(rs, r2, offset);
2010 // We don't want any other register but scratch clobbered.
2011 DCHECK(!scratch.is(rs));
2014 bne(rs, r2, offset);
2016 // Signed comparison.
2018 if (rt.imm64_ == 0) {
2023 slt(scratch, r2, rs);
2024 bne(scratch, zero_reg, offset);
2028 if (rt.imm64_ == 0) {
2030 } else if (is_int16(rt.imm64_)) {
2031 slti(scratch, rs, rt.imm64_);
2032 beq(scratch, zero_reg, offset);
2036 slt(scratch, rs, r2);
2037 beq(scratch, zero_reg, offset);
2041 if (rt.imm64_ == 0) {
2043 } else if (is_int16(rt.imm64_)) {
2044 slti(scratch, rs, rt.imm64_);
2045 bne(scratch, zero_reg, offset);
2049 slt(scratch, rs, r2);
2050 bne(scratch, zero_reg, offset);
2054 if (rt.imm64_ == 0) {
2059 slt(scratch, r2, rs);
2060 beq(scratch, zero_reg, offset);
2063 // Unsigned comparison.
2065 if (rt.imm64_ == 0) {
2070 sltu(scratch, r2, rs);
2071 bne(scratch, zero_reg, offset);
2074 case Ugreater_equal:
2075 if (rt.imm64_ == 0) {
2077 } else if (is_int16(rt.imm64_)) {
2078 sltiu(scratch, rs, rt.imm64_);
2079 beq(scratch, zero_reg, offset);
2083 sltu(scratch, rs, r2);
2084 beq(scratch, zero_reg, offset);
2088 if (rt.imm64_ == 0) {
2089 // No code needs to be emitted.
2091 } else if (is_int16(rt.imm64_)) {
2092 sltiu(scratch, rs, rt.imm64_);
2093 bne(scratch, zero_reg, offset);
2097 sltu(scratch, rs, r2);
2098 bne(scratch, zero_reg, offset);
2102 if (rt.imm64_ == 0) {
2107 sltu(scratch, r2, rs);
2108 beq(scratch, zero_reg, offset);
2115 // Emit a nop in the branch delay slot if required.
2116 if (bdslot == PROTECT)
2121 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2122 // We use branch_offset as an argument for the branch instructions to be sure
2123 // it is called just before generating the branch instruction, as needed.
2125 b(shifted_branch_offset(L, false));
2127 // Emit a nop in the branch delay slot if required.
2128 if (bdslot == PROTECT)
2133 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
2135 BranchDelaySlot bdslot) {
2136 BRANCH_ARGS_CHECK(cond, rs, rt);
2139 Register r2 = no_reg;
2140 Register scratch = at;
2142 BlockTrampolinePoolScope block_trampoline_pool(this);
2144 // Be careful to always use shifted_branch_offset only just before the
2145 // branch instruction, as the location will be remember for patching the
2149 offset = shifted_branch_offset(L, false);
2153 offset = shifted_branch_offset(L, false);
2154 beq(rs, r2, offset);
2157 offset = shifted_branch_offset(L, false);
2158 bne(rs, r2, offset);
2160 // Signed comparison.
2162 if (r2.is(zero_reg)) {
2163 offset = shifted_branch_offset(L, false);
2166 slt(scratch, r2, rs);
2167 offset = shifted_branch_offset(L, false);
2168 bne(scratch, zero_reg, offset);
2172 if (r2.is(zero_reg)) {
2173 offset = shifted_branch_offset(L, false);
2176 slt(scratch, rs, r2);
2177 offset = shifted_branch_offset(L, false);
2178 beq(scratch, zero_reg, offset);
2182 if (r2.is(zero_reg)) {
2183 offset = shifted_branch_offset(L, false);
2186 slt(scratch, rs, r2);
2187 offset = shifted_branch_offset(L, false);
2188 bne(scratch, zero_reg, offset);
2192 if (r2.is(zero_reg)) {
2193 offset = shifted_branch_offset(L, false);
2196 slt(scratch, r2, rs);
2197 offset = shifted_branch_offset(L, false);
2198 beq(scratch, zero_reg, offset);
2201 // Unsigned comparison.
2203 if (r2.is(zero_reg)) {
2204 offset = shifted_branch_offset(L, false);
2207 sltu(scratch, r2, rs);
2208 offset = shifted_branch_offset(L, false);
2209 bne(scratch, zero_reg, offset);
2212 case Ugreater_equal:
2213 if (r2.is(zero_reg)) {
2214 offset = shifted_branch_offset(L, false);
2217 sltu(scratch, rs, r2);
2218 offset = shifted_branch_offset(L, false);
2219 beq(scratch, zero_reg, offset);
2223 if (r2.is(zero_reg)) {
2224 // No code needs to be emitted.
2227 sltu(scratch, rs, r2);
2228 offset = shifted_branch_offset(L, false);
2229 bne(scratch, zero_reg, offset);
2233 if (r2.is(zero_reg)) {
2234 offset = shifted_branch_offset(L, false);
2237 sltu(scratch, r2, rs);
2238 offset = shifted_branch_offset(L, false);
2239 beq(scratch, zero_reg, offset);
2246 // Be careful to always use shifted_branch_offset only just before the
2247 // branch instruction, as the location will be remember for patching the
2249 BlockTrampolinePoolScope block_trampoline_pool(this);
2252 offset = shifted_branch_offset(L, false);
2256 DCHECK(!scratch.is(rs));
2259 offset = shifted_branch_offset(L, false);
2260 beq(rs, r2, offset);
2263 DCHECK(!scratch.is(rs));
2266 offset = shifted_branch_offset(L, false);
2267 bne(rs, r2, offset);
2269 // Signed comparison.
2271 if (rt.imm64_ == 0) {
2272 offset = shifted_branch_offset(L, false);
2275 DCHECK(!scratch.is(rs));
2278 slt(scratch, r2, rs);
2279 offset = shifted_branch_offset(L, false);
2280 bne(scratch, zero_reg, offset);
2284 if (rt.imm64_ == 0) {
2285 offset = shifted_branch_offset(L, false);
2287 } else if (is_int16(rt.imm64_)) {
2288 slti(scratch, rs, rt.imm64_);
2289 offset = shifted_branch_offset(L, false);
2290 beq(scratch, zero_reg, offset);
2292 DCHECK(!scratch.is(rs));
2295 slt(scratch, rs, r2);
2296 offset = shifted_branch_offset(L, false);
2297 beq(scratch, zero_reg, offset);
2301 if (rt.imm64_ == 0) {
2302 offset = shifted_branch_offset(L, false);
2304 } else if (is_int16(rt.imm64_)) {
2305 slti(scratch, rs, rt.imm64_);
2306 offset = shifted_branch_offset(L, false);
2307 bne(scratch, zero_reg, offset);
2309 DCHECK(!scratch.is(rs));
2312 slt(scratch, rs, r2);
2313 offset = shifted_branch_offset(L, false);
2314 bne(scratch, zero_reg, offset);
2318 if (rt.imm64_ == 0) {
2319 offset = shifted_branch_offset(L, false);
2322 DCHECK(!scratch.is(rs));
2325 slt(scratch, r2, rs);
2326 offset = shifted_branch_offset(L, false);
2327 beq(scratch, zero_reg, offset);
2330 // Unsigned comparison.
2332 if (rt.imm64_ == 0) {
2333 offset = shifted_branch_offset(L, false);
2334 bne(rs, zero_reg, offset);
2336 DCHECK(!scratch.is(rs));
2339 sltu(scratch, r2, rs);
2340 offset = shifted_branch_offset(L, false);
2341 bne(scratch, zero_reg, offset);
2344 case Ugreater_equal:
2345 if (rt.imm64_ == 0) {
2346 offset = shifted_branch_offset(L, false);
2348 } else if (is_int16(rt.imm64_)) {
2349 sltiu(scratch, rs, rt.imm64_);
2350 offset = shifted_branch_offset(L, false);
2351 beq(scratch, zero_reg, offset);
2353 DCHECK(!scratch.is(rs));
2356 sltu(scratch, rs, r2);
2357 offset = shifted_branch_offset(L, false);
2358 beq(scratch, zero_reg, offset);
2362 if (rt.imm64_ == 0) {
2363 // No code needs to be emitted.
2365 } else if (is_int16(rt.imm64_)) {
2366 sltiu(scratch, rs, rt.imm64_);
2367 offset = shifted_branch_offset(L, false);
2368 bne(scratch, zero_reg, offset);
2370 DCHECK(!scratch.is(rs));
2373 sltu(scratch, rs, r2);
2374 offset = shifted_branch_offset(L, false);
2375 bne(scratch, zero_reg, offset);
2379 if (rt.imm64_ == 0) {
2380 offset = shifted_branch_offset(L, false);
2381 beq(rs, zero_reg, offset);
2383 DCHECK(!scratch.is(rs));
2386 sltu(scratch, r2, rs);
2387 offset = shifted_branch_offset(L, false);
2388 beq(scratch, zero_reg, offset);
2395 // Check that offset could actually hold on an int16_t.
2396 DCHECK(is_int16(offset));
2397 // Emit a nop in the branch delay slot if required.
2398 if (bdslot == PROTECT)
2403 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2404 BranchAndLinkShort(offset, bdslot);
2408 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2410 BranchDelaySlot bdslot) {
2411 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2415 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2416 if (L->is_bound()) {
2418 BranchAndLinkShort(L, bdslot);
2423 if (is_trampoline_emitted()) {
2426 BranchAndLinkShort(L, bdslot);
2432 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2434 BranchDelaySlot bdslot) {
2435 if (L->is_bound()) {
2437 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2440 Condition neg_cond = NegateCondition(cond);
2441 BranchShort(&skip, neg_cond, rs, rt);
2446 if (is_trampoline_emitted()) {
2448 Condition neg_cond = NegateCondition(cond);
2449 BranchShort(&skip, neg_cond, rs, rt);
2453 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2459 // We need to use a bgezal or bltzal, but they can't be used directly with the
2460 // slt instructions. We could use sub or add instead but we would miss overflow
2461 // cases, so we keep slt and add an intermediate third instruction.
2462 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2463 BranchDelaySlot bdslot) {
2466 // Emit a nop in the branch delay slot if required.
2467 if (bdslot == PROTECT)
2472 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2473 Register rs, const Operand& rt,
2474 BranchDelaySlot bdslot) {
2475 BRANCH_ARGS_CHECK(cond, rs, rt);
2476 Register r2 = no_reg;
2477 Register scratch = at;
2481 } else if (cond != cc_always) {
2487 BlockTrampolinePoolScope block_trampoline_pool(this);
2503 // Signed comparison.
2506 slt(scratch, r2, rs);
2507 beq(scratch, zero_reg, 2);
2513 slt(scratch, rs, r2);
2514 bne(scratch, zero_reg, 2);
2520 slt(scratch, rs, r2);
2521 bne(scratch, zero_reg, 2);
2527 slt(scratch, r2, rs);
2528 bne(scratch, zero_reg, 2);
2534 // Unsigned comparison.
2537 sltu(scratch, r2, rs);
2538 beq(scratch, zero_reg, 2);
2542 case Ugreater_equal:
2544 sltu(scratch, rs, r2);
2545 bne(scratch, zero_reg, 2);
2551 sltu(scratch, rs, r2);
2552 bne(scratch, zero_reg, 2);
2558 sltu(scratch, r2, rs);
2559 bne(scratch, zero_reg, 2);
2567 // Emit a nop in the branch delay slot if required.
2568 if (bdslot == PROTECT)
2573 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2574 bal(shifted_branch_offset(L, false));
2576 // Emit a nop in the branch delay slot if required.
2577 if (bdslot == PROTECT)
2582 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2584 BranchDelaySlot bdslot) {
2585 BRANCH_ARGS_CHECK(cond, rs, rt);
2588 Register r2 = no_reg;
2589 Register scratch = at;
2592 } else if (cond != cc_always) {
2598 BlockTrampolinePoolScope block_trampoline_pool(this);
2601 offset = shifted_branch_offset(L, false);
2607 offset = shifted_branch_offset(L, false);
2613 offset = shifted_branch_offset(L, false);
2617 // Signed comparison.
2620 slt(scratch, r2, rs);
2621 beq(scratch, zero_reg, 2);
2623 offset = shifted_branch_offset(L, false);
2628 slt(scratch, rs, r2);
2629 bne(scratch, zero_reg, 2);
2631 offset = shifted_branch_offset(L, false);
2636 slt(scratch, rs, r2);
2637 bne(scratch, zero_reg, 2);
2639 offset = shifted_branch_offset(L, false);
2644 slt(scratch, r2, rs);
2645 bne(scratch, zero_reg, 2);
2647 offset = shifted_branch_offset(L, false);
2652 // Unsigned comparison.
2655 sltu(scratch, r2, rs);
2656 beq(scratch, zero_reg, 2);
2658 offset = shifted_branch_offset(L, false);
2661 case Ugreater_equal:
2663 sltu(scratch, rs, r2);
2664 bne(scratch, zero_reg, 2);
2666 offset = shifted_branch_offset(L, false);
2671 sltu(scratch, rs, r2);
2672 bne(scratch, zero_reg, 2);
2674 offset = shifted_branch_offset(L, false);
2679 sltu(scratch, r2, rs);
2680 bne(scratch, zero_reg, 2);
2682 offset = shifted_branch_offset(L, false);
2690 // Check that offset could actually hold on an int16_t.
2691 DCHECK(is_int16(offset));
2693 // Emit a nop in the branch delay slot if required.
2694 if (bdslot == PROTECT)
2699 void MacroAssembler::Jump(Register target,
2703 BranchDelaySlot bd) {
2704 BlockTrampolinePoolScope block_trampoline_pool(this);
2705 if (cond == cc_always) {
2708 BRANCH_ARGS_CHECK(cond, rs, rt);
2709 Branch(2, NegateCondition(cond), rs, rt);
2712 // Emit a nop in the branch delay slot if required.
2718 void MacroAssembler::Jump(intptr_t target,
2719 RelocInfo::Mode rmode,
2723 BranchDelaySlot bd) {
2725 if (cond != cc_always) {
2726 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2728 // The first instruction of 'li' may be placed in the delay slot.
2729 // This is not an issue, t9 is expected to be clobbered anyway.
2730 li(t9, Operand(target, rmode));
2731 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2736 void MacroAssembler::Jump(Address target,
2737 RelocInfo::Mode rmode,
2741 BranchDelaySlot bd) {
2742 DCHECK(!RelocInfo::IsCodeTarget(rmode));
2743 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2747 void MacroAssembler::Jump(Handle<Code> code,
2748 RelocInfo::Mode rmode,
2752 BranchDelaySlot bd) {
2753 DCHECK(RelocInfo::IsCodeTarget(rmode));
2754 AllowDeferredHandleDereference embedding_raw_address;
2755 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2759 int MacroAssembler::CallSize(Register target,
2763 BranchDelaySlot bd) {
2766 if (cond == cc_always) {
2775 return size * kInstrSize;
2779 // Note: To call gcc-compiled C code on mips, you must call thru t9.
2780 void MacroAssembler::Call(Register target,
2784 BranchDelaySlot bd) {
2785 BlockTrampolinePoolScope block_trampoline_pool(this);
2788 if (cond == cc_always) {
2791 BRANCH_ARGS_CHECK(cond, rs, rt);
2792 Branch(2, NegateCondition(cond), rs, rt);
2795 // Emit a nop in the branch delay slot if required.
2799 DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
2800 SizeOfCodeGeneratedSince(&start));
2804 int MacroAssembler::CallSize(Address target,
2805 RelocInfo::Mode rmode,
2809 BranchDelaySlot bd) {
2810 int size = CallSize(t9, cond, rs, rt, bd);
2811 return size + 4 * kInstrSize;
2815 void MacroAssembler::Call(Address target,
2816 RelocInfo::Mode rmode,
2820 BranchDelaySlot bd) {
2821 BlockTrampolinePoolScope block_trampoline_pool(this);
2824 int64_t target_int = reinterpret_cast<int64_t>(target);
2825 // Must record previous source positions before the
2826 // li() generates a new code target.
2827 positions_recorder()->WriteRecordedPositions();
2828 li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
2829 Call(t9, cond, rs, rt, bd);
2830 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2831 SizeOfCodeGeneratedSince(&start));
2835 int MacroAssembler::CallSize(Handle<Code> code,
2836 RelocInfo::Mode rmode,
2837 TypeFeedbackId ast_id,
2841 BranchDelaySlot bd) {
2842 AllowDeferredHandleDereference using_raw_address;
2843 return CallSize(reinterpret_cast<Address>(code.location()),
2844 rmode, cond, rs, rt, bd);
2848 void MacroAssembler::Call(Handle<Code> code,
2849 RelocInfo::Mode rmode,
2850 TypeFeedbackId ast_id,
2854 BranchDelaySlot bd) {
2855 BlockTrampolinePoolScope block_trampoline_pool(this);
2858 DCHECK(RelocInfo::IsCodeTarget(rmode));
2859 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
2860 SetRecordedAstId(ast_id);
2861 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2863 AllowDeferredHandleDereference embedding_raw_address;
2864 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2865 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2866 SizeOfCodeGeneratedSince(&start));
2870 void MacroAssembler::Ret(Condition cond,
2873 BranchDelaySlot bd) {
2874 Jump(ra, cond, rs, rt, bd);
2878 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2879 BlockTrampolinePoolScope block_trampoline_pool(this);
2882 imm28 = jump_address(L);
2883 imm28 &= kImm28Mask;
2884 { BlockGrowBufferScope block_buf_growth(this);
2885 // Buffer growth (and relocation) must be blocked for internal references
2886 // until associated instructions are emitted and available to be patched.
2887 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2890 // Emit a nop in the branch delay slot if required.
2891 if (bdslot == PROTECT)
2896 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2897 BlockTrampolinePoolScope block_trampoline_pool(this);
2900 imm64 = jump_address(L);
2901 { BlockGrowBufferScope block_buf_growth(this);
2902 // Buffer growth (and relocation) must be blocked for internal references
2903 // until associated instructions are emitted and available to be patched.
2904 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2905 li(at, Operand(imm64), ADDRESS_LOAD);
2909 // Emit a nop in the branch delay slot if required.
2910 if (bdslot == PROTECT)
2915 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2916 BlockTrampolinePoolScope block_trampoline_pool(this);
2919 imm64 = jump_address(L);
2920 { BlockGrowBufferScope block_buf_growth(this);
2921 // Buffer growth (and relocation) must be blocked for internal references
2922 // until associated instructions are emitted and available to be patched.
2923 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2924 li(at, Operand(imm64), ADDRESS_LOAD);
2928 // Emit a nop in the branch delay slot if required.
2929 if (bdslot == PROTECT)
2934 void MacroAssembler::DropAndRet(int drop) {
2935 Ret(USE_DELAY_SLOT);
2936 daddiu(sp, sp, drop * kPointerSize);
2939 void MacroAssembler::DropAndRet(int drop,
2942 const Operand& r2) {
2943 // Both Drop and Ret need to be conditional.
2945 if (cond != cc_always) {
2946 Branch(&skip, NegateCondition(cond), r1, r2);
2952 if (cond != cc_always) {
2958 void MacroAssembler::Drop(int count,
2961 const Operand& op) {
2969 Branch(&skip, NegateCondition(cond), reg, op);
2972 daddiu(sp, sp, count * kPointerSize);
2981 void MacroAssembler::Swap(Register reg1,
2984 if (scratch.is(no_reg)) {
2985 Xor(reg1, reg1, Operand(reg2));
2986 Xor(reg2, reg2, Operand(reg1));
2987 Xor(reg1, reg1, Operand(reg2));
2996 void MacroAssembler::Call(Label* target) {
2997 BranchAndLink(target);
3001 void MacroAssembler::Push(Handle<Object> handle) {
3002 li(at, Operand(handle));
3007 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
3008 DCHECK(!src.is(scratch));
3010 dsrl32(src, src, 0);
3011 dsll32(src, src, 0);
3013 dsll32(scratch, scratch, 0);
3018 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
3019 DCHECK(!dst.is(scratch));
3021 dsrl32(scratch, scratch, 0);
3023 dsrl32(dst, dst, 0);
3024 dsll32(dst, dst, 0);
3025 or_(dst, dst, scratch);
3029 void MacroAssembler::DebugBreak() {
3030 PrepareCEntryArgs(0);
3031 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
3032 CEntryStub ces(isolate(), 1);
3033 DCHECK(AllowThisStubCall(&ces));
3034 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3038 // ---------------------------------------------------------------------------
3039 // Exception handling.
3041 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3042 int handler_index) {
3043 // Adjust this code if not the case.
3044 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3045 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3046 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3047 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3048 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3049 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3051 // For the JSEntry handler, we must preserve a0-a3 and s0.
3052 // a5-a7 are available. We will build up the handler from the bottom by
3053 // pushing on the stack.
3054 // Set up the code object (a5) and the state (a6) for pushing.
3056 StackHandler::IndexField::encode(handler_index) |
3057 StackHandler::KindField::encode(kind);
3058 li(a5, Operand(CodeObject()), CONSTANT_SIZE);
3059 li(a6, Operand(state));
3061 // Push the frame pointer, context, state, and code object.
3062 if (kind == StackHandler::JS_ENTRY) {
3063 DCHECK_EQ(Smi::FromInt(0), 0);
3064 // The second zero_reg indicates no context.
3065 // The first zero_reg is the NULL frame pointer.
3066 // The operands are reversed to match the order of MultiPush/Pop.
3067 Push(zero_reg, zero_reg, a6, a5);
3069 MultiPush(a5.bit() | a6.bit() | cp.bit() | fp.bit());
3072 // Link the current handler as the next handler.
3073 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3074 ld(a5, MemOperand(a6));
3076 // Set this new handler as the current one.
3077 sd(sp, MemOperand(a6));
3081 void MacroAssembler::PopTryHandler() {
3082 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3084 Daddu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
3085 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3086 sd(a1, MemOperand(at));
3090 void MacroAssembler::JumpToHandlerEntry() {
3091 // Compute the handler entry address and jump to it. The handler table is
3092 // a fixed array of (smi-tagged) code offsets.
3093 // v0 = exception, a1 = code object, a2 = state.
3094 Uld(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));
3095 Daddu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3096 dsrl(a2, a2, StackHandler::kKindWidth); // Handler index.
3097 dsll(a2, a2, kPointerSizeLog2);
3099 ld(a2, MemOperand(a2)); // Smi-tagged offset.
3100 Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
3107 void MacroAssembler::Throw(Register value) {
3108 // Adjust this code if not the case.
3109 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3110 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3111 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3112 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3113 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3114 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3116 // The exception is expected in v0.
3119 // Drop the stack pointer to the top of the top handler.
3120 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
3122 ld(sp, MemOperand(a3));
3124 // Restore the next handler.
3126 sd(a2, MemOperand(a3));
3128 // Get the code object (a1) and state (a2). Restore the context and frame
3130 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
3132 // If the handler is a JS frame, restore the context to the frame.
3133 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
3136 Branch(&done, eq, cp, Operand(zero_reg));
3137 sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3140 JumpToHandlerEntry();
3144 void MacroAssembler::ThrowUncatchable(Register value) {
3145 // Adjust this code if not the case.
3146 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3147 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3148 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3149 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3150 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3151 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3153 // The exception is expected in v0.
3154 if (!value.is(v0)) {
3157 // Drop the stack pointer to the top of the top stack handler.
3158 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3159 ld(sp, MemOperand(a3));
3161 // Unwind the handlers until the ENTRY handler is found.
3162 Label fetch_next, check_kind;
3165 ld(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
3168 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3169 ld(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
3170 And(a2, a2, Operand(StackHandler::KindField::kMask));
3171 Branch(&fetch_next, ne, a2, Operand(zero_reg));
3173 // Set the top handler address to next handler past the top ENTRY handler.
3175 sd(a2, MemOperand(a3));
3177 // Get the code object (a1) and state (a2). Clear the context and frame
3178 // pointer (0 was saved in the handler).
3179 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
3181 JumpToHandlerEntry();
3185 void MacroAssembler::Allocate(int object_size,
3190 AllocationFlags flags) {
3191 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3192 if (!FLAG_inline_new) {
3193 if (emit_debug_code()) {
3194 // Trash the registers to simulate an allocation failure.
3196 li(scratch1, 0x7191);
3197 li(scratch2, 0x7291);
3203 DCHECK(!result.is(scratch1));
3204 DCHECK(!result.is(scratch2));
3205 DCHECK(!scratch1.is(scratch2));
3206 DCHECK(!scratch1.is(t9));
3207 DCHECK(!scratch2.is(t9));
3208 DCHECK(!result.is(t9));
3210 // Make object size into bytes.
3211 if ((flags & SIZE_IN_WORDS) != 0) {
3212 object_size *= kPointerSize;
3214 DCHECK(0 == (object_size & kObjectAlignmentMask));
3216 // Check relative positions of allocation top and limit addresses.
3217 // ARM adds additional checks to make sure the ldm instruction can be
3218 // used. On MIPS we don't have ldm so we don't need additional checks either.
3219 ExternalReference allocation_top =
3220 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3221 ExternalReference allocation_limit =
3222 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3225 reinterpret_cast<intptr_t>(allocation_top.address());
3227 reinterpret_cast<intptr_t>(allocation_limit.address());
3228 DCHECK((limit - top) == kPointerSize);
3230 // Set up allocation top address and object size registers.
3231 Register topaddr = scratch1;
3232 li(topaddr, Operand(allocation_top));
3234 // This code stores a temporary value in t9.
3235 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3236 // Load allocation top into result and allocation limit into t9.
3237 ld(result, MemOperand(topaddr));
3238 ld(t9, MemOperand(topaddr, kPointerSize));
3240 if (emit_debug_code()) {
3241 // Assert that result actually contains top on entry. t9 is used
3242 // immediately below so this use of t9 does not cause difference with
3243 // respect to register content between debug and release mode.
3244 ld(t9, MemOperand(topaddr));
3245 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3247 // Load allocation limit into t9. Result already contains allocation top.
3248 ld(t9, MemOperand(topaddr, limit - top));
3251 DCHECK(kPointerSize == kDoubleSize);
3252 if (emit_debug_code()) {
3253 And(at, result, Operand(kDoubleAlignmentMask));
3254 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3257 // Calculate new top and bail out if new space is exhausted. Use result
3258 // to calculate the new top.
3259 Daddu(scratch2, result, Operand(object_size));
3260 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3261 sd(scratch2, MemOperand(topaddr));
3263 // Tag object if requested.
3264 if ((flags & TAG_OBJECT) != 0) {
3265 Daddu(result, result, Operand(kHeapObjectTag));
3270 void MacroAssembler::Allocate(Register object_size,
3275 AllocationFlags flags) {
3276 if (!FLAG_inline_new) {
3277 if (emit_debug_code()) {
3278 // Trash the registers to simulate an allocation failure.
3280 li(scratch1, 0x7191);
3281 li(scratch2, 0x7291);
3287 DCHECK(!result.is(scratch1));
3288 DCHECK(!result.is(scratch2));
3289 DCHECK(!scratch1.is(scratch2));
3290 DCHECK(!object_size.is(t9));
3291 DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3293 // Check relative positions of allocation top and limit addresses.
3294 // ARM adds additional checks to make sure the ldm instruction can be
3295 // used. On MIPS we don't have ldm so we don't need additional checks either.
3296 ExternalReference allocation_top =
3297 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3298 ExternalReference allocation_limit =
3299 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3301 reinterpret_cast<intptr_t>(allocation_top.address());
3303 reinterpret_cast<intptr_t>(allocation_limit.address());
3304 DCHECK((limit - top) == kPointerSize);
3306 // Set up allocation top address and object size registers.
3307 Register topaddr = scratch1;
3308 li(topaddr, Operand(allocation_top));
3310 // This code stores a temporary value in t9.
3311 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3312 // Load allocation top into result and allocation limit into t9.
3313 ld(result, MemOperand(topaddr));
3314 ld(t9, MemOperand(topaddr, kPointerSize));
3316 if (emit_debug_code()) {
3317 // Assert that result actually contains top on entry. t9 is used
3318 // immediately below so this use of t9 does not cause difference with
3319 // respect to register content between debug and release mode.
3320 ld(t9, MemOperand(topaddr));
3321 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3323 // Load allocation limit into t9. Result already contains allocation top.
3324 ld(t9, MemOperand(topaddr, limit - top));
3327 DCHECK(kPointerSize == kDoubleSize);
3328 if (emit_debug_code()) {
3329 And(at, result, Operand(kDoubleAlignmentMask));
3330 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3333 // Calculate new top and bail out if new space is exhausted. Use result
3334 // to calculate the new top. Object size may be in words so a shift is
3335 // required to get the number of bytes.
3336 if ((flags & SIZE_IN_WORDS) != 0) {
3337 dsll(scratch2, object_size, kPointerSizeLog2);
3338 Daddu(scratch2, result, scratch2);
3340 Daddu(scratch2, result, Operand(object_size));
3342 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3344 // Update allocation top. result temporarily holds the new top.
3345 if (emit_debug_code()) {
3346 And(t9, scratch2, Operand(kObjectAlignmentMask));
3347 Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3349 sd(scratch2, MemOperand(topaddr));
3351 // Tag object if requested.
3352 if ((flags & TAG_OBJECT) != 0) {
3353 Daddu(result, result, Operand(kHeapObjectTag));
3358 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3360 ExternalReference new_space_allocation_top =
3361 ExternalReference::new_space_allocation_top_address(isolate());
3363 // Make sure the object has no tag before resetting top.
3364 And(object, object, Operand(~kHeapObjectTagMask));
3366 // Check that the object un-allocated is below the current top.
3367 li(scratch, Operand(new_space_allocation_top));
3368 ld(scratch, MemOperand(scratch));
3369 Check(less, kUndoAllocationOfNonAllocatedMemory,
3370 object, Operand(scratch));
3372 // Write the address of the object to un-allocate as the current top.
3373 li(scratch, Operand(new_space_allocation_top));
3374 sd(object, MemOperand(scratch));
3378 void MacroAssembler::AllocateTwoByteString(Register result,
3383 Label* gc_required) {
3384 // Calculate the number of bytes needed for the characters in the string while
3385 // observing object alignment.
3386 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3387 dsll(scratch1, length, 1); // Length in bytes, not chars.
3388 daddiu(scratch1, scratch1,
3389 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3390 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3392 // Allocate two-byte string in new space.
3400 // Set the map, length and hash field.
3401 InitializeNewString(result,
3403 Heap::kStringMapRootIndex,
3409 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3410 Register scratch1, Register scratch2,
3412 Label* gc_required) {
3413 // Calculate the number of bytes needed for the characters in the string
3414 // while observing object alignment.
3415 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3416 DCHECK(kCharSize == 1);
3417 daddiu(scratch1, length,
3418 kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3419 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3421 // Allocate one-byte string in new space.
3429 // Set the map, length and hash field.
3430 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3431 scratch1, scratch2);
3435 void MacroAssembler::AllocateTwoByteConsString(Register result,
3439 Label* gc_required) {
3440 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3442 InitializeNewString(result,
3444 Heap::kConsStringMapRootIndex,
3450 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3453 Label* gc_required) {
3454 Allocate(ConsString::kSize,
3461 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3462 scratch1, scratch2);
3466 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3470 Label* gc_required) {
3471 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3474 InitializeNewString(result,
3476 Heap::kSlicedStringMapRootIndex,
3482 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3486 Label* gc_required) {
3487 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3490 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3491 scratch1, scratch2);
3495 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3496 Label* not_unique_name) {
3497 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3499 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3500 Branch(&succeed, eq, at, Operand(zero_reg));
3501 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3507 // Allocates a heap number or jumps to the label if the young space is full and
3508 // a scavenge is needed.
3509 void MacroAssembler::AllocateHeapNumber(Register result,
3512 Register heap_number_map,
3514 TaggingMode tagging_mode,
3516 // Allocate an object in the heap for the heap number and tag it as a heap
3518 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3519 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3521 Heap::RootListIndex map_index = mode == MUTABLE
3522 ? Heap::kMutableHeapNumberMapRootIndex
3523 : Heap::kHeapNumberMapRootIndex;
3524 AssertIsRoot(heap_number_map, map_index);
3526 // Store heap number map in the allocated object.
3527 if (tagging_mode == TAG_RESULT) {
3528 sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3530 sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3535 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3539 Label* gc_required) {
3540 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3541 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3542 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3546 // Copies a fixed number of fields of heap objects from src to dst.
3547 void MacroAssembler::CopyFields(Register dst,
3551 DCHECK((temps & dst.bit()) == 0);
3552 DCHECK((temps & src.bit()) == 0);
3553 // Primitive implementation using only one temporary register.
3555 Register tmp = no_reg;
3556 // Find a temp register in temps list.
3557 for (int i = 0; i < kNumRegisters; i++) {
3558 if ((temps & (1 << i)) != 0) {
3563 DCHECK(!tmp.is(no_reg));
3565 for (int i = 0; i < field_count; i++) {
3566 ld(tmp, FieldMemOperand(src, i * kPointerSize));
3567 sd(tmp, FieldMemOperand(dst, i * kPointerSize));
3572 void MacroAssembler::CopyBytes(Register src,
3576 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3578 // Align src before copying in word size chunks.
3579 Branch(&byte_loop, le, length, Operand(kPointerSize));
3580 bind(&align_loop_1);
3581 And(scratch, src, kPointerSize - 1);
3582 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3583 lbu(scratch, MemOperand(src));
3585 sb(scratch, MemOperand(dst));
3587 Dsubu(length, length, Operand(1));
3588 Branch(&align_loop_1, ne, length, Operand(zero_reg));
3590 // Copy bytes in word size chunks.
3592 if (emit_debug_code()) {
3593 And(scratch, src, kPointerSize - 1);
3594 Assert(eq, kExpectingAlignmentForCopyBytes,
3595 scratch, Operand(zero_reg));
3597 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3598 ld(scratch, MemOperand(src));
3599 Daddu(src, src, kPointerSize);
3601 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3602 // Can't use unaligned access - copy byte by byte.
3603 sb(scratch, MemOperand(dst, 0));
3604 dsrl(scratch, scratch, 8);
3605 sb(scratch, MemOperand(dst, 1));
3606 dsrl(scratch, scratch, 8);
3607 sb(scratch, MemOperand(dst, 2));
3608 dsrl(scratch, scratch, 8);
3609 sb(scratch, MemOperand(dst, 3));
3610 dsrl(scratch, scratch, 8);
3611 sb(scratch, MemOperand(dst, 4));
3612 dsrl(scratch, scratch, 8);
3613 sb(scratch, MemOperand(dst, 5));
3614 dsrl(scratch, scratch, 8);
3615 sb(scratch, MemOperand(dst, 6));
3616 dsrl(scratch, scratch, 8);
3617 sb(scratch, MemOperand(dst, 7));
3620 Dsubu(length, length, Operand(kPointerSize));
3623 // Copy the last bytes if any left.
3625 Branch(&done, eq, length, Operand(zero_reg));
3627 lbu(scratch, MemOperand(src));
3629 sb(scratch, MemOperand(dst));
3631 Dsubu(length, length, Operand(1));
3632 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3637 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3638 Register end_offset,
3643 sd(filler, MemOperand(start_offset));
3644 Daddu(start_offset, start_offset, kPointerSize);
3646 Branch(&loop, lt, start_offset, Operand(end_offset));
3650 void MacroAssembler::CheckFastElements(Register map,
3653 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3654 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3655 STATIC_ASSERT(FAST_ELEMENTS == 2);
3656 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3657 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3658 Branch(fail, hi, scratch,
3659 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3663 void MacroAssembler::CheckFastObjectElements(Register map,
3666 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3667 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3668 STATIC_ASSERT(FAST_ELEMENTS == 2);
3669 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3670 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3671 Branch(fail, ls, scratch,
3672 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3673 Branch(fail, hi, scratch,
3674 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3678 void MacroAssembler::CheckFastSmiElements(Register map,
3681 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3682 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3683 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3684 Branch(fail, hi, scratch,
3685 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3689 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3691 Register elements_reg,
3696 int elements_offset) {
3697 Label smi_value, maybe_nan, have_double_value, is_nan, done;
3698 Register mantissa_reg = scratch2;
3699 Register exponent_reg = scratch3;
3701 // Handle smi values specially.
3702 JumpIfSmi(value_reg, &smi_value);
3704 // Ensure that the object is a heap number
3707 Heap::kHeapNumberMapRootIndex,
3711 // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
3713 li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3714 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3715 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3717 lwu(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3719 bind(&have_double_value);
3720 // dsll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3721 dsra(scratch1, key_reg, 32 - kDoubleSizeLog2);
3722 Daddu(scratch1, scratch1, elements_reg);
3723 sw(mantissa_reg, FieldMemOperand(
3724 scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
3725 uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
3726 sizeof(kHoleNanLower32);
3727 sw(exponent_reg, FieldMemOperand(scratch1, offset));
3731 // Could be NaN, Infinity or -Infinity. If fraction is not zero, it's NaN,
3732 // otherwise it's Infinity or -Infinity, and the non-NaN code path applies.
3733 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3734 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3736 // Load canonical NaN for storing into the double array.
3737 LoadRoot(at, Heap::kNanValueRootIndex);
3738 lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
3739 lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
3740 jmp(&have_double_value);
3743 Daddu(scratch1, elements_reg,
3744 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3746 // dsll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3747 dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
3748 Daddu(scratch1, scratch1, scratch2);
3749 // scratch1 is now effective address of the double element
3751 Register untagged_value = elements_reg;
3752 SmiUntag(untagged_value, value_reg);
3753 mtc1(untagged_value, f2);
3755 sdc1(f0, MemOperand(scratch1, 0));
3760 void MacroAssembler::CompareMapAndBranch(Register obj,
3763 Label* early_success,
3766 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3767 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3771 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3773 Label* early_success,
3776 Branch(branch_to, cond, obj_map, Operand(map));
3780 void MacroAssembler::CheckMap(Register obj,
3784 SmiCheckType smi_check_type) {
3785 if (smi_check_type == DO_SMI_CHECK) {
3786 JumpIfSmi(obj, fail);
3789 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3794 void MacroAssembler::DispatchMap(Register obj,
3797 Handle<Code> success,
3798 SmiCheckType smi_check_type) {
3800 if (smi_check_type == DO_SMI_CHECK) {
3801 JumpIfSmi(obj, &fail);
3803 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3804 Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3809 void MacroAssembler::CheckMap(Register obj,
3811 Heap::RootListIndex index,
3813 SmiCheckType smi_check_type) {
3814 if (smi_check_type == DO_SMI_CHECK) {
3815 JumpIfSmi(obj, fail);
3817 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3818 LoadRoot(at, index);
3819 Branch(fail, ne, scratch, Operand(at));
3823 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
3824 if (IsMipsSoftFloatABI) {
3827 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3832 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
3833 if (IsMipsSoftFloatABI) {
3836 Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
3841 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
3842 if (!IsMipsSoftFloatABI) {
3850 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
3851 if (!IsMipsSoftFloatABI) {
3859 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3860 DoubleRegister src2) {
3861 if (!IsMipsSoftFloatABI) {
3862 const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
3864 DCHECK(!src1.is(fparg2));
3878 // -----------------------------------------------------------------------------
3879 // JavaScript invokes.
3881 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3882 const ParameterCount& actual,
3883 Handle<Code> code_constant,
3886 bool* definitely_mismatches,
3888 const CallWrapper& call_wrapper) {
3889 bool definitely_matches = false;
3890 *definitely_mismatches = false;
3891 Label regular_invoke;
3893 // Check whether the expected and actual arguments count match. If not,
3894 // setup registers according to contract with ArgumentsAdaptorTrampoline:
3895 // a0: actual arguments count
3896 // a1: function (passed through to callee)
3897 // a2: expected arguments count
3899 // The code below is made a lot easier because the calling code already sets
3900 // up actual and expected registers according to the contract if values are
3901 // passed in registers.
3902 DCHECK(actual.is_immediate() || actual.reg().is(a0));
3903 DCHECK(expected.is_immediate() || expected.reg().is(a2));
3904 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3906 if (expected.is_immediate()) {
3907 DCHECK(actual.is_immediate());
3908 if (expected.immediate() == actual.immediate()) {
3909 definitely_matches = true;
3911 li(a0, Operand(actual.immediate()));
3912 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3913 if (expected.immediate() == sentinel) {
3914 // Don't worry about adapting arguments for builtins that
3915 // don't want that done. Skip adaption code by making it look
3916 // like we have a match between expected and actual number of
3918 definitely_matches = true;
3920 *definitely_mismatches = true;
3921 li(a2, Operand(expected.immediate()));
3924 } else if (actual.is_immediate()) {
3925 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3926 li(a0, Operand(actual.immediate()));
3928 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.reg()));
3931 if (!definitely_matches) {
3932 if (!code_constant.is_null()) {
3933 li(a3, Operand(code_constant));
3934 daddiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3937 Handle<Code> adaptor =
3938 isolate()->builtins()->ArgumentsAdaptorTrampoline();
3939 if (flag == CALL_FUNCTION) {
3940 call_wrapper.BeforeCall(CallSize(adaptor));
3942 call_wrapper.AfterCall();
3943 if (!*definitely_mismatches) {
3947 Jump(adaptor, RelocInfo::CODE_TARGET);
3949 bind(®ular_invoke);
3954 void MacroAssembler::InvokeCode(Register code,
3955 const ParameterCount& expected,
3956 const ParameterCount& actual,
3958 const CallWrapper& call_wrapper) {
3959 // You can't call a function without a valid frame.
3960 DCHECK(flag == JUMP_FUNCTION || has_frame());
3964 bool definitely_mismatches = false;
3965 InvokePrologue(expected, actual, Handle<Code>::null(), code,
3966 &done, &definitely_mismatches, flag,
3968 if (!definitely_mismatches) {
3969 if (flag == CALL_FUNCTION) {
3970 call_wrapper.BeforeCall(CallSize(code));
3972 call_wrapper.AfterCall();
3974 DCHECK(flag == JUMP_FUNCTION);
3977 // Continue here if InvokePrologue does handle the invocation due to
3978 // mismatched parameter counts.
3984 void MacroAssembler::InvokeFunction(Register function,
3985 const ParameterCount& actual,
3987 const CallWrapper& call_wrapper) {
3988 // You can't call a function without a valid frame.
3989 DCHECK(flag == JUMP_FUNCTION || has_frame());
3991 // Contract with called JS functions requires that function is passed in a1.
3992 DCHECK(function.is(a1));
3993 Register expected_reg = a2;
3994 Register code_reg = a3;
3995 ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3996 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3997 // The argument count is stored as int32_t on 64-bit platforms.
3998 // TODO(plind): Smi on 32-bit platforms.
4000 FieldMemOperand(code_reg,
4001 SharedFunctionInfo::kFormalParameterCountOffset));
4002 ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4003 ParameterCount expected(expected_reg);
4004 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
4008 void MacroAssembler::InvokeFunction(Register function,
4009 const ParameterCount& expected,
4010 const ParameterCount& actual,
4012 const CallWrapper& call_wrapper) {
4013 // You can't call a function without a valid frame.
4014 DCHECK(flag == JUMP_FUNCTION || has_frame());
4016 // Contract with called JS functions requires that function is passed in a1.
4017 DCHECK(function.is(a1));
4019 // Get the function and setup the context.
4020 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4022 // We call indirectly through the code field in the function to
4023 // allow recompilation to take effect without changing any of the
4025 ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4026 InvokeCode(a3, expected, actual, flag, call_wrapper);
4030 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4031 const ParameterCount& expected,
4032 const ParameterCount& actual,
4034 const CallWrapper& call_wrapper) {
4036 InvokeFunction(a1, expected, actual, flag, call_wrapper);
4040 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
4044 ld(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
4045 IsInstanceJSObjectType(map, scratch, fail);
4049 void MacroAssembler::IsInstanceJSObjectType(Register map,
4052 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
4053 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
4054 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
4058 void MacroAssembler::IsObjectJSStringType(Register object,
4061 DCHECK(kNotStringTag != 0);
4063 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4064 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4065 And(scratch, scratch, Operand(kIsNotStringMask));
4066 Branch(fail, ne, scratch, Operand(zero_reg));
4070 void MacroAssembler::IsObjectNameType(Register object,
4073 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4074 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4075 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4079 // ---------------------------------------------------------------------------
4080 // Support functions.
4083 void MacroAssembler::TryGetFunctionPrototype(Register function,
4087 bool miss_on_bound_function) {
4089 if (miss_on_bound_function) {
4090 // Check that the receiver isn't a smi.
4091 JumpIfSmi(function, miss);
4093 // Check that the function really is a function. Load map into result reg.
4094 GetObjectType(function, result, scratch);
4095 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
4098 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
4100 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
4101 And(scratch, scratch,
4102 Operand(1 << SharedFunctionInfo::kBoundFunction));
4103 Branch(miss, ne, scratch, Operand(zero_reg));
4105 // Make sure that the function has an instance prototype.
4106 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
4107 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
4108 Branch(&non_instance, ne, scratch, Operand(zero_reg));
4111 // Get the prototype or initial map from the function.
4113 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4115 // If the prototype or initial map is the hole, don't return it and
4116 // simply miss the cache instead. This will allow us to allocate a
4117 // prototype object on-demand in the runtime system.
4118 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4119 Branch(miss, eq, result, Operand(t8));
4121 // If the function does not have an initial map, we're done.
4123 GetObjectType(result, scratch, scratch);
4124 Branch(&done, ne, scratch, Operand(MAP_TYPE));
4126 // Get the prototype from the initial map.
4127 ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
4129 if (miss_on_bound_function) {
4132 // Non-instance prototype: Fetch prototype from constructor field
4134 bind(&non_instance);
4135 ld(result, FieldMemOperand(result, Map::kConstructorOffset));
4143 void MacroAssembler::GetObjectType(Register object,
4145 Register type_reg) {
4146 ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
4147 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4151 // -----------------------------------------------------------------------------
4154 void MacroAssembler::CallStub(CodeStub* stub,
4155 TypeFeedbackId ast_id,
4159 BranchDelaySlot bd) {
4160 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
4161 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4166 void MacroAssembler::TailCallStub(CodeStub* stub,
4170 BranchDelaySlot bd) {
4171 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4175 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
4176 int64_t offset = (ref0.address() - ref1.address());
4177 DCHECK(static_cast<int>(offset) == offset);
4178 return static_cast<int>(offset);
4182 void MacroAssembler::CallApiFunctionAndReturn(
4183 Register function_address,
4184 ExternalReference thunk_ref,
4186 MemOperand return_value_operand,
4187 MemOperand* context_restore_operand) {
4188 ExternalReference next_address =
4189 ExternalReference::handle_scope_next_address(isolate());
4190 const int kNextOffset = 0;
4191 const int kLimitOffset = AddressOffset(
4192 ExternalReference::handle_scope_limit_address(isolate()),
4194 const int kLevelOffset = AddressOffset(
4195 ExternalReference::handle_scope_level_address(isolate()),
4198 DCHECK(function_address.is(a1) || function_address.is(a2));
4200 Label profiler_disabled;
4201 Label end_profiler_check;
4202 li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
4203 lb(t9, MemOperand(t9, 0));
4204 Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
4206 // Additional parameter is the address of the actual callback.
4207 li(t9, Operand(thunk_ref));
4208 jmp(&end_profiler_check);
4210 bind(&profiler_disabled);
4211 mov(t9, function_address);
4212 bind(&end_profiler_check);
4214 // Allocate HandleScope in callee-save registers.
4215 li(s3, Operand(next_address));
4216 ld(s0, MemOperand(s3, kNextOffset));
4217 ld(s1, MemOperand(s3, kLimitOffset));
4218 ld(s2, MemOperand(s3, kLevelOffset));
4219 Daddu(s2, s2, Operand(1));
4220 sd(s2, MemOperand(s3, kLevelOffset));
4222 if (FLAG_log_timer_events) {
4223 FrameScope frame(this, StackFrame::MANUAL);
4224 PushSafepointRegisters();
4225 PrepareCallCFunction(1, a0);
4226 li(a0, Operand(ExternalReference::isolate_address(isolate())));
4227 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
4228 PopSafepointRegisters();
4231 // Native call returns to the DirectCEntry stub which redirects to the
4232 // return address pushed on stack (could have moved after GC).
4233 // DirectCEntry stub itself is generated early and never moves.
4234 DirectCEntryStub stub(isolate());
4235 stub.GenerateCall(this, t9);
4237 if (FLAG_log_timer_events) {
4238 FrameScope frame(this, StackFrame::MANUAL);
4239 PushSafepointRegisters();
4240 PrepareCallCFunction(1, a0);
4241 li(a0, Operand(ExternalReference::isolate_address(isolate())));
4242 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
4243 PopSafepointRegisters();
4246 Label promote_scheduled_exception;
4247 Label exception_handled;
4248 Label delete_allocated_handles;
4249 Label leave_exit_frame;
4250 Label return_value_loaded;
4252 // Load value from ReturnValue.
4253 ld(v0, return_value_operand);
4254 bind(&return_value_loaded);
4256 // No more valid handles (the result handle was the last one). Restore
4257 // previous handle scope.
4258 sd(s0, MemOperand(s3, kNextOffset));
4259 if (emit_debug_code()) {
4260 ld(a1, MemOperand(s3, kLevelOffset));
4261 Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
4263 Dsubu(s2, s2, Operand(1));
4264 sd(s2, MemOperand(s3, kLevelOffset));
4265 ld(at, MemOperand(s3, kLimitOffset));
4266 Branch(&delete_allocated_handles, ne, s1, Operand(at));
4268 // Check if the function scheduled an exception.
4269 bind(&leave_exit_frame);
4270 LoadRoot(a4, Heap::kTheHoleValueRootIndex);
4271 li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
4272 ld(a5, MemOperand(at));
4273 Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
4274 bind(&exception_handled);
4276 bool restore_context = context_restore_operand != NULL;
4277 if (restore_context) {
4278 ld(cp, *context_restore_operand);
4280 li(s0, Operand(stack_space));
4281 LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
4283 bind(&promote_scheduled_exception);
4285 FrameScope frame(this, StackFrame::INTERNAL);
4286 CallExternalReference(
4287 ExternalReference(Runtime::kPromoteScheduledException, isolate()),
4290 jmp(&exception_handled);
4292 // HandleScope limit has changed. Delete allocated extensions.
4293 bind(&delete_allocated_handles);
4294 sd(s1, MemOperand(s3, kLimitOffset));
4297 PrepareCallCFunction(1, s1);
4298 li(a0, Operand(ExternalReference::isolate_address(isolate())));
4299 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
4302 jmp(&leave_exit_frame);
4306 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4307 return has_frame_ || !stub->SometimesSetsUpAFrame();
4311 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4312 // If the hash field contains an array index pick it out. The assert checks
4313 // that the constants for the maximum number of digits for an array index
4314 // cached in the hash field and the number of bits reserved for it does not
4316 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4317 (1 << String::kArrayIndexValueBits));
4318 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4322 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4326 Register heap_number_map,
4328 ObjectToDoubleFlags flags) {
4330 if ((flags & OBJECT_NOT_SMI) == 0) {
4332 JumpIfNotSmi(object, ¬_smi);
4333 // Remove smi tag and convert to double.
4334 // dsra(scratch1, object, kSmiTagSize);
4335 dsra32(scratch1, object, 0);
4336 mtc1(scratch1, result);
4337 cvt_d_w(result, result);
4341 // Check for heap number and load double value from it.
4342 ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4343 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4345 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4346 // If exponent is all ones the number is either a NaN or +/-Infinity.
4347 Register exponent = scratch1;
4348 Register mask_reg = scratch2;
4349 lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4350 li(mask_reg, HeapNumber::kExponentMask);
4352 And(exponent, exponent, mask_reg);
4353 Branch(not_number, eq, exponent, Operand(mask_reg));
4355 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4360 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4362 Register scratch1) {
4363 // dsra(scratch1, smi, kSmiTagSize);
4364 dsra32(scratch1, smi, 0);
4365 mtc1(scratch1, value);
4366 cvt_d_w(value, value);
4370 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4373 Register overflow_dst,
4375 DCHECK(!dst.is(overflow_dst));
4376 DCHECK(!dst.is(scratch));
4377 DCHECK(!overflow_dst.is(scratch));
4378 DCHECK(!overflow_dst.is(left));
4379 DCHECK(!overflow_dst.is(right));
4381 if (left.is(right) && dst.is(left)) {
4382 DCHECK(!dst.is(t9));
4383 DCHECK(!scratch.is(t9));
4384 DCHECK(!left.is(t9));
4385 DCHECK(!right.is(t9));
4386 DCHECK(!overflow_dst.is(t9));
4392 mov(scratch, left); // Preserve left.
4393 daddu(dst, left, right); // Left is overwritten.
4394 xor_(scratch, dst, scratch); // Original left.
4395 xor_(overflow_dst, dst, right);
4396 and_(overflow_dst, overflow_dst, scratch);
4397 } else if (dst.is(right)) {
4398 mov(scratch, right); // Preserve right.
4399 daddu(dst, left, right); // Right is overwritten.
4400 xor_(scratch, dst, scratch); // Original right.
4401 xor_(overflow_dst, dst, left);
4402 and_(overflow_dst, overflow_dst, scratch);
4404 daddu(dst, left, right);
4405 xor_(overflow_dst, dst, left);
4406 xor_(scratch, dst, right);
4407 and_(overflow_dst, scratch, overflow_dst);
4412 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4415 Register overflow_dst,
4417 DCHECK(!dst.is(overflow_dst));
4418 DCHECK(!dst.is(scratch));
4419 DCHECK(!overflow_dst.is(scratch));
4420 DCHECK(!overflow_dst.is(left));
4421 DCHECK(!overflow_dst.is(right));
4422 DCHECK(!scratch.is(left));
4423 DCHECK(!scratch.is(right));
4425 // This happens with some crankshaft code. Since Subu works fine if
4426 // left == right, let's not make that restriction here.
4427 if (left.is(right)) {
4429 mov(overflow_dst, zero_reg);
4434 mov(scratch, left); // Preserve left.
4435 dsubu(dst, left, right); // Left is overwritten.
4436 xor_(overflow_dst, dst, scratch); // scratch is original left.
4437 xor_(scratch, scratch, right); // scratch is original left.
4438 and_(overflow_dst, scratch, overflow_dst);
4439 } else if (dst.is(right)) {
4440 mov(scratch, right); // Preserve right.
4441 dsubu(dst, left, right); // Right is overwritten.
4442 xor_(overflow_dst, dst, left);
4443 xor_(scratch, left, scratch); // Original right.
4444 and_(overflow_dst, scratch, overflow_dst);
4446 dsubu(dst, left, right);
4447 xor_(overflow_dst, dst, left);
4448 xor_(scratch, left, right);
4449 and_(overflow_dst, scratch, overflow_dst);
4454 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4456 SaveFPRegsMode save_doubles) {
4457 // All parameters are on the stack. v0 has the return value after call.
4459 // If the expected number of arguments of the runtime function is
4460 // constant, we check that the actual number of arguments match the
4462 CHECK(f->nargs < 0 || f->nargs == num_arguments);
4464 // TODO(1236192): Most runtime routines don't need the number of
4465 // arguments passed in because it is constant. At some point we
4466 // should remove this need and make the runtime routine entry code
4468 PrepareCEntryArgs(num_arguments);
4469 PrepareCEntryFunction(ExternalReference(f, isolate()));
4470 CEntryStub stub(isolate(), 1, save_doubles);
4475 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4477 BranchDelaySlot bd) {
4478 PrepareCEntryArgs(num_arguments);
4479 PrepareCEntryFunction(ext);
4481 CEntryStub stub(isolate(), 1);
4482 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4486 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4489 // TODO(1236192): Most runtime routines don't need the number of
4490 // arguments passed in because it is constant. At some point we
4491 // should remove this need and make the runtime routine entry code
4493 PrepareCEntryArgs(num_arguments);
4494 JumpToExternalReference(ext);
4498 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4501 TailCallExternalReference(ExternalReference(fid, isolate()),
4507 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4508 BranchDelaySlot bd) {
4509 PrepareCEntryFunction(builtin);
4510 CEntryStub stub(isolate(), 1);
4511 Jump(stub.GetCode(),
4512 RelocInfo::CODE_TARGET,
4520 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4522 const CallWrapper& call_wrapper) {
4523 // You can't call a builtin without a valid frame.
4524 DCHECK(flag == JUMP_FUNCTION || has_frame());
4526 GetBuiltinEntry(t9, id);
4527 if (flag == CALL_FUNCTION) {
4528 call_wrapper.BeforeCall(CallSize(t9));
4530 call_wrapper.AfterCall();
4532 DCHECK(flag == JUMP_FUNCTION);
4538 void MacroAssembler::GetBuiltinFunction(Register target,
4539 Builtins::JavaScript id) {
4540 // Load the builtins object into target register.
4541 ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4542 ld(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4543 // Load the JavaScript builtin function from the builtins object.
4544 ld(target, FieldMemOperand(target,
4545 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4549 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4550 DCHECK(!target.is(a1));
4551 GetBuiltinFunction(a1, id);
4552 // Load the code entry point from the builtins object.
4553 ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4557 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4558 Register scratch1, Register scratch2) {
4559 if (FLAG_native_code_counters && counter->Enabled()) {
4560 li(scratch1, Operand(value));
4561 li(scratch2, Operand(ExternalReference(counter)));
4562 sd(scratch1, MemOperand(scratch2));
4567 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4568 Register scratch1, Register scratch2) {
4570 if (FLAG_native_code_counters && counter->Enabled()) {
4571 li(scratch2, Operand(ExternalReference(counter)));
4572 ld(scratch1, MemOperand(scratch2));
4573 Daddu(scratch1, scratch1, Operand(value));
4574 sd(scratch1, MemOperand(scratch2));
4579 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4580 Register scratch1, Register scratch2) {
4582 if (FLAG_native_code_counters && counter->Enabled()) {
4583 li(scratch2, Operand(ExternalReference(counter)));
4584 ld(scratch1, MemOperand(scratch2));
4585 Dsubu(scratch1, scratch1, Operand(value));
4586 sd(scratch1, MemOperand(scratch2));
4591 // -----------------------------------------------------------------------------
4594 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4595 Register rs, Operand rt) {
4596 if (emit_debug_code())
4597 Check(cc, reason, rs, rt);
4601 void MacroAssembler::AssertFastElements(Register elements) {
4602 if (emit_debug_code()) {
4603 DCHECK(!elements.is(at));
4606 ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4607 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4608 Branch(&ok, eq, elements, Operand(at));
4609 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4610 Branch(&ok, eq, elements, Operand(at));
4611 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4612 Branch(&ok, eq, elements, Operand(at));
4613 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4620 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4621 Register rs, Operand rt) {
4623 Branch(&L, cc, rs, rt);
4625 // Will not return here.
4630 void MacroAssembler::Abort(BailoutReason reason) {
4634 const char* msg = GetBailoutReason(reason);
4636 RecordComment("Abort message: ");
4640 if (FLAG_trap_on_abort) {
4646 li(a0, Operand(Smi::FromInt(reason)));
4648 // Disable stub call restrictions to always allow calls to abort.
4650 // We don't actually want to generate a pile of code for this, so just
4651 // claim there is a stack frame, without generating one.
4652 FrameScope scope(this, StackFrame::NONE);
4653 CallRuntime(Runtime::kAbort, 1);
4655 CallRuntime(Runtime::kAbort, 1);
4657 // Will not return here.
4658 if (is_trampoline_pool_blocked()) {
4659 // If the calling code cares about the exact number of
4660 // instructions generated, we insert padding here to keep the size
4661 // of the Abort macro constant.
4662 // Currently in debug mode with debug_code enabled the number of
4663 // generated instructions is 10, so we use this as a maximum value.
4664 static const int kExpectedAbortInstructions = 10;
4665 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4666 DCHECK(abort_instructions <= kExpectedAbortInstructions);
4667 while (abort_instructions++ < kExpectedAbortInstructions) {
4674 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4675 if (context_chain_length > 0) {
4676 // Move up the chain of contexts to the context containing the slot.
4677 ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4678 for (int i = 1; i < context_chain_length; i++) {
4679 ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4682 // Slot is in the current function context. Move it into the
4683 // destination register in case we store into it (the write barrier
4684 // cannot be allowed to destroy the context in esi).
4690 void MacroAssembler::LoadTransitionedArrayMapConditional(
4691 ElementsKind expected_kind,
4692 ElementsKind transitioned_kind,
4693 Register map_in_out,
4695 Label* no_map_match) {
4696 // Load the global or builtins object from the current context.
4698 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4699 ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4701 // Check that the function's map is the same as the expected cached map.
4704 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4705 size_t offset = expected_kind * kPointerSize +
4706 FixedArrayBase::kHeaderSize;
4707 ld(at, FieldMemOperand(scratch, offset));
4708 Branch(no_map_match, ne, map_in_out, Operand(at));
4710 // Use the transitioned cached map.
4711 offset = transitioned_kind * kPointerSize +
4712 FixedArrayBase::kHeaderSize;
4713 ld(map_in_out, FieldMemOperand(scratch, offset));
4717 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4718 // Load the global or builtins object from the current context.
4720 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4721 // Load the native context from the global or builtins object.
4722 ld(function, FieldMemOperand(function,
4723 GlobalObject::kNativeContextOffset));
4724 // Load the function from the native context.
4725 ld(function, MemOperand(function, Context::SlotOffset(index)));
4729 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4732 // Load the initial map. The global functions all have initial maps.
4733 ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4734 if (emit_debug_code()) {
4736 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4739 Abort(kGlobalFunctionsMustHaveInitialMap);
4745 void MacroAssembler::StubPrologue() {
4747 Push(Smi::FromInt(StackFrame::STUB));
4748 // Adjust FP to point to saved FP.
4749 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4753 void MacroAssembler::Prologue(bool code_pre_aging) {
4754 PredictableCodeSizeScope predictible_code_size_scope(
4755 this, kNoCodeAgeSequenceLength);
4756 // The following three instructions must remain together and unmodified
4757 // for code aging to work properly.
4758 if (code_pre_aging) {
4759 // Pre-age the code.
4760 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4761 nop(Assembler::CODE_AGE_MARKER_NOP);
4762 // Load the stub address to t9 and call it,
4763 // GetCodeAgeAndParity() extracts the stub address from this instruction.
4765 Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
4767 nop(); // Prevent jalr to jal optimization.
4769 nop(); // Branch delay slot nop.
4770 nop(); // Pad the empty space.
4772 Push(ra, fp, cp, a1);
4773 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4774 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4775 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4776 // Adjust fp to point to caller's fp.
4777 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4782 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4783 daddiu(sp, sp, -5 * kPointerSize);
4784 li(t8, Operand(Smi::FromInt(type)));
4785 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4786 sd(ra, MemOperand(sp, 4 * kPointerSize));
4787 sd(fp, MemOperand(sp, 3 * kPointerSize));
4788 sd(cp, MemOperand(sp, 2 * kPointerSize));
4789 sd(t8, MemOperand(sp, 1 * kPointerSize));
4790 sd(t9, MemOperand(sp, 0 * kPointerSize));
4791 // Adjust FP to point to saved FP.
4793 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4797 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4799 ld(fp, MemOperand(sp, 0 * kPointerSize));
4800 ld(ra, MemOperand(sp, 1 * kPointerSize));
4801 daddiu(sp, sp, 2 * kPointerSize);
4805 void MacroAssembler::EnterExitFrame(bool save_doubles,
4807 // Set up the frame structure on the stack.
4808 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4809 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4810 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4812 // This is how the stack will look:
4813 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4814 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4815 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4816 // [fp - 1 (==kSPOffset)] - sp of the called function
4817 // [fp - 2 (==kCodeOffset)] - CodeObject
4818 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4819 // new stack (will contain saved ra)
4822 daddiu(sp, sp, -4 * kPointerSize);
4823 sd(ra, MemOperand(sp, 3 * kPointerSize));
4824 sd(fp, MemOperand(sp, 2 * kPointerSize));
4825 daddiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4827 if (emit_debug_code()) {
4828 sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4831 // Accessed from ExitFrame::code_slot.
4832 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4833 sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4835 // Save the frame pointer and the context in top.
4836 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4837 sd(fp, MemOperand(t8));
4838 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4839 sd(cp, MemOperand(t8));
4841 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4843 // The stack is already aligned to 0 modulo 8 for stores with sdc1.
4844 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
4845 int space = kNumOfSavedRegisters * kDoubleSize ;
4846 Dsubu(sp, sp, Operand(space));
4847 // Remember: we only need to save every 2nd double FPU value.
4848 for (int i = 0; i < kNumOfSavedRegisters; i++) {
4849 FPURegister reg = FPURegister::from_code(2 * i);
4850 sdc1(reg, MemOperand(sp, i * kDoubleSize));
4854 // Reserve place for the return address, stack space and an optional slot
4855 // (used by the DirectCEntryStub to hold the return value if a struct is
4856 // returned) and align the frame preparing for calling the runtime function.
4857 DCHECK(stack_space >= 0);
4858 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4859 if (frame_alignment > 0) {
4860 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4861 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4864 // Set the exit frame sp value to point just before the return address
4866 daddiu(at, sp, kPointerSize);
4867 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4871 void MacroAssembler::LeaveExitFrame(bool save_doubles,
4872 Register argument_count,
4873 bool restore_context,
4875 // Optionally restore all double registers.
4877 // Remember: we only need to restore every 2nd double FPU value.
4878 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
4879 Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize +
4880 kNumOfSavedRegisters * kDoubleSize));
4881 for (int i = 0; i < kNumOfSavedRegisters; i++) {
4882 FPURegister reg = FPURegister::from_code(2 * i);
4883 ldc1(reg, MemOperand(t8, i * kDoubleSize));
4888 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4889 sd(zero_reg, MemOperand(t8));
4891 // Restore current context from top and clear it in debug mode.
4892 if (restore_context) {
4893 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4894 ld(cp, MemOperand(t8));
4897 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4898 sd(a3, MemOperand(t8));
4901 // Pop the arguments, restore registers, and return.
4902 mov(sp, fp); // Respect ABI stack constraint.
4903 ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4904 ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4906 if (argument_count.is_valid()) {
4907 dsll(t8, argument_count, kPointerSizeLog2);
4912 Ret(USE_DELAY_SLOT);
4913 // If returning, the instruction in the delay slot will be the addiu below.
4915 daddiu(sp, sp, 2 * kPointerSize);
4919 void MacroAssembler::InitializeNewString(Register string,
4921 Heap::RootListIndex map_index,
4923 Register scratch2) {
4924 // dsll(scratch1, length, kSmiTagSize);
4925 dsll32(scratch1, length, 0);
4926 LoadRoot(scratch2, map_index);
4927 sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
4928 li(scratch1, Operand(String::kEmptyHashField));
4929 sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4930 sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4934 int MacroAssembler::ActivationFrameAlignment() {
4935 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
4936 // Running on the real platform. Use the alignment as mandated by the local
4938 // Note: This will break if we ever start generating snapshots on one Mips
4939 // platform for another Mips platform with a different alignment.
4940 return base::OS::ActivationFrameAlignment();
4941 #else // V8_HOST_ARCH_MIPS
4942 // If we are using the simulator then we should always align to the expected
4943 // alignment. As the simulator is used to generate snapshots we do not know
4944 // if the target platform will need alignment, so this is controlled from a
4946 return FLAG_sim_stack_alignment;
4947 #endif // V8_HOST_ARCH_MIPS
4951 void MacroAssembler::AssertStackIsAligned() {
4952 if (emit_debug_code()) {
4953 const int frame_alignment = ActivationFrameAlignment();
4954 const int frame_alignment_mask = frame_alignment - 1;
4956 if (frame_alignment > kPointerSize) {
4957 Label alignment_as_expected;
4958 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4959 andi(at, sp, frame_alignment_mask);
4960 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4961 // Don't use Check here, as it will call Runtime_Abort re-entering here.
4962 stop("Unexpected stack alignment");
4963 bind(&alignment_as_expected);
4969 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4972 Label* not_power_of_two_or_zero) {
4973 Dsubu(scratch, reg, Operand(1));
4974 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4975 scratch, Operand(zero_reg));
4976 and_(at, scratch, reg); // In the delay slot.
4977 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4981 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4982 DCHECK(!reg.is(overflow));
4983 mov(overflow, reg); // Save original value.
4985 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
4989 void MacroAssembler::SmiTagCheckOverflow(Register dst,
4991 Register overflow) {
4993 // Fall back to slower case.
4994 SmiTagCheckOverflow(dst, overflow);
4996 DCHECK(!dst.is(src));
4997 DCHECK(!dst.is(overflow));
4998 DCHECK(!src.is(overflow));
5000 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
5005 void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
5006 if (SmiValuesAre32Bits()) {
5007 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5015 void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
5016 if (SmiValuesAre32Bits()) {
5017 // TODO(plind): not clear if lw or ld faster here, need micro-benchmark.
5018 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5019 dsll(dst, dst, scale);
5022 DCHECK(scale >= kSmiTagSize);
5023 sll(dst, dst, scale - kSmiTagSize);
5028 // Returns 2 values: the Smi and a scaled version of the int within the Smi.
5029 void MacroAssembler::SmiLoadWithScale(Register d_smi,
5033 if (SmiValuesAre32Bits()) {
5035 dsra(d_scaled, d_smi, kSmiShift - scale);
5038 DCHECK(scale >= kSmiTagSize);
5039 sll(d_scaled, d_smi, scale - kSmiTagSize);
5044 // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
5045 void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
5049 if (SmiValuesAre32Bits()) {
5050 lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
5051 dsll(d_scaled, d_int, scale);
5054 // Need both the int and the scaled in, so use two instructions.
5056 sll(d_scaled, d_int, scale);
5061 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5064 // DCHECK(!dst.is(src));
5065 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5070 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5072 Label* non_smi_case) {
5073 // DCHECK(!dst.is(src));
5074 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
5078 void MacroAssembler::JumpIfSmi(Register value,
5081 BranchDelaySlot bd) {
5082 DCHECK_EQ(0, kSmiTag);
5083 andi(scratch, value, kSmiTagMask);
5084 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5087 void MacroAssembler::JumpIfNotSmi(Register value,
5088 Label* not_smi_label,
5090 BranchDelaySlot bd) {
5091 DCHECK_EQ(0, kSmiTag);
5092 andi(scratch, value, kSmiTagMask);
5093 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5097 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5099 Label* on_not_both_smi) {
5100 STATIC_ASSERT(kSmiTag == 0);
5101 // TODO(plind): Find some better to fix this assert issue.
5102 #if defined(__APPLE__)
5103 DCHECK_EQ(1, kSmiTagMask);
5105 DCHECK_EQ((uint64_t)1, kSmiTagMask);
5107 or_(at, reg1, reg2);
5108 JumpIfNotSmi(at, on_not_both_smi);
5112 void MacroAssembler::JumpIfEitherSmi(Register reg1,
5114 Label* on_either_smi) {
5115 STATIC_ASSERT(kSmiTag == 0);
5116 // TODO(plind): Find some better to fix this assert issue.
5117 #if defined(__APPLE__)
5118 DCHECK_EQ(1, kSmiTagMask);
5120 DCHECK_EQ((uint64_t)1, kSmiTagMask);
5122 // Both Smi tags must be 1 (not Smi).
5123 and_(at, reg1, reg2);
5124 JumpIfSmi(at, on_either_smi);
5128 void MacroAssembler::AssertNotSmi(Register object) {
5129 if (emit_debug_code()) {
5130 STATIC_ASSERT(kSmiTag == 0);
5131 andi(at, object, kSmiTagMask);
5132 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
5137 void MacroAssembler::AssertSmi(Register object) {
5138 if (emit_debug_code()) {
5139 STATIC_ASSERT(kSmiTag == 0);
5140 andi(at, object, kSmiTagMask);
5141 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
5146 void MacroAssembler::AssertString(Register object) {
5147 if (emit_debug_code()) {
5148 STATIC_ASSERT(kSmiTag == 0);
5150 Check(ne, kOperandIsASmiAndNotAString, a4, Operand(zero_reg));
5152 ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
5153 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5154 Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
5160 void MacroAssembler::AssertName(Register object) {
5161 if (emit_debug_code()) {
5162 STATIC_ASSERT(kSmiTag == 0);
5164 Check(ne, kOperandIsASmiAndNotAName, a4, Operand(zero_reg));
5166 ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
5167 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
5168 Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
5174 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5176 if (emit_debug_code()) {
5177 Label done_checking;
5178 AssertNotSmi(object);
5179 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5180 Branch(&done_checking, eq, object, Operand(scratch));
5182 ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
5183 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
5184 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
5186 bind(&done_checking);
5191 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
5192 if (emit_debug_code()) {
5193 DCHECK(!reg.is(at));
5194 LoadRoot(at, index);
5195 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
5200 void MacroAssembler::JumpIfNotHeapNumber(Register object,
5201 Register heap_number_map,
5203 Label* on_not_heap_number) {
5204 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5205 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
5206 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
5210 void MacroAssembler::LookupNumberStringCache(Register object,
5216 // Use of registers. Register result is used as a temporary.
5217 Register number_string_cache = result;
5218 Register mask = scratch3;
5220 // Load the number string cache.
5221 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
5223 // Make the hash mask from the length of the number string cache. It
5224 // contains two elements (number and string) for each cache entry.
5225 ld(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
5226 // Divide length by two (length is a smi).
5227 // dsra(mask, mask, kSmiTagSize + 1);
5228 dsra32(mask, mask, 1);
5229 Daddu(mask, mask, -1); // Make mask.
5231 // Calculate the entry in the number string cache. The hash value in the
5232 // number string cache for smis is just the smi value, and the hash for
5233 // doubles is the xor of the upper and lower words. See
5234 // Heap::GetNumberStringCache.
5236 Label load_result_from_cache;
5237 JumpIfSmi(object, &is_smi);
5240 Heap::kHeapNumberMapRootIndex,
5244 STATIC_ASSERT(8 == kDoubleSize);
5247 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
5248 ld(scratch2, MemOperand(scratch1, kPointerSize));
5249 ld(scratch1, MemOperand(scratch1, 0));
5250 Xor(scratch1, scratch1, Operand(scratch2));
5251 And(scratch1, scratch1, Operand(mask));
5253 // Calculate address of entry in string cache: each entry consists
5254 // of two pointer sized fields.
5255 dsll(scratch1, scratch1, kPointerSizeLog2 + 1);
5256 Daddu(scratch1, number_string_cache, scratch1);
5258 Register probe = mask;
5259 ld(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
5260 JumpIfSmi(probe, not_found);
5261 ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
5262 ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
5263 BranchF(&load_result_from_cache, NULL, eq, f12, f14);
5267 Register scratch = scratch1;
5268 // dsra(scratch, object, 1); // Shift away the tag.
5269 dsra32(scratch, scratch, 0);
5270 And(scratch, mask, Operand(scratch));
5272 // Calculate address of entry in string cache: each entry consists
5273 // of two pointer sized fields.
5274 dsll(scratch, scratch, kPointerSizeLog2 + 1);
5275 Daddu(scratch, number_string_cache, scratch);
5277 // Check if the entry is the smi we are looking for.
5278 ld(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
5279 Branch(not_found, ne, object, Operand(probe));
5281 // Get the result from the cache.
5282 bind(&load_result_from_cache);
5283 ld(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
5285 IncrementCounter(isolate()->counters()->number_to_string_native(),
5292 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
5293 Register first, Register second, Register scratch1, Register scratch2,
5295 // Test that both first and second are sequential one-byte strings.
5296 // Assume that they are non-smis.
5297 ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
5298 ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
5299 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
5300 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
5302 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
5307 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
5312 // Check that neither is a smi.
5313 STATIC_ASSERT(kSmiTag == 0);
5314 And(scratch1, first, Operand(second));
5315 JumpIfSmi(scratch1, failure);
5316 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
5321 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
5322 Register first, Register second, Register scratch1, Register scratch2,
5324 const int kFlatOneByteStringMask =
5325 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5326 const int kFlatOneByteStringTag =
5327 kStringTag | kOneByteStringTag | kSeqStringTag;
5328 DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
5329 andi(scratch1, first, kFlatOneByteStringMask);
5330 Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
5331 andi(scratch2, second, kFlatOneByteStringMask);
5332 Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
5336 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
5339 const int kFlatOneByteStringMask =
5340 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5341 const int kFlatOneByteStringTag =
5342 kStringTag | kOneByteStringTag | kSeqStringTag;
5343 And(scratch, type, Operand(kFlatOneByteStringMask));
5344 Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
5348 static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
5350 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5351 int num_double_arguments) {
5352 int stack_passed_words = 0;
5353 num_reg_arguments += 2 * num_double_arguments;
5355 // O32: Up to four simple arguments are passed in registers a0..a3.
5356 // N64: Up to eight simple arguments are passed in registers a0..a7.
5357 if (num_reg_arguments > kRegisterPassedArguments) {
5358 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5360 stack_passed_words += kCArgSlotCount;
5361 return stack_passed_words;
5365 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5369 uint32_t encoding_mask) {
5372 Check(ne, kNonObject, at, Operand(zero_reg));
5374 ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
5375 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5377 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5378 li(scratch, Operand(encoding_mask));
5379 Check(eq, kUnexpectedStringType, at, Operand(scratch));
5381 // TODO(plind): requires Smi size check code for mips32.
5383 ld(at, FieldMemOperand(string, String::kLengthOffset));
5384 Check(lt, kIndexIsTooLarge, index, Operand(at));
5386 DCHECK(Smi::FromInt(0) == 0);
5387 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5391 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5392 int num_double_arguments,
5394 int frame_alignment = ActivationFrameAlignment();
5396 // n64: Up to eight simple arguments in a0..a3, a4..a7, No argument slots.
5397 // O32: Up to four simple arguments are passed in registers a0..a3.
5398 // Those four arguments must have reserved argument slots on the stack for
5399 // mips, even though those argument slots are not normally used.
5400 // Both ABIs: Remaining arguments are pushed on the stack, above (higher
5401 // address than) the (O32) argument slots. (arg slot calculation handled by
5402 // CalculateStackPassedWords()).
5403 int stack_passed_arguments = CalculateStackPassedWords(
5404 num_reg_arguments, num_double_arguments);
5405 if (frame_alignment > kPointerSize) {
5406 // Make stack end at alignment and make room for num_arguments - 4 words
5407 // and the original value of sp.
5409 Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5410 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5411 And(sp, sp, Operand(-frame_alignment));
5412 sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5414 Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5419 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5421 PrepareCallCFunction(num_reg_arguments, 0, scratch);
5425 void MacroAssembler::CallCFunction(ExternalReference function,
5426 int num_reg_arguments,
5427 int num_double_arguments) {
5428 li(t8, Operand(function));
5429 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5433 void MacroAssembler::CallCFunction(Register function,
5434 int num_reg_arguments,
5435 int num_double_arguments) {
5436 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5440 void MacroAssembler::CallCFunction(ExternalReference function,
5441 int num_arguments) {
5442 CallCFunction(function, num_arguments, 0);
5446 void MacroAssembler::CallCFunction(Register function,
5447 int num_arguments) {
5448 CallCFunction(function, num_arguments, 0);
5452 void MacroAssembler::CallCFunctionHelper(Register function,
5453 int num_reg_arguments,
5454 int num_double_arguments) {
5455 DCHECK(has_frame());
5456 // Make sure that the stack is aligned before calling a C function unless
5457 // running in the simulator. The simulator has its own alignment check which
5458 // provides more information.
5459 // The argument stots are presumed to have been set up by
5460 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5462 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
5463 if (emit_debug_code()) {
5464 int frame_alignment = base::OS::ActivationFrameAlignment();
5465 int frame_alignment_mask = frame_alignment - 1;
5466 if (frame_alignment > kPointerSize) {
5467 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5468 Label alignment_as_expected;
5469 And(at, sp, Operand(frame_alignment_mask));
5470 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5471 // Don't use Check here, as it will call Runtime_Abort possibly
5472 // re-entering here.
5473 stop("Unexpected alignment in CallCFunction");
5474 bind(&alignment_as_expected);
5477 #endif // V8_HOST_ARCH_MIPS
5479 // Just call directly. The function called cannot cause a GC, or
5480 // allow preemption, so the return address in the link register
5483 if (!function.is(t9)) {
5490 int stack_passed_arguments = CalculateStackPassedWords(
5491 num_reg_arguments, num_double_arguments);
5493 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
5494 ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5496 Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5501 #undef BRANCH_ARGS_CHECK
5504 void MacroAssembler::PatchRelocatedValue(Register li_location,
5506 Register new_value) {
5507 lwu(scratch, MemOperand(li_location));
5508 // At this point scratch is a lui(at, ...) instruction.
5509 if (emit_debug_code()) {
5510 And(scratch, scratch, kOpcodeMask);
5511 Check(eq, kTheInstructionToPatchShouldBeALui,
5512 scratch, Operand(LUI));
5513 lwu(scratch, MemOperand(li_location));
5515 dsrl32(t9, new_value, 0);
5516 Ins(scratch, t9, 0, kImm16Bits);
5517 sw(scratch, MemOperand(li_location));
5519 lwu(scratch, MemOperand(li_location, kInstrSize));
5520 // scratch is now ori(at, ...).
5521 if (emit_debug_code()) {
5522 And(scratch, scratch, kOpcodeMask);
5523 Check(eq, kTheInstructionToPatchShouldBeAnOri,
5524 scratch, Operand(ORI));
5525 lwu(scratch, MemOperand(li_location, kInstrSize));
5527 dsrl(t9, new_value, kImm16Bits);
5528 Ins(scratch, t9, 0, kImm16Bits);
5529 sw(scratch, MemOperand(li_location, kInstrSize));
5531 lwu(scratch, MemOperand(li_location, kInstrSize * 3));
5532 // scratch is now ori(at, ...).
5533 if (emit_debug_code()) {
5534 And(scratch, scratch, kOpcodeMask);
5535 Check(eq, kTheInstructionToPatchShouldBeAnOri,
5536 scratch, Operand(ORI));
5537 lwu(scratch, MemOperand(li_location, kInstrSize * 3));
5540 Ins(scratch, new_value, 0, kImm16Bits);
5541 sw(scratch, MemOperand(li_location, kInstrSize * 3));
5543 // Update the I-cache so the new lui and ori can be executed.
5544 FlushICache(li_location, 4);
5547 void MacroAssembler::GetRelocatedValue(Register li_location,
5550 lwu(value, MemOperand(li_location));
5551 if (emit_debug_code()) {
5552 And(value, value, kOpcodeMask);
5553 Check(eq, kTheInstructionShouldBeALui,
5554 value, Operand(LUI));
5555 lwu(value, MemOperand(li_location));
5558 // value now holds a lui instruction. Extract the immediate.
5559 andi(value, value, kImm16Mask);
5560 dsll32(value, value, kImm16Bits);
5562 lwu(scratch, MemOperand(li_location, kInstrSize));
5563 if (emit_debug_code()) {
5564 And(scratch, scratch, kOpcodeMask);
5565 Check(eq, kTheInstructionShouldBeAnOri,
5566 scratch, Operand(ORI));
5567 lwu(scratch, MemOperand(li_location, kInstrSize));
5569 // "scratch" now holds an ori instruction. Extract the immediate.
5570 andi(scratch, scratch, kImm16Mask);
5571 dsll32(scratch, scratch, 0);
5573 or_(value, value, scratch);
5575 lwu(scratch, MemOperand(li_location, kInstrSize * 3));
5576 if (emit_debug_code()) {
5577 And(scratch, scratch, kOpcodeMask);
5578 Check(eq, kTheInstructionShouldBeAnOri,
5579 scratch, Operand(ORI));
5580 lwu(scratch, MemOperand(li_location, kInstrSize * 3));
5582 // "scratch" now holds an ori instruction. Extract the immediate.
5583 andi(scratch, scratch, kImm16Mask);
5584 dsll(scratch, scratch, kImm16Bits);
5586 or_(value, value, scratch);
5587 // Sign extend extracted address.
5588 dsra(value, value, kImm16Bits);
5592 void MacroAssembler::CheckPageFlag(
5597 Label* condition_met) {
5598 And(scratch, object, Operand(~Page::kPageAlignmentMask));
5599 ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5600 And(scratch, scratch, Operand(mask));
5601 Branch(condition_met, cc, scratch, Operand(zero_reg));
5605 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5607 Label* if_deprecated) {
5608 if (map->CanBeDeprecated()) {
5609 li(scratch, Operand(map));
5610 ld(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
5611 And(scratch, scratch, Operand(Map::Deprecated::kMask));
5612 Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5617 void MacroAssembler::JumpIfBlack(Register object,
5621 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5622 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5626 void MacroAssembler::HasColor(Register object,
5627 Register bitmap_scratch,
5628 Register mask_scratch,
5632 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5633 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5635 GetMarkBits(object, bitmap_scratch, mask_scratch);
5638 // Note that we are using a 4-byte aligned 8-byte load.
5639 Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5640 And(t8, t9, Operand(mask_scratch));
5641 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5642 // Shift left 1 by adding.
5643 Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
5644 And(t8, t9, Operand(mask_scratch));
5645 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5651 // Detect some, but not all, common pointer-free objects. This is used by the
5652 // incremental write barrier which doesn't care about oddballs (they are always
5653 // marked black immediately so this code is not hit).
5654 void MacroAssembler::JumpIfDataObject(Register value,
5656 Label* not_data_object) {
5657 DCHECK(!AreAliased(value, scratch, t8, no_reg));
5658 Label is_data_object;
5659 ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5660 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5661 Branch(&is_data_object, eq, t8, Operand(scratch));
5662 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5663 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5664 // If it's a string and it's not a cons string then it's an object containing
5666 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5667 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5668 Branch(not_data_object, ne, t8, Operand(zero_reg));
5669 bind(&is_data_object);
5673 void MacroAssembler::GetMarkBits(Register addr_reg,
5674 Register bitmap_reg,
5675 Register mask_reg) {
5676 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5677 // addr_reg is divided into fields:
5678 // |63 page base 20|19 high 8|7 shift 3|2 0|
5679 // 'high' gives the index of the cell holding color bits for the object.
5680 // 'shift' gives the offset in the cell for this object's color.
5681 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5682 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5683 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5684 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5685 dsll(t8, t8, Bitmap::kBytesPerCellLog2);
5686 Daddu(bitmap_reg, bitmap_reg, t8);
5688 dsllv(mask_reg, t8, mask_reg);
5692 void MacroAssembler::EnsureNotWhite(
5694 Register bitmap_scratch,
5695 Register mask_scratch,
5696 Register load_scratch,
5697 Label* value_is_white_and_not_data) {
5698 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5699 GetMarkBits(value, bitmap_scratch, mask_scratch);
5701 // If the value is black or grey we don't need to do anything.
5702 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5703 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5704 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5705 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5709 // Since both black and grey have a 1 in the first position and white does
5710 // not have a 1 there we only need to check one bit.
5711 // Note that we are using a 4-byte aligned 8-byte load.
5712 Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5713 And(t8, mask_scratch, load_scratch);
5714 Branch(&done, ne, t8, Operand(zero_reg));
5716 if (emit_debug_code()) {
5717 // Check for impossible bit pattern.
5719 // sll may overflow, making the check conservative.
5720 dsll(t8, mask_scratch, 1);
5721 And(t8, load_scratch, t8);
5722 Branch(&ok, eq, t8, Operand(zero_reg));
5723 stop("Impossible marking bit pattern");
5727 // Value is white. We check whether it is data that doesn't need scanning.
5728 // Currently only checks for HeapNumber and non-cons strings.
5729 Register map = load_scratch; // Holds map while checking type.
5730 Register length = load_scratch; // Holds length of object after testing type.
5731 Label is_data_object;
5733 // Check for heap-number
5734 ld(map, FieldMemOperand(value, HeapObject::kMapOffset));
5735 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5738 Branch(&skip, ne, t8, Operand(map));
5739 li(length, HeapNumber::kSize);
5740 Branch(&is_data_object);
5744 // Check for strings.
5745 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5746 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5747 // If it's a string and it's not a cons string then it's an object containing
5749 Register instance_type = load_scratch;
5750 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5751 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5752 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5753 // It's a non-indirect (non-cons and non-slice) string.
5754 // If it's external, the length is just ExternalString::kSize.
5755 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5756 // External strings are the only ones with the kExternalStringTag bit
5758 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
5759 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
5760 And(t8, instance_type, Operand(kExternalStringTag));
5763 Branch(&skip, eq, t8, Operand(zero_reg));
5764 li(length, ExternalString::kSize);
5765 Branch(&is_data_object);
5769 // Sequential string, either Latin1 or UC16.
5770 // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
5771 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5772 // getting the length multiplied by 2.
5773 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5774 DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
5775 lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset));
5776 And(t8, instance_type, Operand(kStringEncodingMask));
5779 Branch(&skip, ne, t8, Operand(zero_reg));
5780 // Adjust length for UC16.
5784 Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5785 DCHECK(!length.is(t8));
5786 And(length, length, Operand(~kObjectAlignmentMask));
5788 bind(&is_data_object);
5789 // Value is a data object, and it is white. Mark it black. Since we know
5790 // that the object is white we can make it black by flipping one bit.
5791 Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5792 Or(t8, t8, Operand(mask_scratch));
5793 Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5795 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5796 Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5797 Daddu(t8, t8, Operand(length));
5798 Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5804 void MacroAssembler::LoadInstanceDescriptors(Register map,
5805 Register descriptors) {
5806 ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5810 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5811 ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
5812 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5816 void MacroAssembler::EnumLength(Register dst, Register map) {
5817 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5818 ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
5819 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
5824 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5825 Register empty_fixed_array_value = a6;
5826 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5830 // Check if the enum length field is properly initialized, indicating that
5831 // there is an enum cache.
5832 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5836 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5841 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5843 // For all objects but the receiver, check that the cache is empty.
5845 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5849 // Check that there are no elements. Register a2 contains the current JS
5850 // object we've reached through the prototype chain.
5852 ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5853 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5855 // Second chance, the object may be using the empty slow element dictionary.
5856 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5857 Branch(call_runtime, ne, a2, Operand(at));
5860 ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5861 Branch(&next, ne, a2, Operand(null_value));
5865 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5866 DCHECK(!output_reg.is(input_reg));
5868 li(output_reg, Operand(255));
5869 // Normal branch: nop in delay slot.
5870 Branch(&done, gt, input_reg, Operand(output_reg));
5871 // Use delay slot in this branch.
5872 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5873 mov(output_reg, zero_reg); // In delay slot.
5874 mov(output_reg, input_reg); // Value is in range 0..255.
5879 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5880 DoubleRegister input_reg,
5881 DoubleRegister temp_double_reg) {
5886 Move(temp_double_reg, 0.0);
5887 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5889 // Double value is less than zero, NaN or Inf, return 0.
5890 mov(result_reg, zero_reg);
5893 // Double value is >= 255, return 255.
5895 Move(temp_double_reg, 255.0);
5896 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5897 li(result_reg, Operand(255));
5900 // In 0-255 range, round and truncate.
5902 cvt_w_d(temp_double_reg, input_reg);
5903 mfc1(result_reg, temp_double_reg);
5908 void MacroAssembler::TestJSArrayForAllocationMemento(
5909 Register receiver_reg,
5910 Register scratch_reg,
5911 Label* no_memento_found,
5913 Label* allocation_memento_present) {
5914 ExternalReference new_space_start =
5915 ExternalReference::new_space_start(isolate());
5916 ExternalReference new_space_allocation_top =
5917 ExternalReference::new_space_allocation_top_address(isolate());
5918 Daddu(scratch_reg, receiver_reg,
5919 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5920 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5921 li(at, Operand(new_space_allocation_top));
5922 ld(at, MemOperand(at));
5923 Branch(no_memento_found, gt, scratch_reg, Operand(at));
5924 ld(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5925 if (allocation_memento_present) {
5926 Branch(allocation_memento_present, cond, scratch_reg,
5927 Operand(isolate()->factory()->allocation_memento_map()));
5932 Register GetRegisterThatIsNotOneOf(Register reg1,
5939 if (reg1.is_valid()) regs |= reg1.bit();
5940 if (reg2.is_valid()) regs |= reg2.bit();
5941 if (reg3.is_valid()) regs |= reg3.bit();
5942 if (reg4.is_valid()) regs |= reg4.bit();
5943 if (reg5.is_valid()) regs |= reg5.bit();
5944 if (reg6.is_valid()) regs |= reg6.bit();
5946 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
5947 Register candidate = Register::FromAllocationIndex(i);
5948 if (regs & candidate.bit()) continue;
5956 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5961 DCHECK(!scratch1.is(scratch0));
5962 Factory* factory = isolate()->factory();
5963 Register current = scratch0;
5966 // Scratch contained elements pointer.
5967 Move(current, object);
5969 // Loop based on the map going up the prototype chain.
5971 ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
5972 lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
5973 DecodeField<Map::ElementsKindBits>(scratch1);
5974 Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
5975 ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
5976 Branch(&loop_again, ne, current, Operand(factory->null_value()));
5980 bool AreAliased(Register reg1,
5988 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
5989 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
5990 reg7.is_valid() + reg8.is_valid();
5993 if (reg1.is_valid()) regs |= reg1.bit();
5994 if (reg2.is_valid()) regs |= reg2.bit();
5995 if (reg3.is_valid()) regs |= reg3.bit();
5996 if (reg4.is_valid()) regs |= reg4.bit();
5997 if (reg5.is_valid()) regs |= reg5.bit();
5998 if (reg6.is_valid()) regs |= reg6.bit();
5999 if (reg7.is_valid()) regs |= reg7.bit();
6000 if (reg8.is_valid()) regs |= reg8.bit();
6001 int n_of_non_aliasing_regs = NumRegs(regs);
6003 return n_of_valid_regs != n_of_non_aliasing_regs;
6007 CodePatcher::CodePatcher(byte* address,
6009 FlushICache flush_cache)
6010 : address_(address),
6011 size_(instructions * Assembler::kInstrSize),
6012 masm_(NULL, address, size_ + Assembler::kGap),
6013 flush_cache_(flush_cache) {
6014 // Create a new macro assembler pointing to the address of the code to patch.
6015 // The size is adjusted with kGap on order for the assembler to generate size
6016 // bytes of instructions without failing with buffer size constraints.
6017 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6021 CodePatcher::~CodePatcher() {
6022 // Indicate that code has changed.
6023 if (flush_cache_ == FLUSH) {
6024 CpuFeatures::FlushICache(address_, size_);
6026 // Check that the code was patched as expected.
6027 DCHECK(masm_.pc_ == address_ + size_);
6028 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6032 void CodePatcher::Emit(Instr instr) {
6033 masm()->emit(instr);
6037 void CodePatcher::Emit(Address addr) {
6038 // masm()->emit(reinterpret_cast<Instr>(addr));
6042 void CodePatcher::ChangeBranchCondition(Condition cond) {
6043 Instr instr = Assembler::instr_at(masm_.pc_);
6044 DCHECK(Assembler::IsBranch(instr));
6045 uint32_t opcode = Assembler::GetOpcodeField(instr);
6046 // Currently only the 'eq' and 'ne' cond values are supported and the simple
6047 // branch instructions (with opcode being the branch type).
6048 // There are some special cases (see Assembler::IsBranch()) so extending this
6050 DCHECK(opcode == BEQ ||
6058 opcode = (cond == eq) ? BEQ : BNE;
6059 instr = (instr & ~kOpcodeMask) | opcode;
6064 void MacroAssembler::TruncatingDiv(Register result,
6067 DCHECK(!dividend.is(result));
6068 DCHECK(!dividend.is(at));
6069 DCHECK(!result.is(at));
6070 base::MagicNumbersForDivision<uint32_t> mag =
6071 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
6072 li(at, Operand(mag.multiplier));
6073 Mulh(result, dividend, Operand(at));
6074 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
6075 if (divisor > 0 && neg) {
6076 Addu(result, result, Operand(dividend));
6078 if (divisor < 0 && !neg && mag.multiplier > 0) {
6079 Subu(result, result, Operand(dividend));
6081 if (mag.shift > 0) sra(result, result, mag.shift);
6082 srl(at, dividend, 31);
6083 Addu(result, result, Operand(at));
6087 } } // namespace v8::internal
6089 #endif // V8_TARGET_ARCH_MIPS64