// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_MIPS64

#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false),
      has_double_zero_reg_set_(false) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}

void MacroAssembler::Load(Register dst,
                          const MemOperand& src,
                          Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    lb(dst, src);
  } else if (r.IsUInteger8()) {
    lbu(dst, src);
  } else if (r.IsInteger16()) {
    lh(dst, src);
  } else if (r.IsUInteger16()) {
    lhu(dst, src);
  } else if (r.IsInteger32()) {
    lw(dst, src);
  } else {
    ld(dst, src);
  }
}

void MacroAssembler::Store(Register src,
                           const MemOperand& dst,
                           Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    sb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    sh(src, dst);
  } else if (r.IsInteger32()) {
    sw(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    sd(src, dst);
  }
}

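// For illustration only (not part of the original file): with these helpers,
// a call such as Load(v0, FieldMemOperand(a0, offset),
// Representation::Integer32()) emits a single lw, while a tagged
// representation falls through to the 64-bit ld.
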
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  sd(source, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  sd(source, MemOperand(s6, index << kPointerSizeLog2));
}

// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  if (num_unsaved > 0) {
    Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}

void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sd(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ld(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  return kSafepointRegisterStackIndexMap[reg_code];
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  UNIMPLEMENTED_MIPS();
  // General purpose registers are pushed last on the stack.
  int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}

void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  DCHECK(cc == eq || cc == ne);
  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  Branch(branch, cc, scratch,
         Operand(ExternalReference::new_space_start(isolate())));
}

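// The check above relies on the new space being a single contiguous,
// size-aligned region: masking an address with new_space_mask() yields
// new_space_start() exactly for addresses inside new space. The same test as
// a C sketch (names are stand-ins, not V8 API):
//   bool in_new_space = (addr & mask) == start;
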
void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    RAStatus ra_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!AreAliased(value, dst, t8, object));
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Daddu(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              ra_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
    li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
  }
}

// Will clobber 4 registers: object, map, dst, ip. The
// register 'object' contains a heap object pointer.
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       RAStatus ra_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    DCHECK(!dst.is(at));
    ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    Check(eq,
          kWrongAddressOrValuePassedToRecordWrite,
          dst,
          Operand(isolate()->factory()->meta_map()));
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
    Check(eq,
          kWrongAddressOrValuePassedToRecordWrite,
          map,
          Operand(at));
  }

  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set. This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);

  Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, at, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
    li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}

// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    RAStatus ra_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!AreAliased(object, address, value, t8));
  DCHECK(!AreAliased(object, address, value, t9));

  if (emit_debug_code()) {
    ld(at, MemOperand(address));
    Assert(
        eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  eq,
                  &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
    li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}

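// For illustration (not part of the original file), a typical call site first
// performs the store and then emits the barrier; register names here are
// hypothetical stand-ins:
//   __ sd(value, MemOperand(address));
//   __ RecordWrite(object, address, value, kRAHasNotBeenSaved,
//                  kDontSaveFPRegs);
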
void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  li(t8, Operand(store_buffer));
  ld(scratch, MemOperand(t8));
  // Store pointer to buffer and increment buffer top.
  sd(address, MemOperand(scratch));
  Daddu(scratch, scratch, kPointerSize);
  // Write back new top of buffer.
  sd(scratch, MemOperand(t8));
  // Call stub on end of buffer.
  // Check for end of buffer.
  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  DCHECK(!scratch.is(t8));
  if (and_then == kFallThroughAtEnd) {
    Branch(&done, eq, t8, Operand(zero_reg));
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Ret(eq, t8, Operand(zero_reg));
  }
  push(ra);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(ra);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}

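// The overflow test above works because the store buffer is positioned so
// that kStoreBufferOverflowBit of the top pointer flips exactly when the
// buffer fills up; no explicit limit comparison is needed. As a C sketch
// (names are stand-ins):
//   *top++ = address;
//   if ((uintptr_t)top & kStoreBufferOverflowBit) StoreBufferOverflow();
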
// -----------------------------------------------------------------------------
// Allocation support.


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  DCHECK(!holder_reg.is(scratch));
  DCHECK(!holder_reg.is(at));
  DCHECK(!scratch.is(at));

  // Load current lexical context from the stack frame.
  ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
      scratch, Operand(zero_reg));
#endif

  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  ld(scratch, FieldMemOperand(scratch, offset));
  ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the native_context_map.
    ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
          holder_reg, Operand(at));
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  Branch(&same_contexts, eq, scratch, Operand(at));

  // Check the context is a native context.
  if (emit_debug_code()) {
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, at);  // Move at to its holding place.
    LoadRoot(at, Heap::kNullValueRootIndex);
    Check(ne, kJSGlobalProxyContextShouldNotBeNull,
          holder_reg, Operand(at));

    ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
          holder_reg, Operand(at));
    // Restore at is not needed. at is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore at to holder's context.
    ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  ld(scratch, FieldMemOperand(scratch, token_offset));
  ld(at, FieldMemOperand(at, token_offset));
  Branch(miss, ne, scratch, Operand(at));

  bind(&same_contexts);
}

// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);

  // Xor original key with a seed.
  xor_(reg0, reg0, scratch);

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  // The algorithm uses 32-bit integer values.
  nor(scratch, reg0, zero_reg);
  sll(at, reg0, 15);
  addu(reg0, scratch, at);

  // hash = hash ^ (hash >> 12);
  srl(at, reg0, 12);
  xor_(reg0, reg0, at);

  // hash = hash + (hash << 2);
  sll(at, reg0, 2);
  addu(reg0, reg0, at);

  // hash = hash ^ (hash >> 4);
  srl(at, reg0, 4);
  xor_(reg0, reg0, at);

  // hash = hash * 2057;
  sll(scratch, reg0, 11);
  sll(at, reg0, 3);
  addu(reg0, reg0, at);
  addu(reg0, reg0, scratch);

  // hash = hash ^ (hash >> 16);
  srl(at, reg0, 16);
  xor_(reg0, reg0, at);
}

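// For reference, the same mixing sequence in plain C (it mirrors the
// commented steps above; 'key' and 'seed' are stand-ins for reg0 and the
// untagged hash seed):
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;  // == hash + (hash << 3) + (hash << 11)
//   hash = hash ^ (hash >> 16);
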
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register reg0,
                                              Register reg1,
                                              Register reg2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'result'.
  //            Unchanged on bailout so 'key' or 'result' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // reg0 - holds the untagged key on entry and holds the hash once computed.
  //
  // reg1 - Used to hold the capacity mask of the dictionary.
  //
  // reg2 - Used for the index into the dictionary.
  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
  Label done;

  GetNumberHash(reg0, reg1);

  // Compute the capacity mask.
  ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  SmiUntag(reg1, reg1);
  Dsubu(reg1, reg1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Use reg2 for index calculations and keep the hash intact in reg0.
    mov(reg2, reg0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(reg2, reg2, reg1);

    // Scale the index by multiplying by the element size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    dsll(at, reg2, 1);  // 2x.
    daddu(reg2, reg2, at);  // reg2 = reg2 * 3.

    // Check if the key is identical to the name.
    dsll(at, reg2, kPointerSizeLog2);
    daddu(reg2, elements, at);

    ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
    if (i != kNumberDictionaryProbes - 1) {
      Branch(&done, eq, key, Operand(at));
    } else {
      Branch(miss, ne, key, Operand(at));
    }
  }

  bind(&done);
  // Check that the value is a field property.
  // reg2: elements + (index * kPointerSize).
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  ld(result, FieldMemOperand(reg2, kValueOffset));
}

// ---------------------------------------------------------------------------
// Instruction macros.

void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, rt.imm64_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}


void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    daddu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      daddiu(rd, rs, rt.imm64_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      daddu(rd, rs, at);
    }
  }
}


void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, -rt.imm64_);  // No subiu instr, use addiu(x, y, -imm).
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      subu(rd, rs, at);
    }
  }
}


void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    dsubu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      daddiu(rd, rs, -rt.imm64_);  // No dsubiu instr, use daddiu(x, y, -imm).
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      dsubu(rd, rs, at);
    }
  }
}

void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mul(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    mul(rd, rs, at);
  }
}


void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      mult(rs, rt.rm());
      mfhi(rd);
    } else {
      muh(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      mult(rs, at);
      mfhi(rd);
    } else {
      muh(rd, rs, at);
    }
  }
}


void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      multu(rs, rt.rm());
      mfhi(rd);
    } else {
      muhu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      multu(rs, at);
      mfhi(rd);
    } else {
      muhu(rd, rs, at);
    }
  }
}


void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant == kMips64r6) {
      dmul(rd, rs, rt.rm());
    } else {
      dmult(rs, rt.rm());
      mflo(rd);
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant == kMips64r6) {
      dmul(rd, rs, at);
    } else {
      dmult(rs, at);
      mflo(rd);
    }
  }
}


void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant == kMips64r6) {
      dmuh(rd, rs, rt.rm());
    } else {
      dmult(rs, rt.rm());
      mfhi(rd);
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant == kMips64r6) {
      dmuh(rd, rs, at);
    } else {
      dmult(rs, at);
      mfhi(rd);
    }
  }
}


void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    mult(rs, at);
  }
}


void MacroAssembler::Dmult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    dmult(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    dmult(rs, at);
  }
}


void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    multu(rs, at);
  }
}


void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    dmultu(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    dmultu(rs, at);
  }
}

void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    div(rs, at);
  }
}


void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      div(rs, rt.rm());
      mflo(res);
    } else {
      div(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      div(rs, at);
      mflo(res);
    } else {
      div(res, rs, at);
    }
  }
}


void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      div(rs, rt.rm());
      mfhi(rd);
    } else {
      mod(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      div(rs, at);
      mfhi(rd);
    } else {
      mod(rd, rs, at);
    }
  }
}


void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      divu(rs, rt.rm());
      mfhi(rd);
    } else {
      modu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      divu(rs, at);
      mfhi(rd);
    } else {
      modu(rd, rs, at);
    }
  }
}


void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    ddiv(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    ddiv(rs, at);
  }
}


void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
  if (kArchVariant != kMips64r6) {
    if (rt.is_reg()) {
      ddiv(rs, rt.rm());
      mflo(rd);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      ddiv(rs, at);
      mflo(rd);
    }
  } else {
    if (rt.is_reg()) {
      ddiv(rd, rs, rt.rm());
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      ddiv(rd, rs, at);
    }
  }
}


void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    divu(rs, at);
  }
}


void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      divu(rs, rt.rm());
      mflo(res);
    } else {
      divu(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      divu(rs, at);
      mflo(res);
    } else {
      divu(res, rs, at);
    }
  }
}


void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    ddivu(rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    ddivu(rs, at);
  }
}


void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      ddivu(rs, rt.rm());
      mflo(res);
    } else {
      ddivu(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      ddivu(rs, at);
      mflo(res);
    } else {
      ddivu(res, rs, at);
    }
  }
}


void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
  if (kArchVariant != kMips64r6) {
    if (rt.is_reg()) {
      ddiv(rs, rt.rm());
      mfhi(rd);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      ddiv(rs, at);
      mfhi(rd);
    }
  } else {
    if (rt.is_reg()) {
      dmod(rd, rs, rt.rm());
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      dmod(rd, rs, at);
    }
  }
}


void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
  if (kArchVariant != kMips64r6) {
    if (rt.is_reg()) {
      ddivu(rs, rt.rm());
      mfhi(rd);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      ddivu(rs, at);
      mfhi(rd);
    }
  } else {
    if (rt.is_reg()) {
      dmodu(rd, rs, rt.rm());
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      dmodu(rd, rs, at);
    }
  }
}

void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, rt.imm64_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}


void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, rt.imm64_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}


void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, rt.imm64_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}


void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}


void MacroAssembler::Neg(Register rs, const Operand& rt) {
  DCHECK(rt.is_reg());
  DCHECK(!at.is(rs));
  DCHECK(!at.is(rt.rm()));
  li(at, -1);
  xor_(rs, rt.rm(), at);
}


void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, rt.imm64_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}


void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      sltiu(rd, rs, rt.imm64_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}


void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    rotrv(rd, rs, rt.rm());
  } else {
    rotr(rd, rs, rt.imm64_);
  }
}


void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    drotrv(rd, rs, rt.rm());
  } else {
    drotr(rd, rs, rt.imm64_);
  }
}


void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
  pref(hint, rs);
}

// ------------Pseudo-instructions-------------

void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
  lwr(rd, rs);
  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
}


void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
  swr(rd, rs);
  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
}


// Do 64-bit load from unaligned address. Note this only handles
// the specific case of 32-bit aligned, but not 64-bit aligned.
void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
  // Assert fail if the offset from start of object IS actually aligned.
  // ONLY use with known misalignment, since there is performance cost.
  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
  // TODO(plind): endian dependency.
  lwu(rd, rs);
  lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
  dsll32(scratch, scratch, 0);
  Daddu(rd, rd, scratch);
}


// Do 64-bit store to unaligned address. Note this only handles
// the specific case of 32-bit aligned, but not 64-bit aligned.
void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
  // Assert fail if the offset from start of object IS actually aligned.
  // ONLY use with known misalignment, since there is performance cost.
  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
  // TODO(plind): endian dependency.
  sw(rd, rs);
  dsrl32(scratch, rd, 0);
  sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
}

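// The little-endian composition performed by Uld, as a C sketch (names are
// stand-ins): the 32-bit-aligned low word is zero-extended, the high word is
// shifted into the upper 32 bits, and the halves are combined. Addition and
// or are interchangeable here because the halves occupy disjoint bits:
//   uint64_t value = ((uint64_t)hi32 << 32) + (uint64_t)lo32;
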
void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
  AllowDeferredHandleDereference smi_check;
  if (value->IsSmi()) {
    li(dst, Operand(value), mode);
  } else {
    DCHECK(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
      li(dst, Operand(cell));
      ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
    } else {
      li(dst, Operand(value));
    }
  }
}


void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
  DCHECK(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
    // Normal load of an immediate value which does not need Relocation Info.
    if (is_int32(j.imm64_)) {
      if (is_int16(j.imm64_)) {
        daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
      } else if (!(j.imm64_ & kHiMask)) {
        ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
      } else if (!(j.imm64_ & kImm16Mask)) {
        lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
      } else {
        lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
        ori(rd, rd, (j.imm64_ & kImm16Mask));
      }
    } else {
      lui(rd, (j.imm64_ >> 48) & kImm16Mask);
      ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
      dsll(rd, rd, 16);
      ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
      dsll(rd, rd, 16);
      ori(rd, rd, j.imm64_ & kImm16Mask);
    }
  } else if (MustUseReg(j.rmode_)) {
    RecordRelocInfo(j.rmode_, j.imm64_);
    lui(rd, (j.imm64_ >> 32) & kImm16Mask);
    ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
    dsll(rd, rd, 16);
    ori(rd, rd, j.imm64_ & kImm16Mask);
  } else if (mode == ADDRESS_LOAD) {
    // We always need the same number of instructions as we may need to patch
    // this code to load another value which may need all 4 instructions.
    lui(rd, (j.imm64_ >> 32) & kImm16Mask);
    ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
    dsll(rd, rd, 16);
    ori(rd, rd, j.imm64_ & kImm16Mask);
  } else {
    lui(rd, (j.imm64_ >> 48) & kImm16Mask);
    ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
    dsll(rd, rd, 16);
    ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
    dsll(rd, rd, 16);
    ori(rd, rd, j.imm64_ & kImm16Mask);
  }
}

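// Worked example (illustrative, not part of the original file): with
// ADDRESS_LOAD the 48-bit constant 0x12345678ABCD is always emitted as
// exactly four patchable instructions:
//   lui(rd, 0x1234);       // rd = 0x0000000012340000
//   ori(rd, rd, 0x5678);   // rd = 0x0000000012345678
//   dsll(rd, rd, 16);      // rd = 0x0000123456780000
//   ori(rd, rd, 0xABCD);   // rd = 0x000012345678ABCD
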
void MacroAssembler::MultiPush(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sd(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sd(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      ld(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ld(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}

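// Usage note (illustrative, not from the original file): MultiPush stores the
// highest-numbered register farthest from sp, and MultiPop walks the slots in
// the opposite order, so a matching pop with the same RegList restores every
// register:
//   MultiPush(a0.bit() | a1.bit() | ra.bit());
//   // ... clobbering code ...
//   MultiPop(a0.bit() | a1.bit() | ra.bit());
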
void MacroAssembler::MultiPushFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPushReversedFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPopFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversedFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}

void MacroAssembler::FlushICache(Register address, unsigned instructions) {
  RegList saved_regs = kJSCallerSaved | ra.bit();
  MultiPush(saved_regs);
  AllowExternalCallThatCantCauseGC scope(this);

  // Save to a0 in case address == a4.
  Move(a0, address);
  PrepareCallCFunction(2, a4);

  li(a1, instructions * kInstrSize);
  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
  MultiPop(saved_regs);
}

void MacroAssembler::Ext(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size < 33);
  ext_(rt, rs, pos, size);
}


void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
                          uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size < 33);
  dext_(rt, rs, pos, size);
}


void MacroAssembler::Ins(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size <= 32);
  DCHECK(size != 0);
  ins_(rt, rs, pos, size);
}

void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              FPURegister fs,
                              FPURegister scratch) {
  // Move the data from fs to t8.
  mfc1(t8, fs);
  Cvt_d_uw(fd, t8, scratch);
}


void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              Register rs,
                              FPURegister scratch) {
  // Convert rs to a FP value in fd (and fd + 1).
  // We do this by converting rs minus the MSB to avoid sign conversion,
  // then adding 2^31 to the result (if needed).

  DCHECK(!fd.is(scratch));
  DCHECK(!rs.is(t9));
  DCHECK(!rs.is(at));

  // Save rs's MSB to t9.
  Ext(t9, rs, 31, 1);
  // Remove rs's MSB.
  Ext(at, rs, 0, 31);
  // Move the result to fd.
  mtc1(at, fd);
  mthc1(zero_reg, fd);

  // Convert fd to a real FP value.
  cvt_d_w(fd, fd);

  Label conversion_done;

  // If rs's MSB was 0, it's done.
  // Otherwise we need to add that to the FP register.
  Branch(&conversion_done, eq, t9, Operand(zero_reg));

  // Load 2^31 into f20 as its float representation.
  li(at, 0x41E00000);
  mtc1(zero_reg, scratch);
  mthc1(at, scratch);
  // Add it to fd.
  add_d(fd, fd, scratch);

  bind(&conversion_done);
}

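// The constant 0x41E00000 used above is the upper half of the IEEE-754
// double 2^31 (sign 0, biased exponent 1023 + 31 = 1054 = 0x41E, zero
// mantissa); pairing it with a zero low word via mtc1/mthc1 builds the value
// 2147483648.0 in an FPU register without a memory load.
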
void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
  round_l_d(fd, fs);
}


void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
  floor_l_d(fd, fs);
}


void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
  ceil_l_d(fd, fs);
}


void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
  trunc_l_d(fd, fs);
}


void MacroAssembler::Trunc_l_ud(FPURegister fd,
                                FPURegister fs,
                                FPURegister scratch) {
  // Load to GPR.
  dmfc1(t8, fs);
  // Reset sign bit.
  li(at, 0x7fffffffffffffff);
  and_(t8, t8, at);
  dmtc1(t8, fs);
  trunc_l_d(fd, fs);
}


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister fs,
                                FPURegister scratch) {
  Trunc_uw_d(fs, t8, scratch);
  mtc1(t8, fd);
}


void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
  trunc_w_d(fd, fs);
}


void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
  round_w_d(fd, fs);
}


void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
  floor_w_d(fd, fs);
}


void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
  ceil_w_d(fd, fs);
}


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                Register rs,
                                FPURegister scratch) {
  DCHECK(!fd.is(scratch));
  DCHECK(!rs.is(at));

  // Load 2^31 into scratch as its float representation.
  li(at, 0x41E00000);
  mtc1(zero_reg, scratch);
  mthc1(at, scratch);
  // Test if scratch > fd.
  // If fd < 2^31 we can convert it normally.
  Label simple_convert;
  BranchF(&simple_convert, NULL, lt, fd, scratch);

  // First we subtract 2^31 from fd, then trunc it to rs
  // and add 2^31 to rs.
  sub_d(scratch, fd, scratch);
  trunc_w_d(scratch, scratch);
  mfc1(rs, scratch);
  Or(rs, rs, 1 << 31);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_d(scratch, fd);
  mfc1(rs, scratch);

  bind(&done);
}

void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
                            FPURegister ft, FPURegister scratch) {
  if (0) {  // TODO(plind): find reasonable arch-variant symbol names.
    madd_d(fd, fr, fs, ft);
  } else {
    // Must not change the values of the source registers.
    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
    mul_d(scratch, fs, ft);
    add_d(fd, fr, scratch);
  }
}

void MacroAssembler::BranchF(Label* target,
                             Label* nan,
                             Condition cc,
                             FPURegister cmp1,
                             FPURegister cmp2,
                             BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (cc == al) {
    Branch(target, bd);
    return;
  }

  DCHECK(nan || target);
  // Check for unordered (NaN) cases.
  if (nan) {
    if (kArchVariant != kMips64r6) {
      c(UN, D, cmp1, cmp2);
      bc1t(nan);
    } else {
      // Use f31 for comparison result. It has to be unavailable to lithium
      // register allocator.
      DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
      cmp(UN, L, f31, cmp1, cmp2);
      bc1nez(nan, f31);
    }
  }

  if (kArchVariant != kMips64r6) {
    if (target) {
      // Here NaN cases were either handled by this function or are assumed to
      // have been handled by the caller.
      switch (cc) {
        case lt:
          c(OLT, D, cmp1, cmp2);
          bc1t(target);
          break;
        case gt:
          c(ULE, D, cmp1, cmp2);
          bc1f(target);
          break;
        case ge:
          c(ULT, D, cmp1, cmp2);
          bc1f(target);
          break;
        case le:
          c(OLE, D, cmp1, cmp2);
          bc1t(target);
          break;
        case eq:
          c(EQ, D, cmp1, cmp2);
          bc1t(target);
          break;
        case ueq:
          c(UEQ, D, cmp1, cmp2);
          bc1t(target);
          break;
        case ne:
          c(EQ, D, cmp1, cmp2);
          bc1f(target);
          break;
        case nue:
          c(UEQ, D, cmp1, cmp2);
          bc1f(target);
          break;
        default:
          CHECK(0);
      }
    }
  } else {
    if (target) {
      // Here NaN cases were either handled by this function or are assumed to
      // have been handled by the caller.
      // Unsigned conditions are treated as their signed counterpart.
      // Use f31 for comparison result, it is valid in fp64 (FR = 1) mode.
      DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
      switch (cc) {
        case lt:
          cmp(OLT, L, f31, cmp1, cmp2);
          bc1nez(target, f31);
          break;
        case gt:
          cmp(ULE, L, f31, cmp1, cmp2);
          bc1eqz(target, f31);
          break;
        case ge:
          cmp(ULT, L, f31, cmp1, cmp2);
          bc1eqz(target, f31);
          break;
        case le:
          cmp(OLE, L, f31, cmp1, cmp2);
          bc1nez(target, f31);
          break;
        case eq:
          cmp(EQ, L, f31, cmp1, cmp2);
          bc1nez(target, f31);
          break;
        case ueq:
          cmp(UEQ, L, f31, cmp1, cmp2);
          bc1nez(target, f31);
          break;
        case ne:
          cmp(EQ, L, f31, cmp1, cmp2);
          bc1eqz(target, f31);
          break;
        case nue:
          cmp(UEQ, L, f31, cmp1, cmp2);
          bc1eqz(target, f31);
          break;
        default:
          CHECK(0);
      }
    }
  }

  if (bd == PROTECT) {
    nop();
  }
}

void MacroAssembler::Move(FPURegister dst, float imm) {
  li(at, Operand(bit_cast<int32_t>(imm)));
  mtc1(at, dst);
}


void MacroAssembler::Move(FPURegister dst, double imm) {
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value_rep(imm);
  // Handle special values first.
  if (value_rep == zero && has_double_zero_reg_set_) {
    mov_d(dst, kDoubleRegZero);
  } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
    neg_d(dst, kDoubleRegZero);
  } else {
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);
    // Move the low part of the double into the lower bits of the corresponding
    // FPU register.
    if (lo != 0) {
      li(at, Operand(lo));
      mtc1(at, dst);
    } else {
      mtc1(zero_reg, dst);
    }
    // Move the high part of the double into the high bits of the corresponding
    // FPU register.
    if (hi != 0) {
      li(at, Operand(hi));
      mthc1(at, dst);
    } else {
      mthc1(zero_reg, dst);
    }
    if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
  }
}

void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
  if (kArchVariant == kMips64r6) {
    Label done;
    Branch(&done, ne, rt, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movz(rd, rs, rt);
  }
}


void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
  if (kArchVariant == kMips64r6) {
    Label done;
    Branch(&done, eq, rt, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movn(rd, rs, rt);
  }
}


void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
  movt(rd, rs, cc);
}


void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
  movf(rd, rs, cc);
}


void MacroAssembler::Clz(Register rd, Register rs) {
  clz(rd, rs);
}

void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
                                     Register result,
                                     DoubleRegister double_input,
                                     Register scratch,
                                     DoubleRegister double_scratch,
                                     Register except_flag,
                                     CheckForInexactConversion check_inexact) {
  DCHECK(!result.is(scratch));
  DCHECK(!double_input.is(double_scratch));
  DCHECK(!except_flag.is(scratch));

  Label done;

  // Clear the except flag (0 = no exception)
  mov(except_flag, zero_reg);

  // Test for values that can be exactly represented as a signed 32-bit
  // integer.
  cvt_w_d(double_scratch, double_input);
  mfc1(result, double_scratch);
  cvt_d_w(double_scratch, double_scratch);
  BranchF(&done, NULL, eq, double_input, double_scratch);

  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.

  if (check_inexact == kDontCheckForInexactConversion) {
    // Ignore inexact exceptions.
    except_mask &= ~kFCSRInexactFlagMask;
  }

  // Save FCSR.
  cfc1(scratch, FCSR);
  // Disable FPU exceptions.
  ctc1(zero_reg, FCSR);

  // Do operation based on rounding mode.
  switch (rounding_mode) {
    case kRoundToNearest:
      Round_w_d(double_scratch, double_input);
      break;
    case kRoundToZero:
      Trunc_w_d(double_scratch, double_input);
      break;
    case kRoundToPlusInf:
      Ceil_w_d(double_scratch, double_input);
      break;
    case kRoundToMinusInf:
      Floor_w_d(double_scratch, double_input);
      break;
  }  // End of switch-statement.

  // Retrieve FCSR.
  cfc1(except_flag, FCSR);
  // Restore FCSR.
  ctc1(scratch, FCSR);
  // Move the converted value into the result register.
  mfc1(result, double_scratch);

  // Check for fpu exceptions.
  And(except_flag, except_flag, Operand(except_mask));

  bind(&done);
}

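// Illustrative caller pattern (not from the original file; '&bailout' is a
// hypothetical label): the caller tests except_flag to decide whether the
// conversion succeeded without raising any of the masked FPU exceptions:
//   EmitFPUTruncate(kRoundToZero, result, input, scratch, double_scratch,
//                   except_flag, kCheckForInexactConversion);
//   Branch(&bailout, ne, except_flag, Operand(zero_reg));
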
void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
                                                DoubleRegister double_input,
                                                Label* done) {
  DoubleRegister single_scratch = kLithiumScratchDouble.low();
  Register scratch = at;
  Register scratch2 = t9;

  // Clear cumulative exception flags and save the FCSR.
  cfc1(scratch2, FCSR);
  ctc1(zero_reg, FCSR);
  // Try a conversion to a signed integer.
  trunc_w_d(single_scratch, double_input);
  mfc1(result, single_scratch);
  // Retrieve and restore the FCSR.
  cfc1(scratch, FCSR);
  ctc1(scratch2, FCSR);
  // Check for overflow and NaNs.
  And(scratch,
      scratch,
      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
  // If we had no exceptions we are done.
  Branch(done, eq, scratch, Operand(zero_reg));
}

void MacroAssembler::TruncateDoubleToI(Register result,
                                       DoubleRegister double_input) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  push(ra);
  Dsubu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
  sdc1(double_input, MemOperand(sp, 0));

  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
  CallStub(&stub);

  Daddu(sp, sp, Operand(kDoubleSize));
  pop(ra);

  bind(&done);
}


void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
  Label done;
  DoubleRegister double_scratch = f12;
  DCHECK(!result.is(object));

  ldc1(double_scratch,
       MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
  TryInlineTruncateDoubleToI(result, double_scratch, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  push(ra);
  DoubleToIStub stub(isolate(),
                     object,
                     result,
                     HeapNumber::kValueOffset - kHeapObjectTag,
                     true,
                     true);
  CallStub(&stub);
  pop(ra);

  bind(&done);
}


void MacroAssembler::TruncateNumberToI(Register object,
                                       Register result,
                                       Register heap_number_map,
                                       Register scratch,
                                       Label* not_number) {
  Label done;
  DCHECK(!result.is(object));

  UntagAndJumpIfSmi(result, object, &done);
  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
  TruncateHeapNumberToI(result, object);

  bind(&done);
}

void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  // Ext(dst, src, kSmiTagSize, num_least_bits);
  SmiUntag(dst, src);
  And(dst, dst, Operand((1 << num_least_bits) - 1));
}


void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  DCHECK(!src.is(dst));
  And(dst, src, Operand((1 << num_least_bits) - 1));
}

// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK(                        \
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||  \
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))

void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
  BranchShort(offset, bdslot);
}


void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  BranchShort(offset, cond, rs, rt, bdslot);
}


void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchShort(L, bdslot);
    } else {
      Jr(L, bdslot);
    }
  } else {
    if (is_trampoline_emitted()) {
      Jr(L, bdslot);
    } else {
      BranchShort(L, bdslot);
    }
  }
}


void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchShort(L, cond, rs, rt, bdslot);
    } else {
      if (cond != cc_always) {
        Label skip;
        Condition neg_cond = NegateCondition(cond);
        BranchShort(&skip, neg_cond, rs, rt);
        Jr(L, bdslot);
        bind(&skip);
      } else {
        Jr(L, bdslot);
      }
    }
  } else {
    if (is_trampoline_emitted()) {
      if (cond != cc_always) {
        Label skip;
        Condition neg_cond = NegateCondition(cond);
        BranchShort(&skip, neg_cond, rs, rt);
        Jr(L, bdslot);
        bind(&skip);
      } else {
        Jr(L, bdslot);
      }
    } else {
      BranchShort(L, cond, rs, rt, bdslot);
    }
  }
}


void MacroAssembler::Branch(Label* L,
                            Condition cond,
                            Register rs,
                            Heap::RootListIndex index,
                            BranchDelaySlot bdslot) {
  LoadRoot(at, index);
  Branch(L, cond, rs, Operand(at), bdslot);
}

void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
  b(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  DCHECK(!rs.is(zero_reg));
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
    // rt.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    r2 = rt.rm_;
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        beq(rs, r2, offset);
        break;
      case ne:
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          bne(rs, zero_reg, offset);
        } else {
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          b(offset);
        } else {
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          beq(rs, zero_reg, offset);
        } else {
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        if (rt.imm64_ == 0) {
          beq(rs, zero_reg, offset);
        } else {
          // We don't want any other register but scratch clobbered.
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          beq(rs, r2, offset);
        }
        break;
      case ne:
        if (rt.imm64_ == 0) {
          bne(rs, zero_reg, offset);
        } else {
          // We don't want any other register but scratch clobbered.
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          bne(rs, r2, offset);
        }
        break;
      // Signed comparison.
      case greater:
        if (rt.imm64_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm64_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm64_)) {
          slti(scratch, rs, rt.imm64_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm64_ == 0) {
          bltz(rs, offset);
        } else if (is_int16(rt.imm64_)) {
          slti(scratch, rs, rt.imm64_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm64_ == 0) {
          blez(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm64_ == 0) {
          bne(rs, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm64_ == 0) {
          b(offset);
        } else if (is_int16(rt.imm64_)) {
          sltiu(scratch, rs, rt.imm64_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm64_ == 0) {
          // No code needs to be emitted.
          return;
        } else if (is_int16(rt.imm64_)) {
          sltiu(scratch, rs, rt.imm64_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm64_ == 0) {
          beq(rs, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}

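// Register-pressure note (informational): with an immediate rt the macro
// above materializes the constant into 'at' (r2 = scratch = at), so callers
// must not pass 'at' as rs in that form; the register form is safe, as the
// NOTE inside the function states.
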
void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
  // We use branch_offset as an argument for the branch instructions to be sure
  // it is called just before generating the branch instruction, as needed.

  b(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset = 0;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    r2 = rt.rm_;
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bne(rs, zero_reg, offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          beq(rs, zero_reg, offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        if (rt.imm64_ == 0) {
          offset = shifted_branch_offset(L, false);
          beq(rs, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          offset = shifted_branch_offset(L, false);
          beq(rs, r2, offset);
        }
        break;
      case ne:
        if (rt.imm64_ == 0) {
          offset = shifted_branch_offset(L, false);
          bne(rs, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          offset = shifted_branch_offset(L, false);
          bne(rs, r2, offset);
        }
        break;
      // Signed comparison.
      case greater:
        if (rt.imm64_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm64_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else if (is_int16(rt.imm64_)) {
          slti(scratch, rs, rt.imm64_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm64_ == 0) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else if (is_int16(rt.imm64_)) {
          slti(scratch, rs, rt.imm64_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm64_ == 0) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm64_ == 0) {
          offset = shifted_branch_offset(L, false);
          bne(rs, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm64_ == 0) {
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else if (is_int16(rt.imm64_)) {
          sltiu(scratch, rs, rt.imm64_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm64_ == 0) {
          // No code needs to be emitted.
          return;
        } else if (is_int16(rt.imm64_)) {
          sltiu(scratch, rs, rt.imm64_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm64_ == 0) {
          offset = shifted_branch_offset(L, false);
          beq(rs, zero_reg, offset);
        } else {
          DCHECK(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
  // Check that offset fits into an int16_t.
  DCHECK(is_int16(offset));
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}

void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
  BranchAndLinkShort(offset, bdslot);
}


void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
                                   const Operand& rt,
                                   BranchDelaySlot bdslot) {
  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
}


void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchAndLinkShort(L, bdslot);
    } else {
      Jalr(L, bdslot);
    }
  } else {
    if (is_trampoline_emitted()) {
      Jalr(L, bdslot);
    } else {
      BranchAndLinkShort(L, bdslot);
    }
  }
}


void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
                                   const Operand& rt,
                                   BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchAndLinkShort(L, cond, rs, rt, bdslot);
    } else {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jalr(L, bdslot);
      bind(&skip);
    }
  } else {
    if (is_trampoline_emitted()) {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jalr(L, bdslot);
      bind(&skip);
    } else {
      BranchAndLinkShort(L, cond, rs, rt, bdslot);
    }
  }
}

// We need to use a bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead but we would miss overflow
// cases, so we keep slt and add an intermediate third instruction.
void MacroAssembler::BranchAndLinkShort(int16_t offset,
                                        BranchDelaySlot bdslot) {
  bal(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
                                        Register rs, const Operand& rt,
                                        BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    switch (cond) {
      case cc_always:
        bal(offset);
        break;
      case eq:
        bne(rs, r2, 2);
        nop();
        bal(offset);
        break;
      case ne:
        beq(rs, r2, 2);
        nop();
        bal(offset);
        break;

      // Signed comparison.
      case greater:
        // rs > rt
        slt(scratch, r2, rs);
        beq(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;
      case greater_equal:
        // rs >= rt
        slt(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;
      case less:
        // rs < r2
        slt(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;
      case less_equal:
        // rs <= r2
        slt(scratch, r2, rs);
        bne(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;

      // Unsigned comparison.
      case Ugreater:
        // rs > rt
        sltu(scratch, r2, rs);
        beq(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;
      case Ugreater_equal:
        // rs >= rt
        sltu(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;
      case Uless:
        // rs < r2
        sltu(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;
      case Uless_equal:
        // rs <= r2
        sltu(scratch, r2, rs);
        bne(scratch, zero_reg, 2);
        nop();
        bal(offset);
        break;

      default:
        UNREACHABLE();
    }
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}

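// The emission pattern above is uniform (illustrative summary): a branch on
// the negated condition with offset 2 lands just past the bal, skipping both
// the delay-slot nop and the link when the condition fails. For eq:
//   bne(rs, r2, 2);  // taken (rs != r2): jumps past the bal
//   nop();           // branch delay slot
//   bal(offset);     // executed, and links ra, only on fall-through
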
void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
  bal(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
                                        const Operand& rt,
                                        BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset = 0;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case eq:
        bne(rs, r2, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case ne:
        beq(rs, r2, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;

      // Signed comparison.
      case greater:
        // rs > rt
        slt(scratch, r2, rs);
        beq(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case greater_equal:
        // rs >= rt
        slt(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case less:
        // rs < r2
        slt(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case less_equal:
        // rs <= r2
        slt(scratch, r2, rs);
        bne(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;

      // Unsigned comparison.
      case Ugreater:
        // rs > rt
        sltu(scratch, r2, rs);
        beq(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case Ugreater_equal:
        // rs >= rt
        sltu(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case Uless:
        // rs < r2
        sltu(scratch, rs, r2);
        bne(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;
      case Uless_equal:
        // rs <= r2
        sltu(scratch, r2, rs);
        bne(scratch, zero_reg, 2);
        nop();
        offset = shifted_branch_offset(L, false);
        bal(offset);
        break;

      default:
        UNREACHABLE();
    }
  }
  // Check that offset fits into an int16_t.
  DCHECK(is_int16(offset));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}

void MacroAssembler::Jump(Register target,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (cond == cc_always) {
    jr(target);
  } else {
    BRANCH_ARGS_CHECK(cond, rs, rt);
    Branch(2, NegateCondition(cond), rs, rt);
    jr(target);
  }
  // Emit a nop in the branch delay slot if required.
  if (bd == PROTECT)
    nop();
}


void MacroAssembler::Jump(intptr_t target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  Label skip;
  if (cond != cc_always) {
    Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
  }
  // The first instruction of 'li' may be placed in the delay slot.
  // This is not an issue, t9 is expected to be clobbered anyway.
  li(t9, Operand(target, rmode));
  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
  bind(&skip);
}


void MacroAssembler::Jump(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
}


void MacroAssembler::Jump(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}

int MacroAssembler::CallSize(Register target,
                             Condition cond,
                             Register rs,
                             const Operand& rt,
                             BranchDelaySlot bd) {
  int size = 0;

  if (cond == cc_always) {
    size += 1;
  } else {
    size += 3;
  }

  if (bd == PROTECT)
    size += 1;

  return size * kInstrSize;
}


// Note: To call gcc-compiled C code on mips, you must call through t9.
void MacroAssembler::Call(Register target,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  if (cond == cc_always) {
    jalr(target);
  } else {
    BRANCH_ARGS_CHECK(cond, rs, rt);
    Branch(2, NegateCondition(cond), rs, rt);
    jalr(target);
  }
  // Emit a nop in the branch delay slot if required.
  if (bd == PROTECT)
    nop();

  DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
}


int MacroAssembler::CallSize(Address target,
                             RelocInfo::Mode rmode,
                             Condition cond,
                             Register rs,
                             const Operand& rt,
                             BranchDelaySlot bd) {
  int size = CallSize(t9, cond, rs, rt, bd);
  return size + 4 * kInstrSize;
}


void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  int64_t target_int = reinterpret_cast<int64_t>(target);
  // Must record previous source positions before the
  // li() generates a new code target.
  positions_recorder()->WriteRecordedPositions();
  li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
  Call(t9, cond, rs, rt, bd);
  DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
}

3009 int MacroAssembler::CallSize(Handle<Code> code,
3010 RelocInfo::Mode rmode,
3011 TypeFeedbackId ast_id,
3015 BranchDelaySlot bd) {
3016 AllowDeferredHandleDereference using_raw_address;
3017 return CallSize(reinterpret_cast<Address>(code.location()),
3018 rmode, cond, rs, rt, bd);
3022 void MacroAssembler::Call(Handle<Code> code,
3023 RelocInfo::Mode rmode,
3024 TypeFeedbackId ast_id,
3028 BranchDelaySlot bd) {
3029 BlockTrampolinePoolScope block_trampoline_pool(this);
3032 DCHECK(RelocInfo::IsCodeTarget(rmode));
3033 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3034 SetRecordedAstId(ast_id);
3035 rmode = RelocInfo::CODE_TARGET_WITH_ID;
3037 AllowDeferredHandleDereference embedding_raw_address;
3038 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3039 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3040 SizeOfCodeGeneratedSince(&start));
3044 void MacroAssembler::Ret(Condition cond,
3047 BranchDelaySlot bd) {
3048 Jump(ra, cond, rs, rt, bd);
3052 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
3053 BlockTrampolinePoolScope block_trampoline_pool(this);
3056 imm28 = jump_address(L);
3057 imm28 &= kImm28Mask;
3058 { BlockGrowBufferScope block_buf_growth(this);
3059 // Buffer growth (and relocation) must be blocked for internal references
3060 // until associated instructions are emitted and available to be patched.
3061 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3064 // Emit a nop in the branch delay slot if required.
3065 if (bdslot == PROTECT)
3066 nop();
3070 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
3071 BlockTrampolinePoolScope block_trampoline_pool(this);
3074 imm64 = jump_address(L);
3075 { BlockGrowBufferScope block_buf_growth(this);
3076 // Buffer growth (and relocation) must be blocked for internal references
3077 // until associated instructions are emitted and available to be patched.
3078 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3079 li(at, Operand(imm64), ADDRESS_LOAD);
3083 // Emit a nop in the branch delay slot if required.
3084 if (bdslot == PROTECT)
3085 nop();
3089 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
3090 BlockTrampolinePoolScope block_trampoline_pool(this);
3093 imm64 = jump_address(L);
3094 { BlockGrowBufferScope block_buf_growth(this);
3095 // Buffer growth (and relocation) must be blocked for internal references
3096 // until associated instructions are emitted and available to be patched.
3097 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3098 li(at, Operand(imm64), ADDRESS_LOAD);
3102 // Emit a nop in the branch delay slot if required.
3103 if (bdslot == PROTECT)
3104 nop();
3108 void MacroAssembler::DropAndRet(int drop) {
3109 Ret(USE_DELAY_SLOT);
3110 daddiu(sp, sp, drop * kPointerSize);
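// With USE_DELAY_SLOT, the daddiu above is emitted into the delay slot of
// the "jr ra" that Ret produces. A sketch of the resulting sequence:
//   jr     ra
//   daddiu sp, sp, drop * kPointerSize  // executes in the delay slot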
3113 void MacroAssembler::DropAndRet(int drop,
3116 const Operand& r2) {
3117 // Both Drop and Ret need to be conditional.
3119 if (cond != cc_always) {
3120 Branch(&skip, NegateCondition(cond), r1, r2);
3126 if (cond != cc_always) {
3132 void MacroAssembler::Drop(int count,
3135 const Operand& op) {
3143 Branch(&skip, NegateCondition(cond), reg, op);
3146 daddiu(sp, sp, count * kPointerSize);
3155 void MacroAssembler::Swap(Register reg1,
3158 if (scratch.is(no_reg)) {
3159 Xor(reg1, reg1, Operand(reg2));
3160 Xor(reg2, reg2, Operand(reg1));
3161 Xor(reg1, reg1, Operand(reg2));
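// The scratch-free path above is the classic three-XOR in-place swap. A
// minimal sketch (assuming reg1 and reg2 name distinct registers; if they
// aliased, the first XOR would zero the value):
//   a ^= b;  // a == A^B
//   b ^= a;  // b == B^(A^B) == A
//   a ^= b;  // a == (A^B)^A == B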
3170 void MacroAssembler::Call(Label* target) {
3171 BranchAndLink(target);
3175 void MacroAssembler::Push(Handle<Object> handle) {
3176 li(at, Operand(handle));
3177 push(at);
3181 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
3182 DCHECK(!src.is(scratch));
3184 dsrl32(src, src, 0);
3185 dsll32(src, src, 0);
3187 dsll32(scratch, scratch, 0);
3192 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
3193 DCHECK(!dst.is(scratch));
3195 dsrl32(scratch, scratch, 0);
3197 dsrl32(dst, dst, 0);
3198 dsll32(dst, dst, 0);
3199 or_(dst, dst, scratch);
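// Round-trip sketch of the two smi halves, assuming the 64-bit smi layout
// that keeps the payload in the upper 32 bits of the word (shifts logical):
//   high_smi = (value >> 32) << 32;         // upper half, already smi-shaped
//   low_smi  = value << 32;                 // lower half moved into smi slot
//   value    = high_smi | (low_smi >> 32);  // what Pop reassembles above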
3203 void MacroAssembler::DebugBreak() {
3204 PrepareCEntryArgs(0);
3205 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
3206 CEntryStub ces(isolate(), 1);
3207 DCHECK(AllowThisStubCall(&ces));
3208 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3212 // ---------------------------------------------------------------------------
3213 // Exception handling.
3215 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3216 int handler_index) {
3217 // Adjust this code if not the case.
3218 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3219 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3220 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3221 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3222 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3223 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3225 // For the JSEntry handler, we must preserve a0-a3 and s0.
3226 // a5-a7 are available. We will build up the handler from the bottom by
3227 // pushing on the stack.
3228 // Set up the code object (a5) and the state (a6) for pushing.
3230 StackHandler::IndexField::encode(handler_index) |
3231 StackHandler::KindField::encode(kind);
3232 li(a5, Operand(CodeObject()), CONSTANT_SIZE);
3233 li(a6, Operand(state));
3235 // Push the frame pointer, context, state, and code object.
3236 if (kind == StackHandler::JS_ENTRY) {
3237 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
3238 // The second zero_reg indicates no context.
3239 // The first zero_reg is the NULL frame pointer.
3240 // The operands are reversed to match the order of MultiPush/Pop.
3241 Push(zero_reg, zero_reg, a6, a5);
3243 MultiPush(a5.bit() | a6.bit() | cp.bit() | fp.bit());
3246 // Link the current handler as the next handler.
3247 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3248 ld(a5, MemOperand(a6));
3249 push(a5);
3250 // Set this new handler as the current one.
3251 sd(sp, MemOperand(a6));
3255 void MacroAssembler::PopTryHandler() {
3256 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3257 pop(a1);
3258 Daddu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
3259 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3260 sd(a1, MemOperand(at));
3264 void MacroAssembler::JumpToHandlerEntry() {
3265 // Compute the handler entry address and jump to it. The handler table is
3266 // a fixed array of (smi-tagged) code offsets.
3267 // v0 = exception, a1 = code object, a2 = state.
3268 ld(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));
3269 Daddu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3270 dsrl(a2, a2, StackHandler::kKindWidth); // Handler index.
3271 dsll(a2, a2, kPointerSizeLog2);
3272 Daddu(a2, a2, a3);
3273 ld(a2, MemOperand(a2)); // Smi-tagged offset.
3274 Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
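// In C-like terms, a sketch of the dispatch this helper performs (the table
// offsets are smi-tagged):
//   index  = state >> StackHandler::kKindWidth;
//   offset = SmiUntag(handler_table[index]);
//   goto code_start + offset;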
3281 void MacroAssembler::Throw(Register value) {
3282 // Adjust this code if not the case.
3283 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3284 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3285 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3286 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3287 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3288 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3290 // The exception is expected in v0.
3291 Move(v0, value);
3293 // Drop the stack pointer to the top of the top handler.
3294 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
3296 ld(sp, MemOperand(a3));
3298 // Restore the next handler.
3299 pop(a2);
3300 sd(a2, MemOperand(a3));
3302 // Get the code object (a1) and state (a2). Restore the context and frame
3303 // pointer.
3304 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
3306 // If the handler is a JS frame, restore the context to the frame.
3307 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
3308 // or cp.
3309 Label done;
3310 Branch(&done, eq, cp, Operand(zero_reg));
3311 sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3312 bind(&done);
3314 JumpToHandlerEntry();
3318 void MacroAssembler::ThrowUncatchable(Register value) {
3319 // Adjust this code if not the case.
3320 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3321 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3322 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3323 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3324 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3325 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3327 // The exception is expected in v0.
3328 if (!value.is(v0)) {
3329 mov(v0, value);
3330 }
3331 // Drop the stack pointer to the top of the top stack handler.
3332 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
3333 ld(sp, MemOperand(a3));
3335 // Unwind the handlers until the ENTRY handler is found.
3336 Label fetch_next, check_kind;
3337 jmp(&check_kind);
3338 bind(&fetch_next);
3339 ld(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
3341 bind(&check_kind);
3342 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
3343 ld(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
3344 And(a2, a2, Operand(StackHandler::KindField::kMask));
3345 Branch(&fetch_next, ne, a2, Operand(zero_reg));
3347 // Set the top handler address to next handler past the top ENTRY handler.
3348 pop(a2);
3349 sd(a2, MemOperand(a3));
3351 // Get the code object (a1) and state (a2). Clear the context and frame
3352 // pointer (0 was saved in the handler).
3353 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
3355 JumpToHandlerEntry();
3359 void MacroAssembler::Allocate(int object_size,
3364 AllocationFlags flags) {
3365 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3366 if (!FLAG_inline_new) {
3367 if (emit_debug_code()) {
3368 // Trash the registers to simulate an allocation failure.
3369 li(result, 0x7091);
3370 li(scratch1, 0x7191);
3371 li(scratch2, 0x7291);
3377 DCHECK(!result.is(scratch1));
3378 DCHECK(!result.is(scratch2));
3379 DCHECK(!scratch1.is(scratch2));
3380 DCHECK(!scratch1.is(t9));
3381 DCHECK(!scratch2.is(t9));
3382 DCHECK(!result.is(t9));
3384 // Make object size into bytes.
3385 if ((flags & SIZE_IN_WORDS) != 0) {
3386 object_size *= kPointerSize;
3388 DCHECK(0 == (object_size & kObjectAlignmentMask));
3390 // Check relative positions of allocation top and limit addresses.
3391 // ARM adds additional checks to make sure the ldm instruction can be
3392 // used. On MIPS we don't have ldm so we don't need additional checks either.
3393 ExternalReference allocation_top =
3394 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3395 ExternalReference allocation_limit =
3396 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3398 intptr_t top =
3399 reinterpret_cast<intptr_t>(allocation_top.address());
3400 intptr_t limit =
3401 reinterpret_cast<intptr_t>(allocation_limit.address());
3402 DCHECK((limit - top) == kPointerSize);
3404 // Set up allocation top address and object size registers.
3405 Register topaddr = scratch1;
3406 li(topaddr, Operand(allocation_top));
3408 // This code stores a temporary value in t9.
3409 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3410 // Load allocation top into result and allocation limit into t9.
3411 ld(result, MemOperand(topaddr));
3412 ld(t9, MemOperand(topaddr, kPointerSize));
3414 if (emit_debug_code()) {
3415 // Assert that result actually contains top on entry. t9 is used
3416 // immediately below, so this use of t9 does not cause a difference in
3417 // register content between debug and release mode.
3418 ld(t9, MemOperand(topaddr));
3419 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3421 // Load allocation limit into t9. Result already contains allocation top.
3422 ld(t9, MemOperand(topaddr, limit - top));
3425 DCHECK(kPointerSize == kDoubleSize);
3426 if (emit_debug_code()) {
3427 And(at, result, Operand(kDoubleAlignmentMask));
3428 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3431 // Calculate new top and bail out if new space is exhausted. Use result
3432 // to calculate the new top.
3433 Daddu(scratch2, result, Operand(object_size));
3434 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3435 sd(scratch2, MemOperand(topaddr));
3437 // Tag object if requested.
3438 if ((flags & TAG_OBJECT) != 0) {
3439 Daddu(result, result, Operand(kHeapObjectTag));
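// A C-like sketch of the fast-path bump allocation implemented above:
//   result  = *top_address;                  // current allocation top
//   new_top = result + object_size;
//   if (new_top > *limit_address) goto gc_required;
//   *top_address = new_top;                  // commit the allocation
//   if (flags & TAG_OBJECT) result += kHeapObjectTag;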
3444 void MacroAssembler::Allocate(Register object_size,
3449 AllocationFlags flags) {
3450 if (!FLAG_inline_new) {
3451 if (emit_debug_code()) {
3452 // Trash the registers to simulate an allocation failure.
3453 li(result, 0x7091);
3454 li(scratch1, 0x7191);
3455 li(scratch2, 0x7291);
3461 DCHECK(!result.is(scratch1));
3462 DCHECK(!result.is(scratch2));
3463 DCHECK(!scratch1.is(scratch2));
3464 DCHECK(!object_size.is(t9));
3465 DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3467 // Check relative positions of allocation top and limit addresses.
3468 // ARM adds additional checks to make sure the ldm instruction can be
3469 // used. On MIPS we don't have ldm so we don't need additional checks either.
3470 ExternalReference allocation_top =
3471 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3472 ExternalReference allocation_limit =
3473 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3474 intptr_t top =
3475 reinterpret_cast<intptr_t>(allocation_top.address());
3476 intptr_t limit =
3477 reinterpret_cast<intptr_t>(allocation_limit.address());
3478 DCHECK((limit - top) == kPointerSize);
3480 // Set up allocation top address and object size registers.
3481 Register topaddr = scratch1;
3482 li(topaddr, Operand(allocation_top));
3484 // This code stores a temporary value in t9.
3485 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3486 // Load allocation top into result and allocation limit into t9.
3487 ld(result, MemOperand(topaddr));
3488 ld(t9, MemOperand(topaddr, kPointerSize));
3490 if (emit_debug_code()) {
3491 // Assert that result actually contains top on entry. t9 is used
3492 // immediately below, so this use of t9 does not cause a difference in
3493 // register content between debug and release mode.
3494 ld(t9, MemOperand(topaddr));
3495 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3497 // Load allocation limit into t9. Result already contains allocation top.
3498 ld(t9, MemOperand(topaddr, limit - top));
3501 DCHECK(kPointerSize == kDoubleSize);
3502 if (emit_debug_code()) {
3503 And(at, result, Operand(kDoubleAlignmentMask));
3504 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3507 // Calculate new top and bail out if new space is exhausted. Use result
3508 // to calculate the new top. Object size may be in words so a shift is
3509 // required to get the number of bytes.
3510 if ((flags & SIZE_IN_WORDS) != 0) {
3511 dsll(scratch2, object_size, kPointerSizeLog2);
3512 Daddu(scratch2, result, scratch2);
3514 Daddu(scratch2, result, Operand(object_size));
3516 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3518 // Update allocation top. result temporarily holds the new top.
3519 if (emit_debug_code()) {
3520 And(t9, scratch2, Operand(kObjectAlignmentMask));
3521 Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3523 sd(scratch2, MemOperand(topaddr));
3525 // Tag object if requested.
3526 if ((flags & TAG_OBJECT) != 0) {
3527 Daddu(result, result, Operand(kHeapObjectTag));
3532 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3534 ExternalReference new_space_allocation_top =
3535 ExternalReference::new_space_allocation_top_address(isolate());
3537 // Make sure the object has no tag before resetting top.
3538 And(object, object, Operand(~kHeapObjectTagMask));
3540 // Check that the object un-allocated is below the current top.
3541 li(scratch, Operand(new_space_allocation_top));
3542 ld(scratch, MemOperand(scratch));
3543 Check(less, kUndoAllocationOfNonAllocatedMemory,
3544 object, Operand(scratch));
3546 // Write the address of the object to un-allocate as the current top.
3547 li(scratch, Operand(new_space_allocation_top));
3548 sd(object, MemOperand(scratch));
3552 void MacroAssembler::AllocateTwoByteString(Register result,
3557 Label* gc_required) {
3558 // Calculate the number of bytes needed for the characters in the string while
3559 // observing object alignment.
3560 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3561 dsll(scratch1, length, 1); // Length in bytes, not chars.
3562 daddiu(scratch1, scratch1,
3563 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3564 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
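// i.e. the usual round-up-to-alignment idiom, as a sketch:
//   size = (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
//          & ~kObjectAlignmentMask;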
3566 // Allocate two-byte string in new space.
3574 // Set the map, length and hash field.
3575 InitializeNewString(result,
3577 Heap::kStringMapRootIndex,
3583 void MacroAssembler::AllocateOneByteString(Register result, Register length,
3584 Register scratch1, Register scratch2,
3586 Label* gc_required) {
3587 // Calculate the number of bytes needed for the characters in the string
3588 // while observing object alignment.
3589 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3590 DCHECK(kCharSize == 1);
3591 daddiu(scratch1, length,
3592 kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3593 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3595 // Allocate one-byte string in new space.
3603 // Set the map, length and hash field.
3604 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3605 scratch1, scratch2);
3609 void MacroAssembler::AllocateTwoByteConsString(Register result,
3613 Label* gc_required) {
3614 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3615 TAG_OBJECT);
3616 InitializeNewString(result,
3618 Heap::kConsStringMapRootIndex,
3624 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3627 Label* gc_required) {
3628 Allocate(ConsString::kSize,
3629 result,
3630 scratch1,
3631 scratch2,
3632 gc_required,
3633 TAG_OBJECT);
3635 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3636 scratch1, scratch2);
3640 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3644 Label* gc_required) {
3645 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3646 TAG_OBJECT);
3648 InitializeNewString(result,
3650 Heap::kSlicedStringMapRootIndex,
3656 void MacroAssembler::AllocateOneByteSlicedString(Register result,
3660 Label* gc_required) {
3661 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3662 TAG_OBJECT);
3664 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3665 scratch1, scratch2);
3669 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3670 Label* not_unique_name) {
3671 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3672 Label succeed;
3673 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3674 Branch(&succeed, eq, at, Operand(zero_reg));
3675 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3677 bind(&succeed);
3681 // Allocates a heap number or jumps to the label if the young space is full and
3682 // a scavenge is needed.
3683 void MacroAssembler::AllocateHeapNumber(Register result,
3686 Register heap_number_map,
3688 TaggingMode tagging_mode,
3690 // Allocate an object in the heap for the heap number and tag it as a heap
3691 // object.
3692 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3693 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3695 Heap::RootListIndex map_index = mode == MUTABLE
3696 ? Heap::kMutableHeapNumberMapRootIndex
3697 : Heap::kHeapNumberMapRootIndex;
3698 AssertIsRoot(heap_number_map, map_index);
3700 // Store heap number map in the allocated object.
3701 if (tagging_mode == TAG_RESULT) {
3702 sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3704 sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3709 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3713 Label* gc_required) {
3714 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3715 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3716 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3720 // Copies a fixed number of fields of heap objects from src to dst.
3721 void MacroAssembler::CopyFields(Register dst,
3725 DCHECK((temps & dst.bit()) == 0);
3726 DCHECK((temps & src.bit()) == 0);
3727 // Primitive implementation using only one temporary register.
3729 Register tmp = no_reg;
3730 // Find a temp register in temps list.
3731 for (int i = 0; i < kNumRegisters; i++) {
3732 if ((temps & (1 << i)) != 0) {
3737 DCHECK(!tmp.is(no_reg));
3739 for (int i = 0; i < field_count; i++) {
3740 ld(tmp, FieldMemOperand(src, i * kPointerSize));
3741 sd(tmp, FieldMemOperand(dst, i * kPointerSize));
3746 void MacroAssembler::CopyBytes(Register src,
3750 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3752 // Align src before copying in word size chunks.
3753 Branch(&byte_loop, le, length, Operand(kPointerSize));
3754 bind(&align_loop_1);
3755 And(scratch, src, kPointerSize - 1);
3756 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3757 lbu(scratch, MemOperand(src));
3759 sb(scratch, MemOperand(dst));
3761 Dsubu(length, length, Operand(1));
3762 Branch(&align_loop_1, ne, length, Operand(zero_reg));
3764 // Copy bytes in word size chunks.
3766 if (emit_debug_code()) {
3767 And(scratch, src, kPointerSize - 1);
3768 Assert(eq, kExpectingAlignmentForCopyBytes,
3769 scratch, Operand(zero_reg));
3771 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3772 ld(scratch, MemOperand(src));
3773 Daddu(src, src, kPointerSize);
3775 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3776 // Can't use unaligned access - copy byte by byte.
3777 sb(scratch, MemOperand(dst, 0));
3778 dsrl(scratch, scratch, 8);
3779 sb(scratch, MemOperand(dst, 1));
3780 dsrl(scratch, scratch, 8);
3781 sb(scratch, MemOperand(dst, 2));
3782 dsrl(scratch, scratch, 8);
3783 sb(scratch, MemOperand(dst, 3));
3784 dsrl(scratch, scratch, 8);
3785 sb(scratch, MemOperand(dst, 4));
3786 dsrl(scratch, scratch, 8);
3787 sb(scratch, MemOperand(dst, 5));
3788 dsrl(scratch, scratch, 8);
3789 sb(scratch, MemOperand(dst, 6));
3790 dsrl(scratch, scratch, 8);
3791 sb(scratch, MemOperand(dst, 7));
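// The eight sb/dsrl pairs above spell out one 64-bit store a byte at a time
// in little-endian order, since classic MIPS has no unaligned sd. A sketch:
//   for (int i = 0; i < 8; i++) { dst[i] = word & 0xff; word >>= 8; }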
3794 Dsubu(length, length, Operand(kPointerSize));
3797 // Copy the last bytes if any left.
3799 Branch(&done, eq, length, Operand(zero_reg));
3801 lbu(scratch, MemOperand(src));
3803 sb(scratch, MemOperand(dst));
3805 Dsubu(length, length, Operand(1));
3806 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3811 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3812 Register end_offset,
3817 sd(filler, MemOperand(start_offset));
3818 Daddu(start_offset, start_offset, kPointerSize);
3820 Branch(&loop, lt, start_offset, Operand(end_offset));
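// Equivalent C-like sketch of the fill loop above:
//   for (Address p = start_offset; p < end_offset; p += kPointerSize)
//     *p = filler;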
3824 void MacroAssembler::CheckFastElements(Register map,
3827 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3828 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3829 STATIC_ASSERT(FAST_ELEMENTS == 2);
3830 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3831 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3832 Branch(fail, hi, scratch,
3833 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3837 void MacroAssembler::CheckFastObjectElements(Register map,
3840 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3841 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3842 STATIC_ASSERT(FAST_ELEMENTS == 2);
3843 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3844 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3845 Branch(fail, ls, scratch,
3846 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3847 Branch(fail, hi, scratch,
3848 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3852 void MacroAssembler::CheckFastSmiElements(Register map,
3855 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3856 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3857 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3858 Branch(fail, hi, scratch,
3859 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3863 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3865 Register elements_reg,
3870 int elements_offset) {
3871 Label smi_value, maybe_nan, have_double_value, is_nan, done;
3872 Register mantissa_reg = scratch2;
3873 Register exponent_reg = scratch3;
3875 // Handle smi values specially.
3876 JumpIfSmi(value_reg, &smi_value);
3878 // Ensure that the object is a heap number
3881 Heap::kHeapNumberMapRootIndex,
3885 // Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000
3886 // in the exponent.
3887 li(scratch1, Operand(kHoleNanUpper32 & HeapNumber::kExponentMask));
3888 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3889 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3891 lwu(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3893 bind(&have_double_value);
3894 // dsll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3895 dsra(scratch1, key_reg, 32 - kDoubleSizeLog2);
3896 Daddu(scratch1, scratch1, elements_reg);
3897 sw(mantissa_reg, FieldMemOperand(
3898 scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
3899 uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
3900 sizeof(kHoleNanLower32);
3901 sw(exponent_reg, FieldMemOperand(scratch1, offset));
3905 // Could be NaN, Infinity or -Infinity. If fraction is not zero, it's NaN,
3906 // otherwise it's Infinity or -Infinity, and the non-NaN code path applies.
3907 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3908 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3910 // Load canonical NaN for storing into the double array.
3911 LoadRoot(at, Heap::kNanValueRootIndex);
3912 lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
3913 lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
3914 jmp(&have_double_value);
3917 Daddu(scratch1, elements_reg,
3918 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3920 // dsll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3921 dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
3922 Daddu(scratch1, scratch1, scratch2);
3923 // scratch1 is now the effective address of the double element.
3925 Register untagged_value = elements_reg;
3926 SmiUntag(untagged_value, value_reg);
3927 mtc1(untagged_value, f2);
3928 cvt_d_w(f0, f2);
3929 sdc1(f0, MemOperand(scratch1, 0));
3934 void MacroAssembler::CompareMapAndBranch(Register obj,
3937 Label* early_success,
3940 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3941 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3945 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3947 Label* early_success,
3950 Branch(branch_to, cond, obj_map, Operand(map));
3954 void MacroAssembler::CheckMap(Register obj,
3958 SmiCheckType smi_check_type) {
3959 if (smi_check_type == DO_SMI_CHECK) {
3960 JumpIfSmi(obj, fail);
3963 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3968 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3969 Register scratch2, Handle<WeakCell> cell,
3970 Handle<Code> success,
3971 SmiCheckType smi_check_type) {
3973 if (smi_check_type == DO_SMI_CHECK) {
3974 JumpIfSmi(obj, &fail);
3976 ld(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3977 GetWeakValue(scratch2, cell);
3978 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
3983 void MacroAssembler::CheckMap(Register obj,
3985 Heap::RootListIndex index,
3987 SmiCheckType smi_check_type) {
3988 if (smi_check_type == DO_SMI_CHECK) {
3989 JumpIfSmi(obj, fail);
3991 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3992 LoadRoot(at, index);
3993 Branch(fail, ne, scratch, Operand(at));
3997 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3998 li(value, Operand(cell));
3999 ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
4003 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
4005 GetWeakValue(value, cell);
4006 JumpIfSmi(value, miss);
4010 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
4011 if (IsMipsSoftFloatABI) {
4014 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
4019 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
4020 if (IsMipsSoftFloatABI) {
4023 Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
4028 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4029 if (!IsMipsSoftFloatABI) {
4037 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4038 if (!IsMipsSoftFloatABI) {
4046 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4047 DoubleRegister src2) {
4048 if (!IsMipsSoftFloatABI) {
4049 const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
4051 DCHECK(!src1.is(fparg2));
4065 // -----------------------------------------------------------------------------
4066 // JavaScript invokes.
4068 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4069 const ParameterCount& actual,
4070 Handle<Code> code_constant,
4073 bool* definitely_mismatches,
4075 const CallWrapper& call_wrapper) {
4076 bool definitely_matches = false;
4077 *definitely_mismatches = false;
4078 Label regular_invoke;
4080 // Check whether the expected and actual arguments count match. If not,
4081 // setup registers according to contract with ArgumentsAdaptorTrampoline:
4082 // a0: actual arguments count
4083 // a1: function (passed through to callee)
4084 // a2: expected arguments count
4086 // The code below is made a lot easier because the calling code already sets
4087 // up actual and expected registers according to the contract if values are
4088 // passed in registers.
4089 DCHECK(actual.is_immediate() || actual.reg().is(a0));
4090 DCHECK(expected.is_immediate() || expected.reg().is(a2));
4091 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
4093 if (expected.is_immediate()) {
4094 DCHECK(actual.is_immediate());
4095 if (expected.immediate() == actual.immediate()) {
4096 definitely_matches = true;
4098 li(a0, Operand(actual.immediate()));
4099 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4100 if (expected.immediate() == sentinel) {
4101 // Don't worry about adapting arguments for builtins that
4102 // don't want that done. Skip adaption code by making it look
4103 // like we have a match between expected and actual number of
4104 // arguments.
4105 definitely_matches = true;
4107 *definitely_mismatches = true;
4108 li(a2, Operand(expected.immediate()));
4111 } else if (actual.is_immediate()) {
4112 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
4113 li(a0, Operand(actual.immediate()));
4115 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
4118 if (!definitely_matches) {
4119 if (!code_constant.is_null()) {
4120 li(a3, Operand(code_constant));
4121 daddiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
4124 Handle<Code> adaptor =
4125 isolate()->builtins()->ArgumentsAdaptorTrampoline();
4126 if (flag == CALL_FUNCTION) {
4127 call_wrapper.BeforeCall(CallSize(adaptor));
4128 Call(adaptor, RelocInfo::CODE_TARGET);
4129 call_wrapper.AfterCall();
4130 if (!*definitely_mismatches) {
4134 Jump(adaptor, RelocInfo::CODE_TARGET);
4136 bind(&regular_invoke);
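// Control-flow sketch of the prologue above:
//   if (counts are statically equal, or the callee opts out via
//       kDontAdaptArgumentsSentinel)        -> fall through, invoke directly;
//   else if (counts are statically unequal) -> call/jump the
//                                              ArgumentsAdaptorTrampoline;
//   else                                     -> compare at runtime and branch
//                                              to regular_invoke on a match.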
4141 void MacroAssembler::InvokeCode(Register code,
4142 const ParameterCount& expected,
4143 const ParameterCount& actual,
4145 const CallWrapper& call_wrapper) {
4146 // You can't call a function without a valid frame.
4147 DCHECK(flag == JUMP_FUNCTION || has_frame());
4151 bool definitely_mismatches = false;
4152 InvokePrologue(expected, actual, Handle<Code>::null(), code,
4153 &done, &definitely_mismatches, flag,
4155 if (!definitely_mismatches) {
4156 if (flag == CALL_FUNCTION) {
4157 call_wrapper.BeforeCall(CallSize(code));
4158 Call(code);
4159 call_wrapper.AfterCall();
4161 DCHECK(flag == JUMP_FUNCTION);
4164 // Continue here if InvokePrologue does handle the invocation due to
4165 // mismatched parameter counts.
4171 void MacroAssembler::InvokeFunction(Register function,
4172 const ParameterCount& actual,
4174 const CallWrapper& call_wrapper) {
4175 // You can't call a function without a valid frame.
4176 DCHECK(flag == JUMP_FUNCTION || has_frame());
4178 // Contract with called JS functions requires that function is passed in a1.
4179 DCHECK(function.is(a1));
4180 Register expected_reg = a2;
4181 Register code_reg = a3;
4182 ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4183 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4184 // The argument count is stored as int32_t on 64-bit platforms.
4185 // TODO(plind): Smi on 32-bit platforms.
4186 lw(expected_reg,
4187 FieldMemOperand(code_reg,
4188 SharedFunctionInfo::kFormalParameterCountOffset));
4189 ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4190 ParameterCount expected(expected_reg);
4191 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
4195 void MacroAssembler::InvokeFunction(Register function,
4196 const ParameterCount& expected,
4197 const ParameterCount& actual,
4199 const CallWrapper& call_wrapper) {
4200 // You can't call a function without a valid frame.
4201 DCHECK(flag == JUMP_FUNCTION || has_frame());
4203 // Contract with called JS functions requires that function is passed in a1.
4204 DCHECK(function.is(a1));
4206 // Get the function and setup the context.
4207 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4209 // We call indirectly through the code field in the function to
4210 // allow recompilation to take effect without changing any of the
4211 // call sites.
4212 ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4213 InvokeCode(a3, expected, actual, flag, call_wrapper);
4217 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4218 const ParameterCount& expected,
4219 const ParameterCount& actual,
4221 const CallWrapper& call_wrapper) {
4223 InvokeFunction(a1, expected, actual, flag, call_wrapper);
4227 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
4231 ld(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
4232 IsInstanceJSObjectType(map, scratch, fail);
4236 void MacroAssembler::IsInstanceJSObjectType(Register map,
4239 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
4240 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
4241 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
4245 void MacroAssembler::IsObjectJSStringType(Register object,
4248 DCHECK(kNotStringTag != 0);
4250 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4251 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4252 And(scratch, scratch, Operand(kIsNotStringMask));
4253 Branch(fail, ne, scratch, Operand(zero_reg));
4257 void MacroAssembler::IsObjectNameType(Register object,
4260 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4261 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4262 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4266 // ---------------------------------------------------------------------------
4267 // Support functions.
4270 void MacroAssembler::TryGetFunctionPrototype(Register function,
4274 bool miss_on_bound_function) {
4276 if (miss_on_bound_function) {
4277 // Check that the receiver isn't a smi.
4278 JumpIfSmi(function, miss);
4280 // Check that the function really is a function. Load map into result reg.
4281 GetObjectType(function, result, scratch);
4282 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
4284 ld(scratch,
4285 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
4286 lwu(scratch,
4287 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
4288 And(scratch, scratch,
4289 Operand(1 << SharedFunctionInfo::kBoundFunction));
4290 Branch(miss, ne, scratch, Operand(zero_reg));
4292 // Make sure that the function has an instance prototype.
4293 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
4294 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
4295 Branch(&non_instance, ne, scratch, Operand(zero_reg));
4298 // Get the prototype or initial map from the function.
4299 ld(result,
4300 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4302 // If the prototype or initial map is the hole, don't return it and
4303 // simply miss the cache instead. This will allow us to allocate a
4304 // prototype object on-demand in the runtime system.
4305 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
4306 Branch(miss, eq, result, Operand(t8));
4308 // If the function does not have an initial map, we're done.
4310 GetObjectType(result, scratch, scratch);
4311 Branch(&done, ne, scratch, Operand(MAP_TYPE));
4313 // Get the prototype from the initial map.
4314 ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
4316 if (miss_on_bound_function) {
4319 // Non-instance prototype: Fetch prototype from constructor field
4321 bind(&non_instance);
4322 ld(result, FieldMemOperand(result, Map::kConstructorOffset));
4330 void MacroAssembler::GetObjectType(Register object,
4332 Register type_reg) {
4333 ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
4334 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4338 // -----------------------------------------------------------------------------
4341 void MacroAssembler::CallStub(CodeStub* stub,
4342 TypeFeedbackId ast_id,
4346 BranchDelaySlot bd) {
4347 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
4348 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4349 cond, r1, r2, bd);
4353 void MacroAssembler::TailCallStub(CodeStub* stub,
4357 BranchDelaySlot bd) {
4358 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4362 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4363 return has_frame_ || !stub->SometimesSetsUpAFrame();
4367 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4368 // If the hash field contains an array index pick it out. The assert checks
4369 // that the constants for the maximum number of digits for an array index
4370 // cached in the hash field and the number of bits reserved for it does not
4372 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4373 (1 << String::kArrayIndexValueBits));
4374 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
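// Decode sketch, assuming the cached index occupies
// String::ArrayIndexValueBits of the hash word:
//   index = SmiTag((hash >> ArrayIndexValueBits::kShift) &
//                  ((1 << String::kArrayIndexValueBits) - 1));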
4378 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4382 Register heap_number_map,
4384 ObjectToDoubleFlags flags) {
4386 if ((flags & OBJECT_NOT_SMI) == 0) {
4388 JumpIfNotSmi(object, &not_smi);
4389 // Remove smi tag and convert to double.
4390 // dsra(scratch1, object, kSmiTagSize);
4391 dsra32(scratch1, object, 0);
4392 mtc1(scratch1, result);
4393 cvt_d_w(result, result);
4397 // Check for heap number and load double value from it.
4398 ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4399 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4401 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4402 // If exponent is all ones the number is either a NaN or +/-Infinity.
4403 Register exponent = scratch1;
4404 Register mask_reg = scratch2;
4405 lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4406 li(mask_reg, HeapNumber::kExponentMask);
4408 And(exponent, exponent, mask_reg);
4409 Branch(not_number, eq, exponent, Operand(mask_reg));
4411 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4416 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4418 Register scratch1) {
4419 // dsra(scratch1, smi, kSmiTagSize);
4420 dsra32(scratch1, smi, 0);
4421 mtc1(scratch1, value);
4422 cvt_d_w(value, value);
4426 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4427 const Operand& right,
4428 Register overflow_dst,
4430 if (right.is_reg()) {
4431 AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4434 mov(scratch, left); // Preserve left.
4435 daddiu(dst, left, right.immediate()); // Left is overwritten.
4436 xor_(scratch, dst, scratch); // Original left.
4437 // Load right since xori takes uint16 as immediate.
4438 daddiu(t9, zero_reg, right.immediate());
4439 xor_(overflow_dst, dst, t9);
4440 and_(overflow_dst, overflow_dst, scratch);
4442 daddiu(dst, left, right.immediate());
4443 xor_(overflow_dst, dst, left);
4444 // Load right since xori takes uint16 as immediate.
4445 daddiu(t9, zero_reg, right.immediate());
4446 xor_(scratch, dst, t9);
4447 and_(overflow_dst, scratch, overflow_dst);
4453 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4456 Register overflow_dst,
4458 DCHECK(!dst.is(overflow_dst));
4459 DCHECK(!dst.is(scratch));
4460 DCHECK(!overflow_dst.is(scratch));
4461 DCHECK(!overflow_dst.is(left));
4462 DCHECK(!overflow_dst.is(right));
4464 if (left.is(right) && dst.is(left)) {
4465 DCHECK(!dst.is(t9));
4466 DCHECK(!scratch.is(t9));
4467 DCHECK(!left.is(t9));
4468 DCHECK(!right.is(t9));
4469 DCHECK(!overflow_dst.is(t9));
4475 mov(scratch, left); // Preserve left.
4476 daddu(dst, left, right); // Left is overwritten.
4477 xor_(scratch, dst, scratch); // Original left.
4478 xor_(overflow_dst, dst, right);
4479 and_(overflow_dst, overflow_dst, scratch);
4480 } else if (dst.is(right)) {
4481 mov(scratch, right); // Preserve right.
4482 daddu(dst, left, right); // Right is overwritten.
4483 xor_(scratch, dst, scratch); // Original right.
4484 xor_(overflow_dst, dst, left);
4485 and_(overflow_dst, overflow_dst, scratch);
4487 daddu(dst, left, right);
4488 xor_(overflow_dst, dst, left);
4489 xor_(scratch, dst, right);
4490 and_(overflow_dst, scratch, overflow_dst);
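// Sign-bit sketch of the check above (two's-complement wraparound assumed):
//   int64_t sum = left + right;
//   bool overflow = ((sum ^ left) & (sum ^ right)) < 0;
// Addition can only overflow when both operands share a sign, and then
// exactly when the sum's sign differs from it; the XOR/AND sequence leaves
// that condition in the sign bit of overflow_dst.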
4495 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4496 const Operand& right,
4497 Register overflow_dst,
4499 if (right.is_reg()) {
4500 SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4503 mov(scratch, left); // Preserve left.
4504 daddiu(dst, left, -(right.immediate())); // Left is overwritten.
4505 xor_(overflow_dst, dst, scratch); // scratch is original left.
4506 // Load right since xori takes uint16 as immediate.
4507 daddiu(t9, zero_reg, right.immediate());
4508 xor_(scratch, scratch, t9); // scratch is original left.
4509 and_(overflow_dst, scratch, overflow_dst);
4511 daddiu(dst, left, -(right.immediate()));
4512 xor_(overflow_dst, dst, left);
4513 // Load right since xori takes uint16 as immediate.
4514 daddiu(t9, zero_reg, right.immediate());
4515 xor_(scratch, left, t9);
4516 and_(overflow_dst, scratch, overflow_dst);
4522 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4525 Register overflow_dst,
4527 DCHECK(!dst.is(overflow_dst));
4528 DCHECK(!dst.is(scratch));
4529 DCHECK(!overflow_dst.is(scratch));
4530 DCHECK(!overflow_dst.is(left));
4531 DCHECK(!overflow_dst.is(right));
4532 DCHECK(!scratch.is(left));
4533 DCHECK(!scratch.is(right));
4535 // This happens with some crankshaft code. Since Subu works fine if
4536 // left == right, let's not make that restriction here.
4537 if (left.is(right)) {
4539 mov(overflow_dst, zero_reg);
4544 mov(scratch, left); // Preserve left.
4545 dsubu(dst, left, right); // Left is overwritten.
4546 xor_(overflow_dst, dst, scratch); // scratch is original left.
4547 xor_(scratch, scratch, right); // scratch is original left.
4548 and_(overflow_dst, scratch, overflow_dst);
4549 } else if (dst.is(right)) {
4550 mov(scratch, right); // Preserve right.
4551 dsubu(dst, left, right); // Right is overwritten.
4552 xor_(overflow_dst, dst, left);
4553 xor_(scratch, left, scratch); // Original right.
4554 and_(overflow_dst, scratch, overflow_dst);
4556 dsubu(dst, left, right);
4557 xor_(overflow_dst, dst, left);
4558 xor_(scratch, left, right);
4559 and_(overflow_dst, scratch, overflow_dst);
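// For subtraction the operands must differ in sign for overflow to be
// possible, so the sketch becomes:
//   int64_t diff = left - right;
//   bool overflow = ((diff ^ left) & (left ^ right)) < 0;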
4564 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4566 SaveFPRegsMode save_doubles) {
4567 // All parameters are on the stack. v0 has the return value after call.
4569 // If the expected number of arguments of the runtime function is
4570 // constant, we check that the actual number of arguments match the
4572 CHECK(f->nargs < 0 || f->nargs == num_arguments);
4574 // TODO(1236192): Most runtime routines don't need the number of
4575 // arguments passed in because it is constant. At some point we
4576 // should remove this need and make the runtime routine entry code
4577 // smarter.
4578 PrepareCEntryArgs(num_arguments);
4579 PrepareCEntryFunction(ExternalReference(f, isolate()));
4580 CEntryStub stub(isolate(), 1, save_doubles);
4581 CallStub(&stub);
4585 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4587 BranchDelaySlot bd) {
4588 PrepareCEntryArgs(num_arguments);
4589 PrepareCEntryFunction(ext);
4591 CEntryStub stub(isolate(), 1);
4592 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4596 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4599 // TODO(1236192): Most runtime routines don't need the number of
4600 // arguments passed in because it is constant. At some point we
4601 // should remove this need and make the runtime routine entry code
4602 // smarter.
4603 PrepareCEntryArgs(num_arguments);
4604 JumpToExternalReference(ext);
4608 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4611 TailCallExternalReference(ExternalReference(fid, isolate()),
4617 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4618 BranchDelaySlot bd) {
4619 PrepareCEntryFunction(builtin);
4620 CEntryStub stub(isolate(), 1);
4621 Jump(stub.GetCode(),
4622 RelocInfo::CODE_TARGET,
4623 al,
4624 zero_reg,
4625 Operand(zero_reg),
4626 bd);
4630 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4632 const CallWrapper& call_wrapper) {
4633 // You can't call a builtin without a valid frame.
4634 DCHECK(flag == JUMP_FUNCTION || has_frame());
4636 GetBuiltinEntry(t9, id);
4637 if (flag == CALL_FUNCTION) {
4638 call_wrapper.BeforeCall(CallSize(t9));
4639 Call(t9);
4640 call_wrapper.AfterCall();
4642 DCHECK(flag == JUMP_FUNCTION);
4648 void MacroAssembler::GetBuiltinFunction(Register target,
4649 Builtins::JavaScript id) {
4650 // Load the builtins object into target register.
4651 ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4652 ld(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4653 // Load the JavaScript builtin function from the builtins object.
4654 ld(target, FieldMemOperand(target,
4655 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4659 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4660 DCHECK(!target.is(a1));
4661 GetBuiltinFunction(a1, id);
4662 // Load the code entry point from the builtins object.
4663 ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4667 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4668 Register scratch1, Register scratch2) {
4669 if (FLAG_native_code_counters && counter->Enabled()) {
4670 li(scratch1, Operand(value));
4671 li(scratch2, Operand(ExternalReference(counter)));
4672 sd(scratch1, MemOperand(scratch2));
4677 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4678 Register scratch1, Register scratch2) {
4680 if (FLAG_native_code_counters && counter->Enabled()) {
4681 li(scratch2, Operand(ExternalReference(counter)));
4682 ld(scratch1, MemOperand(scratch2));
4683 Daddu(scratch1, scratch1, Operand(value));
4684 sd(scratch1, MemOperand(scratch2));
4689 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4690 Register scratch1, Register scratch2) {
4692 if (FLAG_native_code_counters && counter->Enabled()) {
4693 li(scratch2, Operand(ExternalReference(counter)));
4694 ld(scratch1, MemOperand(scratch2));
4695 Dsubu(scratch1, scratch1, Operand(value));
4696 sd(scratch1, MemOperand(scratch2));
4701 // -----------------------------------------------------------------------------
4704 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4705 Register rs, Operand rt) {
4706 if (emit_debug_code())
4707 Check(cc, reason, rs, rt);
4711 void MacroAssembler::AssertFastElements(Register elements) {
4712 if (emit_debug_code()) {
4713 DCHECK(!elements.is(at));
4714 Label ok;
4715 push(elements);
4716 ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4717 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4718 Branch(&ok, eq, elements, Operand(at));
4719 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4720 Branch(&ok, eq, elements, Operand(at));
4721 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4722 Branch(&ok, eq, elements, Operand(at));
4723 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4724 bind(&ok);
4725 pop(elements);
4730 void MacroAssembler::Check(Condition cc, BailoutReason reason,
4731 Register rs, Operand rt) {
4732 Label L;
4733 Branch(&L, cc, rs, rt);
4734 Abort(reason);
4735 // Will not return here.
4736 bind(&L);
4740 void MacroAssembler::Abort(BailoutReason reason) {
4744 const char* msg = GetBailoutReason(reason);
4746 RecordComment("Abort message: ");
4750 if (FLAG_trap_on_abort) {
4756 li(a0, Operand(Smi::FromInt(reason)));
4758 // Disable stub call restrictions to always allow calls to abort.
4760 // We don't actually want to generate a pile of code for this, so just
4761 // claim there is a stack frame, without generating one.
4762 FrameScope scope(this, StackFrame::NONE);
4763 CallRuntime(Runtime::kAbort, 1);
4764 } else {
4765 CallRuntime(Runtime::kAbort, 1);
4767 // Will not return here.
4768 if (is_trampoline_pool_blocked()) {
4769 // If the calling code cares about the exact number of
4770 // instructions generated, we insert padding here to keep the size
4771 // of the Abort macro constant.
4772 // Currently in debug mode with debug_code enabled the number of
4773 // generated instructions is 10, so we use this as a maximum value.
4774 static const int kExpectedAbortInstructions = 10;
4775 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4776 DCHECK(abort_instructions <= kExpectedAbortInstructions);
4777 while (abort_instructions++ < kExpectedAbortInstructions) {
4784 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4785 if (context_chain_length > 0) {
4786 // Move up the chain of contexts to the context containing the slot.
4787 ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4788 for (int i = 1; i < context_chain_length; i++) {
4789 ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4792 // Slot is in the current function context. Move it into the
4793 // destination register in case we store into it (the write barrier
4794 // cannot be allowed to destroy the context in cp).
4795 mov(dst, cp);
4800 void MacroAssembler::LoadTransitionedArrayMapConditional(
4801 ElementsKind expected_kind,
4802 ElementsKind transitioned_kind,
4803 Register map_in_out,
4805 Label* no_map_match) {
4806 // Load the global or builtins object from the current context.
4807 ld(scratch,
4808 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4809 ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4811 // Check that the function's map is the same as the expected cached map.
4812 ld(scratch,
4813 MemOperand(scratch,
4814 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4815 size_t offset = expected_kind * kPointerSize +
4816 FixedArrayBase::kHeaderSize;
4817 ld(at, FieldMemOperand(scratch, offset));
4818 Branch(no_map_match, ne, map_in_out, Operand(at));
4820 // Use the transitioned cached map.
4821 offset = transitioned_kind * kPointerSize +
4822 FixedArrayBase::kHeaderSize;
4823 ld(map_in_out, FieldMemOperand(scratch, offset));
4827 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4828 // Load the global or builtins object from the current context.
4829 ld(function,
4830 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4831 // Load the native context from the global or builtins object.
4832 ld(function, FieldMemOperand(function,
4833 GlobalObject::kNativeContextOffset));
4834 // Load the function from the native context.
4835 ld(function, MemOperand(function, Context::SlotOffset(index)));
4839 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4842 // Load the initial map. The global functions all have initial maps.
4843 ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4844 if (emit_debug_code()) {
4846 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4849 Abort(kGlobalFunctionsMustHaveInitialMap);
4855 void MacroAssembler::StubPrologue() {
4856 Push(ra, fp, cp);
4857 Push(Smi::FromInt(StackFrame::STUB));
4858 // Adjust FP to point to saved FP.
4859 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4863 void MacroAssembler::Prologue(bool code_pre_aging) {
4864 PredictableCodeSizeScope predictable_code_size_scope(
4865 this, kNoCodeAgeSequenceLength);
4866 // The following three instructions must remain together and unmodified
4867 // for code aging to work properly.
4868 if (code_pre_aging) {
4869 // Pre-age the code.
4870 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4871 nop(Assembler::CODE_AGE_MARKER_NOP);
4872 // Load the stub address to t9 and call it;
4873 // GetCodeAgeAndParity() extracts the stub address from this instruction.
4874 li(t9,
4875 Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
4876 ADDRESS_LOAD);
4877 nop(); // Prevent jalr to jal optimization.
4878 jalr(t9, a0);
4879 nop(); // Branch delay slot nop.
4880 nop(); // Pad the empty space.
4882 Push(ra, fp, cp, a1);
4883 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4884 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4885 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4886 // Adjust fp to point to caller's fp.
4887 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4892 void MacroAssembler::EnterFrame(StackFrame::Type type,
4893 bool load_constant_pool_pointer_reg) {
4894 // Out-of-line constant pool not implemented on mips64.
4899 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4900 daddiu(sp, sp, -5 * kPointerSize);
4901 li(t8, Operand(Smi::FromInt(type)));
4902 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4903 sd(ra, MemOperand(sp, 4 * kPointerSize));
4904 sd(fp, MemOperand(sp, 3 * kPointerSize));
4905 sd(cp, MemOperand(sp, 2 * kPointerSize));
4906 sd(t8, MemOperand(sp, 1 * kPointerSize));
4907 sd(t9, MemOperand(sp, 0 * kPointerSize));
4908 // Adjust FP to point to saved FP.
4910 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4914 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4915 mov(sp, fp);
4916 ld(fp, MemOperand(sp, 0 * kPointerSize));
4917 ld(ra, MemOperand(sp, 1 * kPointerSize));
4918 daddiu(sp, sp, 2 * kPointerSize);
4922 void MacroAssembler::EnterExitFrame(bool save_doubles,
4924 // Set up the frame structure on the stack.
4925 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4926 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4927 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4929 // This is how the stack will look:
4930 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4931 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4932 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4933 // [fp - 1 (==kSPOffset)] - sp of the called function
4934 // [fp - 2 (==kCodeOffset)] - CodeObject
4935 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4936 // new stack (will contain saved ra)
4939 daddiu(sp, sp, -4 * kPointerSize);
4940 sd(ra, MemOperand(sp, 3 * kPointerSize));
4941 sd(fp, MemOperand(sp, 2 * kPointerSize));
4942 daddiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4944 if (emit_debug_code()) {
4945 sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4948 // Accessed from ExitFrame::code_slot.
4949 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4950 sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4952 // Save the frame pointer and the context in top.
4953 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4954 sd(fp, MemOperand(t8));
4955 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4956 sd(cp, MemOperand(t8));
4958 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4960 // The stack is already aligned to 0 modulo 8 for stores with sdc1.
4961 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
4962 int space = kNumOfSavedRegisters * kDoubleSize;
4963 Dsubu(sp, sp, Operand(space));
4964 // Remember: we only need to save every 2nd double FPU value.
4965 for (int i = 0; i < kNumOfSavedRegisters; i++) {
4966 FPURegister reg = FPURegister::from_code(2 * i);
4967 sdc1(reg, MemOperand(sp, i * kDoubleSize));
4971 // Reserve place for the return address, stack space and an optional slot
4972 // (used by the DirectCEntryStub to hold the return value if a struct is
4973 // returned) and align the frame preparing for calling the runtime function.
4974 DCHECK(stack_space >= 0);
4975 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4976 if (frame_alignment > 0) {
4977 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4978 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4981 // Set the exit frame sp value to point just before the return address
4982 // location.
4983 daddiu(at, sp, kPointerSize);
4984 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool restore_context, bool do_return,
                                    bool argument_count_is_length) {
  // Optionally restore all double registers.
  if (save_doubles) {
    // Remember: we only need to restore every 2nd double FPU value.
    int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
    Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize +
                          kNumOfSavedRegisters * kDoubleSize));
    for (int i = 0; i < kNumOfSavedRegisters; i++) {
      FPURegister reg = FPURegister::from_code(2 * i);
      ldc1(reg, MemOperand(t8, i * kDoubleSize));
    }
  }

  // Clear top frame.
  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  sd(zero_reg, MemOperand(t8));

  // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    ld(cp, MemOperand(t8));
  }
#ifdef DEBUG
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  sd(a3, MemOperand(t8));
#endif

  // Pop the arguments, restore registers, and return.
  mov(sp, fp);  // Respect ABI stack constraint.
  ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
  ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));

  if (argument_count.is_valid()) {
    if (argument_count_is_length) {
      daddu(sp, sp, argument_count);
    } else {
      dsll(t8, argument_count, kPointerSizeLog2);
      daddu(sp, sp, t8);
    }
  }

  if (do_return) {
    Ret(USE_DELAY_SLOT);
    // If returning, the instruction in the delay slot will be the daddiu
    // below.
  }
  daddiu(sp, sp, 2 * kPointerSize);
}

void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  // dsll(scratch1, length, kSmiTagSize);
  dsll32(scratch1, length, 0);
  LoadRoot(scratch2, map_index);
  sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
  li(scratch1, Operand(String::kEmptyHashField));
  sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}

int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one Mips
  // platform for another Mips platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_MIPS
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_MIPS
}

void MacroAssembler::AssertStackIsAligned() {
  if (emit_debug_code()) {
    const int frame_alignment = ActivationFrameAlignment();
    const int frame_alignment_mask = frame_alignment - 1;

    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      andi(at, sp, frame_alignment_mask);
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort re-entering here.
      stop("Unexpected stack alignment");
      bind(&alignment_as_expected);
    }
  }
}

void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg,
    Register scratch,
    Label* not_power_of_two_or_zero) {
  Dsubu(scratch, reg, Operand(1));
  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
         scratch, Operand(zero_reg));
  and_(at, scratch, reg);  // In the delay slot.
  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
}

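// The check above relies on the identity that, for x > 0, x is a power of two
// iff (x & (x - 1)) == 0; e.g. 8 & 7 == 0, while 6 & 5 == 4. The lt branch
// also catches x == 0, where x - 1 wraps to -1 (negative).
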
void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
  DCHECK(!reg.is(overflow));
  mov(overflow, reg);  // Save original value.
  SmiTag(reg);
  xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
}

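// The xor-based overflow test above works because tagging shifts the value
// left; the xor is negative iff the sign bit of the tagged value differs from
// the sign bit of the original, i.e. iff the value did not fit in a Smi.
// E.g. for 31-bit Smis, tagging 0x40000000 yields 0x80000000: the sign bit
// flipped, so (value ^ tagged) < 0 flags the overflow.
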
void MacroAssembler::SmiTagCheckOverflow(Register dst,
                                         Register src,
                                         Register overflow) {
  if (dst.is(src)) {
    // Fall back to slower case.
    SmiTagCheckOverflow(dst, overflow);
  } else {
    DCHECK(!dst.is(src));
    DCHECK(!dst.is(overflow));
    DCHECK(!src.is(overflow));
    SmiTag(dst, src);
    xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
  }
}

void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
  if (SmiValuesAre32Bits()) {
    lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
  } else {
    lw(dst, src);
    SmiUntag(dst);
  }
}

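// With 32-bit Smi values, the payload lives in the upper half of the 64-bit
// slot (e.g. Smi 5 is stored as 0x0000000500000000), so UntagSmiMemOperand
// simply offsets the address to the 32-bit half holding the payload and a
// plain lw both loads and untags in one instruction.
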
void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
  if (SmiValuesAre32Bits()) {
    // TODO(plind): not clear if lw or ld is faster here; needs a
    // micro-benchmark.
    lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
    dsll(dst, dst, scale);
  } else {
    lw(dst, src);
    DCHECK(scale >= kSmiTagSize);
    sll(dst, dst, scale - kSmiTagSize);
  }
}

// Returns 2 values: the Smi and a scaled version of the int within the Smi.
void MacroAssembler::SmiLoadWithScale(Register d_smi,
                                      Register d_scaled,
                                      MemOperand src,
                                      int scale) {
  if (SmiValuesAre32Bits()) {
    ld(d_smi, src);
    dsra(d_scaled, d_smi, kSmiShift - scale);
  } else {
    lw(d_smi, src);
    DCHECK(scale >= kSmiTagSize);
    sll(d_scaled, d_smi, scale - kSmiTagSize);
  }
}

// Returns 2 values: the untagged Smi (int32) and scaled version of that int.
void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
                                           Register d_scaled,
                                           MemOperand src,
                                           int scale) {
  if (SmiValuesAre32Bits()) {
    lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
    dsll(d_scaled, d_int, scale);
  } else {
    lw(d_int, src);
    // Need both the int and the scaled int, so use two instructions.
    SmiUntag(d_int);
    sll(d_scaled, d_int, scale);
  }
}

void MacroAssembler::UntagAndJumpIfSmi(Register dst,
                                       Register src,
                                       Label* smi_case) {
  // DCHECK(!dst.is(src));
  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
  SmiUntag(dst, src);
}

void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
                                          Register src,
                                          Label* non_smi_case) {
  // DCHECK(!dst.is(src));
  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
  SmiUntag(dst, src);
}

void MacroAssembler::JumpIfSmi(Register value,
                               Label* smi_label,
                               Register scratch,
                               BranchDelaySlot bd) {
  DCHECK_EQ(0, kSmiTag);
  andi(scratch, value, kSmiTagMask);
  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}

void MacroAssembler::JumpIfNotSmi(Register value,
                                  Label* not_smi_label,
                                  Register scratch,
                                  BranchDelaySlot bd) {
  DCHECK_EQ(0, kSmiTag);
  andi(scratch, value, kSmiTagMask);
  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}

void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  // TODO(plind): Find some better way to fix this assert issue.
#if defined(__APPLE__)
  DCHECK_EQ(1, kSmiTagMask);
#else
  DCHECK_EQ((int64_t)1, kSmiTagMask);
#endif
  or_(at, reg1, reg2);
  JumpIfNotSmi(at, on_not_both_smi);
}

void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  // TODO(plind): Find some better way to fix this assert issue.
#if defined(__APPLE__)
  DCHECK_EQ(1, kSmiTagMask);
#else
  DCHECK_EQ((int64_t)1, kSmiTagMask);
#endif
  // Both Smi tags must be 1 (not Smi).
  and_(at, reg1, reg2);
  JumpIfSmi(at, on_either_smi);
}

void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    andi(at, object, kSmiTagMask);
    Check(ne, kOperandIsASmi, at, Operand(zero_reg));
  }
}

void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    andi(at, object, kSmiTagMask);
    Check(eq, kOperandIsASmi, at, Operand(zero_reg));
  }
}

void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, a4);
    Check(ne, kOperandIsASmiAndNotAString, a4, Operand(zero_reg));
    push(object);
    ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
    Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
    pop(object);
  }
}

void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, a4);
    Check(ne, kOperandIsASmiAndNotAName, a4, Operand(zero_reg));
    push(object);
    ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
    Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
    pop(object);
  }
}

void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
    Branch(&done_checking, eq, object, Operand(scratch));
    push(object);
    ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
    LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
    pop(object);
    bind(&done_checking);
  }
}

void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
  if (emit_debug_code()) {
    DCHECK(!reg.is(at));
    LoadRoot(at, index);
    Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
  }
}

void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}

void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
                                             Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  ld(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
  // dsra(mask, mask, kSmiTagSize + 1);
  dsra32(mask, mask, 1);
  Daddu(mask, mask, -1);  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;
  JumpIfSmi(object, &is_smi);
  CheckMap(object,
           scratch1,
           Heap::kHeapNumberMapRootIndex,
           not_found,
           DONT_DO_SMI_CHECK);

  STATIC_ASSERT(8 == kDoubleSize);
  Daddu(scratch1,
        object,
        Operand(HeapNumber::kValueOffset - kHeapObjectTag));
  ld(scratch2, MemOperand(scratch1, kPointerSize));
  ld(scratch1, MemOperand(scratch1, 0));
  Xor(scratch1, scratch1, Operand(scratch2));
  And(scratch1, scratch1, Operand(mask));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  dsll(scratch1, scratch1, kPointerSizeLog2 + 1);
  Daddu(scratch1, number_string_cache, scratch1);

  Register probe = mask;
  ld(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
  ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
  BranchF(&load_result_from_cache, NULL, eq, f12, f14);
  Branch(not_found);

  bind(&is_smi);
  Register scratch = scratch1;
  // dsra(scratch, object, 1);  // Shift away the tag.
  dsra32(scratch, object, 0);
  And(scratch, mask, Operand(scratch));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  dsll(scratch, scratch, kPointerSizeLog2 + 1);
  Daddu(scratch, number_string_cache, scratch);

  // Check if the entry is the smi we are looking for.
  ld(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  Branch(not_found, ne, object, Operand(probe));

  // Get the result from the cache.
  bind(&load_result_from_cache);
  ld(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));

  IncrementCounter(isolate()->counters()->number_to_string_native(),
                   1,
                   scratch1,
                   scratch2);
}

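// Cache lookup sketch for the code above: for a heap number the hash is the
// xor of the upper and lower 32 bits of the IEEE-754 value, masked to the
// cache size; each entry occupies two pointers (the key at
// FixedArray::kHeaderSize, the cached string one pointer later), which is why
// the entry index is scaled by kPointerSizeLog2 + 1.
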
void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential one-byte strings.
  // Assume that they are non-smis.
  ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
                                                 scratch2, failure);
}

void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
                                                           Register second,
                                                           Register scratch1,
                                                           Register scratch2,
                                                           Label* failure) {
  // Check that neither is a smi.
  STATIC_ASSERT(kSmiTag == 0);
  And(scratch1, first, Operand(second));
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
                                               scratch2, failure);
}

void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  DCHECK(kFlatOneByteStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
  andi(scratch1, first, kFlatOneByteStringMask);
  Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
  andi(scratch2, second, kFlatOneByteStringMask);
  Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
}

void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
                                                              Register scratch,
                                                              Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  And(scratch, type, Operand(kFlatOneByteStringMask));
  Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
}

static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;

int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  num_reg_arguments += 2 * num_double_arguments;

  // O32: Up to four simple arguments are passed in registers a0..a3.
  // N64: Up to eight simple arguments are passed in registers a0..a7.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  stack_passed_words += kCArgSlotCount;
  return stack_passed_words;
}

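// Example: on N64, a call with 3 integer and 4 double arguments counts as
// 3 + 2 * 4 = 11 register-sized arguments; with 8 argument registers this
// leaves 3 words to pass on the stack (kCArgSlotCount is 0 on N64).
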
void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               Register index,
                                               Register value,
                                               Register scratch,
                                               uint32_t encoding_mask) {
  SmiTst(string, at);
  Check(ne, kNonObject, at, Operand(zero_reg));

  ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
  lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));

  andi(at, at, kStringRepresentationMask | kStringEncodingMask);
  li(scratch, Operand(encoding_mask));
  Check(eq, kUnexpectedStringType, at, Operand(scratch));

  // TODO(plind): requires Smi size check code for mips32.

  ld(at, FieldMemOperand(string, String::kLengthOffset));
  Check(lt, kIndexIsTooLarge, index, Operand(at));

  DCHECK(Smi::FromInt(0) == 0);
  Check(ge, kIndexIsNegative, index, Operand(zero_reg));
}

void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // n64: Up to eight simple arguments in a0..a3, a4..a7, No argument slots.
  // O32: Up to four simple arguments are passed in registers a0..a3.
  // Those four arguments must have reserved argument slots on the stack for
  // mips, even though those argument slots are not normally used.
  // Both ABIs: Remaining arguments are pushed on the stack, above (higher
  // address than) the (O32) argument slots. (arg slot calculation handled by
  // CalculateStackPassedWords()).
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));
    sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}

void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  li(t8, Operand(function));
  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
}

void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}

void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
  // The argument slots are presumed to have been set up by
  // PrepareCallCFunction. The C function must be called via t9, for mips ABI.

#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
  if (emit_debug_code()) {
    int frame_alignment = base::OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      Label alignment_as_expected;
      And(at, sp, Operand(frame_alignment_mask));
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort possibly
      // re-entering here.
      stop("Unexpected alignment in CallCFunction");
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.

  if (!function.is(t9)) {
    mov(t9, function);
    function = t9;
  }

  Call(function);

  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);

  if (base::OS::ActivationFrameAlignment() > kPointerSize) {
    ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

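// Note on the epilogue above: when the frame was over-aligned, the original
// sp was stored at sp + stack_passed_arguments * kPointerSize by
// PrepareCallCFunction, so it is reloaded with a single ld; otherwise sp is
// simply bumped past the argument words.
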
#undef BRANCH_ARGS_CHECK

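// The two helpers below operate on the fixed 4-instruction sequence that li
// emits for a 48-bit address (a sketch of the assumed encoding):
//   lui(rd, addr[47:32])
//   ori(rd, rd, addr[31:16])
//   dsll(rd, rd, 16)
//   ori(rd, rd, addr[15:0])
// Only the instructions at offsets 0, 1 and 3 carry immediates, which is why
// the code patches and reads exactly those offsets and skips the dsll at
// offset 2.
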
void MacroAssembler::PatchRelocatedValue(Register li_location,
                                         Register scratch,
                                         Register new_value) {
  lwu(scratch, MemOperand(li_location));
  // At this point scratch is a lui(at, ...) instruction.
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionToPatchShouldBeALui,
          scratch, Operand(LUI));
    lwu(scratch, MemOperand(li_location));
  }
  dsrl32(t9, new_value, 0);
  Ins(scratch, t9, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location));

  lwu(scratch, MemOperand(li_location, kInstrSize));
  // scratch is now ori(at, ...).
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionToPatchShouldBeAnOri,
          scratch, Operand(ORI));
    lwu(scratch, MemOperand(li_location, kInstrSize));
  }
  dsrl(t9, new_value, kImm16Bits);
  Ins(scratch, t9, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location, kInstrSize));

  lwu(scratch, MemOperand(li_location, kInstrSize * 3));
  // scratch is now ori(at, ...).
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionToPatchShouldBeAnOri,
          scratch, Operand(ORI));
    lwu(scratch, MemOperand(li_location, kInstrSize * 3));
  }

  Ins(scratch, new_value, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location, kInstrSize * 3));

  // Update the I-cache so the new lui and ori can be executed.
  FlushICache(li_location, 4);
}

void MacroAssembler::GetRelocatedValue(Register li_location,
                                       Register value,
                                       Register scratch) {
  lwu(value, MemOperand(li_location));
  if (emit_debug_code()) {
    And(value, value, kOpcodeMask);
    Check(eq, kTheInstructionShouldBeALui,
          value, Operand(LUI));
    lwu(value, MemOperand(li_location));
  }

  // value now holds a lui instruction. Extract the immediate.
  andi(value, value, kImm16Mask);
  dsll32(value, value, kImm16Bits);

  lwu(scratch, MemOperand(li_location, kInstrSize));
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionShouldBeAnOri,
          scratch, Operand(ORI));
    lwu(scratch, MemOperand(li_location, kInstrSize));
  }
  // "scratch" now holds an ori instruction. Extract the immediate.
  andi(scratch, scratch, kImm16Mask);
  dsll32(scratch, scratch, 0);

  or_(value, value, scratch);

  lwu(scratch, MemOperand(li_location, kInstrSize * 3));
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, kTheInstructionShouldBeAnOri,
          scratch, Operand(ORI));
    lwu(scratch, MemOperand(li_location, kInstrSize * 3));
  }
  // "scratch" now holds an ori instruction. Extract the immediate.
  andi(scratch, scratch, kImm16Mask);
  dsll(scratch, scratch, kImm16Bits);

  or_(value, value, scratch);
  // Sign extend extracted address.
  dsra(value, value, kImm16Bits);
}

void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
  And(scratch, object, Operand(~Page::kPageAlignmentMask));
  ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  And(scratch, scratch, Operand(mask));
  Branch(condition_met, cc, scratch, Operand(zero_reg));
}

void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
}

void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color;
  // Note that we are using a 4-byte aligned 8-byte load.
  Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, t9, Operand(mask_scratch));
  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
  // Shift left 1 by adding.
  Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
  And(t8, t9, Operand(mask_scratch));
  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));

  bind(&other_color);
}

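// HasColor above tests the two consecutive mark bits selected by mask_scratch
// against the (first_bit, second_bit) pattern: the first branch rejects
// objects whose low mark bit differs, doubling the mask moves it to the next
// bit, and the second branch accepts on a match. JumpIfBlack uses the pattern
// (1, 0), matching Marking::kBlackBitPattern "10".
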
// Detect some, but not all, common pointer-free objects. This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(Register value,
                                      Register scratch,
                                      Label* not_data_object) {
  DCHECK(!AreAliased(value, scratch, t8, no_reg));
  Label is_data_object;
  ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  Branch(&is_data_object, eq, t8, Operand(scratch));
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
  Branch(not_data_object, ne, t8, Operand(zero_reg));
  bind(&is_data_object);
}

void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
  // addr_reg is divided into fields:
  // |63   page base   20|19  high  8|7  shift  3|2  0|
  // 'high' gives the index of the cell holding color bits for the object.
  // 'shift' gives the offset in the cell for this object's color.
  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  dsll(t8, t8, Bitmap::kBytesPerCellLog2);
  Daddu(bitmap_reg, bitmap_reg, t8);
  li(t8, Operand(1));
  dsllv(mask_reg, t8, mask_reg);
}

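// Worked example for GetMarkBits, assuming kPointerSizeLog2 == 3,
// Bitmap::kBitsPerCellLog2 == 5 and Bitmap::kBytesPerCellLog2 == 2: for an
// object at page offset 0x1238, bits [7:3] = 7 select the bit within the cell
// (mask_reg becomes 1 << 7) and bits [19:8] = 0x12 select the cell, so the
// color bits live in the 32-bit cell at bitmap byte offset 0x12 << 2 = 0x48.
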
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Register load_scratch,
    Label* value_is_white_and_not_data) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  // Note that we are using a 4-byte aligned 8-byte load.
  Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, mask_scratch, load_scratch);
  Branch(&done, ne, t8, Operand(zero_reg));

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    // sll may overflow, making the check conservative.
    dsll(t8, mask_scratch, 1);
    And(t8, load_scratch, t8);
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Impossible marking bit pattern");
    bind(&ok);
  }

  // Value is white. We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = load_scratch;  // Holds map while checking type.
  Register length = load_scratch;  // Holds length of object after testing type.
  Label is_data_object;

  // Check for heap-number.
  ld(map, FieldMemOperand(value, HeapObject::kMapOffset));
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  {
    Label skip;
    Branch(&skip, ne, t8, Operand(map));
    li(length, HeapNumber::kSize);
    Branch(&is_data_object);
    bind(&skip);
  }

  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  And(t8, instance_type, Operand(kExternalStringTag));
  {
    Label skip;
    Branch(&skip, eq, t8, Operand(zero_reg));
    li(length, ExternalString::kSize);
    Branch(&is_data_object);
    bind(&skip);
  }

  // Sequential string, either Latin1 or UC16.
  // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
  // getting the length multiplied by 2.
  DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
  lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset));
  And(t8, instance_type, Operand(kStringEncodingMask));
  {
    Label skip;
    Branch(&skip, ne, t8, Operand(zero_reg));
    // Adjust length for UC16.
    dsll(t9, t9, 1);
    bind(&skip);
  }
  Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
  DCHECK(!length.is(t8));
  And(length, length, Operand(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Or(t8, t8, Operand(mask_scratch));
  Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
  Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Daddu(t8, t8, Operand(length));
  Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  bind(&done);
}

void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}

void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}

void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Operand(Map::EnumLengthBits::kMask));
  SmiTag(dst);
}

void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  ld(dst,
     FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  ld(dst, FieldMemOperand(dst, offset));
}

void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Register empty_fixed_array_value = a6;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
  mov(a2, a0);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  EnumLength(a3, a1);
  Branch(
      call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));

  jmp(&start);

  bind(&next);
  ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(a3, a1);
  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));

  bind(&start);

  // Check that there are no elements. Register a2 contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
  Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));

  // Second chance, the object may be using the empty slow element dictionary.
  LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
  Branch(call_runtime, ne, a2, Operand(at));

  bind(&no_elements);
  ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
  Branch(&next, ne, a2, Operand(null_value));
}

void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  DCHECK(!output_reg.is(input_reg));
  Label done;
  li(output_reg, Operand(255));
  // Normal branch: nop in delay slot.
  Branch(&done, gt, input_reg, Operand(output_reg));
  // Use delay slot in this branch.
  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
  mov(output_reg, zero_reg);  // In delay slot.
  mov(output_reg, input_reg);  // Value is in range 0..255.
  bind(&done);
}

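// Trace of the three cases above: input > 255 takes the first branch with 255
// already in output_reg; input < 0 takes the second branch, zeroing
// output_reg in its delay slot; otherwise both branches fall through (the
// delay-slot mov still executes, harmlessly) and the final mov copies the
// in-range value.
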
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DoubleRegister input_reg,
                                        DoubleRegister temp_double_reg) {
  Label above_zero;
  Label done;
  Label in_bounds;

  Move(temp_double_reg, 0.0);
  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);

  // Double value is less than zero, NaN or Inf, return 0.
  mov(result_reg, zero_reg);
  Branch(&done);

  // Double value is >= 255, return 255.
  bind(&above_zero);
  Move(temp_double_reg, 255.0);
  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
  li(result_reg, Operand(255));
  Branch(&done);

  // In 0-255 range, round and truncate.
  bind(&in_bounds);
  cvt_w_d(temp_double_reg, input_reg);
  mfc1(result_reg, temp_double_reg);
  bind(&done);
}

void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found,
    Condition cond,
    Label* allocation_memento_present) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  Daddu(scratch_reg, receiver_reg,
        Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
  li(at, Operand(new_space_allocation_top));
  ld(at, MemOperand(at));
  Branch(no_memento_found, gt, scratch_reg, Operand(at));
  ld(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
  if (allocation_memento_present) {
    Branch(allocation_memento_present, cond, scratch_reg,
           Operand(isolate()->factory()->allocation_memento_map()));
  }
}

Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
                                   Register reg4, Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}

void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // Scratch contained elements pointer.
  Move(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
  ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Branch(&loop_again, ne, current, Operand(factory->null_value()));
}

bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                Register reg5, Register reg6, Register reg7, Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
      reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}

CodePatcher::CodePatcher(byte* address,
                         int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(NULL, address, size_ + Assembler::kGap),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    CpuFeatures::FlushICache(address_, size_);
  }
  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}

void CodePatcher::Emit(Address addr) {
  // masm()->emit(reinterpret_cast<Instr>(addr));
}

void CodePatcher::ChangeBranchCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  DCHECK(Assembler::IsBranch(instr));
  uint32_t opcode = Assembler::GetOpcodeField(instr);
  // Currently only the 'eq' and 'ne' cond values are supported and the simple
  // branch instructions (with opcode being the branch type).
  // There are some special cases (see Assembler::IsBranch()) so extending this
  // would be tricky.
  DCHECK(opcode == BEQ ||
         opcode == BNE ||
         opcode == BLEZ ||
         opcode == BGTZ ||
         opcode == BEQL ||
         opcode == BNEL ||
         opcode == BLEZL ||
         opcode == BGTZL);
  opcode = (cond == eq) ? BEQ : BNE;
  instr = (instr & ~kOpcodeMask) | opcode;
  masm_.emit(instr);
}

void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(at));
  DCHECK(!result.is(at));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  li(at, Operand(static_cast<int32_t>(mag.multiplier)));
  Mulh(result, dividend, Operand(at));
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) {
    Addu(result, result, Operand(dividend));
  }
  if (divisor < 0 && !neg && mag.multiplier > 0) {
    Subu(result, result, Operand(dividend));
  }
  if (mag.shift > 0) sra(result, result, mag.shift);
  srl(at, dividend, 31);
  Addu(result, result, Operand(at));
}

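// TruncatingDiv computes dividend / divisor via the usual magic-number
// multiply (Hacker's Delight style). E.g. for divisor 7 the magic constant is
// 0x92492493 with shift 2; that multiplier is negative, so for a positive
// divisor the dividend is added back after the high multiply, the result is
// shifted right by 2, and the final srl/Addu adds the dividend's sign bit to
// round the quotient toward zero.
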
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS64