// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>  // For assert
#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "src/v8.h"

#if V8_TARGET_ARCH_PPC

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (isolate() != NULL) {
    code_object_ =
        Handle<Object>(isolate()->heap()->undefined_value(), isolate());
  }
}

void MacroAssembler::Jump(Register target) {
  mtctr(target);
  bctr();
}


void MacroAssembler::JumpToJSEntry(Register target) {
  Move(ip, target);
  Jump(ip);
}

void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond, CRegister cr) {
  Label skip;

  if (cond != al) b(NegateCondition(cond), &skip, cr);

  DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);

  mov(ip, Operand(target, rmode));
  mtctr(ip);
  bctr();

  bind(&skip);
}

void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
                          CRegister cr) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond, CRegister cr) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ppc code, never THUMB code
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


int MacroAssembler::CallSize(Register target) { return 2 * kInstrSize; }

void MacroAssembler::Call(Register target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);

  // Statement positions are expected to be recorded when the target
  // address is loaded.
  positions_recorder()->WriteRecordedPositions();

  // branch via link register and set LK bit for return point
  mtctr(target);
  bctrl();

  DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
}


void MacroAssembler::CallJSEntry(Register target) {
  DCHECK(target.is(ip));
  Call(target);
}

int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
                             Condition cond) {
  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
  return (2 + instructions_required_for_mov(mov_operand)) * kInstrSize;
}


int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
                                                   RelocInfo::Mode rmode,
                                                   Condition cond) {
  return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
}

void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(cond == al);

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the same
  // constant pool availability (e.g., whether constant pool is full or not).
  int expected_size = CallSize(target, rmode, cond);
  Label start;
  bind(&start);
#endif

  // Statement positions are expected to be recorded when the target
  // address is loaded.
  positions_recorder()->WriteRecordedPositions();

  // This can likely be optimized to make use of bc() with 24bit relative
  //
  // RecordRelocInfo(x.rmode_, x.imm_);
  // bc( BA, .... offset, LKset);
  //

  mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
  mtctr(ip);
  bctrl();

  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}

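// Note: the DCHECK_EQ size check above ties the emitted mov/mtctr/bctrl
// sequence to CallSize(); any change to one must be mirrored in the other
// (and in CallSizeNotPredictableCodeSize()).
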
int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id, Condition cond) {
  AllowDeferredHandleDereference using_raw_address;
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}

void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id, Condition cond) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(RelocInfo::IsCodeTarget(rmode));

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the same
  // constant pool availability (e.g., whether constant pool is full or not).
  int expected_size = CallSize(code, rmode, ast_id, cond);
  Label start;
  bind(&start);
#endif

  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  AllowDeferredHandleDereference using_raw_address;
  Call(reinterpret_cast<Address>(code.location()), rmode, cond);
  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}

void MacroAssembler::Ret(Condition cond) {
  DCHECK(cond == al);
  blr();
}


void MacroAssembler::Drop(int count, Condition cond) {
  DCHECK(cond == al);
  if (count > 0) {
    Add(sp, sp, count * kPointerSize, r0);
  }
}


void MacroAssembler::Ret(int drop, Condition cond) {
  Drop(drop, cond);
  Ret(cond);
}


void MacroAssembler::Call(Label* target) { b(target, SetLK); }


void MacroAssembler::Push(Handle<Object> handle) {
  mov(r0, Operand(handle));
  push(r0);
}

void MacroAssembler::Move(Register dst, Handle<Object> value) {
  AllowDeferredHandleDereference smi_check;
  if (value->IsSmi()) {
    LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
  } else {
    DCHECK(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
      mov(dst, Operand(cell));
      LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
    } else {
      mov(dst, Operand(value));
    }
  }
}


void MacroAssembler::Move(Register dst, Register src, Condition cond) {
  DCHECK(cond == al);
  if (!dst.is(src)) {
    mr(dst, src);
  }
}


void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  if (!dst.is(src)) {
    fmr(dst, src);
  }
}

void MacroAssembler::MultiPush(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  subi(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      StoreP(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      LoadP(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addi(sp, sp, Operand(stack_offset));
}

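// Illustrative usage (not part of the original file): callers build the
// RegList from Register::bit() masks and must pass the same mask to the
// matching pop, e.g.
//
//   RegList saved = r14.bit() | r15.bit() | r16.bit();
//   MultiPush(saved);  // r16 ends up at the highest stack address
//   // ... code that may clobber r14-r16 ...
//   MultiPop(saved);
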
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
                              Condition cond) {
  DCHECK(cond == al);
  LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}


void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
                               Condition cond) {
  DCHECK(cond == al);
  StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}


void MacroAssembler::InNewSpace(Register object, Register scratch,
                                Condition cond, Label* branch) {
  // N.B. scratch may be same register as object
  DCHECK(cond == eq || cond == ne);
  mov(r0, Operand(ExternalReference::new_space_mask(isolate())));
  and_(scratch, object, r0);
  mov(r0, Operand(ExternalReference::new_space_start(isolate())));
  cmp(scratch, r0);
  b(cond, branch);
}

void MacroAssembler::RecordWriteField(
    Register object, int offset, Register value, Register dst,
    LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action, SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Add(dst, object, offset - kHeapObjectTag, r0);
  if (emit_debug_code()) {
    Label ok;
    andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
    beq(&ok, cr0);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
  }
}

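// Illustrative call site (hypothetical registers): after storing the value
// in r7 into a field of the object in r6, a caller might emit
//
//   RecordWriteField(r6, JSObject::kPropertiesOffset, r7, r8,
//                    kLRHasBeenSaved, kDontSaveFPRegs);
//
// where r8 is a scratch register that receives the field's address.
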
// Will clobber 4 registers: object, map, dst, ip. The
// register 'object' contains a heap object pointer.
void MacroAssembler::RecordWriteForMap(Register object, Register map,
                                       Register dst,
                                       LinkRegisterStatus lr_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset));
    cmp(ip, map);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set. This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);

  addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
    beq(&ok, cr0);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    mflr(r0);
    push(r0);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(r0);
    mtlr(r0);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
  }
}

// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object, Register address, Register value,
    LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action, SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  if (emit_debug_code()) {
    LoadP(r0, MemOperand(address));
    cmp(r0, value);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    mflr(r0);
    push(r0);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(r0);
    mtlr(r0);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
  }
}

void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address, Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(ip, Operand(store_buffer));
  LoadP(scratch, MemOperand(ip));
  // Store pointer to buffer and increment buffer top.
  StoreP(address, MemOperand(scratch));
  addi(scratch, scratch, Operand(kPointerSize));
  // Write back new top of buffer.
  StoreP(scratch, MemOperand(ip));
  // Call stub on end of buffer.
  // Check for end of buffer.
  mov(r0, Operand(StoreBuffer::kStoreBufferOverflowBit));
  and_(r0, scratch, r0, SetRC);

  if (and_then == kFallThroughAtEnd) {
    beq(&done, cr0);
  } else {
    DCHECK(and_then == kReturnAtEnd);
    beq(&done, cr0);
  }
  mflr(r0);
  push(r0);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(r0);
  mtlr(r0);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}

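// In pseudo-C, the fast path above is (a sketch, assuming the store buffer
// layout described in store-buffer.h):
//
//   *store_buffer_top++ = address;
//   if ((uintptr_t)store_buffer_top & kStoreBufferOverflowBit)
//     StoreBufferOverflowStub(...);  // flush the buffer
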
void MacroAssembler::PushFixedFrame(Register marker_reg) {
  mflr(r0);
#if V8_OOL_CONSTANT_POOL
  if (marker_reg.is_valid()) {
    Push(r0, fp, kConstantPoolRegister, cp, marker_reg);
  } else {
    Push(r0, fp, kConstantPoolRegister, cp);
  }
#else
  if (marker_reg.is_valid()) {
    Push(r0, fp, cp, marker_reg);
  } else {
    Push(r0, fp, cp);
  }
#endif
}


void MacroAssembler::PopFixedFrame(Register marker_reg) {
#if V8_OOL_CONSTANT_POOL
  if (marker_reg.is_valid()) {
    Pop(r0, fp, kConstantPoolRegister, cp, marker_reg);
  } else {
    Pop(r0, fp, kConstantPoolRegister, cp);
  }
#else
  if (marker_reg.is_valid()) {
    Pop(r0, fp, cp, marker_reg);
  } else {
    Pop(r0, fp, cp);
  }
#endif
  mtlr(r0);
}

// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  if (num_unsaved > 0) {
    subi(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    addi(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  StoreP(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  LoadP(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  RegList regs = kSafepointSavedRegisters;
  int index = 0;

  DCHECK(reg_code >= 0 && reg_code < kNumRegisters);

  for (int16_t i = 0; i < reg_code; i++) {
    if ((regs & (1 << i)) != 0) {
      index++;
    }
  }

  return index;
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // General purpose registers are pushed last on the stack.
  int doubles_size = DoubleRegister::NumAllocatableRegisters() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}

void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
                                     const DoubleRegister src) {
  // Turn potential sNaN into qNaN.
  fsub(dst, src, kDoubleRegZero);
}

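// Any FPU arithmetic operation quiets a signalling NaN, so src - 0.0 turns
// an sNaN into the corresponding qNaN while leaving every other value,
// including -0.0, unchanged (adding +0.0 would not: -0.0 + 0.0 == +0.0).
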
void MacroAssembler::ConvertIntToDouble(Register src,
                                        DoubleRegister double_dst) {
  MovIntToDouble(double_dst, src, r0);
  fcfid(double_dst, double_dst);
}


void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
                                                DoubleRegister double_dst) {
  MovUnsignedIntToDouble(double_dst, src, r0);
  fcfid(double_dst, double_dst);
}


void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst,
                                       const Register src,
                                       const Register int_scratch) {
  MovIntToDouble(dst, src, int_scratch);
  fcfid(dst, dst);
  frsp(dst, dst);
}

void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
                                          const Register dst_hi,
#endif
                                          const Register dst,
                                          const DoubleRegister double_dst,
                                          FPRoundingMode rounding_mode) {
  if (rounding_mode == kRoundToZero) {
    fctidz(double_dst, double_input);
  } else {
    SetRoundingMode(rounding_mode);
    fctid(double_dst, double_input);
    ResetRoundingMode();
  }

  MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
      dst_hi,
#endif
      dst, double_dst);
}

#if V8_OOL_CONSTANT_POOL
void MacroAssembler::LoadConstantPoolPointerRegister(
    CodeObjectAccessMethod access_method, int ip_code_entry_delta) {
  Register base;
  int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize;
  if (access_method == CAN_USE_IP) {
    base = ip;
    constant_pool_offset += ip_code_entry_delta;
  } else {
    DCHECK(access_method == CONSTRUCT_INTERNAL_REFERENCE);
    base = kConstantPoolRegister;
    ConstantPoolUnavailableScope constant_pool_unavailable(this);

    // CheckBuffer() is called too frequently. This will pre-grow
    // the buffer if needed to avoid splitting the relocation and instructions.
    EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize);

    uintptr_t code_start = reinterpret_cast<uintptr_t>(pc_) - pc_offset();
    mov(base, Operand(code_start, RelocInfo::INTERNAL_REFERENCE));
  }
  LoadP(kConstantPoolRegister, MemOperand(base, constant_pool_offset));
}
#endif

void MacroAssembler::StubPrologue(int prologue_offset) {
  LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB));
  PushFixedFrame(r11);
  // Adjust FP to point to saved FP.
  addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
#if V8_OOL_CONSTANT_POOL
  // ip contains prologue address
  LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset);
  set_ool_constant_pool_available(true);
#endif
}

void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
  {
    PredictableCodeSizeScope predictible_code_size_scope(
        this, kNoCodeAgeSequenceLength);
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
    // The following instructions must remain together and unmodified
    // for code aging to work properly.
    if (code_pre_aging) {
      // Pre-age the code.
      // This matches the code found in PatchPlatformCodeAge()
      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
      intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
      // Don't use Call -- we need to preserve ip and lr
      nop();  // marker to detect sequence (see IsOld)
      mov(r3, Operand(target));
      mtlr(r3);
      blrl();
      for (int i = 0; i < kCodeAgingSequenceNops; i++) {
        nop();
      }
    } else {
      // This matches the code found in GetNoCodeAgeSequence()
      PushFixedFrame(r4);
      // Adjust fp to point to saved fp.
      addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
      for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
        nop();
      }
    }
  }
#if V8_OOL_CONSTANT_POOL
  // ip contains prologue address
  LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset);
  set_ool_constant_pool_available(true);
#endif
}

void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
    PushFixedFrame();
#if V8_OOL_CONSTANT_POOL
    // This path should not rely on ip containing code entry.
    LoadConstantPoolPointerRegister(CONSTRUCT_INTERNAL_REFERENCE);
#endif
    LoadSmiLiteral(ip, Smi::FromInt(type));
    push(ip);
  } else {
    LoadSmiLiteral(ip, Smi::FromInt(type));
    PushFixedFrame(ip);
  }
  // Adjust FP to point to saved FP.
  addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));

  mov(r0, Operand(CodeObject()));
  push(r0);
}

int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
#if V8_OOL_CONSTANT_POOL
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
#endif
  // r3: preserved
  // r4: preserved
  // r5: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer, return address and constant pool pointer.
  int frame_ends;
  LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
#if V8_OOL_CONSTANT_POOL
  const int exitOffset = ExitFrameConstants::kConstantPoolOffset;
  const int standardOffset = StandardFrameConstants::kConstantPoolOffset;
  const int offset = ((type == StackFrame::EXIT) ? exitOffset : standardOffset);
  LoadP(kConstantPoolRegister, MemOperand(fp, offset));
#endif
  mtlr(r0);
  frame_ends = pc_offset();
  Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
  mr(fp, ip);
  return frame_ends;
}

// ExitFrame layout (probably wrongish.. needs updating)
//
//  SP -> previousSP
//        LK reserved
//        code
//        sp_on_exit (for debug?)
// oldSP->prev SP
//        LK
//        <parameters on stack>
//
// Prior to calling EnterExitFrame, we've got a bunch of parameters
// on the stack that we need to wrap a real frame around.. so first
// we reserve a slot for LK and push the previous SP which is captured
// in the fp register (r31)
// Then - we buy a new frame

void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  DCHECK(stack_space > 0);

  // This is an opportunity to build a frame to wrap
  // all of the pushes that have happened inside of V8
  // since we were called from C code

  // replicate ARM frame - TODO make this more closely follow PPC ABI
  mflr(r0);
  Push(r0, fp);
  mr(fp, sp);
  // Reserve room for saved entry sp and code object.
  subi(sp, sp, Operand(ExitFrameConstants::kFrameSize));

  if (emit_debug_code()) {
    li(r8, Operand::Zero());
    StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
#if V8_OOL_CONSTANT_POOL
  StoreP(kConstantPoolRegister,
         MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
#endif
  mov(r8, Operand(CodeObject()));
  StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  StoreP(fp, MemOperand(r8));
  mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  StoreP(cp, MemOperand(r8));

  // Optionally save all volatile double registers.
  if (save_doubles) {
    SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFrameSize -
    //   kNumVolatileRegisters * kDoubleSize,
    // since the sp slot and code slot were pushed after the fp.
  }

  addi(sp, sp, Operand(-stack_space * kPointerSize));

  // Allocate and align the frame preparing for calling the runtime
  // function.
  const int frame_alignment = ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
  }
  li(r0, Operand::Zero());
  StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));

  // Set the exit frame sp value to point just before the return address
  // location.
  addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
  StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}

void MacroAssembler::InitializeNewString(Register string, Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1, Register scratch2) {
  SmiTag(scratch1, length);
  LoadRoot(scratch2, map_index);
  StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0);
  li(scratch1, Operand(String::kEmptyHashField));
  StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0);
  StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0);
}

int MacroAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one PPC
  // platform for another PPC platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // Simulated
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif
}

void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool restore_context,
                                    bool argument_count_is_length) {
#if V8_OOL_CONSTANT_POOL
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
#endif
  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int kNumRegs = DoubleRegister::kNumVolatileRegisters;
    const int offset =
        (ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize);
    addi(r6, fp, Operand(-offset));
    RestoreFPRegs(r6, 0, kNumRegs);
  }

  // Clear top frame.
  li(r6, Operand::Zero());
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  StoreP(r6, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    LoadP(cp, MemOperand(ip));
  }
#ifdef DEBUG
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  StoreP(r6, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  LeaveFrame(StackFrame::EXIT);

  if (argument_count.is_valid()) {
    if (!argument_count_is_length) {
      ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
    }
    add(sp, sp, argument_count);
  }
}

void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
  Move(dst, d1);
}


void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
  Move(dst, d1);
}

void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg, Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // set up registers according to contract with ArgumentsAdaptorTrampoline:
  //  r3: actual arguments count
  //  r4: function (passed through to callee)
  //  r5: expected arguments count

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.

  // ARM has some sanity checks as per below; consider adding them for PPC.
  //  DCHECK(actual.is_immediate() || actual.reg().is(r3));
  //  DCHECK(expected.is_immediate() || expected.reg().is(r5));
  //  DCHECK((!code_constant.is_null() && code_reg.is(no_reg))
  //          || code_reg.is(r6));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(r3, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r5, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      cmpi(expected.reg(), Operand(actual.immediate()));
      beq(&regular_invoke);
      mov(r3, Operand(actual.immediate()));
    } else {
      cmp(expected.reg(), actual.reg());
      beq(&regular_invoke);
    }
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      mov(r6, Operand(code_constant));
      addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
    }

    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}

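// Illustrative example (hypothetical counts): for a call site passing two
// actual arguments to a function whose SharedFunctionInfo expects three,
// the code above loads r3 = 2 and r5 = 3 and transfers control to
// ArgumentsAdaptorTrampoline, which pads the missing argument with
// undefined before entering the callee.
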
void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected,
                                const ParameterCount& actual, InvokeFlag flag,
                                const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done,
                 &definitely_mismatches, flag, call_wrapper);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      CallJSEntry(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      JumpToJSEntry(code);
    }

    // Continue here if InvokePrologue does handle the invocation due to
    // mismatched parameter counts.
    bind(&done);
  }
}

void MacroAssembler::InvokeFunction(Register fun, const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r4.
  DCHECK(fun.is(r4));

  Register expected_reg = r5;
  Register code_reg = ip;

  LoadP(code_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
  LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
  LoadWordArith(expected_reg,
                FieldMemOperand(
                    code_reg, SharedFunctionInfo::kFormalParameterCountOffset));
#if !defined(V8_TARGET_ARCH_PPC64)
  SmiUntag(expected_reg);
#endif
  LoadP(code_reg, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}

void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r4.
  DCHECK(function.is(r4));

  // Get the function and setup the context.
  LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));

  // We call indirectly through the code field in the function to
  // allow recompilation to take effect without changing any of the
  // call sites.
  LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
  InvokeCode(ip, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  Move(r4, function);
  InvokeFunction(r4, expected, actual, flag, call_wrapper);
}

void MacroAssembler::IsObjectJSObjectType(Register heap_object, Register map,
                                          Register scratch, Label* fail) {
  LoadP(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}


void MacroAssembler::IsInstanceJSObjectType(Register map, Register scratch,
                                            Label* fail) {
  lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmpi(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  blt(fail);
  cmpi(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  bgt(fail);
}


void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
                                          Label* fail) {
  DCHECK(kNotStringTag != 0);

  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  andi(r0, scratch, Operand(kIsNotStringMask));
  bne(fail, cr0);
}


void MacroAssembler::IsObjectNameType(Register object, Register scratch,
                                      Label* fail) {
  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  cmpi(scratch, Operand(LAST_NAME_TYPE));
  bgt(fail);
}

void MacroAssembler::DebugBreak() {
  li(r3, Operand::Zero());
  mov(r4, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(isolate(), 1);
  DCHECK(AllowThisStubCall(&ces));
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}

void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // For the JSEntry handler, we must preserve r1-r7; r0 and r8-r15 are
  // available. We want the stack to look like
  // sp -> NextOffset
  //       CodeObject
  //       state
  //       context
  //       frame pointer

  // Link the current handler as the next handler.
  mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  LoadP(r0, MemOperand(r8));
  StorePU(r0, MemOperand(sp, -StackHandlerConstants::kSize));
  // Set this new handler as the current one.
  StoreP(sp, MemOperand(r8));

  if (kind == StackHandler::JS_ENTRY) {
    li(r8, Operand::Zero());  // NULL frame pointer.
    StoreP(r8, MemOperand(sp, StackHandlerConstants::kFPOffset));
    LoadSmiLiteral(r8, Smi::FromInt(0));  // Indicates no context.
    StoreP(r8, MemOperand(sp, StackHandlerConstants::kContextOffset));
  } else {
    // still not sure if fp is right
    StoreP(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
    StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
  }
  unsigned state = StackHandler::IndexField::encode(handler_index) |
                   StackHandler::KindField::encode(kind);
  LoadIntLiteral(r8, state);
  StoreP(r8, MemOperand(sp, StackHandlerConstants::kStateOffset));
  mov(r8, Operand(CodeObject()));
  StoreP(r8, MemOperand(sp, StackHandlerConstants::kCodeOffset));
}

void MacroAssembler::PopTryHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(r4);
  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  addi(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  StoreP(r4, MemOperand(ip));
}

// PPC - make use of ip as a temporary register
void MacroAssembler::JumpToHandlerEntry() {
  // Compute the handler entry address and jump to it. The handler table is
  // a fixed array of (smi-tagged) code offsets.
  // r3 = exception, r4 = code object, r5 = state.
#if V8_OOL_CONSTANT_POOL
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  LoadP(kConstantPoolRegister, FieldMemOperand(r4, Code::kConstantPoolOffset));
#endif
  LoadP(r6, FieldMemOperand(r4, Code::kHandlerTableOffset));  // Handler table.
  addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  srwi(r5, r5, Operand(StackHandler::kKindWidth));  // Handler index.
  slwi(ip, r5, Operand(kPointerSizeLog2));
  add(ip, r6, ip);
  LoadP(r5, MemOperand(ip));  // Smi-tagged offset.
  addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
  SmiUntag(ip, r5);
  add(r0, r4, ip);
  mtctr(r0);
  bctr();
}

void MacroAssembler::Throw(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
  Label skip;

  // The exception is expected in r3.
  if (!value.is(r3)) {
    mr(r3, value);
  }
  // Drop the stack pointer to the top of the top handler.
  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  LoadP(sp, MemOperand(r6));
  // Restore the next handler.
  pop(r5);
  StoreP(r5, MemOperand(r6));

  // Get the code object (r4) and state (r5). Restore the context and frame
  // pointer.
  pop(r4);
  pop(r5);
  pop(cp);
  pop(fp);

  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
  // or cp.
  cmpi(cp, Operand::Zero());
  beq(&skip);
  StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  bind(&skip);

  JumpToHandlerEntry();
}

void MacroAssembler::ThrowUncatchable(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in r3.
  if (!value.is(r3)) {
    mr(r3, value);
  }
  // Drop the stack pointer to the top of the top stack handler.
  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  LoadP(sp, MemOperand(r6));

  // Unwind the handlers until the ENTRY handler is found.
  Label fetch_next, check_kind;
  b(&check_kind);
  bind(&fetch_next);
  LoadP(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));

  bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  LoadP(r5, MemOperand(sp, StackHandlerConstants::kStateOffset));
  andi(r0, r5, Operand(StackHandler::KindField::kMask));
  bne(&fetch_next, cr0);

  // Set the top handler address to next handler past the top ENTRY handler.
  pop(r5);
  StoreP(r5, MemOperand(r6));
  // Get the code object (r4) and state (r5). Clear the context and frame
  // pointer (0 was saved in the handler).
  pop(r4);
  pop(r5);
  pop(cp);
  pop(fp);

  JumpToHandlerEntry();
}

void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch, Label* miss) {
  Label same_contexts;

  DCHECK(!holder_reg.is(scratch));
  DCHECK(!holder_reg.is(ip));
  DCHECK(!scratch.is(ip));

  // Load current lexical context from the stack frame.
  LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  cmpi(scratch, Operand::Zero());
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
#endif

  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  LoadP(scratch, FieldMemOperand(scratch, offset));
  LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Cannot use ip as a temporary in this verification code, because ip is
    // clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the native_context_map.
    LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  cmp(scratch, ip);
  beq(&same_contexts);

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Cannot use ip as a temporary in this verification code, because ip is
    // clobbered as part of cmp with an object Operand.
    push(holder_reg);    // Temporarily save holder on the stack.
    mr(holder_reg, ip);  // Move ip to its holding place.
    LoadRoot(ip, Heap::kNullValueRootIndex);
    cmp(holder_reg, ip);
    Check(ne, kJSGlobalProxyContextShouldNotBeNull);

    LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
    // Restoring ip is not needed. ip is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore ip to holder's context.
    LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset =
      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;

  LoadP(scratch, FieldMemOperand(scratch, token_offset));
  LoadP(ip, FieldMemOperand(ip, token_offset));
  cmp(scratch, ip);
  bne(miss);

  bind(&same_contexts);
}

// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stubs-hydrogen.cc
void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);

  // Xor original key with a seed.
  xor_(t0, t0, scratch);

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  notx(scratch, t0);
  slwi(t0, t0, Operand(15));
  add(t0, scratch, t0);
  // hash = hash ^ (hash >> 12);
  srwi(scratch, t0, Operand(12));
  xor_(t0, t0, scratch);
  // hash = hash + (hash << 2);
  slwi(scratch, t0, Operand(2));
  add(t0, t0, scratch);
  // hash = hash ^ (hash >> 4);
  srwi(scratch, t0, Operand(4));
  xor_(t0, t0, scratch);
  // hash = hash * 2057;
  mr(r0, t0);  // Preserve the current hash; r0 feeds the << 11 term below.
  slwi(scratch, t0, Operand(3));
  add(t0, t0, scratch);
  slwi(scratch, r0, Operand(11));
  add(t0, t0, scratch);
  // hash = hash ^ (hash >> 16);
  srwi(scratch, t0, Operand(16));
  xor_(t0, t0, scratch);
}

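// For reference, the hash computed above corresponds to this C sketch
// (mirroring ComputeIntegerHash in utils.h):
//
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash ^= hash >> 12;
//   hash += hash << 2;
//   hash ^= hash >> 4;
//   hash *= 2057;  // == hash + (hash << 3) + (hash << 11)
//   hash ^= hash >> 16;
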
void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
                                              Register key, Register result,
                                              Register t0, Register t1,
                                              Register t2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'elements' or 'key'.
  //            Unchanged on bailout so 'elements' and 'key' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // t0 - holds the untagged key on entry and holds the hash once computed.
  //
  // t1 - used to hold the capacity mask of the dictionary
  //
  // t2 - used for the index into the dictionary.
  Label done;

  GetNumberHash(t0, t1);

  // Compute the capacity mask.
  LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  SmiUntag(t1);
  subi(t1, t1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Use t2 for index calculations and keep the hash intact in t0.
    mr(t2, t0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(t2, t2, t1);

    // Scale the index by multiplying by the element size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    slwi(ip, t2, Operand(1));
    add(t2, t2, ip);  // t2 = t2 * 3

    // Check if the key is identical to the name.
    slwi(t2, t2, Operand(kPointerSizeLog2));
    add(t2, elements, t2);
    LoadP(ip,
          FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
    cmp(key, ip);
    if (i != kNumberDictionaryProbes - 1) {
      beq(&done);
    } else {
      bne(miss);
    }
  }

  bind(&done);
  // Check that the value is a field property.
  // t2: elements + (index * kPointerSize)
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
  LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
  and_(r0, t1, ip, SetRC);
  bne(miss, cr0);

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  LoadP(result, FieldMemOperand(t2, kValueOffset));
}

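// Each dictionary entry occupies kEntrySize == 3 pointers (key, value,
// details), hence the "t2 * 3" scaling above and the one- and two-pointer
// offsets used for kValueOffset and kDetailsOffset.
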
void MacroAssembler::Allocate(int object_size, Register result,
                              Register scratch1, Register scratch2,
                              Label* gc_required, AllocationFlags flags) {
  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      li(result, Operand(0x7091));
      li(scratch1, Operand(0x7191));
      li(scratch2, Operand(0x7291));
    }
    b(gc_required);
    return;
  }

  DCHECK(!result.is(scratch1));
  DCHECK(!result.is(scratch2));
  DCHECK(!scratch1.is(scratch2));
  DCHECK(!scratch1.is(ip));
  DCHECK(!scratch2.is(ip));

  // Make object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));

  // Check relative positions of allocation top and limit addresses.
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
  DCHECK((limit - top) == kPointerSize);

  // Set up allocation top address register.
  Register topaddr = scratch1;
  mov(topaddr, Operand(allocation_top));

  // This code stores a temporary value in ip. This is OK, as the code below
  // does not need ip for implicit literal generation.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into ip.
    LoadP(result, MemOperand(topaddr));
    LoadP(ip, MemOperand(topaddr, kPointerSize));
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. ip is used
      // immediately below so this use of ip does not cause difference with
      // respect to register content between debug and release mode.
      LoadP(ip, MemOperand(topaddr));
      cmp(result, ip);
      Check(eq, kUnexpectedAllocationTop);
    }
    // Load allocation limit into ip. Result already contains allocation top.
    LoadP(ip, MemOperand(topaddr, limit - top), r0);
  }

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    // Align the next allocation. Storing the filler map without checking top
    // is safe in new-space because the limit of the heap is aligned there.
    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
#if V8_TARGET_ARCH_PPC64
    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    andi(scratch2, result, Operand(kDoubleAlignmentMask));
    Label aligned;
    beq(&aligned, cr0);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmpl(result, ip);
      bge(gc_required);
    }
    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    stw(scratch2, MemOperand(result));
    addi(result, result, Operand(kDoubleSize / 2));
    bind(&aligned);
#endif
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top.
  sub(r0, ip, result);
  if (is_int16(object_size)) {
    cmpi(r0, Operand(object_size));
    blt(gc_required);
    addi(scratch2, result, Operand(object_size));
  } else {
    Cmpi(r0, Operand(object_size), scratch2);
    blt(gc_required);
    add(scratch2, result, scratch2);
  }
  StoreP(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    addi(result, result, Operand(kHeapObjectTag));
  }
}

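// Illustrative call (hypothetical registers and label): allocate a
// two-pointer object in new space, bailing out when it is exhausted:
//
//   Label gc;
//   Allocate(2 * kPointerSize, r3, r4, r5, &gc, TAG_OBJECT);
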
void MacroAssembler::Allocate(Register object_size, Register result,
                              Register scratch1, Register scratch2,
                              Label* gc_required, AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      li(result, Operand(0x7091));
      li(scratch1, Operand(0x7191));
      li(scratch2, Operand(0x7291));
    }
    b(gc_required);
    return;
  }

  // Assert that the register arguments are different and that none of
  // them are ip. ip is used explicitly in the code generated below.
  DCHECK(!result.is(scratch1));
  DCHECK(!result.is(scratch2));
  DCHECK(!scratch1.is(scratch2));
  DCHECK(!object_size.is(ip));
  DCHECK(!result.is(ip));
  DCHECK(!scratch1.is(ip));
  DCHECK(!scratch2.is(ip));

  // Check relative positions of allocation top and limit addresses.
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
  DCHECK((limit - top) == kPointerSize);

  // Set up allocation top address.
  Register topaddr = scratch1;
  mov(topaddr, Operand(allocation_top));

  // This code stores a temporary value in ip. This is OK, as the code below
  // does not need ip for implicit literal generation.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into ip.
    LoadP(result, MemOperand(topaddr));
    LoadP(ip, MemOperand(topaddr, kPointerSize));
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. ip is used
      // immediately below so this use of ip does not cause difference with
      // respect to register content between debug and release mode.
      LoadP(ip, MemOperand(topaddr));
      cmp(result, ip);
      Check(eq, kUnexpectedAllocationTop);
    }
    // Load allocation limit into ip. Result already contains allocation top.
    LoadP(ip, MemOperand(topaddr, limit - top));
  }

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    // Align the next allocation. Storing the filler map without checking top
    // is safe in new-space because the limit of the heap is aligned there.
    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
#if V8_TARGET_ARCH_PPC64
    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    andi(scratch2, result, Operand(kDoubleAlignmentMask));
    Label aligned;
    beq(&aligned, cr0);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmpl(result, ip);
      bge(gc_required);
    }
    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    stw(scratch2, MemOperand(result));
    addi(result, result, Operand(kDoubleSize / 2));
    bind(&aligned);
#endif
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. Object size may be in words so a shift is
  // required to get the number of bytes.
  sub(r0, ip, result);
  if ((flags & SIZE_IN_WORDS) != 0) {
    ShiftLeftImm(scratch2, object_size, Operand(kPointerSizeLog2));
    cmp(r0, scratch2);
    blt(gc_required);
    add(scratch2, result, scratch2);
  } else {
    cmp(r0, object_size);
    blt(gc_required);
    add(scratch2, result, object_size);
  }

  // Update allocation top. scratch2 temporarily holds the new top.
  if (emit_debug_code()) {
    andi(r0, scratch2, Operand(kObjectAlignmentMask));
    Check(eq, kUnalignedAllocationInNewSpace, cr0);
  }
  StoreP(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    addi(result, result, Operand(kHeapObjectTag));
  }
}

void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  ClearRightImm(object, object, Operand(kHeapObjectTagSize));
#ifdef DEBUG
  // Check that the object un-allocated is below the current top.
  mov(scratch, Operand(new_space_allocation_top));
  LoadP(scratch, MemOperand(scratch));
  cmp(object, scratch);
  Check(lt, kUndoAllocationOfNonAllocatedMemory);
#endif
  // Write the address of the object to un-allocate as the current top.
  mov(scratch, Operand(new_space_allocation_top));
  StoreP(object, MemOperand(scratch));
}

void MacroAssembler::AllocateTwoByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  slwi(scratch1, length, Operand(1));  // Length in bytes, not chars.
  addi(scratch1, scratch1,
       Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
  mov(r0, Operand(~kObjectAlignmentMask));
  and_(scratch1, scratch1, r0);

  // Allocate two-byte string in new space.
  Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
                      scratch2);
}


void MacroAssembler::AllocateOneByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  DCHECK(kCharSize == 1);
  addi(scratch1, length,
       Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
  li(r0, Operand(~kObjectAlignmentMask));
  and_(scratch1, scratch1, r0);

  // Allocate one-byte string in new space.
  Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
                      scratch1, scratch2);
}


void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
                      scratch2);
}


void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
                      scratch1, scratch2);
}


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
                      scratch2);
}


void MacroAssembler::AllocateOneByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
                      scratch1, scratch2);
}

void MacroAssembler::CompareObjectType(Register object, Register map,
                                       Register type_reg, InstanceType type) {
  const Register temp = type_reg.is(no_reg) ? r0 : type_reg;

  LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(map, temp, type);
}


void MacroAssembler::CheckObjectTypeRange(Register object, Register map,
                                          InstanceType min_type,
                                          InstanceType max_type,
                                          Label* false_label) {
  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
  STATIC_ASSERT(LAST_TYPE < 256);
  LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
  lbz(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
  subi(ip, ip, Operand(min_type));
  cmpli(ip, Operand(max_type - min_type));
  bgt(false_label);
}


void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
                                         InstanceType type) {
  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
  STATIC_ASSERT(LAST_TYPE < 256);
  lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmpi(type_reg, Operand(type));
}


void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
  DCHECK(!obj.is(r0));
  LoadRoot(r0, index);
  cmp(obj, r0);
}

void MacroAssembler::CheckFastElements(Register map, Register scratch,
                                       Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
  bgt(fail);
}


void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
                                             Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  ble(fail);
  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
  bgt(fail);
}


void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
                                          Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  bgt(fail);
}

1914 void MacroAssembler::StoreNumberToDoubleElements(
1915 Register value_reg, Register key_reg, Register elements_reg,
1916 Register scratch1, DoubleRegister double_scratch, Label* fail,
1917 int elements_offset) {
1918 Label smi_value, store;
1920 // Handle smi values specially.
1921 JumpIfSmi(value_reg, &smi_value);
1923 // Ensure that the object is a heap number
1924 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
1927 lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
1928 // Double value, turn potential sNaN into qNaN.
1929 CanonicalizeNaN(double_scratch);
1933 SmiToDouble(double_scratch, value_reg);
1936 SmiToDoubleArrayOffset(scratch1, key_reg);
1937 add(scratch1, elements_reg, scratch1);
1938 stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize -
void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
                                            Register right,
                                            Register overflow_dst,
                                            Register scratch) {
  DCHECK(!dst.is(overflow_dst));
  DCHECK(!dst.is(scratch));
  DCHECK(!overflow_dst.is(scratch));
  DCHECK(!overflow_dst.is(left));
  DCHECK(!overflow_dst.is(right));

  bool left_is_right = left.is(right);
  RCBit xorRC = left_is_right ? SetRC : LeaveRC;

  // C = A+B; C overflows if A/B have same sign and C has diff sign than A
  if (dst.is(left)) {
    mr(scratch, left);                        // Preserve left.
    add(dst, left, right);                    // Left is overwritten.
    xor_(overflow_dst, dst, scratch, xorRC);  // Original left.
    if (!left_is_right) xor_(scratch, dst, right);
  } else if (dst.is(right)) {
    mr(scratch, right);                       // Preserve right.
    add(dst, left, right);                    // Right is overwritten.
    xor_(overflow_dst, dst, left, xorRC);
    if (!left_is_right) xor_(scratch, dst, scratch);  // Original right.
  } else {
    add(dst, left, right);
    xor_(overflow_dst, dst, left, xorRC);
    if (!left_is_right) xor_(scratch, dst, right);
  }
  if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
}
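// The sequence above is the classic branch-free signed-overflow test: for
// C = A + B, overflow occurred iff A and B have the same sign while C has a
// different one, i.e. ((C ^ A) & (C ^ B)) < 0. A host-side C++ sketch of the
// same predicate (illustrative only, not used by the assembler; assumes
// two's-complement 32-bit arithmetic):
#if 0
static inline bool AddWouldOverflow(int32_t a, int32_t b) {
  // Do the addition in unsigned arithmetic to avoid C++ UB on overflow.
  int32_t sum = static_cast<int32_t>(static_cast<uint32_t>(a) +
                                     static_cast<uint32_t>(b));
  // Negative iff the sum's sign differs from the signs of both inputs.
  return ((sum ^ a) & (sum ^ b)) < 0;
}
#endif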
void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
                                            intptr_t right,
                                            Register overflow_dst,
                                            Register scratch) {
  Register original_left = left;
  DCHECK(!dst.is(overflow_dst));
  DCHECK(!dst.is(scratch));
  DCHECK(!overflow_dst.is(scratch));
  DCHECK(!overflow_dst.is(left));

  // C = A+B; C overflows if A/B have same sign and C has diff sign than A
  if (dst.is(left)) {
    // Preserve left.
    original_left = overflow_dst;
    mr(original_left, left);
  }
  Add(dst, left, right, scratch);
  xor_(overflow_dst, dst, original_left);
  if (right >= 0) {
    and_(overflow_dst, overflow_dst, dst, SetRC);
  } else {
    andc(overflow_dst, overflow_dst, dst, SetRC);
  }
}
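// With an immediate addend the sign of |right| is known at assembly time, so
// only one XOR term is needed: for right >= 0 overflow requires the sum's
// sign bit to be set (and_), for right < 0 it requires the sign bit to be
// clear (andc). A hypothetical host-side equivalent (sketch only,
// two's-complement assumed):
#if 0
static inline bool AddImmWouldOverflow(int32_t left, int32_t right) {
  int32_t sum = static_cast<int32_t>(static_cast<uint32_t>(left) +
                                     static_cast<uint32_t>(right));
  int32_t changed = sum ^ left;                // did the sign flip?
  return (right >= 0) ? (changed & sum) < 0    // and_: sum went negative
                      : (changed & ~sum) < 0;  // andc: sum went non-negative
}
#endif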
void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
                                            Register right,
                                            Register overflow_dst,
                                            Register scratch) {
  DCHECK(!dst.is(overflow_dst));
  DCHECK(!dst.is(scratch));
  DCHECK(!overflow_dst.is(scratch));
  DCHECK(!overflow_dst.is(left));
  DCHECK(!overflow_dst.is(right));

  // C = A-B; C overflows if A/B have diff signs and C has diff sign than A
  if (dst.is(left)) {
    mr(scratch, left);      // Preserve left.
    sub(dst, left, right);  // Left is overwritten.
    xor_(overflow_dst, dst, scratch);
    xor_(scratch, scratch, right);
    and_(overflow_dst, overflow_dst, scratch, SetRC);
  } else if (dst.is(right)) {
    mr(scratch, right);     // Preserve right.
    sub(dst, left, right);  // Right is overwritten.
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, scratch);
    and_(overflow_dst, overflow_dst, scratch, SetRC);
  } else {
    sub(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, right);
    and_(overflow_dst, scratch, overflow_dst, SetRC);
  }
}
void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
                                Label* early_success) {
  LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  CompareMap(scratch, map, early_success);
}


void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
                                Label* early_success) {
  mov(r0, Operand(map));
  cmp(obj_map, r0);
}


void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
                              Label* fail, SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }

  Label success;
  CompareMap(obj, scratch, map, &success);
  bne(fail);
  bind(&success);
}


void MacroAssembler::CheckMap(Register obj, Register scratch,
                              Heap::RootListIndex index, Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  LoadRoot(r0, index);
  cmp(scratch, r0);
  bne(fail);
}


void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
                                     Register scratch2, Handle<WeakCell> cell,
                                     Handle<Code> success,
                                     SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
  CmpWeakValue(scratch1, cell, scratch2);
  Jump(success, RelocInfo::CODE_TARGET, eq);
  bind(&fail);
}


void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
                                  Register scratch, CRegister cr) {
  mov(scratch, Operand(cell));
  LoadP(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
  cmp(value, scratch, cr);
}


void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
  mov(value, Operand(cell));
  LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
}


void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
                                   Label* miss) {
  GetWeakValue(value, cell);
  JumpIfSmi(value, miss);
}


void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
                                             Register scratch, Label* miss,
                                             bool miss_on_bound_function) {
  Label non_instance;
  if (miss_on_bound_function) {
    // Check that the receiver isn't a smi.
    JumpIfSmi(function, miss);

    // Check that the function really is a function. Load map into result reg.
    CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
    bne(miss);

    LoadP(scratch,
          FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
    lwz(scratch,
        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
    TestBit(scratch,
#if V8_TARGET_ARCH_PPC64
            SharedFunctionInfo::kBoundFunction,
#else
            SharedFunctionInfo::kBoundFunction + kSmiTagSize,
#endif
            r0);
    bne(miss, cr0);

    // Make sure that the function has an instance prototype.
    lbz(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
    andi(r0, scratch, Operand(1 << Map::kHasNonInstancePrototype));
    bne(&non_instance, cr0);
  }

  // Get the prototype or initial map from the function.
  LoadP(result,
        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  LoadRoot(r0, Heap::kTheHoleValueRootIndex);
  cmp(result, r0);
  beq(miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CompareObjectType(result, scratch, scratch, MAP_TYPE);
  bne(&done);

  // Get the prototype from the initial map.
  LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));

  if (miss_on_bound_function) {
    b(&done);

    // Non-instance prototype: Fetch prototype from constructor field
    // in initial map.
    bind(&non_instance);
    LoadP(result, FieldMemOperand(result, Map::kConstructorOffset));
  }

  // All done.
  bind(&done);
}
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
                              Condition cond) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
}


void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();
}


void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // If the hash field contains an array index pick it out. The assert checks
  // that the constants for the maximum number of digits for an array index
  // cached in the hash field and the number of bits reserved for it does not
  // conflict.
  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
}


void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
  SmiUntag(ip, smi);
  ConvertIntToDouble(ip, value);
}


void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
                                       Register scratch1, Register scratch2,
                                       DoubleRegister double_scratch) {
  TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
}


void MacroAssembler::TryDoubleToInt32Exact(Register result,
                                           DoubleRegister double_input,
                                           Register scratch,
                                           DoubleRegister double_scratch) {
  Label done;
  DCHECK(!double_input.is(double_scratch));

  ConvertDoubleToInt64(double_input,
#if !V8_TARGET_ARCH_PPC64
                       scratch,
#endif
                       result, double_scratch);

#if V8_TARGET_ARCH_PPC64
  TestIfInt32(result, r0);
#else
  TestIfInt32(scratch, result, r0);
#endif
  bne(&done);

  // convert back and compare
  fcfid(double_scratch, double_scratch);
  fcmpu(double_scratch, double_input);
  bind(&done);
}
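// Exactness is established by a round trip: truncate the double to an
// integer, convert that integer back (fcfid), and compare with the original;
// any fractional part or out-of-range input makes the fcmpu unequal. A
// minimal host-side analogue (sketch only, not part of the assembler;
// assumes a finite input, since casting NaN/Inf is undefined in C++):
#if 0
#include <cstdint>
static inline bool DoubleIsInt32(double input, int32_t* out) {
  int64_t truncated = static_cast<int64_t>(input);  // rounds toward zero
  if (truncated < INT32_MIN || truncated > INT32_MAX) return false;
  *out = static_cast<int32_t>(truncated);
  return static_cast<double>(truncated) == input;  // the "convert back" test
}
#endif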
void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
                                   Register input_high, Register scratch,
                                   DoubleRegister double_scratch, Label* done,
                                   Label* exact) {
  DCHECK(!result.is(input_high));
  DCHECK(!double_input.is(double_scratch));
  Label exception;

  MovDoubleHighToInt(input_high, double_input);

  // Test for NaN/Inf
  ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
  cmpli(result, Operand(0x7ff));
  beq(&exception);

  // Convert (rounding to -Inf)
  ConvertDoubleToInt64(double_input,
#if !V8_TARGET_ARCH_PPC64
                       scratch,
#endif
                       result, double_scratch, kRoundToMinusInf);

  // Test for overflow
#if V8_TARGET_ARCH_PPC64
  TestIfInt32(result, r0);
#else
  TestIfInt32(scratch, result, r0);
#endif
  bne(&exception);

  // Test for exactness
  fcfid(double_scratch, double_scratch);
  fcmpu(double_scratch, double_input);
  beq(exact);
  b(done);

  bind(&exception);
}


void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
                                                DoubleRegister double_input,
                                                Label* done) {
  DoubleRegister double_scratch = kScratchDoubleReg;
#if !V8_TARGET_ARCH_PPC64
  Register scratch = ip;
#endif

  ConvertDoubleToInt64(double_input,
#if !V8_TARGET_ARCH_PPC64
                       scratch,
#endif
                       result, double_scratch);

  // Test for overflow
#if V8_TARGET_ARCH_PPC64
  TestIfInt32(result, r0);
#else
  TestIfInt32(scratch, result, r0);
#endif
  beq(done);
}


void MacroAssembler::TruncateDoubleToI(Register result,
                                       DoubleRegister double_input) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  mflr(r0);
  push(r0);
  // Put input on stack.
  stfdu(double_input, MemOperand(sp, -kDoubleSize));

  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
  CallStub(&stub);

  addi(sp, sp, Operand(kDoubleSize));
  pop(r0);
  mtlr(r0);

  bind(&done);
}


void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
  Label done;
  DoubleRegister double_scratch = kScratchDoubleReg;
  DCHECK(!result.is(object));

  lfd(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
  TryInlineTruncateDoubleToI(result, double_scratch, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  mflr(r0);
  push(r0);
  DoubleToIStub stub(isolate(), object, result,
                     HeapNumber::kValueOffset - kHeapObjectTag, true, true);
  CallStub(&stub);
  pop(r0);
  mtlr(r0);

  bind(&done);
}


void MacroAssembler::TruncateNumberToI(Register object, Register result,
                                       Register heap_number_map,
                                       Register scratch1, Label* not_number) {
  Label done;
  DCHECK(!result.is(object));

  UntagAndJumpIfSmi(result, object, &done);
  JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
  TruncateHeapNumberToI(result, object);

  bind(&done);
}


void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
                                         int num_least_bits) {
#if V8_TARGET_ARCH_PPC64
  rldicl(dst, src, kBitsPerPointer - kSmiShift,
         kBitsPerPointer - num_least_bits);
#else
  rlwinm(dst, src, kBitsPerPointer - kSmiShift,
         kBitsPerPointer - num_least_bits, 31);
#endif
}


void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
                                           int num_least_bits) {
  rlwinm(dst, src, 0, 32 - num_least_bits, 31);
}
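// rlwinm with a zero rotate simply keeps bit range [32 - n, 31] (IBM bit
// numbering), i.e. the low n bits -- equivalent to masking with (1 << n) - 1.
// Host-side sketch of the effect (illustrative only):
#if 0
static inline uint32_t LeastBits(uint32_t src, int num_least_bits) {
  return src & ((1u << num_least_bits) - 1);  // e.g. 0x2b & 0x7 == 0x3
}
#endif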
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All parameters are on the stack. r3 has the return value after call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments match the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r3, Operand(num_arguments));
  mov(r4, Operand(ExternalReference(f, isolate())));
  CEntryStub stub(isolate(),
#if V8_TARGET_ARCH_PPC64
                  f->result_size,
#else
                  1,
#endif
                  save_doubles);
  CallStub(&stub);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  mov(r3, Operand(num_arguments));
  mov(r4, Operand(ext));

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r3, Operand(num_arguments));
  JumpToExternalReference(ext);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()), num_arguments,
                            result_size);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
  mov(r4, Operand(builtin));
  CEntryStub stub(isolate(), 1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  GetBuiltinEntry(ip, id);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(ip));
    CallJSEntry(ip);
    call_wrapper.AfterCall();
  } else {
    DCHECK(flag == JUMP_FUNCTION);
    JumpToJSEntry(ip);
  }
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  LoadP(target,
        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  LoadP(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
  // Load the JavaScript builtin function from the builtins object.
  LoadP(target,
        FieldMemOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)),
        r0);
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  DCHECK(!target.is(r4));
  GetBuiltinFunction(r4, id);
  // Load the code entry point from the builtins object.
  LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
}
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch1, Operand(value));
    mov(scratch2, Operand(ExternalReference(counter)));
    stw(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    lwz(scratch1, MemOperand(scratch2));
    addi(scratch1, scratch1, Operand(value));
    stw(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    lwz(scratch1, MemOperand(scratch2));
    subi(scratch1, scratch1, Operand(value));
    stw(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::Assert(Condition cond, BailoutReason reason,
                            CRegister cr) {
  if (emit_debug_code()) Check(cond, reason, cr);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    DCHECK(!elements.is(r0));
    Label ok;
    push(elements);
    LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
    LoadRoot(r0, Heap::kFixedArrayMapRootIndex);
    cmp(elements, r0);
    beq(&ok);
    LoadRoot(r0, Heap::kFixedDoubleArrayMapRootIndex);
    cmp(elements, r0);
    beq(&ok);
    LoadRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
    cmp(elements, r0);
    beq(&ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
    pop(elements);
  }
}


void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
  Label L;
  b(cond, &L, cr);
  Abort(reason);
  // will not return here
  bind(&L);
}


void MacroAssembler::Abort(BailoutReason reason) {
  Label abort_start;
  bind(&abort_start);
#ifdef DEBUG
  const char* msg = GetBailoutReason(reason);
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    stop(msg);
    return;
  }
#endif

  LoadSmiLiteral(r0, Smi::FromInt(reason));
  push(r0);
  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 1);
  } else {
    CallRuntime(Runtime::kAbort, 1);
  }
  // will not return here
}
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in esi).
    mr(dst, cp);
  }
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind, ElementsKind transitioned_kind,
    Register map_in_out, Register scratch, Label* no_map_match) {
  // Load the global or builtins object from the current context.
  LoadP(scratch,
        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  LoadP(scratch,
        MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
  size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
  LoadP(ip, FieldMemOperand(scratch, offset));
  cmp(map_in_out, ip);
  bne(no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize + FixedArrayBase::kHeaderSize;
  LoadP(map_in_out, FieldMemOperand(scratch, offset));
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  LoadP(function,
        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  LoadP(function,
        FieldMemOperand(function, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  LoadP(function, MemOperand(function, Context::SlotOffset(index)), r0);
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  LoadP(map,
        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    b(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}


void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg, Register scratch, Label* not_power_of_two_or_zero) {
  subi(scratch, reg, Operand(1));
  cmpi(scratch, Operand::Zero());
  blt(not_power_of_two_or_zero);
  and_(r0, scratch, reg, SetRC);
  bne(not_power_of_two_or_zero, cr0);
}


void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
                                                     Register scratch,
                                                     Label* zero_and_neg,
                                                     Label* not_power_of_two) {
  subi(scratch, reg, Operand(1));
  cmpi(scratch, Operand::Zero());
  blt(zero_and_neg);
  and_(r0, scratch, reg, SetRC);
  bne(not_power_of_two, cr0);
}


#if !V8_TARGET_ARCH_PPC64
void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
  DCHECK(!reg.is(overflow));
  mr(overflow, reg);  // Save original value.
  SmiTag(reg);
  xor_(overflow, overflow, reg, SetRC);  // Overflow if (value ^ 2 * value) < 0.
}


void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
                                         Register overflow) {
  if (dst.is(src)) {
    // Fall back to slower case.
    SmiTagCheckOverflow(dst, overflow);
  } else {
    DCHECK(!dst.is(src));
    DCHECK(!dst.is(overflow));
    DCHECK(!src.is(overflow));
    SmiTag(dst, src);
    xor_(overflow, dst, src, SetRC);  // Overflow if (value ^ 2 * value) < 0.
  }
}
#endif
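// Smi tagging on 32-bit shifts the value left by one, so it overflows exactly
// when the top two bits of the input differ -- which is what the
// (value ^ (value << 1)) < 0 test above detects. Host-side sketch
// (illustrative only, 32-bit two's complement assumed):
#if 0
static inline bool SmiTagWouldOverflow(int32_t value) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return (value ^ tagged) < 0;  // sign changed => value outside smi range
}
#endif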
void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  orx(r0, reg1, reg2, LeaveRC);
  JumpIfNotSmi(r0, on_not_both_smi);
}


void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
                                       Label* smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  TestBitRange(src, kSmiTagSize - 1, 0, r0);
  SmiUntag(dst, src);
  beq(smi_case, cr0);
}


void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
                                          Label* non_smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  TestBitRange(src, kSmiTagSize - 1, 0, r0);
  SmiUntag(dst, src);
  bne(non_smi_case, cr0);
}


void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  JumpIfSmi(reg1, on_either_smi);
  JumpIfSmi(reg2, on_either_smi);
}


void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object, r0);
    Check(ne, kOperandIsASmi, cr0);
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object, r0);
    Check(eq, kOperandIsNotSmi, cr0);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object, r0);
    Check(ne, kOperandIsASmiAndNotAString, cr0);
    push(object);
    LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
    pop(object);
    Check(lt, kOperandIsNotAString);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object, r0);
    Check(ne, kOperandIsASmiAndNotAName, cr0);
    push(object);
    LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(object, object, LAST_NAME_TYPE);
    pop(object);
    Check(le, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    CompareRoot(object, Heap::kUndefinedValueRootIndex);
    beq(&done_checking);
    LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}


void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, kHeapNumberMapRegisterClobbered);
  }
}


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  cmp(scratch, heap_number_map);
  bne(on_not_heap_number);
}
void MacroAssembler::LookupNumberStringCache(Register object, Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
                                             Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  LoadP(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
  ShiftRightArithImm(mask, mask, kSmiTagSize + kSmiShiftSize + 1);
  subi(mask, mask, Operand(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;
  JumpIfSmi(object, &is_smi);
  CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
           DONT_DO_SMI_CHECK);

  STATIC_ASSERT(8 == kDoubleSize);
  lwz(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
  lwz(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
  xor_(scratch1, scratch1, scratch2);
  and_(scratch1, scratch1, mask);

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  ShiftLeftImm(scratch1, scratch1, Operand(kPointerSizeLog2 + 1));
  add(scratch1, number_string_cache, scratch1);

  Register probe = mask;
  LoadP(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  lfd(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
  lfd(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
  fcmpu(d0, d1);
  bne(not_found);  // The cache did not contain this value.
  b(&load_result_from_cache);

  bind(&is_smi);
  Register scratch = scratch1;
  SmiUntag(scratch, object);
  and_(scratch, mask, scratch);
  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2 + 1));
  add(scratch, number_string_cache, scratch);

  // Check if the entry is the smi we are looking for.
  LoadP(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  cmp(object, probe);
  bne(not_found);

  // Get the result from the cache.
  bind(&load_result_from_cache);
  LoadP(result,
        FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
                   scratch1, scratch2);
}
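// The cache index mirrors Heap::GetNumberStringCache: smis hash to their
// untagged value, doubles to the XOR of their two 32-bit halves, and both are
// reduced by a mask derived from the power-of-two cache size. A sketch of the
// indexing scheme (illustrative only, not the engine's authoritative code):
#if 0
#include <cstring>
static inline uint32_t NumberStringCacheHash(double value, uint32_t mask) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));  // avoid strict-aliasing issues
  uint32_t hash =
      static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  // Entry i occupies slots 2*i (number) and 2*i + 1 (string).
  return hash & mask;
}
#endif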
void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential one-byte strings.
  // Assume that they are non-smis.
  LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
                                                 scratch2, failure);
}


void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
                                                           Register second,
                                                           Register scratch1,
                                                           Register scratch2,
                                                           Label* failure) {
  // Check that neither is a smi.
  and_(scratch1, first, second);
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
                                               scratch2, failure);
}


void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
                                                     Label* not_unique_name) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  beq(&succeed, cr0);
  cmpi(reg, Operand(SYMBOL_TYPE));
  bne(not_unique_name);

  bind(&succeed);
}


// Allocates a heap number or jumps to the need_gc label if the young space
// is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
                                        Register scratch2,
                                        Register heap_number_map,
                                        Label* gc_required,
                                        TaggingMode tagging_mode,
                                        MutableMode mode) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);

  Heap::RootListIndex map_index = mode == MUTABLE
                                      ? Heap::kMutableHeapNumberMapRootIndex
                                      : Heap::kHeapNumberMapRootIndex;
  AssertIsRoot(heap_number_map, map_index);

  // Store heap number map in the allocated object.
  if (tagging_mode == TAG_RESULT) {
    StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
           r0);
  } else {
    StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
  }
}


void MacroAssembler::AllocateHeapNumberWithValue(
    Register result, DoubleRegister value, Register scratch1, Register scratch2,
    Register heap_number_map, Label* gc_required) {
  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
  stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset));
}


// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst, Register src, RegList temps,
                                int field_count) {
  // At least one bit set in the first 15 registers.
  DCHECK((temps & ((1 << 15) - 1)) != 0);
  DCHECK((temps & dst.bit()) == 0);
  DCHECK((temps & src.bit()) == 0);
  // Primitive implementation using only one temporary register.

  Register tmp = no_reg;
  // Find a temp register in temps list.
  for (int i = 0; i < 15; i++) {
    if ((temps & (1 << i)) != 0) {
      tmp.set_code(i);
      break;
    }
  }
  DCHECK(!tmp.is(no_reg));

  for (int i = 0; i < field_count; i++) {
    LoadP(tmp, FieldMemOperand(src, i * kPointerSize), r0);
    StoreP(tmp, FieldMemOperand(dst, i * kPointerSize), r0);
  }
}


void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
                               Register scratch) {
  Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done;

  DCHECK(!scratch.is(r0));

  cmpi(length, Operand::Zero());
  beq(&done);

  // Check src alignment and length to see whether word_loop is possible
  andi(scratch, src, Operand(kPointerSize - 1));
  beq(&aligned, cr0);
  subfic(scratch, scratch, Operand(kPointerSize * 2));
  cmp(length, scratch);
  blt(&byte_loop);

  // Align src before copying in word size chunks.
  subi(scratch, scratch, Operand(kPointerSize));
  mtctr(scratch);
  bind(&align_loop);
  lbz(scratch, MemOperand(src));
  addi(src, src, Operand(1));
  subi(length, length, Operand(1));
  stb(scratch, MemOperand(dst));
  addi(dst, dst, Operand(1));
  bdnz(&align_loop);

  bind(&aligned);

  // Copy bytes in word size chunks.
  if (emit_debug_code()) {
    andi(r0, src, Operand(kPointerSize - 1));
    Assert(eq, kExpectingAlignmentForCopyBytes, cr0);
  }

  ShiftRightImm(scratch, length, Operand(kPointerSizeLog2));
  cmpi(scratch, Operand::Zero());
  beq(&byte_loop);

  mtctr(scratch);
  bind(&word_loop);
  LoadP(scratch, MemOperand(src));
  addi(src, src, Operand(kPointerSize));
  subi(length, length, Operand(kPointerSize));
  if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
    // currently false for PPC - but possible future opt
    StoreP(scratch, MemOperand(dst));
    addi(dst, dst, Operand(kPointerSize));
  } else {
#if V8_TARGET_LITTLE_ENDIAN
    stb(scratch, MemOperand(dst, 0));
    ShiftRightImm(scratch, scratch, Operand(8));
    stb(scratch, MemOperand(dst, 1));
    ShiftRightImm(scratch, scratch, Operand(8));
    stb(scratch, MemOperand(dst, 2));
    ShiftRightImm(scratch, scratch, Operand(8));
    stb(scratch, MemOperand(dst, 3));
#if V8_TARGET_ARCH_PPC64
    ShiftRightImm(scratch, scratch, Operand(8));
    stb(scratch, MemOperand(dst, 4));
    ShiftRightImm(scratch, scratch, Operand(8));
    stb(scratch, MemOperand(dst, 5));
    ShiftRightImm(scratch, scratch, Operand(8));
    stb(scratch, MemOperand(dst, 6));
    ShiftRightImm(scratch, scratch, Operand(8));
    stb(scratch, MemOperand(dst, 7));
#endif
#else
#if V8_TARGET_ARCH_PPC64
    stb(scratch, MemOperand(dst, 7));
    ShiftRightImm(scratch, scratch, Operand(8));
    stb(scratch, MemOperand(dst, 6));
    ShiftRightImm(scratch, scratch, Operand(8));
    stb(scratch, MemOperand(dst, 5));
    ShiftRightImm(scratch, scratch, Operand(8));
    stb(scratch, MemOperand(dst, 4));
    ShiftRightImm(scratch, scratch, Operand(8));
#endif
    stb(scratch, MemOperand(dst, 3));
    ShiftRightImm(scratch, scratch, Operand(8));
    stb(scratch, MemOperand(dst, 2));
    ShiftRightImm(scratch, scratch, Operand(8));
    stb(scratch, MemOperand(dst, 1));
    ShiftRightImm(scratch, scratch, Operand(8));
    stb(scratch, MemOperand(dst, 0));
#endif
    addi(dst, dst, Operand(kPointerSize));
  }
  bdnz(&word_loop);

  // Copy the last bytes if any left.
  cmpi(length, Operand::Zero());
  beq(&done);

  bind(&byte_loop);
  mtctr(length);
  bind(&byte_loop_1);
  lbz(scratch, MemOperand(src));
  addi(src, src, Operand(1));
  stb(scratch, MemOperand(dst));
  addi(dst, dst, Operand(1));
  bdnz(&byte_loop_1);

  bind(&done);
}
void MacroAssembler::InitializeNFieldsWithFiller(Register start_offset,
                                                 Register count,
                                                 Register filler) {
  Label loop;
  mtctr(count);
  bind(&loop);
  StoreP(filler, MemOperand(start_offset));
  addi(start_offset, start_offset, Operand(kPointerSize));
  bdnz(&loop);
}


void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label done;
  sub(r0, end_offset, start_offset, LeaveOE, SetRC);
  beq(&done, cr0);
  ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
  InitializeNFieldsWithFiller(start_offset, r0, filler);
  bind(&done);
}


void MacroAssembler::SaveFPRegs(Register location, int first, int count) {
  int cur = first;
  subi(location, location, Operand(count * kDoubleSize));
  for (int i = 0; i < count; i++) {
    DoubleRegister reg = DoubleRegister::from_code(cur++);
    stfd(reg, MemOperand(location, i * kDoubleSize));
  }
}


void MacroAssembler::RestoreFPRegs(Register location, int first, int count) {
  int cur = first + count - 1;
  for (int i = count - 1; i >= 0; i--) {
    DoubleRegister reg = DoubleRegister::from_code(cur--);
    lfd(reg, MemOperand(location, i * kDoubleSize));
  }
  addi(location, location, Operand(count * kDoubleSize));
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  andi(scratch1, first, Operand(kFlatOneByteStringMask));
  andi(scratch2, second, Operand(kFlatOneByteStringMask));
  cmpi(scratch1, Operand(kFlatOneByteStringTag));
  bne(failure);
  cmpi(scratch2, Operand(kFlatOneByteStringTag));
  bne(failure);
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
                                                              Register scratch,
                                                              Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  andi(scratch, type, Operand(kFlatOneByteStringMask));
  cmpi(scratch, Operand(kFlatOneByteStringTag));
  bne(failure);
}


static const int kRegisterPassedArguments = 8;


int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  if (num_double_arguments > DoubleRegister::kNumRegisters) {
    stack_passed_words +=
        2 * (num_double_arguments - DoubleRegister::kNumRegisters);
  }
  // Up to 8 simple arguments are passed in registers r3..r10.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  return stack_passed_words;
}
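// A worked instance of the rule above (illustrative numbers only): with
// kRegisterPassedArguments == 8, a call with 10 integer arguments spills
// 10 - 8 = 2 words, and every double argument beyond
// DoubleRegister::kNumRegisters costs two words. Host-side sketch:
#if 0
static inline int StackWords(int reg_args, int dbl_args, int dbl_regs) {
  int words = 0;
  if (dbl_args > dbl_regs) words += 2 * (dbl_args - dbl_regs);
  if (reg_args > 8) words += reg_args - 8;  // r3..r10 carry the first eight
  return words;  // e.g. StackWords(10, 0, 32) == 2
}
#endif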
void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
                                               Register value,
                                               uint32_t encoding_mask) {
  TestIfSmi(string, r0);
  Check(ne, kNonObject, cr0);

  LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
  lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));

  andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
  cmpi(ip, Operand(encoding_mask));
  Check(eq, kUnexpectedStringType);

  // The index is assumed to be untagged coming in, tag it to compare with the
  // string length without using a temp register, it is restored at the end of
  // this function.
#if !V8_TARGET_ARCH_PPC64
  Label index_tag_ok, index_tag_bad;
  JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
#endif
  SmiTag(index, index);
#if !V8_TARGET_ARCH_PPC64
  b(&index_tag_ok);
  bind(&index_tag_bad);
  Abort(kIndexIsTooLarge);
  bind(&index_tag_ok);
#endif

  LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
  cmp(index, ip);
  Check(lt, kIndexIsTooLarge);

  DCHECK(Smi::FromInt(0) == 0);
  cmpi(index, Operand::Zero());
  Check(ge, kIndexIsNegative);

  SmiUntag(index, index);
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();
  int stack_passed_arguments =
      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
  int stack_space = kNumRequiredStackFrameSlots;

  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for stack arguments
    // -- preserving original value of sp.
    mr(scratch, sp);
    addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
    StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    // Make room for stack arguments
    stack_space += stack_passed_arguments;
  }

  // Allocate frame with required slots to make ABI work.
  li(r0, Operand::Zero());
  StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}


void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }


void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }


void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
                                          DoubleRegister src2) {
  if (src2.is(d1)) {
    DCHECK(!src1.is(d2));
    Move(d2, src2);
    Move(d1, src1);
  } else {
    Move(d1, src1);
    Move(d2, src2);
  }
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  mov(ip, Operand(function));
  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunction(Register function, int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK(has_frame());
  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.
#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
  // AIX uses a function descriptor. When calling C code be aware
  // of this descriptor and pick up values from it
  LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
  LoadP(ip, MemOperand(function, 0));
  Register dest = ip;
#elif ABI_TOC_ADDRESSABILITY_VIA_IP
  Move(ip, function);
  Register dest = ip;
#else
  Register dest = function;
#endif

  Call(dest);

  // Remove frame bought in PrepareCallCFunction
  int stack_passed_arguments =
      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
  int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
  if (ActivationFrameAlignment() > kPointerSize) {
    LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
  } else {
    addi(sp, sp, Operand(stack_space * kPointerSize));
  }
}


void MacroAssembler::FlushICache(Register address, size_t size,
                                 Register scratch) {
  if (CpuFeatures::IsSupported(INSTR_AND_DATA_CACHE_COHERENCY)) {
    sync();
    icbi(r0, address);
    isync();
    return;
  }

  Label done;

  dcbf(r0, address);
  sync();
  icbi(r0, address);
  isync();

  // This code handles ranges which cross a single cacheline boundary.
  // scratch is last cacheline which intersects range.
  const int kCacheLineSizeLog2 = WhichPowerOf2(CpuFeatures::cache_line_size());

  DCHECK(size > 0 && size <= (size_t)(1 << kCacheLineSizeLog2));
  addi(scratch, address, Operand(size - 1));
  ClearRightImm(scratch, scratch, Operand(kCacheLineSizeLog2));
  cmpl(scratch, address);
  ble(&done);

  dcbf(r0, scratch);
  sync();
  icbi(r0, scratch);
  isync();

  bind(&done);
}
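// The fallback path implements the classic PowerPC self-modifying-code
// sequence: flush the data cache line (dcbf), order the store (sync),
// invalidate the matching instruction cache line (icbi), and resynchronize
// the front end (isync). The same idiom in inline assembly (a sketch,
// assuming a GCC/Clang toolchain; not used by the assembler itself):
#if 0
static inline void FlushOneLine(void* p) {
  __asm__ __volatile__(
      "dcbf 0, %0\n\t"  // write the modified line back to memory
      "sync\n\t"        // make the store visible before invalidation
      "icbi 0, %0\n\t"  // drop any stale copy from the instruction cache
      "isync"           // discard already-prefetched instructions
      : : "r"(p) : "memory");
}
#endif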
void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
                                       Register new_value) {
  lwz(scratch, MemOperand(location));

#if V8_OOL_CONSTANT_POOL
  if (emit_debug_code()) {
    // Check that the instruction sequence is a load from the constant pool
#if V8_TARGET_ARCH_PPC64
    And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
    Cmpi(scratch, Operand(ADDI), r0);
    Check(eq, kTheInstructionShouldBeALi);
    lwz(scratch, MemOperand(location, kInstrSize));
#endif
    ExtractBitMask(scratch, scratch, 0x1f * B16);
    cmpi(scratch, Operand(kConstantPoolRegister.code()));
    Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
    // Scratch was clobbered. Restore it.
    lwz(scratch, MemOperand(location));
  }
  // Get the address of the constant and patch it.
  andi(scratch, scratch, Operand(kImm16Mask));
  StorePX(new_value, MemOperand(kConstantPoolRegister, scratch));
#else
  // This code assumes a FIXED_SEQUENCE for lis/ori

  // At this point scratch is a lis instruction.
  if (emit_debug_code()) {
    And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
    Cmpi(scratch, Operand(ADDIS), r0);
    Check(eq, kTheInstructionToPatchShouldBeALis);
    lwz(scratch, MemOperand(location));
  }

  // insert new high word into lis instruction
#if V8_TARGET_ARCH_PPC64
  srdi(ip, new_value, Operand(32));
  rlwimi(scratch, ip, 16, 16, 31);
#else
  rlwimi(scratch, new_value, 16, 16, 31);
#endif

  stw(scratch, MemOperand(location));

  lwz(scratch, MemOperand(location, kInstrSize));
  // scratch is now ori.
  if (emit_debug_code()) {
    And(scratch, scratch, Operand(kOpcodeMask));
    Cmpi(scratch, Operand(ORI), r0);
    Check(eq, kTheInstructionShouldBeAnOri);
    lwz(scratch, MemOperand(location, kInstrSize));
  }

  // insert new low word into ori instruction
#if V8_TARGET_ARCH_PPC64
  rlwimi(scratch, ip, 0, 16, 31);
#else
  rlwimi(scratch, new_value, 0, 16, 31);
#endif
  stw(scratch, MemOperand(location, kInstrSize));

#if V8_TARGET_ARCH_PPC64
  if (emit_debug_code()) {
    lwz(scratch, MemOperand(location, 2 * kInstrSize));
    // scratch is now sldi.
    And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
    Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
    Check(eq, kTheInstructionShouldBeASldi);
  }

  lwz(scratch, MemOperand(location, 3 * kInstrSize));
  // scratch is now ori.
  if (emit_debug_code()) {
    And(scratch, scratch, Operand(kOpcodeMask));
    Cmpi(scratch, Operand(ORIS), r0);
    Check(eq, kTheInstructionShouldBeAnOris);
    lwz(scratch, MemOperand(location, 3 * kInstrSize));
  }

  rlwimi(scratch, new_value, 16, 16, 31);
  stw(scratch, MemOperand(location, 3 * kInstrSize));

  lwz(scratch, MemOperand(location, 4 * kInstrSize));
  // scratch is now ori.
  if (emit_debug_code()) {
    And(scratch, scratch, Operand(kOpcodeMask));
    Cmpi(scratch, Operand(ORI), r0);
    Check(eq, kTheInstructionShouldBeAnOri);
    lwz(scratch, MemOperand(location, 4 * kInstrSize));
  }
  rlwimi(scratch, new_value, 0, 16, 31);
  stw(scratch, MemOperand(location, 4 * kInstrSize));
#endif

  // Update the I-cache so the new lis and addic can be executed.
#if V8_TARGET_ARCH_PPC64
  FlushICache(location, 5 * kInstrSize, scratch);
#else
  FlushICache(location, 2 * kInstrSize, scratch);
#endif
#endif
}
void MacroAssembler::GetRelocatedValue(Register location, Register result,
                                       Register scratch) {
  lwz(result, MemOperand(location));

#if V8_OOL_CONSTANT_POOL
  if (emit_debug_code()) {
    // Check that the instruction sequence is a load from the constant pool
#if V8_TARGET_ARCH_PPC64
    And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
    Cmpi(result, Operand(ADDI), r0);
    Check(eq, kTheInstructionShouldBeALi);
    lwz(result, MemOperand(location, kInstrSize));
#endif
    ExtractBitMask(result, result, 0x1f * B16);
    cmpi(result, Operand(kConstantPoolRegister.code()));
    Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
    lwz(result, MemOperand(location));
  }
  // Get the address of the constant and retrieve it.
  andi(result, result, Operand(kImm16Mask));
  LoadPX(result, MemOperand(kConstantPoolRegister, result));
#else
  // This code assumes a FIXED_SEQUENCE for lis/ori
  if (emit_debug_code()) {
    And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
    Cmpi(result, Operand(ADDIS), r0);
    Check(eq, kTheInstructionShouldBeALis);
    lwz(result, MemOperand(location));
  }

  // result now holds a lis instruction. Extract the immediate.
  slwi(result, result, Operand(16));

  lwz(scratch, MemOperand(location, kInstrSize));
  if (emit_debug_code()) {
    And(scratch, scratch, Operand(kOpcodeMask));
    Cmpi(scratch, Operand(ORI), r0);
    Check(eq, kTheInstructionShouldBeAnOri);
    lwz(scratch, MemOperand(location, kInstrSize));
  }
  // Copy the low 16bits from ori instruction into result
  rlwimi(result, scratch, 0, 16, 31);

#if V8_TARGET_ARCH_PPC64
  if (emit_debug_code()) {
    lwz(scratch, MemOperand(location, 2 * kInstrSize));
    // scratch is now sldi.
    And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
    Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
    Check(eq, kTheInstructionShouldBeASldi);
  }

  lwz(scratch, MemOperand(location, 3 * kInstrSize));
  // scratch is now ori.
  if (emit_debug_code()) {
    And(scratch, scratch, Operand(kOpcodeMask));
    Cmpi(scratch, Operand(ORIS), r0);
    Check(eq, kTheInstructionShouldBeAnOris);
    lwz(scratch, MemOperand(location, 3 * kInstrSize));
  }
  sldi(result, result, Operand(16));
  rldimi(result, scratch, 0, 48);

  lwz(scratch, MemOperand(location, 4 * kInstrSize));
  // scratch is now ori.
  if (emit_debug_code()) {
    And(scratch, scratch, Operand(kOpcodeMask));
    Cmpi(scratch, Operand(ORI), r0);
    Check(eq, kTheInstructionShouldBeAnOri);
    lwz(scratch, MemOperand(location, 4 * kInstrSize));
  }
  sldi(result, result, Operand(16));
  rldimi(result, scratch, 0, 48);
#endif
#endif
}
void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,  // scratch may be same register as object
    int mask, Condition cc, Label* condition_met) {
  DCHECK(cc == ne || cc == eq);
  ClearRightImm(scratch, object, Operand(kPageSizeBits));
  LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));

  And(r0, scratch, Operand(mask), SetRC);

  if (cc == ne) {
    bne(condition_met, cr0);
  }
  if (cc == eq) {
    beq(condition_met, cr0);
  }
}
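// Because pages are power-of-two sized and aligned, clearing the low
// kPageSizeBits bits of any interior pointer yields the page (MemoryChunk)
// header address, from which the flag word is loaded. Host-side sketch of the
// address arithmetic (illustrative only):
#if 0
static inline uintptr_t PageStart(uintptr_t addr, int page_size_bits) {
  // e.g. with 1 MB pages (20 bits), 0x12345678 -> 0x12300000
  return addr & ~((uintptr_t{1} << page_size_bits) - 1);
}
#endif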
void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
                                 Register scratch1, Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
}


void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
                              Register mask_scratch, Label* has_color,
                              int first_bit, int second_bit) {
  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  // Test the first bit
  and_(r0, ip, mask_scratch, SetRC);
  b(first_bit == 1 ? eq : ne, &other_color, cr0);

  // May need to load the next cell
  slwi(mask_scratch, mask_scratch, Operand(1), SetRC);
  beq(&word_boundary, cr0);
  // Test the second bit
  and_(r0, ip, mask_scratch, SetRC);
  b(second_bit == 1 ? ne : eq, has_color, cr0);
  b(&other_color);

  bind(&word_boundary);
  lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
  andi(r0, ip, Operand(1));
  b(second_bit == 1 ? ne : eq, has_color, cr0);
  bind(&other_color);
}


// Detect some, but not all, common pointer-free objects. This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(Register value, Register scratch,
                                      Label* not_data_object) {
  Label is_data_object;
  LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  beq(&is_data_object);
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  STATIC_ASSERT((kIsIndirectStringMask | kIsNotStringMask) == 0x81);
  andi(scratch, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
  bne(not_data_object, cr0);
  bind(&is_data_object);
}


void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
                                 Register mask_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
  DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
  lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
  and_(bitmap_reg, addr_reg, r0);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
  ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
  ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
  add(bitmap_reg, bitmap_reg, ip);
  li(ip, Operand(1));
  slw(mask_reg, ip, mask_reg);
}
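// The decomposition above: the page-aligned base selects the bitmap, bits
// [kPointerSizeLog2, kLowBits) of the address select a bit within a bitmap
// cell, and the bits above those select the cell. Host-side sketch of the
// mask computation (illustrative only; constants mirror the ones used above):
#if 0
static inline uint32_t MarkBitMask(uintptr_t addr, int pointer_size_log2,
                                   int bits_per_cell_log2) {
  uint32_t bit_in_cell =
      (addr >> pointer_size_log2) & ((1u << bits_per_cell_log2) - 1);
  return 1u << bit_in_cell;  // the mask_reg value produced by li + slw
}
#endif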
void MacroAssembler::EnsureNotWhite(Register value, Register bitmap_scratch,
                                    Register mask_scratch,
                                    Register load_scratch,
                                    Label* value_is_white_and_not_data) {
  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  and_(r0, mask_scratch, load_scratch, SetRC);
  bne(&done, cr0);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    // LSL may overflow, making the check conservative.
    slwi(r0, mask_scratch, Operand(1));
    and_(r0, load_scratch, r0, SetRC);
    beq(&ok, cr0);
    stop("Impossible marking bit pattern");
    bind(&ok);
  }

  // Value is white. We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = load_scratch;     // Holds map while checking type.
  Register length = load_scratch;  // Holds length of object after testing type.
  Label is_data_object, maybe_string_object, is_string_object, is_encoded;
#if V8_TARGET_ARCH_PPC64
  Label length_computed;
#endif

  // Check for heap-number
  LoadP(map, FieldMemOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  bne(&maybe_string_object);
  li(length, Operand(HeapNumber::kSize));
  b(&is_data_object);
  bind(&maybe_string_object);

  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  lbz(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  andi(r0, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
  bne(value_is_white_and_not_data, cr0);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  andi(r0, instance_type, Operand(kExternalStringTag));
  beq(&is_string_object, cr0);
  li(length, Operand(ExternalString::kSize));
  b(&is_data_object);
  bind(&is_string_object);

  // Sequential string, either Latin1 or UC16.
  // For Latin1 (char-size of 1) we untag the smi to get the length.
  // For UC16 (char-size of 2):
  //   - (32-bit) we just leave the smi tag in place, thereby getting
  //              the length multiplied by 2.
  //   - (64-bit) we compute the offset in the 2-byte array
  DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  LoadP(ip, FieldMemOperand(value, String::kLengthOffset));
  andi(r0, instance_type, Operand(kStringEncodingMask));
  beq(&is_encoded, cr0);
  SmiUntag(ip);
#if V8_TARGET_ARCH_PPC64
  b(&length_computed);
#endif
  bind(&is_encoded);
#if V8_TARGET_ARCH_PPC64
  SmiToShortArrayOffset(ip, ip);
  bind(&length_computed);
#else
  DCHECK(kSmiShift == 1);
#endif
  addi(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
  li(r0, Operand(~kObjectAlignmentMask));
  and_(length, length, r0);

  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  orx(ip, ip, mask_scratch);
  stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  mov(ip, Operand(~Page::kPageAlignmentMask));
  and_(bitmap_scratch, bitmap_scratch, ip);
  lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  add(ip, ip, length);
  stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  bind(&done);
}
// Saturate a value into 8-bit unsigned integer
//   if input_value < 0, output_value is 0
//   if input_value > 255, output_value is 255
//   otherwise output_value is the input_value
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  int satval = (1 << 8) - 1;

  if (CpuFeatures::IsSupported(ISELECT)) {
    // set to 0 if negative
    cmpi(input_reg, Operand::Zero());
    isel(lt, output_reg, r0, input_reg);

    // set to satval if > satval
    li(r0, Operand(satval));
    cmpi(output_reg, Operand(satval));
    isel(lt, output_reg, output_reg, r0);
  } else {
    Label done, negative_label, overflow_label;
    cmpi(input_reg, Operand::Zero());
    blt(&negative_label);

    cmpi(input_reg, Operand(satval));
    bgt(&overflow_label);
    if (!output_reg.is(input_reg)) {
      mr(output_reg, input_reg);
    }
    b(&done);

    bind(&negative_label);
    li(output_reg, Operand::Zero());  // set to 0 if negative
    b(&done);

    bind(&overflow_label);  // set to satval if > satval
    li(output_reg, Operand(satval));

    bind(&done);
  }
}
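// Equivalent host-side clamp, for reference (illustrative only). Note the
// isel path above is branch-free: isel with rA == r0 reads the constant zero
// rather than the register, which supplies the 0 for the negative case.
#if 0
static inline int32_t ClampUint8(int32_t v) {
  if (v < 0) return 0;      // negative_label
  if (v > 255) return 255;  // overflow_label
  return v;
}
#endif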
3796 void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
3799 void MacroAssembler::ResetRoundingMode() {
3800 mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
3804 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3805 DoubleRegister input_reg,
3806 DoubleRegister double_scratch) {
3811 LoadDoubleLiteral(double_scratch, 0.0, result_reg);
3812 fcmpu(input_reg, double_scratch);
3815 // Double value is less than zero, NaN or Inf, return 0.
3816 LoadIntLiteral(result_reg, 0);
3819 // Double value is >= 255, return 255.
3821 LoadDoubleLiteral(double_scratch, 255.0, result_reg);
3822 fcmpu(input_reg, double_scratch);
3824 LoadIntLiteral(result_reg, 255);
3827 // In 0-255 range, round and truncate.
3830 // round to nearest (default rounding mode)
3831 fctiw(double_scratch, input_reg);
3832 MovDoubleLowToInt(result_reg, double_scratch);
3837 void MacroAssembler::LoadInstanceDescriptors(Register map,
3838 Register descriptors) {
3839 LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3843 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3844 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3845 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3849 void MacroAssembler::EnumLength(Register dst, Register map) {
3850 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3851 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3852 ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask);
void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  LoadP(dst,
        FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  const int getterOffset = AccessorPair::kGetterOffset;
  const int setterOffset = AccessorPair::kSetterOffset;
  int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
  LoadP(dst, FieldMemOperand(dst, offset));
}

void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Register empty_fixed_array_value = r9;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
  mr(r5, r3);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));

  EnumLength(r6, r4);
  CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
  beq(call_runtime);

  b(&start);

  bind(&next);
  LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(r6, r4);
  CmpSmiLiteral(r6, Smi::FromInt(0), r0);
  bne(call_runtime);

  bind(&start);

  // Check that there are no elements. Register r5 contains the current JS
  // object we've reached through the prototype chain.
  Label no_elements;
  LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset));
  cmp(r5, empty_fixed_array_value);
  beq(&no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex);
  bne(call_runtime);

  bind(&no_elements);
  LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
  cmp(r5, null_value);
  bne(&next);
}

////////////////////////////////////////////////////////////////////////////////
//
// New MacroAssembler Interfaces added for PPC
//
////////////////////////////////////////////////////////////////////////////////
void MacroAssembler::LoadIntLiteral(Register dst, int value) {
  mov(dst, Operand(value));
}

void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
  mov(dst, Operand(smi));
}

void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
                                       Register scratch) {
#if V8_OOL_CONSTANT_POOL
  // TODO(mbrandy): enable extended constant pool usage for doubles.
  //                See ARM commit e27ab337 for a reference.
  if (is_ool_constant_pool_available() && !is_constant_pool_full()) {
    RelocInfo rinfo(pc_, value);
    ConstantPoolAddEntry(rinfo);
#if V8_TARGET_ARCH_PPC64
    // We use 2 instruction sequence here for consistency with mov.
    li(scratch, Operand::Zero());
    lfdx(result, MemOperand(kConstantPoolRegister, scratch));
#else
    lfd(result, MemOperand(kConstantPoolRegister, 0));
#endif
    return;
  }
#endif

  // avoid gcc strict aliasing error using union cast
  union {
    double dval;
#if V8_TARGET_ARCH_PPC64
    intptr_t ival;
#else
    intptr_t ival[2];
#endif
  } litVal;

  litVal.dval = value;

#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mov(scratch, Operand(litVal.ival));
    mtfprd(result, scratch);
    return;
  }
#endif

  addi(sp, sp, Operand(-kDoubleSize));
#if V8_TARGET_ARCH_PPC64
  mov(scratch, Operand(litVal.ival));
  std(scratch, MemOperand(sp));
#else
  LoadIntLiteral(scratch, litVal.ival[0]);
  stw(scratch, MemOperand(sp, 0));
  LoadIntLiteral(scratch, litVal.ival[1]);
  stw(scratch, MemOperand(sp, 4));
#endif
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfd(result, MemOperand(sp, 0));
  addi(sp, sp, Operand(kDoubleSize));
}

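// Note: the union cast above reinterprets the double's bits as integer data
// without violating strict-aliasing rules; e.g. the literal 1.0 becomes the
// 64-bit pattern 0x3FF0000000000000, which is built in a GPR and then moved
// to the FPR either directly (FPR_GPR_MOV) or through a stack slot.
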
void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
                                    Register scratch) {
  // sign-extend src to 64-bit
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mtfprwa(dst, src);
    return;
  }
#endif

  DCHECK(!src.is(scratch));
  subi(sp, sp, Operand(kDoubleSize));
#if V8_TARGET_ARCH_PPC64
  extsw(scratch, src);
  std(scratch, MemOperand(sp, 0));
#else
  srawi(scratch, src, 31);
  stw(scratch, MemOperand(sp, Register::kExponentOffset));
  stw(src, MemOperand(sp, Register::kMantissaOffset));
#endif
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfd(dst, MemOperand(sp, 0));
  addi(sp, sp, Operand(kDoubleSize));
}

void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
                                            Register scratch) {
  // zero-extend src to 64-bit
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mtfprwz(dst, src);
    return;
  }
#endif

  DCHECK(!src.is(scratch));
  subi(sp, sp, Operand(kDoubleSize));
#if V8_TARGET_ARCH_PPC64
  clrldi(scratch, src, Operand(32));
  std(scratch, MemOperand(sp, 0));
#else
  li(scratch, Operand::Zero());
  stw(scratch, MemOperand(sp, Register::kExponentOffset));
  stw(src, MemOperand(sp, Register::kMantissaOffset));
#endif
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfd(dst, MemOperand(sp, 0));
  addi(sp, sp, Operand(kDoubleSize));
}

void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
#if !V8_TARGET_ARCH_PPC64
                                      Register src_hi,
#endif
                                      Register src) {
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mtfprd(dst, src);
    return;
  }
#endif

  subi(sp, sp, Operand(kDoubleSize));
#if V8_TARGET_ARCH_PPC64
  std(src, MemOperand(sp, 0));
#else
  stw(src_hi, MemOperand(sp, Register::kExponentOffset));
  stw(src, MemOperand(sp, Register::kMantissaOffset));
#endif
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfd(dst, MemOperand(sp, 0));
  addi(sp, sp, Operand(kDoubleSize));
}

#if V8_TARGET_ARCH_PPC64
void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
                                                Register src_hi,
                                                Register src_lo,
                                                Register scratch) {
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    sldi(scratch, src_hi, Operand(32));
    rldimi(scratch, src_lo, 0, 32);
    mtfprd(dst, scratch);
    return;
  }

  subi(sp, sp, Operand(kDoubleSize));
  stw(src_hi, MemOperand(sp, Register::kExponentOffset));
  stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfd(dst, MemOperand(sp));
  addi(sp, sp, Operand(kDoubleSize));
}
#endif

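// Note on the Mov* helpers above: Register::kExponentOffset and
// Register::kMantissaOffset name the high and low words of the stack slot in
// an endian-aware way (on big-endian PPC the exponent word is at offset 0 and
// the mantissa word at offset 4), so one sequence serves both byte orders.
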
void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mffprwz(dst, src);
    return;
  }
#endif

  subi(sp, sp, Operand(kDoubleSize));
  stfd(src, MemOperand(sp));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lwz(dst, MemOperand(sp, Register::kMantissaOffset));
  addi(sp, sp, Operand(kDoubleSize));
}

void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mffprd(dst, src);
    srdi(dst, dst, Operand(32));
    return;
  }
#endif

  subi(sp, sp, Operand(kDoubleSize));
  stfd(src, MemOperand(sp));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lwz(dst, MemOperand(sp, Register::kExponentOffset));
  addi(sp, sp, Operand(kDoubleSize));
}

void MacroAssembler::MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
    Register dst_hi,
#endif
    Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mffprd(dst, src);
    return;
  }
#endif

  subi(sp, sp, Operand(kDoubleSize));
  stfd(src, MemOperand(sp));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
#if V8_TARGET_ARCH_PPC64
  ld(dst, MemOperand(sp, 0));
#else
  lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
  lwz(dst, MemOperand(sp, Register::kMantissaOffset));
#endif
  addi(sp, sp, Operand(kDoubleSize));
}

void MacroAssembler::Add(Register dst, Register src, intptr_t value,
                         Register scratch) {
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
  } else {
    mov(scratch, Operand(value));
    add(dst, src, scratch);
  }
}

void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
                          CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_int16(value)) {
    cmpi(src1, src2, cr);
  } else {
    mov(scratch, src2);
    cmp(src1, scratch, cr);
  }
}

void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
                           CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_uint16(value)) {
    cmpli(src1, src2, cr);
  } else {
    mov(scratch, src2);
    cmpl(src1, scratch, cr);
  }
}

void MacroAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
                           CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_int16(value)) {
    cmpwi(src1, src2, cr);
  } else {
    mov(scratch, src2);
    cmpw(src1, scratch, cr);
  }
}

void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
                            Register scratch, CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_uint16(value)) {
    cmplwi(src1, src2, cr);
  } else {
    mov(scratch, src2);
    cmplw(src1, scratch, cr);
  }
}

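// The four compare helpers above share one pattern: an immediate that fits
// the 16-bit field of the d-form compare (signed range for Cmpi/Cmpwi,
// unsigned for Cmpli/Cmplwi) is encoded directly; anything larger is first
// materialized in the scratch register and compared register-to-register.
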
void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
                         RCBit rc) {
  if (rb.is_reg()) {
    and_(ra, rs, rb.rm(), rc);
  } else {
    if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) {
      andi(ra, rs, rb);
    } else {
      // mov handles the relocation.
      DCHECK(!rs.is(r0));
      mov(r0, rb);
      and_(ra, rs, r0, rc);
    }
  }
}

void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
  if (rb.is_reg()) {
    orx(ra, rs, rb.rm(), rc);
  } else {
    if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
      ori(ra, rs, rb);
    } else {
      // mov handles the relocation.
      DCHECK(!rs.is(r0));
      mov(r0, rb);
      orx(ra, rs, r0, rc);
    }
  }
}

void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
                         RCBit rc) {
  if (rb.is_reg()) {
    xor_(ra, rs, rb.rm(), rc);
  } else {
    if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
      xori(ra, rs, rb);
    } else {
      // mov handles the relocation.
      DCHECK(!rs.is(r0));
      mov(r0, rb);
      xor_(ra, rs, r0, rc);
    }
  }
}

void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
                                   CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  LoadSmiLiteral(scratch, smi);
  cmp(src1, scratch, cr);
#else
  Cmpi(src1, Operand(smi), scratch, cr);
#endif
}

void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
                                    CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  LoadSmiLiteral(scratch, smi);
  cmpl(src1, scratch, cr);
#else
  Cmpli(src1, Operand(smi), scratch, cr);
#endif
}

void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
                                   Register scratch) {
#if V8_TARGET_ARCH_PPC64
  LoadSmiLiteral(scratch, smi);
  add(dst, src, scratch);
#else
  Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
#endif
}

void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
                                   Register scratch) {
#if V8_TARGET_ARCH_PPC64
  LoadSmiLiteral(scratch, smi);
  sub(dst, src, scratch);
#else
  Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
#endif
}

void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
                                   Register scratch, RCBit rc) {
#if V8_TARGET_ARCH_PPC64
  LoadSmiLiteral(scratch, smi);
  and_(dst, src, scratch, rc);
#else
  And(dst, src, Operand(smi), rc);
#endif
}

// Load a "pointer" sized value from the memory location
void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
                           Register scratch) {
  int offset = mem.offset();

  if (!is_int16(offset)) {
    /* cannot use d-form */
    DCHECK(!scratch.is(no_reg));
    mov(scratch, Operand(offset));
#if V8_TARGET_ARCH_PPC64
    ldx(dst, MemOperand(mem.ra(), scratch));
#else
    lwzx(dst, MemOperand(mem.ra(), scratch));
#endif
  } else {
#if V8_TARGET_ARCH_PPC64
    int misaligned = (offset & 3);
    if (misaligned) {
      // adjust base to conform to offset alignment requirements
      // Todo: enhance to use scratch if dst is unsuitable
      DCHECK(!dst.is(r0));
      addi(dst, mem.ra(), Operand((offset & 3) - 4));
      ld(dst, MemOperand(dst, (offset & ~3) + 4));
    } else {
      ld(dst, mem);
    }
#else
    lwz(dst, mem);
#endif
  }
}

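// Worked example for the misaligned case: ld is a DS-form instruction whose
// 16-bit displacement must be a multiple of 4. For offset 7 the code above
// computes base' = base + (7 & 3) - 4 = base - 1 and loads with displacement
// (7 & ~3) + 4 = 8, which is 4-aligned and addresses the intended base + 7.
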
// Store a "pointer" sized value to the memory location
void MacroAssembler::StoreP(Register src, const MemOperand& mem,
                            Register scratch) {
  int offset = mem.offset();

  if (!is_int16(offset)) {
    /* cannot use d-form */
    DCHECK(!scratch.is(no_reg));
    mov(scratch, Operand(offset));
#if V8_TARGET_ARCH_PPC64
    stdx(src, MemOperand(mem.ra(), scratch));
#else
    stwx(src, MemOperand(mem.ra(), scratch));
#endif
  } else {
#if V8_TARGET_ARCH_PPC64
    int misaligned = (offset & 3);
    if (misaligned) {
      // adjust base to conform to offset alignment requirements
      // a suitable scratch is required here
      DCHECK(!scratch.is(no_reg));
      if (scratch.is(r0)) {
        LoadIntLiteral(scratch, offset);
        stdx(src, MemOperand(mem.ra(), scratch));
      } else {
        addi(scratch, mem.ra(), Operand((offset & 3) - 4));
        std(src, MemOperand(scratch, (offset & ~3) + 4));
      }
    } else {
      std(src, mem);
    }
#else
    stw(src, mem);
#endif
  }
}

void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
                                   Register scratch) {
  int offset = mem.offset();

  if (!is_int16(offset)) {
    DCHECK(!scratch.is(no_reg));
    mov(scratch, Operand(offset));
    lwax(dst, MemOperand(mem.ra(), scratch));
  } else {
#if V8_TARGET_ARCH_PPC64
    int misaligned = (offset & 3);
    if (misaligned) {
      // adjust base to conform to offset alignment requirements
      // Todo: enhance to use scratch if dst is unsuitable
      DCHECK(!dst.is(r0));
      addi(dst, mem.ra(), Operand((offset & 3) - 4));
      lwa(dst, MemOperand(dst, (offset & ~3) + 4));
    } else {
      lwa(dst, mem);
    }
#else
    lwz(dst, mem);
#endif
  }
}

// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
                              Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    lwzx(dst, MemOperand(base, scratch));
  } else {
    lwz(dst, mem);
  }
}

// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
                               Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    stwx(src, MemOperand(base, scratch));
  } else {
    stw(src, mem);
  }
}

void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
                                       Register scratch) {
  int offset = mem.offset();

  if (!is_int16(offset)) {
    DCHECK(!scratch.is(no_reg));
    mov(scratch, Operand(offset));
    lhax(dst, MemOperand(mem.ra(), scratch));
  } else {
    lha(dst, mem);
  }
}

// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
                                  Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    lhzx(dst, MemOperand(base, scratch));
  } else {
    lhz(dst, mem);
  }
}

// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
                                   Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    sthx(src, MemOperand(base, scratch));
  } else {
    sth(src, mem);
  }
}

// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
                              Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    lbzx(dst, MemOperand(base, scratch));
  } else {
    lbz(dst, mem);
  }
}

// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
                               Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    stbx(src, MemOperand(base, scratch));
  } else {
    stb(src, mem);
  }
}

void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
                                        Representation r, Register scratch) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    LoadByte(dst, mem, scratch);
    extsb(dst, dst);
  } else if (r.IsUInteger8()) {
    LoadByte(dst, mem, scratch);
  } else if (r.IsInteger16()) {
    LoadHalfWordArith(dst, mem, scratch);
  } else if (r.IsUInteger16()) {
    LoadHalfWord(dst, mem, scratch);
#if V8_TARGET_ARCH_PPC64
  } else if (r.IsInteger32()) {
    LoadWordArith(dst, mem, scratch);
#endif
  } else {
    LoadP(dst, mem, scratch);
  }
}

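// Note: the Arith variants sign-extend (extsb for Integer8, lha/lwa behind
// LoadHalfWordArith/LoadWordArith), while the unsigned representations use
// zero-extending loads, so dst always holds a correctly extended value.
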
void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
                                         Representation r, Register scratch) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    StoreByte(src, mem, scratch);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    StoreHalfWord(src, mem, scratch);
#if V8_TARGET_ARCH_PPC64
  } else if (r.IsInteger32()) {
    StoreWord(src, mem, scratch);
#endif
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    StoreP(src, mem, scratch);
  }
}

void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
                                Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    lfdx(dst, MemOperand(base, scratch));
  } else {
    lfd(dst, mem);
  }
}

void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
                                 Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    stfdx(src, MemOperand(base, scratch));
  } else {
    stfd(src, mem);
  }
}

void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
                                                     Register scratch_reg,
                                                     Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  addi(scratch_reg, receiver_reg,
       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  Cmpi(scratch_reg, Operand(new_space_start), r0);
  blt(no_memento_found);
  mov(ip, Operand(new_space_allocation_top));
  LoadP(ip, MemOperand(ip));
  cmp(scratch_reg, ip);
  bgt(no_memento_found);
  LoadP(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
  Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
       r0);
}

Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
                                   Register reg4, Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}

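// Illustrative usage, assuming the remaining parameters default to no_reg as
// declared in the header: GetRegisterThatIsNotOneOf(r3, r4) walks the
// allocatable set and returns the first register whose bit is not in the
// exclusion mask, i.e. some register distinct from r3 and r4 that the caller
// can then use as a temporary.
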
void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
                                                      Register scratch0,
                                                      Register scratch1,
                                                      Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // scratch contained elements pointer.
  mr(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
  lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
  beq(found);
  LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
  Cmpi(current, Operand(factory->null_value()), r0);
  bne(&loop_again);
}

#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                Register reg5, Register reg6, Register reg7, Register reg8) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
                        reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
                        reg7.is_valid() + reg8.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif

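// Worked example, assuming the trailing parameters default to no_reg:
// AreAliased(r3, r4, r3) sees three valid registers but only two distinct
// bits in the combined mask, so NumRegs(regs) == 2 != 3 and the call
// reports aliasing.
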
CodePatcher::CodePatcher(byte* address, int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(NULL, address, size_ + Assembler::kGap),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    CpuFeatures::FlushICache(address_, size_);
  }

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

void CodePatcher::Emit(Instr instr) { masm()->emit(instr); }

void CodePatcher::EmitCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  switch (cond) {
    case eq:
      instr = (instr & ~kCondMask) | BT;
      break;
    case ne:
      instr = (instr & ~kCondMask) | BF;
      break;
    default:
      UNIMPLEMENTED();
  }
  masm_.emit(instr);
}

void MacroAssembler::TruncatingDiv(Register result, Register dividend,
                                   int32_t divisor) {
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(r0));
  DCHECK(!result.is(r0));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  mov(r0, Operand(mag.multiplier));
  mulhw(result, dividend, r0);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) {
    add(result, result, dividend);
  }
  if (divisor < 0 && !neg && mag.multiplier > 0) {
    sub(result, result, dividend);
  }
  if (mag.shift > 0) srawi(result, result, mag.shift);
  ExtractBit(r0, dividend, 31);
  add(result, result, r0);
}

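// Worked example: for divisor == 3, SignedDivisionByConstant yields
// multiplier 0x55555556 and shift 0. mulhw then produces the high word of
// dividend * 0x55555556: for dividend == 7 that is 2, and for dividend == -7
// it is -3; adding the extracted sign bit gives -2, matching C-style
// truncating division (7 / 3 == 2 and -7 / 3 == -2).
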
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC