1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
6 #define V8_PPC_MACRO_ASSEMBLER_PPC_H_
8 #include "src/assembler.h"
9 #include "src/bailout-reason.h"
10 #include "src/frames.h"
11 #include "src/globals.h"
16 // ----------------------------------------------------------------------------
17 // Static helper functions
19 // Generate a MemOperand for loading a field from an object.
20 inline MemOperand FieldMemOperand(Register object, int offset) {
21 return MemOperand(object, offset - kHeapObjectTag);
// Flags used for AllocateHeapNumber
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag the result.
  DONT_TAG_RESULT
};

// Whether a write barrier should update the remembered set.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether a write barrier should perform an inline smi check first.
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
// Whether the stored value may itself contain interesting pointers.
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};

// Tracks whether the link register has already been preserved by the caller.
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
43 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
44 Register reg3 = no_reg,
45 Register reg4 = no_reg,
46 Register reg5 = no_reg,
47 Register reg6 = no_reg);
51 bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
52 Register reg4 = no_reg, Register reg5 = no_reg,
53 Register reg6 = no_reg, Register reg7 = no_reg,
54 Register reg8 = no_reg);
// These exist to provide portability between 32 and 64bit: each macro maps
// onto the doubleword instruction on PPC64 and the word instruction on PPC32.
#if V8_TARGET_ARCH_PPC64
#define LoadPU ldu
#define LoadPX ldx
#define LoadPUX ldux
#define StorePU stdu
#define StorePX stdx
#define StorePUX stdux
#define ShiftLeftImm sldi
#define ShiftRightImm srdi
#define ClearLeftImm clrldi
#define ClearRightImm clrrdi
#define ShiftRightArithImm sradi
#define ShiftLeft_ sld
#define ShiftRight_ srd
#define ShiftRightArith srad
#else
#define LoadPU lwzu
#define LoadPX lwzx
#define LoadPUX lwzux
#define StorePU stwu
#define StorePX stwx
#define StorePUX stwux
#define ShiftLeftImm slwi
#define ShiftRightImm srwi
#define ClearLeftImm clrlwi
#define ClearRightImm clrrwi
#define ShiftRightArithImm srawi
#define ShiftLeft_ slw
#define ShiftRight_ srw
#define ShiftRightArith sraw
#endif
95 // MacroAssembler implements a collection of frequently used macros.
96 class MacroAssembler : public Assembler {
98 // The isolate parameter can be NULL if the macro assembler should
99 // not use isolate-dependent functionality. In this case, it's the
100 // responsibility of the caller to never invoke such function on the
102 MacroAssembler(Isolate* isolate, void* buffer, int size);
105 // Returns the size of a call in instructions.
106 static int CallSize(Register target);
107 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
108 static int CallSizeNotPredictableCodeSize(Address target,
109 RelocInfo::Mode rmode,
110 Condition cond = al);
112 // Jump, Call, and Ret pseudo instructions implementing inter-working.
113 void Jump(Register target);
114 void JumpToJSEntry(Register target);
115 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
117 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
118 void Call(Register target);
119 void CallJSEntry(Register target);
120 void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
121 int CallSize(Handle<Code> code,
122 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
123 TypeFeedbackId ast_id = TypeFeedbackId::None(),
124 Condition cond = al);
125 void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
126 TypeFeedbackId ast_id = TypeFeedbackId::None(),
127 Condition cond = al);
128 void Ret(Condition cond = al);
130 // Emit code to discard a non-negative number of pointer-sized elements
131 // from the stack, clobbering only the sp register.
132 void Drop(int count, Condition cond = al);
134 void Ret(int drop, Condition cond = al);
136 void Call(Label* target);
138 // Emit call to the code we are currently generating.
140 Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
141 Call(self, RelocInfo::CODE_TARGET);
144 // Register move. May do nothing if the registers are identical.
145 void Move(Register dst, Handle<Object> value);
146 void Move(Register dst, Register src, Condition cond = al);
147 void Move(DoubleRegister dst, DoubleRegister src);
149 void MultiPush(RegList regs);
150 void MultiPop(RegList regs);
152 // Load an object from the root table.
153 void LoadRoot(Register destination, Heap::RootListIndex index,
154 Condition cond = al);
155 // Store an object to the root table.
156 void StoreRoot(Register source, Heap::RootListIndex index,
157 Condition cond = al);
159 // ---------------------------------------------------------------------------
162 void IncrementalMarkingRecordWriteHelper(Register object, Register value,
165 enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
167 // Record in the remembered set the fact that we have a pointer to new space
168 // at the address pointed to by the addr register. Only works if addr is not
170 void RememberedSetHelper(Register object, // Used for debug code.
171 Register addr, Register scratch,
172 SaveFPRegsMode save_fp,
173 RememberedSetFinalAction and_then);
175 void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
176 Label* condition_met);
178 // Check if object is in new space. Jumps if the object is not in new space.
179 // The register scratch can be object itself, but scratch will be clobbered.
180 void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
181 InNewSpace(object, scratch, ne, branch);
184 // Check if object is in new space. Jumps if the object is in new space.
185 // The register scratch can be object itself, but it will be clobbered.
186 void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
187 InNewSpace(object, scratch, eq, branch);
190 // Check if an object has a given incremental marking color.
191 void HasColor(Register object, Register scratch0, Register scratch1,
192 Label* has_color, int first_bit, int second_bit);
194 void JumpIfBlack(Register object, Register scratch0, Register scratch1,
197 // Checks the color of an object. If the object is already grey or black
198 // then we just fall through, since it is already live. If it is white and
199 // we can determine that it doesn't need to be scanned, then we just mark it
200 // black and fall through. For the rest we jump to the label so the
201 // incremental marker can fix its assumptions.
202 void EnsureNotWhite(Register object, Register scratch1, Register scratch2,
203 Register scratch3, Label* object_is_white_and_not_data);
// Detects conservatively whether an object is data-only, i.e. it does not
// need to be scanned by the garbage collector.
207 void JumpIfDataObject(Register value, Register scratch,
208 Label* not_data_object);
210 // Notify the garbage collector that we wrote a pointer into an object.
211 // |object| is the object being stored into, |value| is the object being
212 // stored. value and scratch registers are clobbered by the operation.
213 // The offset is the offset from the start of the object, not the offset from
214 // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
215 void RecordWriteField(
216 Register object, int offset, Register value, Register scratch,
217 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
218 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
219 SmiCheck smi_check = INLINE_SMI_CHECK,
220 PointersToHereCheck pointers_to_here_check_for_value =
221 kPointersToHereMaybeInteresting);
223 // As above, but the offset has the tag presubtracted. For use with
224 // MemOperand(reg, off).
225 inline void RecordWriteContextSlot(
226 Register context, int offset, Register value, Register scratch,
227 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
228 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
229 SmiCheck smi_check = INLINE_SMI_CHECK,
230 PointersToHereCheck pointers_to_here_check_for_value =
231 kPointersToHereMaybeInteresting) {
232 RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
233 lr_status, save_fp, remembered_set_action, smi_check,
234 pointers_to_here_check_for_value);
237 void RecordWriteForMap(Register object, Register map, Register dst,
238 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
240 // For a given |object| notify the garbage collector that the slot |address|
241 // has been written. |value| is the object being stored. The value and
242 // address registers are clobbered by the operation.
244 Register object, Register address, Register value,
245 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
246 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
247 SmiCheck smi_check = INLINE_SMI_CHECK,
248 PointersToHereCheck pointers_to_here_check_for_value =
249 kPointersToHereMaybeInteresting);
251 void Push(Register src) { push(src); }
254 void Push(Handle<Object> handle);
255 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
257 // Push two registers. Pushes leftmost register first (to highest address).
258 void Push(Register src1, Register src2) {
259 StorePU(src2, MemOperand(sp, -2 * kPointerSize));
260 StoreP(src1, MemOperand(sp, kPointerSize));
263 // Push three registers. Pushes leftmost register first (to highest address).
264 void Push(Register src1, Register src2, Register src3) {
265 StorePU(src3, MemOperand(sp, -3 * kPointerSize));
266 StoreP(src2, MemOperand(sp, kPointerSize));
267 StoreP(src1, MemOperand(sp, 2 * kPointerSize));
270 // Push four registers. Pushes leftmost register first (to highest address).
271 void Push(Register src1, Register src2, Register src3, Register src4) {
272 StorePU(src4, MemOperand(sp, -4 * kPointerSize));
273 StoreP(src3, MemOperand(sp, kPointerSize));
274 StoreP(src2, MemOperand(sp, 2 * kPointerSize));
275 StoreP(src1, MemOperand(sp, 3 * kPointerSize));
278 // Push five registers. Pushes leftmost register first (to highest address).
279 void Push(Register src1, Register src2, Register src3, Register src4,
281 StorePU(src5, MemOperand(sp, -5 * kPointerSize));
282 StoreP(src4, MemOperand(sp, kPointerSize));
283 StoreP(src3, MemOperand(sp, 2 * kPointerSize));
284 StoreP(src2, MemOperand(sp, 3 * kPointerSize));
285 StoreP(src1, MemOperand(sp, 4 * kPointerSize));
288 void Pop(Register dst) { pop(dst); }
290 // Pop two registers. Pops rightmost register first (from lower address).
291 void Pop(Register src1, Register src2) {
292 LoadP(src2, MemOperand(sp, 0));
293 LoadP(src1, MemOperand(sp, kPointerSize));
294 addi(sp, sp, Operand(2 * kPointerSize));
297 // Pop three registers. Pops rightmost register first (from lower address).
298 void Pop(Register src1, Register src2, Register src3) {
299 LoadP(src3, MemOperand(sp, 0));
300 LoadP(src2, MemOperand(sp, kPointerSize));
301 LoadP(src1, MemOperand(sp, 2 * kPointerSize));
302 addi(sp, sp, Operand(3 * kPointerSize));
305 // Pop four registers. Pops rightmost register first (from lower address).
306 void Pop(Register src1, Register src2, Register src3, Register src4) {
307 LoadP(src4, MemOperand(sp, 0));
308 LoadP(src3, MemOperand(sp, kPointerSize));
309 LoadP(src2, MemOperand(sp, 2 * kPointerSize));
310 LoadP(src1, MemOperand(sp, 3 * kPointerSize));
311 addi(sp, sp, Operand(4 * kPointerSize));
314 // Pop five registers. Pops rightmost register first (from lower address).
315 void Pop(Register src1, Register src2, Register src3, Register src4,
317 LoadP(src5, MemOperand(sp, 0));
318 LoadP(src4, MemOperand(sp, kPointerSize));
319 LoadP(src3, MemOperand(sp, 2 * kPointerSize));
320 LoadP(src2, MemOperand(sp, 3 * kPointerSize));
321 LoadP(src1, MemOperand(sp, 4 * kPointerSize));
322 addi(sp, sp, Operand(5 * kPointerSize));
325 // Push a fixed frame, consisting of lr, fp, context and
326 // JS function / marker id if marker_reg is a valid register.
327 void PushFixedFrame(Register marker_reg = no_reg);
328 void PopFixedFrame(Register marker_reg = no_reg);
330 // Push and pop the registers that can hold pointers, as defined by the
331 // RegList constant kSafepointSavedRegisters.
332 void PushSafepointRegisters();
333 void PopSafepointRegisters();
334 // Store value in register src in the safepoint stack slot for
336 void StoreToSafepointRegisterSlot(Register src, Register dst);
337 // Load the value of the src register from its safepoint stack slot
338 // into register dst.
339 void LoadFromSafepointRegisterSlot(Register dst, Register src);
341 // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
343 // Does not handle errors.
344 void FlushICache(Register address, size_t size, Register scratch);
346 // If the value is a NaN, canonicalize the value else, do nothing.
347 void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
348 void CanonicalizeNaN(const DoubleRegister value) {
349 CanonicalizeNaN(value, value);
352 // Converts the integer (untagged smi) in |src| to a double, storing
353 // the result to |double_dst|
354 void ConvertIntToDouble(Register src, DoubleRegister double_dst);
356 // Converts the unsigned integer (untagged smi) in |src| to
357 // a double, storing the result to |double_dst|
358 void ConvertUnsignedIntToDouble(Register src, DoubleRegister double_dst);
360 // Converts the integer (untagged smi) in |src| to
361 // a float, storing the result in |dst|
// Warning: The value in |int_scratch| will be changed in the process!
363 void ConvertIntToFloat(const DoubleRegister dst, const Register src,
364 const Register int_scratch);
366 // Converts the double_input to an integer. Note that, upon return,
367 // the contents of double_dst will also hold the fixed point representation.
368 void ConvertDoubleToInt64(const DoubleRegister double_input,
369 #if !V8_TARGET_ARCH_PPC64
370 const Register dst_hi,
372 const Register dst, const DoubleRegister double_dst,
373 FPRoundingMode rounding_mode = kRoundToZero);
375 // Generates function and stub prologue code.
376 void StubPrologue(int prologue_offset = 0);
377 void Prologue(bool code_pre_aging, int prologue_offset = 0);
380 // stack_space - extra stack space, used for parameters before call to C.
381 // At least one slot (for the return address) should be provided.
382 void EnterExitFrame(bool save_doubles, int stack_space = 1);
384 // Leave the current exit frame. Expects the return value in r0.
385 // Expect the number of values, pushed prior to the exit frame, to
386 // remove in a register (or no_reg, if there is nothing to remove).
387 void LeaveExitFrame(bool save_doubles, Register argument_count,
388 bool restore_context,
389 bool argument_count_is_length = false);
391 // Get the actual activation frame alignment for target environment.
392 static int ActivationFrameAlignment();
394 void LoadContext(Register dst, int context_chain_length);
396 // Conditionally load the cached Array transitioned map of type
397 // transitioned_kind from the native context if the map in register
398 // map_in_out is the cached Array map in the native context of
400 void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
401 ElementsKind transitioned_kind,
404 Label* no_map_match);
406 void LoadGlobalFunction(int index, Register function);
408 // Load the initial map from the global function. The registers
409 // function and map can be the same, function is then overwritten.
410 void LoadGlobalFunctionInitialMap(Register function, Register map,
413 void InitializeRootRegister() {
414 ExternalReference roots_array_start =
415 ExternalReference::roots_array_start(isolate());
416 mov(kRootRegister, Operand(roots_array_start));
419 // ----------------------------------------------------------------
420 // new PPC macro-assembler interfaces that are slightly higher level
421 // than assembler-ppc and may generate variable length sequences
423 // load a literal signed int value <value> to GPR <dst>
424 void LoadIntLiteral(Register dst, int value);
426 // load an SMI value <value> to GPR <dst>
427 void LoadSmiLiteral(Register dst, Smi* smi);
429 // load a literal double value <value> to FPR <result>
430 void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
432 void LoadWord(Register dst, const MemOperand& mem, Register scratch);
433 void LoadWordArith(Register dst, const MemOperand& mem,
434 Register scratch = no_reg);
435 void StoreWord(Register src, const MemOperand& mem, Register scratch);
437 void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
438 void LoadHalfWordArith(Register dst, const MemOperand& mem,
439 Register scratch = no_reg);
440 void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);
442 void LoadByte(Register dst, const MemOperand& mem, Register scratch);
443 void StoreByte(Register src, const MemOperand& mem, Register scratch);
445 void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
446 Register scratch = no_reg);
447 void StoreRepresentation(Register src, const MemOperand& mem,
448 Representation r, Register scratch = no_reg);
450 void LoadDouble(DoubleRegister dst, const MemOperand& mem, Register scratch);
451 void StoreDouble(DoubleRegister src, const MemOperand& mem, Register scratch);
453 // Move values between integer and floating point registers.
454 void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
455 void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
457 void MovInt64ToDouble(DoubleRegister dst,
458 #if !V8_TARGET_ARCH_PPC64
462 #if V8_TARGET_ARCH_PPC64
463 void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
464 Register src_lo, Register scratch);
466 void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
467 void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
468 void MovDoubleLowToInt(Register dst, DoubleRegister src);
469 void MovDoubleHighToInt(Register dst, DoubleRegister src);
470 void MovDoubleToInt64(
471 #if !V8_TARGET_ARCH_PPC64
474 Register dst, DoubleRegister src);
476 void Add(Register dst, Register src, intptr_t value, Register scratch);
477 void Cmpi(Register src1, const Operand& src2, Register scratch,
479 void Cmpli(Register src1, const Operand& src2, Register scratch,
481 void Cmpwi(Register src1, const Operand& src2, Register scratch,
483 void Cmplwi(Register src1, const Operand& src2, Register scratch,
485 void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
486 void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
487 void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
489 void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
490 void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
491 void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
493 void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
495 void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
498 // Set new rounding mode RN to FPSCR
499 void SetRoundingMode(FPRoundingMode RN);
501 // reset rounding mode to default (kRoundToNearest)
502 void ResetRoundingMode();
504 // These exist to provide portability between 32 and 64bit
505 void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
506 void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
508 // ---------------------------------------------------------------------------
509 // JavaScript invokes
511 // Invoke the JavaScript function code by either calling or jumping.
512 void InvokeCode(Register code, const ParameterCount& expected,
513 const ParameterCount& actual, InvokeFlag flag,
514 const CallWrapper& call_wrapper);
516 // Invoke the JavaScript function in the given register. Changes the
517 // current context to the context in the function before invoking.
518 void InvokeFunction(Register function, const ParameterCount& actual,
519 InvokeFlag flag, const CallWrapper& call_wrapper);
521 void InvokeFunction(Register function, const ParameterCount& expected,
522 const ParameterCount& actual, InvokeFlag flag,
523 const CallWrapper& call_wrapper);
525 void InvokeFunction(Handle<JSFunction> function,
526 const ParameterCount& expected,
527 const ParameterCount& actual, InvokeFlag flag,
528 const CallWrapper& call_wrapper);
530 void IsObjectJSObjectType(Register heap_object, Register map,
531 Register scratch, Label* fail);
533 void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
535 void IsObjectJSStringType(Register object, Register scratch, Label* fail);
537 void IsObjectNameType(Register object, Register scratch, Label* fail);
539 // ---------------------------------------------------------------------------
544 // ---------------------------------------------------------------------------
545 // Exception handling
547 // Push a new stack handler and link into stack handler chain.
548 void PushStackHandler();
550 // Unlink the stack handler on top of the stack from the stack handler chain.
551 // Must preserve the result register.
552 void PopStackHandler();
554 // ---------------------------------------------------------------------------
555 // Inline caching support
557 // Generate code for checking access rights - used for security checks
558 // on access to global objects across environments. The holder register
559 // is left untouched, whereas both scratch registers are clobbered.
560 void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
563 void GetNumberHash(Register t0, Register scratch);
565 void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
566 Register result, Register t0, Register t1,
570 inline void MarkCode(NopMarkerTypes type) { nop(type); }
572 // Check if the given instruction is a 'type' marker.
// i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
574 // These instructions are generated to mark special location in the code,
575 // like some special IC code.
576 static inline bool IsMarkedCode(Instr instr, int type) {
577 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
578 return IsNop(instr, type);
582 static inline int GetCodeMarker(Instr instr) {
583 int dst_reg_offset = 12;
584 int dst_mask = 0xf << dst_reg_offset;
586 int dst_reg = (instr & dst_mask) >> dst_reg_offset;
587 int src_reg = instr & src_mask;
588 uint32_t non_register_mask = ~(dst_mask | src_mask);
589 uint32_t mov_mask = al | 13 << 21;
591 // Return <n> if we have a mov rn rn, else return -1.
592 int type = ((instr & non_register_mask) == mov_mask) &&
593 (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
594 (dst_reg < LAST_CODE_MARKER)
597 DCHECK((type == -1) ||
598 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
603 // ---------------------------------------------------------------------------
604 // Allocation support
606 // Allocate an object in new space or old pointer space. The object_size is
607 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
608 // is passed. If the space is exhausted control continues at the gc_required
609 // label. The allocated object is returned in result. If the flag
// tag_allocated_object is true the result is tagged as a heap object.
611 // All registers are clobbered also when control continues at the gc_required
613 void Allocate(int object_size, Register result, Register scratch1,
614 Register scratch2, Label* gc_required, AllocationFlags flags);
616 void Allocate(Register object_size, Register result, Register scratch1,
617 Register scratch2, Label* gc_required, AllocationFlags flags);
619 // Undo allocation in new space. The object passed and objects allocated after
620 // it will no longer be allocated. The caller must make sure that no pointers
621 // are left to the object(s) no longer allocated as they would be invalid when
622 // allocation is undone.
623 void UndoAllocationInNewSpace(Register object, Register scratch);
626 void AllocateTwoByteString(Register result, Register length,
627 Register scratch1, Register scratch2,
628 Register scratch3, Label* gc_required);
629 void AllocateOneByteString(Register result, Register length,
630 Register scratch1, Register scratch2,
631 Register scratch3, Label* gc_required);
632 void AllocateTwoByteConsString(Register result, Register length,
633 Register scratch1, Register scratch2,
635 void AllocateOneByteConsString(Register result, Register length,
636 Register scratch1, Register scratch2,
638 void AllocateTwoByteSlicedString(Register result, Register length,
639 Register scratch1, Register scratch2,
641 void AllocateOneByteSlicedString(Register result, Register length,
642 Register scratch1, Register scratch2,
645 // Allocates a heap number or jumps to the gc_required label if the young
646 // space is full and a scavenge is needed. All registers are clobbered also
647 // when control continues at the gc_required label.
648 void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
649 Register heap_number_map, Label* gc_required,
650 TaggingMode tagging_mode = TAG_RESULT,
651 MutableMode mode = IMMUTABLE);
652 void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
653 Register scratch1, Register scratch2,
654 Register heap_number_map,
657 // Copies a fixed number of fields of heap objects from src to dst.
658 void CopyFields(Register dst, Register src, RegList temps, int field_count);
660 // Copies a number of bytes from src to dst. All registers are clobbered. On
661 // exit src and dst will point to the place just after where the last byte was
662 // read or written and length will be zero.
663 void CopyBytes(Register src, Register dst, Register length, Register scratch);
665 // Initialize fields with filler values. |count| fields starting at
666 // |start_offset| are overwritten with the value in |filler|. At the end the
667 // loop, |start_offset| points at the next uninitialized field. |count| is
668 // assumed to be non-zero.
669 void InitializeNFieldsWithFiller(Register start_offset, Register count,
672 // Initialize fields with filler values. Fields starting at |start_offset|
673 // not including end_offset are overwritten with the value in |filler|. At
674 // the end the loop, |start_offset| takes the value of |end_offset|.
675 void InitializeFieldsWithFiller(Register start_offset, Register end_offset,
678 // ---------------------------------------------------------------------------
679 // Support functions.
681 // Machine code version of Map::GetConstructor().
682 // |temp| holds |result|'s map when done, and |temp2| its instance type.
683 void GetMapConstructor(Register result, Register map, Register temp,
686 // Try to get function prototype of a function and puts the value in
687 // the result register. Checks that the function really is a
688 // function and jumps to the miss label if the fast checks fail. The
689 // function register will be untouched; the other registers may be
691 void TryGetFunctionPrototype(Register function, Register result,
692 Register scratch, Label* miss,
693 bool miss_on_bound_function = false);
695 // Compare object type for heap object. heap_object contains a non-Smi
696 // whose object type should be compared with the given type. This both
697 // sets the flags and leaves the object type in the type_reg register.
698 // It leaves the map in the map register (unless the type_reg and map register
699 // are the same register). It leaves the heap object in the heap_object
700 // register unless the heap_object register is the same register as one of the
702 // Type_reg can be no_reg. In that case ip is used.
703 void CompareObjectType(Register heap_object, Register map, Register type_reg,
706 // Compare object type for heap object. Branch to false_label if type
707 // is lower than min_type or greater than max_type.
708 // Load map into the register map.
709 void CheckObjectTypeRange(Register heap_object, Register map,
710 InstanceType min_type, InstanceType max_type,
713 // Compare instance type in a map. map contains a valid map object whose
714 // object type should be compared with the given type. This both
715 // sets the flags and leaves the object type in the type_reg register.
716 void CompareInstanceType(Register map, Register type_reg, InstanceType type);
719 // Check if a map for a JSObject indicates that the object has fast elements.
720 // Jump to the specified label if it does not.
721 void CheckFastElements(Register map, Register scratch, Label* fail);
723 // Check if a map for a JSObject indicates that the object can have both smi
724 // and HeapObject elements. Jump to the specified label if it does not.
725 void CheckFastObjectElements(Register map, Register scratch, Label* fail);
727 // Check if a map for a JSObject indicates that the object has fast smi only
728 // elements. Jump to the specified label if it does not.
729 void CheckFastSmiElements(Register map, Register scratch, Label* fail);
731 // Check to see if maybe_number can be stored as a double in
732 // FastDoubleElements. If it can, store it at the index specified by key in
733 // the FastDoubleElements array elements. Otherwise jump to fail.
734 void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
735 Register elements_reg, Register scratch1,
736 DoubleRegister double_scratch, Label* fail,
737 int elements_offset = 0);
739 // Compare an object's map with the specified map and its transitioned
740 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
741 // set with result of map compare. If multiple map compares are required, the
742 // compare sequences branches to early_success.
743 void CompareMap(Register obj, Register scratch, Handle<Map> map,
744 Label* early_success);
746 // As above, but the map of the object is already loaded into the register
747 // which is preserved by the code generated.
748 void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
750 // Check if the map of an object is equal to a specified map and branch to
751 // label if not. Skip the smi check if not required (object is known to be a
752 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
753 // against maps that are ElementsKind transition maps of the specified map.
754 void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
755 SmiCheckType smi_check_type);
758 void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
759 Label* fail, SmiCheckType smi_check_type);
762 // Check if the map of an object is equal to a specified weak map and branch
763 // to a specified target if equal. Skip the smi check if not required
764 // (object is known to be a heap object)
765 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
766 Handle<WeakCell> cell, Handle<Code> success,
767 SmiCheckType smi_check_type);
769 // Compare the given value and the value of weak cell.
770 void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
773 void GetWeakValue(Register value, Handle<WeakCell> cell);
775 // Load the value of the weak cell in the value register. Branch to the given
776 // miss label if the weak cell was cleared.
777 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
779 // Compare the object in a register to a value from the root list.
780 // Uses the ip register as scratch.
781 void CompareRoot(Register obj, Heap::RootListIndex index);
784 // Load and check the instance type of an object for being a string.
785 // Loads the type into the second argument register.
786 // Returns a condition that will be enabled if the object was a string.
787 Condition IsObjectStringType(Register obj, Register type) {
788 LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
789 lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
790 andi(r0, type, Operand(kIsNotStringMask));
791 DCHECK_EQ(0u, kStringTag);
796 // Picks out an array index from the hash field.
798 // hash - holds the index's hash. Clobbered.
799 // index - holds the overwritten index on exit.
800 void IndexFromHash(Register hash, Register index);
802 // Get the number of least significant bits from a register
803 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
804 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
806 // Load the value of a smi object into a double register.
807 void SmiToDouble(DoubleRegister value, Register smi);
809 // Check if a double can be exactly represented as a signed 32-bit integer.
810 // CR_EQ in cr7 is set if true.
811 void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
812 Register scratch2, DoubleRegister double_scratch);
814 // Try to convert a double to a signed 32-bit integer.
815 // CR_EQ in cr7 is set and result assigned if the conversion is exact.
816 void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
817 Register scratch, DoubleRegister double_scratch);
819 // Floor a double and writes the value to the result register.
820 // Go to exact if the conversion is exact (to be able to test -0),
821 // fall through calling code if an overflow occurred, else go to done.
822 // In return, input_high is loaded with high bits of input.
823 void TryInt32Floor(Register result, DoubleRegister double_input,
824 Register input_high, Register scratch,
825 DoubleRegister double_scratch, Label* done, Label* exact);
827 // Performs a truncating conversion of a floating point number as used by
828 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
829 // succeeds, otherwise falls through if result is saturated. On return
830 // 'result' either holds answer, or is clobbered on fall through.
832 // Only public for the test code in test-code-stubs-arm.cc.
833 void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
836 // Performs a truncating conversion of a floating point number as used by
837 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
838 // Exits with 'result' holding the answer.
839 void TruncateDoubleToI(Register result, DoubleRegister double_input);
841 // Performs a truncating conversion of a heap number as used by
842 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
843 // must be different registers. Exits with 'result' holding the answer.
844 void TruncateHeapNumberToI(Register result, Register object);
846 // Converts the smi or heap number in object to an int32 using the rules
847 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
848 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
849 // different registers.
850 void TruncateNumberToI(Register object, Register result,
851 Register heap_number_map, Register scratch1,
854 // Overflow handling functions.
855 // Usage: call the appropriate arithmetic function and then call one of the
856 // flow control functions with the corresponding label.
858 // Compute dst = left + right, setting condition codes. dst may be same as
859 // either left or right (or a unique register). left and right must not be
860 // the same register.
861 void AddAndCheckForOverflow(Register dst, Register left, Register right,
862 Register overflow_dst, Register scratch = r0);
863 void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
864 Register overflow_dst, Register scratch = r0);
866 // Compute dst = left - right, setting condition codes. dst may be same as
867 // either left or right (or a unique register). left and right must not be
868 // the same register.
869 void SubAndCheckForOverflow(Register dst, Register left, Register right,
870 Register overflow_dst, Register scratch = r0);
// Branch to label if the preceding Add/SubAndCheckForOverflow call
// signaled overflow (LT bit of cr0 set).
void BranchOnOverflow(Label* label) { blt(label, cr0); }
// Branch to label if the preceding Add/SubAndCheckForOverflow call
// signaled no overflow (LT bit of cr0 clear).
void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
876 void RetOnOverflow(void) {
884 void RetOnNoOverflow(void) {
892 // Pushes <count> double values to <location>, starting from d<first>.
893 void SaveFPRegs(Register location, int first, int count);
895 // Pops <count> double values from <location>, starting from d<first>.
896 void RestoreFPRegs(Register location, int first, int count);
898 // ---------------------------------------------------------------------------
902 void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
903 Condition cond = al);
906 void TailCallStub(CodeStub* stub, Condition cond = al);
908 // Call a runtime routine.
909 void CallRuntime(const Runtime::Function* f, int num_arguments,
910 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
911 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
912 const Runtime::Function* function = Runtime::FunctionForId(id);
913 CallRuntime(function, function->nargs, kSaveFPRegs);
916 // Convenience function: Same as above, but takes the fid instead.
917 void CallRuntime(Runtime::FunctionId id, int num_arguments,
918 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
919 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
922 // Convenience function: call an external reference.
923 void CallExternalReference(const ExternalReference& ext, int num_arguments);
925 // Tail call of a runtime routine (jump).
926 // Like JumpToExternalReference, but also takes care of passing the number
928 void TailCallExternalReference(const ExternalReference& ext,
929 int num_arguments, int result_size);
931 // Convenience function: tail call a runtime routine (jump).
932 void TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
935 int CalculateStackPassedWords(int num_reg_arguments,
936 int num_double_arguments);
938 // Before calling a C-function from generated code, align arguments on stack.
939 // After aligning the frame, non-register arguments must be stored in
940 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
941 // are word sized. If double arguments are used, this function assumes that
942 // all double arguments are stored before core registers; otherwise the
943 // correct alignment of the double values is not guaranteed.
944 // Some compilers/platforms require the stack to be aligned when calling
946 // Needs a scratch register to do some arithmetic. This register will be
948 void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
950 void PrepareCallCFunction(int num_reg_arguments, Register scratch);
952 // There are two ways of passing double arguments on ARM, depending on
953 // whether soft or hard floating point ABI is used. These functions
954 // abstract parameter passing for the three different ways we call
955 // C functions from generated code.
956 void MovToFloatParameter(DoubleRegister src);
957 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
958 void MovToFloatResult(DoubleRegister src);
960 // Calls a C function and cleans up the space for arguments allocated
961 // by PrepareCallCFunction. The called function is not allowed to trigger a
962 // garbage collection, since that might move the code and invalidate the
963 // return address (unless this is somehow accounted for by the called
965 void CallCFunction(ExternalReference function, int num_arguments);
966 void CallCFunction(Register function, int num_arguments);
967 void CallCFunction(ExternalReference function, int num_reg_arguments,
968 int num_double_arguments);
969 void CallCFunction(Register function, int num_reg_arguments,
970 int num_double_arguments);
972 void MovFromFloatParameter(DoubleRegister dst);
973 void MovFromFloatResult(DoubleRegister dst);
975 // Jump to a runtime routine.
976 void JumpToExternalReference(const ExternalReference& builtin);
978 // Invoke specified builtin JavaScript function. Adds an entry to
979 // the unresolved list if the name does not resolve.
980 void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
981 const CallWrapper& call_wrapper = NullCallWrapper());
983 // Store the code object for the given builtin in the target register and
984 // setup the function in r1.
985 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
987 // Store the function for the given builtin in the target register.
988 void GetBuiltinFunction(Register target, Builtins::JavaScript id);
990 Handle<Object> CodeObject() {
991 DCHECK(!code_object_.is_null());
996 // Emit code for a truncating division by a constant. The dividend register is
997 // unchanged and ip gets clobbered. Dividend and result must be different.
998 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1000 // ---------------------------------------------------------------------------
1001 // StatsCounter support
1003 void SetCounter(StatsCounter* counter, int value, Register scratch1,
1005 void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1007 void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1011 // ---------------------------------------------------------------------------
1014 // Calls Abort(msg) if the condition cond is not satisfied.
1015 // Use --debug_code to enable.
1016 void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
1017 void AssertFastElements(Register elements);
1019 // Like Assert(), but always enabled.
1020 void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
1022 // Print a message to stdout and abort execution.
1023 void Abort(BailoutReason reason);
1025 // Verify restrictions about code generated in stubs.
1026 void set_generating_stub(bool value) { generating_stub_ = value; }
1027 bool generating_stub() { return generating_stub_; }
1028 void set_has_frame(bool value) { has_frame_ = value; }
1029 bool has_frame() { return has_frame_; }
1030 inline bool AllowThisStubCall(CodeStub* stub);
1032 // ---------------------------------------------------------------------------
1035 // Check whether the value of reg is a power of two and not zero. If not
1036 // control continues at the label not_power_of_two. If reg is a power of two
1037 // the register scratch contains the value of (reg - 1) when control falls
1039 void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
1040 Label* not_power_of_two_or_zero);
1041 // Check whether the value of reg is a power of two and not zero.
1042 // Control falls through if it is, with scratch containing the mask
1044 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1045 // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1046 // strictly positive but not a power of two.
1047 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
1048 Label* zero_and_neg,
1049 Label* not_power_of_two);
1051 // ---------------------------------------------------------------------------
1052 // Bit testing/extraction
1054 // Bit numbering is such that the least significant bit is bit 0
1055 // (for consistency between 32/64-bit).
1057 // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
1058 // and place them into the least significant bits of dst.
1059 inline void ExtractBitRange(Register dst, Register src, int rangeStart,
1060 int rangeEnd, RCBit rc = LeaveRC) {
1061 DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
1062 int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
1063 int width = rangeStart - rangeEnd + 1;
1064 #if V8_TARGET_ARCH_PPC64
1065 rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
1067 rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1, rc);
1071 inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
1072 RCBit rc = LeaveRC) {
1073 ExtractBitRange(dst, src, bitNumber, bitNumber, rc);
1076 // Extract consecutive bits (defined by mask) from src and place them
1077 // into the least significant bits of dst.
1078 inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
1079 RCBit rc = LeaveRC) {
1080 int start = kBitsPerPointer - 1;
1082 uintptr_t bit = (1L << start);
1084 while (bit && (mask & bit) == 0) {
1091 while (bit && (mask & bit)) {
1096 // 1-bits in mask must be contiguous
1097 DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
1099 ExtractBitRange(dst, src, start, end, rc);
1102 // Test single bit in value.
1103 inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
1104 ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC);
1107 // Test consecutive bit range in value. Range is defined by
1108 // rangeStart - rangeEnd.
1109 inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
1110 Register scratch = r0) {
1111 ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC);
1114 // Test consecutive bit range in value. Range is defined by mask.
1115 inline void TestBitMask(Register value, uintptr_t mask,
1116 Register scratch = r0) {
1117 ExtractBitMask(scratch, value, mask, SetRC);
1121 // ---------------------------------------------------------------------------
1124 // Shift left by kSmiShift
1125 void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
1126 void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
1127 ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
1130 #if !V8_TARGET_ARCH_PPC64
1131 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
1132 void SmiTagCheckOverflow(Register reg, Register overflow);
1133 void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1135 inline void JumpIfNotSmiCandidate(Register value, Register scratch,
1136 Label* not_smi_label) {
1137 // High bits must be identical to fit into an Smi
1138 STATIC_ASSERT(kSmiShift == 1);
1139 addis(scratch, value, Operand(0x40000000u >> 16));
1140 cmpi(scratch, Operand::Zero());
1144 inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
1145 // The test is different for unsigned int values. Since we need
1146 // the value to be in the range of a positive smi, we can't
1147 // handle any of the high bits being set in the value.
1148 TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
1151 inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
1152 Label* not_smi_label) {
1153 TestUnsignedSmiCandidate(value, scratch);
1154 bne(not_smi_label, cr0);
// Untag a smi in place: arithmetic shift right by kSmiShift.
void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
1159 void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
1160 ShiftRightArithImm(dst, src, kSmiShift, rc);
1163 void SmiToPtrArrayOffset(Register dst, Register src) {
1164 #if V8_TARGET_ARCH_PPC64
1165 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
1166 ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
1168 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
1169 ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
// A byte-array element offset is just the untagged smi value
// (element size log2 == 0), so this is a plain untag.
void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }
1175 void SmiToShortArrayOffset(Register dst, Register src) {
1176 #if V8_TARGET_ARCH_PPC64
1177 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
1178 ShiftRightArithImm(dst, src, kSmiShift - 1);
1180 STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
1187 void SmiToIntArrayOffset(Register dst, Register src) {
1188 #if V8_TARGET_ARCH_PPC64
1189 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
1190 ShiftRightArithImm(dst, src, kSmiShift - 2);
1192 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
1193 ShiftLeftImm(dst, src, Operand(2 - kSmiShift));
1197 #define SmiToFloatArrayOffset SmiToIntArrayOffset
1199 void SmiToDoubleArrayOffset(Register dst, Register src) {
1200 #if V8_TARGET_ARCH_PPC64
1201 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
1202 ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2);
1204 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
1205 ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
1209 void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
1210 if (kSmiShift < elementSizeLog2) {
1211 ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift));
1212 } else if (kSmiShift > elementSizeLog2) {
1213 ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2);
1214 } else if (!dst.is(src)) {
1219 void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
1222 SmiToArrayOffset(dst, src, elementSizeLog2);
1224 ShiftLeftImm(dst, src, Operand(elementSizeLog2));
1228 // Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
1230 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1232 // Untag the source value into destination and jump if source is not a smi.
// Source and destination can be the same register.
1234 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1236 inline void TestIfSmi(Register value, Register scratch) {
1237 TestBitRange(value, kSmiTagSize - 1, 0, scratch);
1240 inline void TestIfPositiveSmi(Register value, Register scratch) {
1241 #if V8_TARGET_ARCH_PPC64
1242 rldicl(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize), SetRC);
1244 rlwinm(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize),
1245 kBitsPerPointer - 1, SetRC);
// Jump if the register contains a smi.
1250 inline void JumpIfSmi(Register value, Label* smi_label) {
1251 TestIfSmi(value, r0);
1252 beq(smi_label, cr0); // branch if SMI
// Jump if the register contains a non-smi.
1255 inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1256 TestIfSmi(value, r0);
1257 bne(not_smi_label, cr0);
1259 // Jump if either of the registers contain a non-smi.
1260 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1261 // Jump if either of the registers contain a smi.
1262 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1264 // Abort execution if argument is a smi, enabled via --debug-code.
1265 void AssertNotSmi(Register object);
1266 void AssertSmi(Register object);
1269 #if V8_TARGET_ARCH_PPC64
1270 inline void TestIfInt32(Register value, Register scratch,
1271 CRegister cr = cr7) {
// High bits must be identical to fit into a 32-bit integer
1273 extsw(scratch, value);
1274 cmp(scratch, value, cr);
1277 inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
1278 CRegister cr = cr7) {
// High bits must be identical to fit into a 32-bit integer
1280 srawi(scratch, lo_word, 31);
1281 cmp(scratch, hi_word, cr);
1285 #if V8_TARGET_ARCH_PPC64
// Ensure it is permissible to read/write int value directly from
1287 // upper half of the smi.
1288 STATIC_ASSERT(kSmiTag == 0);
1289 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
1291 #if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
1292 #define SmiWordOffset(offset) (offset + kPointerSize / 2)
1294 #define SmiWordOffset(offset) offset
1297 // Abort execution if argument is not a string, enabled via --debug-code.
1298 void AssertString(Register object);
1300 // Abort execution if argument is not a name, enabled via --debug-code.
1301 void AssertName(Register object);
1303 // Abort execution if argument is not undefined or an AllocationSite, enabled
1304 // via --debug-code.
1305 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1307 // Abort execution if reg is not the root value with the given index,
1308 // enabled via --debug-code.
1309 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1311 // ---------------------------------------------------------------------------
1312 // HeapNumber utilities
1314 void JumpIfNotHeapNumber(Register object, Register heap_number_map,
1315 Register scratch, Label* on_not_heap_number);
1317 // ---------------------------------------------------------------------------
1320 // Generate code to do a lookup in the number string cache. If the number in
1321 // the register object is found in the cache the generated code falls through
1322 // with the result in the result register. The object and the result register
1323 // can be the same. If the number is not found in the cache the code jumps to
1324 // the label not_found with only the content of register object unchanged.
1325 void LookupNumberStringCache(Register object, Register result,
1326 Register scratch1, Register scratch2,
1327 Register scratch3, Label* not_found);
1329 // Checks if both objects are sequential one-byte strings and jumps to label
1330 // if either is not. Assumes that neither object is a smi.
1331 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
1337 // Checks if both objects are sequential one-byte strings and jumps to label
1338 // if either is not.
1339 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1342 Label* not_flat_one_byte_strings);
1344 // Checks if both instance types are sequential one-byte strings and jumps to
1345 // label if either is not.
1346 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1347 Register first_object_instance_type, Register second_object_instance_type,
1348 Register scratch1, Register scratch2, Label* failure);
1350 // Check if instance type is sequential one-byte string and jump to label if
1352 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1355 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1357 void EmitSeqStringSetCharCheck(Register string, Register index,
1358 Register value, uint32_t encoding_mask);
1360 // ---------------------------------------------------------------------------
1361 // Patching helpers.
1363 // Retrieve/patch the relocated value (lis/ori pair).
1364 void GetRelocatedValue(Register location, Register result, Register scratch);
1365 void SetRelocatedValue(Register location, Register scratch,
1366 Register new_value);
1368 void ClampUint8(Register output_reg, Register input_reg);
1370 // Saturate a value into 8-bit unsigned integer
1371 // if input_value < 0, output_value is 0
1372 // if input_value > 255, output_value is 255
1373 // otherwise output_value is the (int)input_value (round to nearest)
1374 void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
1375 DoubleRegister temp_double_reg);
1378 void LoadInstanceDescriptors(Register map, Register descriptors);
1379 void EnumLength(Register dst, Register map);
1380 void NumberOfOwnDescriptors(Register dst, Register map);
1381 void LoadAccessor(Register dst, Register holder, int accessor_index,
1382 AccessorComponent accessor);
1384 template <typename Field>
1385 void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
1386 ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
1390 template <typename Field>
1391 void DecodeField(Register reg, RCBit rc = LeaveRC) {
1392 DecodeField<Field>(reg, reg, rc);
1395 template <typename Field>
1396 void DecodeFieldToSmi(Register dst, Register src) {
1397 #if V8_TARGET_ARCH_PPC64
1398 DecodeField<Field>(dst, src);
1401 // 32-bit can do this in one instruction:
1402 int start = Field::kSize + kSmiShift - 1;
1403 int end = kSmiShift;
1404 int rotate = kSmiShift - Field::kShift;
1406 rotate += kBitsPerPointer;
1408 rlwinm(dst, src, rotate, kBitsPerPointer - start - 1,
1409 kBitsPerPointer - end - 1);
1413 template <typename Field>
1414 void DecodeFieldToSmi(Register reg) {
1415 DecodeFieldToSmi<Field>(reg, reg);
1418 // Activation support.
1419 void EnterFrame(StackFrame::Type type,
1420 bool load_constant_pool_pointer_reg = false);
1421 // Returns the pc offset at which the frame ends.
1422 int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
1424 // Expects object in r0 and returns map with validated enum cache
1425 // in r0. Assumes that any other register can be used as a scratch.
1426 void CheckEnumCache(Register null_value, Label* call_runtime);
1428 // AllocationMemento support. Arrays may have an associated
1429 // AllocationMemento object that can be checked for in order to pretransition
1431 // On entry, receiver_reg should point to the array object.
1432 // scratch_reg gets clobbered.
1433 // If allocation info is present, condition flags are set to eq.
1434 void TestJSArrayForAllocationMemento(Register receiver_reg,
1435 Register scratch_reg,
1436 Label* no_memento_found);
1438 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1439 Register scratch_reg,
1440 Label* memento_found) {
1441 Label no_memento_found;
1442 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1445 bind(&no_memento_found);
1448 // Jumps to found label if a prototype map has dictionary elements.
1449 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1450 Register scratch1, Label* found);
1453 static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1455 void CallCFunctionHelper(Register function, int num_reg_arguments,
1456 int num_double_arguments);
1458 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
1459 CRegister cr = cr7);
1461 // Helper functions for generating invokes.
1462 void InvokePrologue(const ParameterCount& expected,
1463 const ParameterCount& actual, Handle<Code> code_constant,
1464 Register code_reg, Label* done,
1465 bool* definitely_mismatches, InvokeFlag flag,
1466 const CallWrapper& call_wrapper);
1468 void InitializeNewString(Register string, Register length,
1469 Heap::RootListIndex map_index, Register scratch1,
1472 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1473 void InNewSpace(Register object, Register scratch,
1474 Condition cond, // eq for new space, ne otherwise.
1477 // Helper for finding the mark bits for an address. Afterwards, the
1478 // bitmap register points at the word with the mark bits and the mask
1479 // the position of the first bit. Leaves addr_reg unchanged.
1480 inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
1483 static const RegList kSafepointSavedRegisters;
1484 static const int kNumSafepointSavedRegisters;
1486 // Compute memory operands for safepoint stack slots.
1487 static int SafepointRegisterStackIndex(int reg_code);
1488 MemOperand SafepointRegisterSlot(Register reg);
1489 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1491 bool generating_stub_;
1493 // This handle will be patched with the code object on installation.
1494 Handle<Object> code_object_;
1496 // Needs access to SafepointRegisterStackIndex for compiled frame
1498 friend class StandardFrame;
1502 // The code patcher is used to patch (typically) small parts of code e.g. for
1503 // debugging and other types of instrumentation. When using the code patcher
1504 // the exact number of bytes specified must be emitted. It is not legal to emit
1505 // relocation information. If any of these constraints are violated it causes
1506 // an assertion to fail.
1509 enum FlushICache { FLUSH, DONT_FLUSH };
1511 CodePatcher(byte* address, int instructions, FlushICache flush_cache = FLUSH);
1512 virtual ~CodePatcher();
1514 // Macro assembler to emit code.
1515 MacroAssembler* masm() { return &masm_; }
1517 // Emit an instruction directly.
1518 void Emit(Instr instr);
1520 // Emit the condition part of an instruction leaving the rest of the current
1521 // instruction unchanged.
1522 void EmitCondition(Condition cond);
1525 byte* address_; // The address of the code being patched.
1526 int size_; // Number of bytes of the expected patch size.
1527 MacroAssembler masm_; // Macro assembler used to generate the code.
1528 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1532 // -----------------------------------------------------------------------------
1533 // Static helper functions.
1535 inline MemOperand ContextOperand(Register context, int index) {
1536 return MemOperand(context, Context::SlotOffset(index));
1540 inline MemOperand GlobalObjectOperand() {
1541 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
1545 #ifdef GENERATED_CODE_COVERAGE
1546 #define CODE_COVERAGE_STRINGIFY(x) #x
1547 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1548 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1549 #define ACCESS_MASM(masm) \
1550 masm->stop(__FILE_LINE__); \
1553 #define ACCESS_MASM(masm) masm->
1556 } // namespace v8::internal
1558 #endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_