1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
6 #define V8_PPC_MACRO_ASSEMBLER_PPC_H_
8 #include "src/assembler.h"
9 #include "src/bailout-reason.h"
10 #include "src/frames.h"
11 #include "src/globals.h"
16 // ----------------------------------------------------------------------------
17 // Static helper functions
19 // Generate a MemOperand for loading a field from an object.
20 inline MemOperand FieldMemOperand(Register object, int offset) {
21 return MemOperand(object, offset - kHeapObjectTag);
25 // Flags used for AllocateHeapNumber
34 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };  // Whether RecordWrite* should emit remembered-set update code.
35 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };  // Whether RecordWrite* should inline a smi check on the stored value.
36 enum PointersToHereCheck {
37 kPointersToHereMaybeInteresting,
38 kPointersToHereAreAlwaysInteresting
40 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };  // Whether the caller has already preserved LR (see lr_status params).
43 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
44 Register reg3 = no_reg,
45 Register reg4 = no_reg,
46 Register reg5 = no_reg,
47 Register reg6 = no_reg);
51 bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
52 Register reg4 = no_reg, Register reg5 = no_reg,
53 Register reg6 = no_reg, Register reg7 = no_reg,
54 Register reg8 = no_reg);
57 // These exist to provide portability between 32 and 64bit
58 #if V8_TARGET_ARCH_PPC64
64 #define StorePUX stdux
65 #define ShiftLeftImm sldi
66 #define ShiftRightImm srdi
67 #define ClearLeftImm clrldi
68 #define ClearRightImm clrrdi
69 #define ShiftRightArithImm sradi
70 #define ShiftLeft_ sld
71 #define ShiftRight_ srd
72 #define ShiftRightArith srad
81 #define StorePUX stwux
82 #define ShiftLeftImm slwi
83 #define ShiftRightImm srwi
84 #define ClearLeftImm clrlwi
85 #define ClearRightImm clrrwi
86 #define ShiftRightArithImm srawi
87 #define ShiftLeft_ slw
88 #define ShiftRight_ srw
89 #define ShiftRightArith sraw
95 // MacroAssembler implements a collection of frequently used macros.
96 class MacroAssembler : public Assembler {
98 // The isolate parameter can be NULL if the macro assembler should
99 // not use isolate-dependent functionality. In this case, it's the
100 // responsibility of the caller to never invoke such function on the
102 MacroAssembler(Isolate* isolate, void* buffer, int size);
105 // Returns the size of a call in instructions. Note, the value returned is
106 // only valid as long as no entries are added to the constant pool between
107 // checking the call size and emitting the actual call.
108 static int CallSize(Register target);
109 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
110 static int CallSizeNotPredictableCodeSize(Address target,
111 RelocInfo::Mode rmode,
112 Condition cond = al);
114 // Jump, Call, and Ret pseudo instructions implementing inter-working.
115 void Jump(Register target);
116 void JumpToJSEntry(Register target);
117 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
119 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
120 void Call(Register target);
121 void CallJSEntry(Register target);
122 void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
123 int CallSize(Handle<Code> code,
124 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
125 TypeFeedbackId ast_id = TypeFeedbackId::None(),
126 Condition cond = al);
127 void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
128 TypeFeedbackId ast_id = TypeFeedbackId::None(),
129 Condition cond = al);
130 void Ret(Condition cond = al);
132 // Emit code to discard a non-negative number of pointer-sized elements
133 // from the stack, clobbering only the sp register.
134 void Drop(int count, Condition cond = al);
136 void Ret(int drop, Condition cond = al);
138 void Call(Label* target);
140 // Emit call to the code we are currently generating.
142 Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
143 Call(self, RelocInfo::CODE_TARGET);
146 // Register move. May do nothing if the registers are identical.
147 void Move(Register dst, Handle<Object> value);
148 void Move(Register dst, Register src, Condition cond = al);
149 void Move(DoubleRegister dst, DoubleRegister src);
151 void MultiPush(RegList regs);
152 void MultiPop(RegList regs);
154 // Load an object from the root table.
155 void LoadRoot(Register destination, Heap::RootListIndex index,
156 Condition cond = al);
157 // Store an object to the root table.
158 void StoreRoot(Register source, Heap::RootListIndex index,
159 Condition cond = al);
161 // ---------------------------------------------------------------------------
164 void IncrementalMarkingRecordWriteHelper(Register object, Register value,
167 enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };  // How RememberedSetHelper's generated code exits.
169 // Record in the remembered set the fact that we have a pointer to new space
170 // at the address pointed to by the addr register. Only works if addr is not
172 void RememberedSetHelper(Register object, // Used for debug code.
173 Register addr, Register scratch,
174 SaveFPRegsMode save_fp,
175 RememberedSetFinalAction and_then);
177 void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
178 Label* condition_met);
180 // Check if object is in new space. Jumps if the object is not in new space.
181 // The register scratch can be object itself, but scratch will be clobbered.
182 void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
183 InNewSpace(object, scratch, ne, branch);
186 // Check if object is in new space. Jumps if the object is in new space.
187 // The register scratch can be object itself, but it will be clobbered.
188 void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
189 InNewSpace(object, scratch, eq, branch);
192 // Check if an object has a given incremental marking color.
193 void HasColor(Register object, Register scratch0, Register scratch1,
194 Label* has_color, int first_bit, int second_bit);
196 void JumpIfBlack(Register object, Register scratch0, Register scratch1,
199 // Checks the color of an object. If the object is already grey or black
200 // then we just fall through, since it is already live. If it is white and
201 // we can determine that it doesn't need to be scanned, then we just mark it
202 // black and fall through. For the rest we jump to the label so the
203 // incremental marker can fix its assumptions.
204 void EnsureNotWhite(Register object, Register scratch1, Register scratch2,
205 Register scratch3, Label* object_is_white_and_not_data);
207 // Detects conservatively whether an object is data-only, i.e. it does not
208 // need to be scanned by the garbage collector.
209 void JumpIfDataObject(Register value, Register scratch,
210 Label* not_data_object);
212 // Notify the garbage collector that we wrote a pointer into an object.
213 // |object| is the object being stored into, |value| is the object being
214 // stored. value and scratch registers are clobbered by the operation.
215 // The offset is the offset from the start of the object, not the offset from
216 // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
217 void RecordWriteField(
218 Register object, int offset, Register value, Register scratch,
219 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
220 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
221 SmiCheck smi_check = INLINE_SMI_CHECK,
222 PointersToHereCheck pointers_to_here_check_for_value =
223 kPointersToHereMaybeInteresting);
225 // As above, but the offset has the tag presubtracted. For use with
226 // MemOperand(reg, off).
227 inline void RecordWriteContextSlot(
228 Register context, int offset, Register value, Register scratch,
229 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
230 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
231 SmiCheck smi_check = INLINE_SMI_CHECK,
232 PointersToHereCheck pointers_to_here_check_for_value =
233 kPointersToHereMaybeInteresting) {
234 RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
235 lr_status, save_fp, remembered_set_action, smi_check,
236 pointers_to_here_check_for_value);
239 void RecordWriteForMap(Register object, Register map, Register dst,
240 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
242 // For a given |object| notify the garbage collector that the slot |address|
243 // has been written. |value| is the object being stored. The value and
244 // address registers are clobbered by the operation.
246 Register object, Register address, Register value,
247 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
248 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
249 SmiCheck smi_check = INLINE_SMI_CHECK,
250 PointersToHereCheck pointers_to_here_check_for_value =
251 kPointersToHereMaybeInteresting);
253 void Push(Register src) { push(src); }  // Push one register onto the stack.
256 void Push(Handle<Object> handle);
257 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }  // Push a smi, wrapped as a handle.
259 // Push two registers. Pushes leftmost register first (to highest address).
260 void Push(Register src1, Register src2) {
261 StorePU(src2, MemOperand(sp, -2 * kPointerSize));
262 StoreP(src1, MemOperand(sp, kPointerSize));
265 // Push three registers. Pushes leftmost register first (to highest address).
266 void Push(Register src1, Register src2, Register src3) {
267 StorePU(src3, MemOperand(sp, -3 * kPointerSize));
268 StoreP(src2, MemOperand(sp, kPointerSize));
269 StoreP(src1, MemOperand(sp, 2 * kPointerSize));
272 // Push four registers. Pushes leftmost register first (to highest address).
273 void Push(Register src1, Register src2, Register src3, Register src4) {
274 StorePU(src4, MemOperand(sp, -4 * kPointerSize));
275 StoreP(src3, MemOperand(sp, kPointerSize));
276 StoreP(src2, MemOperand(sp, 2 * kPointerSize));
277 StoreP(src1, MemOperand(sp, 3 * kPointerSize));
280 // Push five registers. Pushes leftmost register first (to highest address).
281 void Push(Register src1, Register src2, Register src3, Register src4,
283 StorePU(src5, MemOperand(sp, -5 * kPointerSize));
284 StoreP(src4, MemOperand(sp, kPointerSize));
285 StoreP(src3, MemOperand(sp, 2 * kPointerSize));
286 StoreP(src2, MemOperand(sp, 3 * kPointerSize));
287 StoreP(src1, MemOperand(sp, 4 * kPointerSize));
290 void Pop(Register dst) { pop(dst); }  // Pop one register off the stack.
292 // Pop two registers. Pops rightmost register first (from lower address).
293 void Pop(Register src1, Register src2) {
294 LoadP(src2, MemOperand(sp, 0));
295 LoadP(src1, MemOperand(sp, kPointerSize));
296 addi(sp, sp, Operand(2 * kPointerSize));
299 // Pop three registers. Pops rightmost register first (from lower address).
300 void Pop(Register src1, Register src2, Register src3) {
301 LoadP(src3, MemOperand(sp, 0));
302 LoadP(src2, MemOperand(sp, kPointerSize));
303 LoadP(src1, MemOperand(sp, 2 * kPointerSize));
304 addi(sp, sp, Operand(3 * kPointerSize));
307 // Pop four registers. Pops rightmost register first (from lower address).
308 void Pop(Register src1, Register src2, Register src3, Register src4) {
309 LoadP(src4, MemOperand(sp, 0));
310 LoadP(src3, MemOperand(sp, kPointerSize));
311 LoadP(src2, MemOperand(sp, 2 * kPointerSize));
312 LoadP(src1, MemOperand(sp, 3 * kPointerSize));
313 addi(sp, sp, Operand(4 * kPointerSize));
316 // Pop five registers. Pops rightmost register first (from lower address).
317 void Pop(Register src1, Register src2, Register src3, Register src4,
319 LoadP(src5, MemOperand(sp, 0));
320 LoadP(src4, MemOperand(sp, kPointerSize));
321 LoadP(src3, MemOperand(sp, 2 * kPointerSize));
322 LoadP(src2, MemOperand(sp, 3 * kPointerSize));
323 LoadP(src1, MemOperand(sp, 4 * kPointerSize));
324 addi(sp, sp, Operand(5 * kPointerSize));
327 // Push a fixed frame, consisting of lr, fp, context and
328 // JS function / marker id if marker_reg is a valid register.
329 void PushFixedFrame(Register marker_reg = no_reg);
330 void PopFixedFrame(Register marker_reg = no_reg);
332 // Push and pop the registers that can hold pointers, as defined by the
333 // RegList constant kSafepointSavedRegisters.
334 void PushSafepointRegisters();
335 void PopSafepointRegisters();
336 // Store value in register src in the safepoint stack slot for
338 void StoreToSafepointRegisterSlot(Register src, Register dst);
339 // Load the value of the src register from its safepoint stack slot
340 // into register dst.
341 void LoadFromSafepointRegisterSlot(Register dst, Register src);
343 // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
345 // Does not handle errors.
346 void FlushICache(Register address, size_t size, Register scratch);
348 // If the value is a NaN, canonicalize the value else, do nothing.
349 void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
350 void CanonicalizeNaN(const DoubleRegister value) {
351 CanonicalizeNaN(value, value);
354 // Converts the integer (untagged smi) in |src| to a double, storing
355 // the result to |double_dst|
356 void ConvertIntToDouble(Register src, DoubleRegister double_dst);
358 // Converts the unsigned integer (untagged smi) in |src| to
359 // a double, storing the result to |double_dst|
360 void ConvertUnsignedIntToDouble(Register src, DoubleRegister double_dst);
362 // Converts the integer (untagged smi) in |src| to
363 // a float, storing the result in |dst|
364 // Warning: The value in |int_scratch| will be changed in the process!
365 void ConvertIntToFloat(const DoubleRegister dst, const Register src,
366 const Register int_scratch);
368 // Converts the double_input to an integer. Note that, upon return,
369 // the contents of double_dst will also hold the fixed point representation.
370 void ConvertDoubleToInt64(const DoubleRegister double_input,
371 #if !V8_TARGET_ARCH_PPC64
372 const Register dst_hi,
374 const Register dst, const DoubleRegister double_dst,
375 FPRoundingMode rounding_mode = kRoundToZero);
377 // Generates function and stub prologue code.
378 void StubPrologue(int prologue_offset = 0);
379 void Prologue(bool code_pre_aging, int prologue_offset = 0);
382 // stack_space - extra stack space, used for alignment before call to C.
383 void EnterExitFrame(bool save_doubles, int stack_space = 0);
385 // Leave the current exit frame. Expects the return value in r0.
386 // Expect the number of values, pushed prior to the exit frame, to
387 // remove in a register (or no_reg, if there is nothing to remove).
388 void LeaveExitFrame(bool save_doubles, Register argument_count,
389 bool restore_context,
390 bool argument_count_is_length = false);
392 // Get the actual activation frame alignment for target environment.
393 static int ActivationFrameAlignment();
395 void LoadContext(Register dst, int context_chain_length);
397 // Conditionally load the cached Array transitioned map of type
398 // transitioned_kind from the native context if the map in register
399 // map_in_out is the cached Array map in the native context of
401 void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
402 ElementsKind transitioned_kind,
405 Label* no_map_match);
407 void LoadGlobalFunction(int index, Register function);
409 // Load the initial map from the global function. The registers
410 // function and map can be the same, function is then overwritten.
411 void LoadGlobalFunctionInitialMap(Register function, Register map,
414 void InitializeRootRegister() {
415 ExternalReference roots_array_start =
416 ExternalReference::roots_array_start(isolate());
417 mov(kRootRegister, Operand(roots_array_start));
420 // ----------------------------------------------------------------
421 // new PPC macro-assembler interfaces that are slightly higher level
422 // than assembler-ppc and may generate variable length sequences
424 // load a literal signed int value <value> to GPR <dst>
425 void LoadIntLiteral(Register dst, int value);
427 // load an SMI value <value> to GPR <dst>
428 void LoadSmiLiteral(Register dst, Smi* smi);
430 // load a literal double value <value> to FPR <result>
431 void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
433 void LoadWord(Register dst, const MemOperand& mem, Register scratch);
434 void LoadWordArith(Register dst, const MemOperand& mem,
435 Register scratch = no_reg);
436 void StoreWord(Register src, const MemOperand& mem, Register scratch);
438 void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
439 void LoadHalfWordArith(Register dst, const MemOperand& mem,
440 Register scratch = no_reg);
441 void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);
443 void LoadByte(Register dst, const MemOperand& mem, Register scratch);
444 void StoreByte(Register src, const MemOperand& mem, Register scratch);
446 void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
447 Register scratch = no_reg);
448 void StoreRepresentation(Register src, const MemOperand& mem,
449 Representation r, Register scratch = no_reg);
451 void LoadDouble(DoubleRegister dst, const MemOperand& mem, Register scratch);
452 void StoreDouble(DoubleRegister src, const MemOperand& mem, Register scratch);
454 // Move values between integer and floating point registers.
455 void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
456 void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
458 void MovInt64ToDouble(DoubleRegister dst,
459 #if !V8_TARGET_ARCH_PPC64
463 #if V8_TARGET_ARCH_PPC64
464 void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
465 Register src_lo, Register scratch);
467 void MovDoubleLowToInt(Register dst, DoubleRegister src);
468 void MovDoubleHighToInt(Register dst, DoubleRegister src);
469 void MovDoubleToInt64(
470 #if !V8_TARGET_ARCH_PPC64
473 Register dst, DoubleRegister src);
475 void Add(Register dst, Register src, intptr_t value, Register scratch);
476 void Cmpi(Register src1, const Operand& src2, Register scratch,
478 void Cmpli(Register src1, const Operand& src2, Register scratch,
480 void Cmpwi(Register src1, const Operand& src2, Register scratch,
482 void Cmplwi(Register src1, const Operand& src2, Register scratch,
484 void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
485 void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
486 void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
488 void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
489 void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
490 void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
492 void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
494 void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
497 // Set new rounding mode RN to FPSCR
498 void SetRoundingMode(FPRoundingMode RN);
500 // reset rounding mode to default (kRoundToNearest)
501 void ResetRoundingMode();
503 // These exist to provide portability between 32 and 64bit
504 void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
505 void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
507 // ---------------------------------------------------------------------------
508 // JavaScript invokes
510 // Invoke the JavaScript function code by either calling or jumping.
511 void InvokeCode(Register code, const ParameterCount& expected,
512 const ParameterCount& actual, InvokeFlag flag,
513 const CallWrapper& call_wrapper);
515 // Invoke the JavaScript function in the given register. Changes the
516 // current context to the context in the function before invoking.
517 void InvokeFunction(Register function, const ParameterCount& actual,
518 InvokeFlag flag, const CallWrapper& call_wrapper);
520 void InvokeFunction(Register function, const ParameterCount& expected,
521 const ParameterCount& actual, InvokeFlag flag,
522 const CallWrapper& call_wrapper);
524 void InvokeFunction(Handle<JSFunction> function,
525 const ParameterCount& expected,
526 const ParameterCount& actual, InvokeFlag flag,
527 const CallWrapper& call_wrapper);
529 void IsObjectJSObjectType(Register heap_object, Register map,
530 Register scratch, Label* fail);
532 void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
534 void IsObjectJSStringType(Register object, Register scratch, Label* fail);
536 void IsObjectNameType(Register object, Register scratch, Label* fail);
538 // ---------------------------------------------------------------------------
543 // ---------------------------------------------------------------------------
544 // Exception handling
546 // Push a new try handler and link into try handler chain.
547 void PushTryHandler(StackHandler::Kind kind, int handler_index);
549 // Unlink the stack handler on top of the stack from the try handler chain.
550 // Must preserve the result register.
551 void PopTryHandler();
553 // Passes thrown value to the handler of top of the try handler chain.
554 void Throw(Register value);
556 // Propagates an uncatchable exception to the top of the current JS stack's
558 void ThrowUncatchable(Register value);
560 // ---------------------------------------------------------------------------
561 // Inline caching support
563 // Generate code for checking access rights - used for security checks
564 // on access to global objects across environments. The holder register
565 // is left untouched, whereas both scratch registers are clobbered.
566 void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
569 void GetNumberHash(Register t0, Register scratch);
571 void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
572 Register result, Register t0, Register t1,
576 inline void MarkCode(NopMarkerTypes type) { nop(type); }  // Emit a marker nop; detected later via IsMarkedCode/GetCodeMarker.
578 // Check if the given instruction is a 'type' marker.
579 // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
580 // These instructions are generated to mark special location in the code,
581 // like some special IC code.
582 static inline bool IsMarkedCode(Instr instr, int type) {
583 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
584 return IsNop(instr, type);
588 static inline int GetCodeMarker(Instr instr) {
589 int dst_reg_offset = 12;
590 int dst_mask = 0xf << dst_reg_offset;
592 int dst_reg = (instr & dst_mask) >> dst_reg_offset;
593 int src_reg = instr & src_mask;
594 uint32_t non_register_mask = ~(dst_mask | src_mask);
595 uint32_t mov_mask = al | 13 << 21;
597 // Return <n> if we have a mov rn rn, else return -1.
598 int type = ((instr & non_register_mask) == mov_mask) &&
599 (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
600 (dst_reg < LAST_CODE_MARKER)
603 DCHECK((type == -1) ||
604 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
609 // ---------------------------------------------------------------------------
610 // Allocation support
612 // Allocate an object in new space or old pointer space. The object_size is
613 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
614 // is passed. If the space is exhausted control continues at the gc_required
615 // label. The allocated object is returned in result. If the flag
616 // tag_allocated_object is true the result is tagged as a heap object.
617 // All registers are clobbered also when control continues at the gc_required
619 void Allocate(int object_size, Register result, Register scratch1,
620 Register scratch2, Label* gc_required, AllocationFlags flags);
622 void Allocate(Register object_size, Register result, Register scratch1,
623 Register scratch2, Label* gc_required, AllocationFlags flags);
625 // Undo allocation in new space. The object passed and objects allocated after
626 // it will no longer be allocated. The caller must make sure that no pointers
627 // are left to the object(s) no longer allocated as they would be invalid when
628 // allocation is undone.
629 void UndoAllocationInNewSpace(Register object, Register scratch);
632 void AllocateTwoByteString(Register result, Register length,
633 Register scratch1, Register scratch2,
634 Register scratch3, Label* gc_required);
635 void AllocateOneByteString(Register result, Register length,
636 Register scratch1, Register scratch2,
637 Register scratch3, Label* gc_required);
638 void AllocateTwoByteConsString(Register result, Register length,
639 Register scratch1, Register scratch2,
641 void AllocateOneByteConsString(Register result, Register length,
642 Register scratch1, Register scratch2,
644 void AllocateTwoByteSlicedString(Register result, Register length,
645 Register scratch1, Register scratch2,
647 void AllocateOneByteSlicedString(Register result, Register length,
648 Register scratch1, Register scratch2,
651 // Allocates a heap number or jumps to the gc_required label if the young
652 // space is full and a scavenge is needed. All registers are clobbered also
653 // when control continues at the gc_required label.
654 void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
655 Register heap_number_map, Label* gc_required,
656 TaggingMode tagging_mode = TAG_RESULT,
657 MutableMode mode = IMMUTABLE);
658 void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
659 Register scratch1, Register scratch2,
660 Register heap_number_map,
663 // Copies a fixed number of fields of heap objects from src to dst.
664 void CopyFields(Register dst, Register src, RegList temps, int field_count);
666 // Copies a number of bytes from src to dst. All registers are clobbered. On
667 // exit src and dst will point to the place just after where the last byte was
668 // read or written and length will be zero.
669 void CopyBytes(Register src, Register dst, Register length, Register scratch);
671 // Initialize fields with filler values. |count| fields starting at
672 // |start_offset| are overwritten with the value in |filler|. At the end the
673 // loop, |start_offset| points at the next uninitialized field. |count| is
674 // assumed to be non-zero.
675 void InitializeNFieldsWithFiller(Register start_offset, Register count,
678 // Initialize fields with filler values. Fields starting at |start_offset|
679 // not including end_offset are overwritten with the value in |filler|. At
680 // the end the loop, |start_offset| takes the value of |end_offset|.
681 void InitializeFieldsWithFiller(Register start_offset, Register end_offset,
684 // ---------------------------------------------------------------------------
685 // Support functions.
687 // Try to get function prototype of a function and puts the value in
688 // the result register. Checks that the function really is a
689 // function and jumps to the miss label if the fast checks fail. The
690 // function register will be untouched; the other registers may be
692 void TryGetFunctionPrototype(Register function, Register result,
693 Register scratch, Label* miss,
694 bool miss_on_bound_function = false);
696 // Compare object type for heap object. heap_object contains a non-Smi
697 // whose object type should be compared with the given type. This both
698 // sets the flags and leaves the object type in the type_reg register.
699 // It leaves the map in the map register (unless the type_reg and map register
700 // are the same register). It leaves the heap object in the heap_object
701 // register unless the heap_object register is the same register as one of the
703 // Type_reg can be no_reg. In that case ip is used.
704 void CompareObjectType(Register heap_object, Register map, Register type_reg,
707 // Compare object type for heap object. Branch to false_label if type
708 // is lower than min_type or greater than max_type.
709 // Load map into the register map.
710 void CheckObjectTypeRange(Register heap_object, Register map,
711 InstanceType min_type, InstanceType max_type,
714 // Compare instance type in a map. map contains a valid map object whose
715 // object type should be compared with the given type. This both
716 // sets the flags and leaves the object type in the type_reg register.
717 void CompareInstanceType(Register map, Register type_reg, InstanceType type);
720 // Check if a map for a JSObject indicates that the object has fast elements.
721 // Jump to the specified label if it does not.
722 void CheckFastElements(Register map, Register scratch, Label* fail);
724 // Check if a map for a JSObject indicates that the object can have both smi
725 // and HeapObject elements. Jump to the specified label if it does not.
726 void CheckFastObjectElements(Register map, Register scratch, Label* fail);
728 // Check if a map for a JSObject indicates that the object has fast smi only
729 // elements. Jump to the specified label if it does not.
730 void CheckFastSmiElements(Register map, Register scratch, Label* fail);
732 // Check to see if maybe_number can be stored as a double in
733 // FastDoubleElements. If it can, store it at the index specified by key in
734 // the FastDoubleElements array elements. Otherwise jump to fail.
735 void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
736 Register elements_reg, Register scratch1,
737 DoubleRegister double_scratch, Label* fail,
738 int elements_offset = 0);
740 // Compare an object's map with the specified map and its transitioned
741 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
742 // set with result of map compare. If multiple map compares are required, the
743 // compare sequences branches to early_success.
744 void CompareMap(Register obj, Register scratch, Handle<Map> map,
745 Label* early_success);
747 // As above, but the map of the object is already loaded into the register
748 // which is preserved by the code generated.
749 void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
751 // Check if the map of an object is equal to a specified map and branch to
752 // label if not. Skip the smi check if not required (object is known to be a
753 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
754 // against maps that are ElementsKind transition maps of the specified map.
755 void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
756 SmiCheckType smi_check_type);
759 void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
760 Label* fail, SmiCheckType smi_check_type);
763 // Check if the map of an object is equal to a specified weak map and branch
764 // to a specified target if equal. Skip the smi check if not required
765 // (object is known to be a heap object)
766 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
767 Handle<WeakCell> cell, Handle<Code> success,
768 SmiCheckType smi_check_type);
770 // Compare the given value and the value of weak cell.
771 void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
774 void GetWeakValue(Register value, Handle<WeakCell> cell);
776 // Load the value of the weak cell in the value register. Branch to the given
777 // miss label if the weak cell was cleared.
778 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
780 // Compare the object in a register to a value from the root list.
781 // Uses the ip register as scratch.
782 void CompareRoot(Register obj, Heap::RootListIndex index);
785 // Load and check the instance type of an object for being a string.
786 // Loads the type into the second argument register.
787 // Returns a condition that will be enabled if the object was a string.
788 Condition IsObjectStringType(Register obj, Register type) {
789 LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
790 lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
791 andi(r0, type, Operand(kIsNotStringMask));
792 DCHECK_EQ(0u, kStringTag);
797 // Picks out an array index from the hash field.
799 // hash - holds the index's hash. Clobbered.
800 // index - holds the overwritten index on exit.
801 void IndexFromHash(Register hash, Register index);
803 // Extract the 'num_least_bits' least significant bits of src into dst.
804 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
805 void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
807 // Load the value of a smi object into a double register.
808 void SmiToDouble(DoubleRegister value, Register smi);
810 // Check if a double can be exactly represented as a signed 32-bit integer.
811 // CR_EQ in cr7 is set if true.
812 void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
813 Register scratch2, DoubleRegister double_scratch);
815 // Try to convert a double to a signed 32-bit integer.
816 // CR_EQ in cr7 is set and result assigned if the conversion is exact.
817 void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
818 Register scratch, DoubleRegister double_scratch);
820 // Floor a double and writes the value to the result register.
821 // Go to exact if the conversion is exact (to be able to test -0),
822 // fall through calling code if an overflow occurred, else go to done.
823 // In return, input_high is loaded with high bits of input.
824 void TryInt32Floor(Register result, DoubleRegister double_input,
825 Register input_high, Register scratch,
826 DoubleRegister double_scratch, Label* done, Label* exact);
828 // Performs a truncating conversion of a floating point number as used by
829 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
830 // succeeds, otherwise falls through if result is saturated. On return
831 // 'result' either holds answer, or is clobbered on fall through.
833 // Only public for the test code in test-code-stubs-arm.cc.
834 void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
837 // Performs a truncating conversion of a floating point number as used by
838 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
839 // Exits with 'result' holding the answer.
840 void TruncateDoubleToI(Register result, DoubleRegister double_input);
842 // Performs a truncating conversion of a heap number as used by
843 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
844 // must be different registers. Exits with 'result' holding the answer.
845 void TruncateHeapNumberToI(Register result, Register object);
847 // Converts the smi or heap number in object to an int32 using the rules
848 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
849 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
850 // different registers.
851 void TruncateNumberToI(Register object, Register result,
852 Register heap_number_map, Register scratch1,
855 // Overflow handling functions.
856 // Usage: call the appropriate arithmetic function and then call one of the
857 // flow control functions with the corresponding label.
859 // Compute dst = left + right, setting condition codes. dst may be same as
860 // either left or right (or a unique register). left and right must not be
861 // the same register.
862 void AddAndCheckForOverflow(Register dst, Register left, Register right,
863 Register overflow_dst, Register scratch = r0);
864 void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
865 Register overflow_dst, Register scratch = r0);
867 // Compute dst = left - right, setting condition codes. dst may be same as
868 // either left or right (or a unique register). left and right must not be
869 // the same register.
870 void SubAndCheckForOverflow(Register dst, Register left, Register right,
871 Register overflow_dst, Register scratch = r0);
// Branch to 'label' if the preceding *AndCheckForOverflow call detected
// overflow. NOTE(review): relies on that call leaving LT set in cr0 on
// overflow (overflow_dst < 0) — confirm against the implementation.
873 void BranchOnOverflow(Label* label) { blt(label, cr0); }
// Branch to 'label' if no overflow was detected (GE set in cr0).
875 void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
877 void RetOnOverflow(void) {
885 void RetOnNoOverflow(void) {
893 // Pushes <count> double values to <location>, starting from d<first>.
894 void SaveFPRegs(Register location, int first, int count);
896 // Pops <count> double values from <location>, starting from d<first>.
897 void RestoreFPRegs(Register location, int first, int count);
899 // ---------------------------------------------------------------------------
903 void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
904 Condition cond = al);
907 void TailCallStub(CodeStub* stub, Condition cond = al);
909 // Call a runtime routine.
910 void CallRuntime(const Runtime::Function* f, int num_arguments,
911 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
912 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
913 const Runtime::Function* function = Runtime::FunctionForId(id);
914 CallRuntime(function, function->nargs, kSaveFPRegs);
917 // Convenience function: Same as above, but takes the fid instead.
918 void CallRuntime(Runtime::FunctionId id, int num_arguments,
919 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
920 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
923 // Convenience function: call an external reference.
924 void CallExternalReference(const ExternalReference& ext, int num_arguments);
926 // Tail call of a runtime routine (jump).
927 // Like JumpToExternalReference, but also takes care of passing the number
929 void TailCallExternalReference(const ExternalReference& ext,
930 int num_arguments, int result_size);
932 // Convenience function: tail call a runtime routine (jump).
933 void TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
936 int CalculateStackPassedWords(int num_reg_arguments,
937 int num_double_arguments);
939 // Before calling a C-function from generated code, align arguments on stack.
940 // After aligning the frame, non-register arguments must be stored in
941 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
942 // are word sized. If double arguments are used, this function assumes that
943 // all double arguments are stored before core registers; otherwise the
944 // correct alignment of the double values is not guaranteed.
945 // Some compilers/platforms require the stack to be aligned when calling
947 // Needs a scratch register to do some arithmetic. This register will be
949 void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
951 void PrepareCallCFunction(int num_reg_arguments, Register scratch);
953 // There are two ways of passing double arguments on this platform, depending
954 // whether soft or hard floating point ABI is used. These functions
955 // abstract parameter passing for the three different ways we call
956 // C functions from generated code.
957 void MovToFloatParameter(DoubleRegister src);
958 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
959 void MovToFloatResult(DoubleRegister src);
961 // Calls a C function and cleans up the space for arguments allocated
962 // by PrepareCallCFunction. The called function is not allowed to trigger a
963 // garbage collection, since that might move the code and invalidate the
964 // return address (unless this is somehow accounted for by the called
966 void CallCFunction(ExternalReference function, int num_arguments);
967 void CallCFunction(Register function, int num_arguments);
968 void CallCFunction(ExternalReference function, int num_reg_arguments,
969 int num_double_arguments);
970 void CallCFunction(Register function, int num_reg_arguments,
971 int num_double_arguments);
973 void MovFromFloatParameter(DoubleRegister dst);
974 void MovFromFloatResult(DoubleRegister dst);
976 // Jump to a runtime routine.
977 void JumpToExternalReference(const ExternalReference& builtin);
979 // Invoke specified builtin JavaScript function. Adds an entry to
980 // the unresolved list if the name does not resolve.
981 void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
982 const CallWrapper& call_wrapper = NullCallWrapper());
984 // Store the code object for the given builtin in the target register and
985 // setup the function in r1.
986 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
988 // Store the function for the given builtin in the target register.
989 void GetBuiltinFunction(Register target, Builtins::JavaScript id);
991 Handle<Object> CodeObject() {
992 DCHECK(!code_object_.is_null());
997 // Emit code for a truncating division by a constant. The dividend register is
998 // unchanged and ip gets clobbered. Dividend and result must be different.
999 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1001 // ---------------------------------------------------------------------------
1002 // StatsCounter support
1004 void SetCounter(StatsCounter* counter, int value, Register scratch1,
1006 void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1008 void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1012 // ---------------------------------------------------------------------------
1015 // Calls Abort(msg) if the condition cond is not satisfied.
1016 // Use --debug_code to enable.
1017 void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
1018 void AssertFastElements(Register elements);
1020 // Like Assert(), but always enabled.
1021 void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
1023 // Print a message to stdout and abort execution.
1024 void Abort(BailoutReason reason);
1026 // Verify restrictions about code generated in stubs.
// Mark/query whether the assembler is currently generating stub code.
1027 void set_generating_stub(bool value) { generating_stub_ = value; }
1028 bool generating_stub() { return generating_stub_; }
// Mark/query whether a frame has been set up for the current code.
1029 void set_has_frame(bool value) { has_frame_ = value; }
1030 bool has_frame() { return has_frame_; }
// Returns true if it is safe to call the given stub from here (defined
// out of line; depends on the stub_/frame_ state above).
1031 inline bool AllowThisStubCall(CodeStub* stub);
1033 // ---------------------------------------------------------------------------
1036 // Check whether the value of reg is a power of two and not zero. If not
1037 // control continues at the label not_power_of_two. If reg is a power of two
1038 // the register scratch contains the value of (reg - 1) when control falls
1040 void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
1041 Label* not_power_of_two_or_zero);
1042 // Check whether the value of reg is a power of two and not zero.
1043 // Control falls through if it is, with scratch containing the mask
1045 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1046 // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1047 // strictly positive but not a power of two.
1048 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
1049 Label* zero_and_neg,
1050 Label* not_power_of_two);
1052 // ---------------------------------------------------------------------------
1053 // Bit testing/extraction
1055 // Bit numbering is such that the least significant bit is bit 0
1056 // (for consistency between 32/64-bit).
1058 // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
1059 // and place them into the least significant bits of dst.
1060 inline void ExtractBitRange(Register dst, Register src, int rangeStart,
1061 int rangeEnd, RCBit rc = LeaveRC) {
1062 DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
1063 int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
1064 int width = rangeStart - rangeEnd + 1;
1065 #if V8_TARGET_ARCH_PPC64
1066 rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
1068 rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1, rc);
1072 inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
1073 RCBit rc = LeaveRC) {
1074 ExtractBitRange(dst, src, bitNumber, bitNumber, rc);
1077 // Extract consecutive bits (defined by mask) from src and place them
1078 // into the least significant bits of dst.
1079 inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
1080 RCBit rc = LeaveRC) {
1081 int start = kBitsPerPointer - 1;
1083 uintptr_t bit = (1L << start);
1085 while (bit && (mask & bit) == 0) {
1092 while (bit && (mask & bit)) {
1097 // 1-bits in mask must be contiguous
1098 DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
1100 ExtractBitRange(dst, src, start, end, rc);
1103 // Test single bit in value.
1104 inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
1105 ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC);
1108 // Test consecutive bit range in value. Range is defined by
1109 // rangeStart - rangeEnd.
1110 inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
1111 Register scratch = r0) {
1112 ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC);
1115 // Test consecutive bit range in value. Range is defined by mask.
1116 inline void TestBitMask(Register value, uintptr_t mask,
1117 Register scratch = r0) {
1118 ExtractBitMask(scratch, value, mask, SetRC);
1122 // ---------------------------------------------------------------------------
1125 // Shift left by kSmiShift
1126 void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
1127 void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
1128 ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
1131 #if !V8_TARGET_ARCH_PPC64
1132 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
1133 void SmiTagCheckOverflow(Register reg, Register overflow);
1134 void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1136 inline void JumpIfNotSmiCandidate(Register value, Register scratch,
1137 Label* not_smi_label) {
1138 // High bits must be identical to fit into a Smi
1139 STATIC_ASSERT(kSmiShift == 1);
1140 addis(scratch, value, Operand(0x40000000u >> 16));
1141 cmpi(scratch, Operand::Zero());
1145 inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
1146 // The test is different for unsigned int values. Since we need
1147 // the value to be in the range of a positive smi, we can't
1148 // handle any of the high bits being set in the value.
1149 TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
1152 inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
1153 Label* not_smi_label) {
1154 TestUnsignedSmiCandidate(value, scratch);
1155 bne(not_smi_label, cr0);
1158 void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
1160 void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
1161 ShiftRightArithImm(dst, src, kSmiShift, rc);
1164 void SmiToPtrArrayOffset(Register dst, Register src) {
1165 #if V8_TARGET_ARCH_PPC64
1166 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
1167 ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
1169 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
1170 ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
1174 void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }
1176 void SmiToShortArrayOffset(Register dst, Register src) {
1177 #if V8_TARGET_ARCH_PPC64
1178 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
1179 ShiftRightArithImm(dst, src, kSmiShift - 1);
1181 STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
1188 void SmiToIntArrayOffset(Register dst, Register src) {
1189 #if V8_TARGET_ARCH_PPC64
1190 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
1191 ShiftRightArithImm(dst, src, kSmiShift - 2);
1193 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
1194 ShiftLeftImm(dst, src, Operand(2 - kSmiShift));
1198 #define SmiToFloatArrayOffset SmiToIntArrayOffset
1200 void SmiToDoubleArrayOffset(Register dst, Register src) {
1201 #if V8_TARGET_ARCH_PPC64
1202 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
1203 ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2);
1205 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
1206 ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
1210 void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
1211 if (kSmiShift < elementSizeLog2) {
1212 ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift));
1213 } else if (kSmiShift > elementSizeLog2) {
1214 ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2);
1215 } else if (!dst.is(src)) {
1220 void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
1223 SmiToArrayOffset(dst, src, elementSizeLog2);
1225 ShiftLeftImm(dst, src, Operand(elementSizeLog2));
1229 // Untag the source value into destination and jump if source is a smi.
1230 // Source and destination can be the same register.
1231 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1233 // Untag the source value into destination and jump if source is not a smi.
1234 // Source and destination can be the same register.
1235 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1237 inline void TestIfSmi(Register value, Register scratch) {
1238 TestBitRange(value, kSmiTagSize - 1, 0, scratch);
1241 inline void TestIfPositiveSmi(Register value, Register scratch) {
1242 #if V8_TARGET_ARCH_PPC64
1243 rldicl(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize), SetRC);
1245 rlwinm(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize),
1246 kBitsPerPointer - 1, SetRC);
1250 // Jump if the register contains a smi.
1251 inline void JumpIfSmi(Register value, Label* smi_label) {
1252 TestIfSmi(value, r0);
1253 beq(smi_label, cr0); // branch if SMI
1255 // Jump if the register contains a non-smi.
1256 inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1257 TestIfSmi(value, r0);
1258 bne(not_smi_label, cr0);
1260 // Jump if either of the registers contain a non-smi.
1261 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1262 // Jump if either of the registers contain a smi.
1263 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1265 // Abort execution if argument is a smi, enabled via --debug-code.
1266 void AssertNotSmi(Register object);
1267 void AssertSmi(Register object);
1270 #if V8_TARGET_ARCH_PPC64
1271 inline void TestIfInt32(Register value, Register scratch,
1272 CRegister cr = cr7) {
1273 // High bits must be identical to fit into a 32-bit integer
1274 extsw(scratch, value);
1275 cmp(scratch, value, cr);
1278 inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
1279 CRegister cr = cr7) {
1280 // High bits must be identical to fit into a 32-bit integer
1281 srawi(scratch, lo_word, 31);
1282 cmp(scratch, hi_word, cr);
1286 #if V8_TARGET_ARCH_PPC64
1287 // Ensure it is permissible to read/write the int value directly from
1288 // upper half of the smi.
1289 STATIC_ASSERT(kSmiTag == 0);
1290 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
1292 #if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
1293 #define SmiWordOffset(offset) (offset + kPointerSize / 2)
1295 #define SmiWordOffset(offset) offset
1298 // Abort execution if argument is not a string, enabled via --debug-code.
1299 void AssertString(Register object);
1301 // Abort execution if argument is not a name, enabled via --debug-code.
1302 void AssertName(Register object);
1304 // Abort execution if argument is not undefined or an AllocationSite, enabled
1305 // via --debug-code.
1306 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1308 // Abort execution if reg is not the root value with the given index,
1309 // enabled via --debug-code.
1310 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1312 // ---------------------------------------------------------------------------
1313 // HeapNumber utilities
1315 void JumpIfNotHeapNumber(Register object, Register heap_number_map,
1316 Register scratch, Label* on_not_heap_number);
1318 // ---------------------------------------------------------------------------
1321 // Generate code to do a lookup in the number string cache. If the number in
1322 // the register object is found in the cache the generated code falls through
1323 // with the result in the result register. The object and the result register
1324 // can be the same. If the number is not found in the cache the code jumps to
1325 // the label not_found with only the content of register object unchanged.
1326 void LookupNumberStringCache(Register object, Register result,
1327 Register scratch1, Register scratch2,
1328 Register scratch3, Label* not_found);
1330 // Checks if both objects are sequential one-byte strings and jumps to label
1331 // if either is not. Assumes that neither object is a smi.
1332 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
1338 // Checks if both objects are sequential one-byte strings and jumps to label
1339 // if either is not.
1340 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1343 Label* not_flat_one_byte_strings);
1345 // Checks if both instance types are sequential one-byte strings and jumps to
1346 // label if either is not.
1347 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1348 Register first_object_instance_type, Register second_object_instance_type,
1349 Register scratch1, Register scratch2, Label* failure);
1351 // Check if instance type is sequential one-byte string and jump to label if
1353 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1356 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1358 void EmitSeqStringSetCharCheck(Register string, Register index,
1359 Register value, uint32_t encoding_mask);
1361 // ---------------------------------------------------------------------------
1362 // Patching helpers.
1364 // Retrieve/patch the relocated value (lis/ori pair or constant pool load).
1365 void GetRelocatedValue(Register location, Register result, Register scratch);
1366 void SetRelocatedValue(Register location, Register scratch,
1367 Register new_value);
1369 void ClampUint8(Register output_reg, Register input_reg);
1371 // Saturate a value into 8-bit unsigned integer
1372 // if input_value < 0, output_value is 0
1373 // if input_value > 255, output_value is 255
1374 // otherwise output_value is the (int)input_value (round to nearest)
1375 void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
1376 DoubleRegister temp_double_reg);
1379 void LoadInstanceDescriptors(Register map, Register descriptors);
1380 void EnumLength(Register dst, Register map);
1381 void NumberOfOwnDescriptors(Register dst, Register map);
1382 void LoadAccessor(Register dst, Register holder, int accessor_index,
1383 AccessorComponent accessor);
1385 template <typename Field>
1386 void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
1387 ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
1391 template <typename Field>
1392 void DecodeField(Register reg, RCBit rc = LeaveRC) {
1393 DecodeField<Field>(reg, reg, rc);
1396 template <typename Field>
1397 void DecodeFieldToSmi(Register dst, Register src) {
1398 #if V8_TARGET_ARCH_PPC64
1399 DecodeField<Field>(dst, src);
1402 // 32-bit can do this in one instruction:
1403 int start = Field::kSize + kSmiShift - 1;
1404 int end = kSmiShift;
1405 int rotate = kSmiShift - Field::kShift;
1407 rotate += kBitsPerPointer;
1409 rlwinm(dst, src, rotate, kBitsPerPointer - start - 1,
1410 kBitsPerPointer - end - 1);
1414 template <typename Field>
1415 void DecodeFieldToSmi(Register reg) {
1416 DecodeFieldToSmi<Field>(reg, reg);
1419 // Activation support.
1420 void EnterFrame(StackFrame::Type type,
1421 bool load_constant_pool_pointer_reg = false);
1422 // Returns the pc offset at which the frame ends.
1423 int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
1425 // Expects object in r0 and returns map with validated enum cache
1426 // in r0. Assumes that any other register can be used as a scratch.
1427 void CheckEnumCache(Register null_value, Label* call_runtime);
1429 // AllocationMemento support. Arrays may have an associated
1430 // AllocationMemento object that can be checked for in order to pretransition
1432 // On entry, receiver_reg should point to the array object.
1433 // scratch_reg gets clobbered.
1434 // If allocation info is present, condition flags are set to eq.
1435 void TestJSArrayForAllocationMemento(Register receiver_reg,
1436 Register scratch_reg,
1437 Label* no_memento_found);
1439 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1440 Register scratch_reg,
1441 Label* memento_found) {
1442 Label no_memento_found;
1443 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1446 bind(&no_memento_found);
1449 // Jumps to found label if a prototype map has dictionary elements.
1450 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1451 Register scratch1, Label* found);
1454 static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1456 void CallCFunctionHelper(Register function, int num_reg_arguments,
1457 int num_double_arguments);
1459 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
1460 CRegister cr = cr7);
1462 // Helper functions for generating invokes.
1463 void InvokePrologue(const ParameterCount& expected,
1464 const ParameterCount& actual, Handle<Code> code_constant,
1465 Register code_reg, Label* done,
1466 bool* definitely_mismatches, InvokeFlag flag,
1467 const CallWrapper& call_wrapper);
1469 void InitializeNewString(Register string, Register length,
1470 Heap::RootListIndex map_index, Register scratch1,
1473 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1474 void InNewSpace(Register object, Register scratch,
1475 Condition cond, // eq for new space, ne otherwise.
1478 // Helper for finding the mark bits for an address. Afterwards, the
1479 // bitmap register points at the word with the mark bits and the mask
1480 // the position of the first bit. Leaves addr_reg unchanged.
1481 inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
1484 // Helper for throwing exceptions. Compute a handler address and jump to
1485 // it. See the implementation for register usage.
1486 void JumpToHandlerEntry();
1488 // Compute memory operands for safepoint stack slots.
1489 static int SafepointRegisterStackIndex(int reg_code);
1490 MemOperand SafepointRegisterSlot(Register reg);
1491 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1493 #if V8_OOL_CONSTANT_POOL
1494 // Loads the constant pool pointer (kConstantPoolRegister).
1495 enum CodeObjectAccessMethod { CAN_USE_IP, CONSTRUCT_INTERNAL_REFERENCE };
1496 void LoadConstantPoolPointerRegister(CodeObjectAccessMethod access_method,
1497 int ip_code_entry_delta = 0);
1500 bool generating_stub_;
1502 // This handle will be patched with the code object on installation.
1503 Handle<Object> code_object_;
1505 // Needs access to SafepointRegisterStackIndex for compiled frame
1507 friend class StandardFrame;
1511 // The code patcher is used to patch (typically) small parts of code e.g. for
1512 // debugging and other types of instrumentation. When using the code patcher
1513 // the exact number of bytes specified must be emitted. It is not legal to emit
1514 // relocation information. If any of these constraints are violated it causes
1515 // an assertion to fail.
1518 enum FlushICache { FLUSH, DONT_FLUSH };
1520 CodePatcher(byte* address, int instructions, FlushICache flush_cache = FLUSH);
1521 virtual ~CodePatcher();
1523 // Macro assembler to emit code.
1524 MacroAssembler* masm() { return &masm_; }
1526 // Emit an instruction directly.
1527 void Emit(Instr instr);
1529 // Emit the condition part of an instruction leaving the rest of the current
1530 // instruction unchanged.
1531 void EmitCondition(Condition cond);
1534 byte* address_; // The address of the code being patched.
1535 int size_; // Number of bytes of the expected patch size.
1536 MacroAssembler masm_; // Macro assembler used to generate the code.
1537 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1541 // -----------------------------------------------------------------------------
1542 // Static helper functions.
1544 inline MemOperand ContextOperand(Register context, int index) {
1545 return MemOperand(context, Context::SlotOffset(index));
1549 inline MemOperand GlobalObjectOperand() {
1550 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
1554 #ifdef GENERATED_CODE_COVERAGE
1555 #define CODE_COVERAGE_STRINGIFY(x) #x
1556 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1557 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1558 #define ACCESS_MASM(masm) \
1559 masm->stop(__FILE_LINE__); \
1562 #define ACCESS_MASM(masm) masm->
1565 } // namespace v8::internal
1567 #endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_