1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
6 #define V8_PPC_MACRO_ASSEMBLER_PPC_H_
8 #include "src/assembler.h"
9 #include "src/bailout-reason.h"
10 #include "src/frames.h"
11 #include "src/globals.h"
// Give alias names to registers for calling conventions.
// Return values of JS calls.
const Register kReturnRegister0 = {kRegister_r3_Code};
const Register kReturnRegister1 = {kRegister_r4_Code};
// Callee JSFunction and its context.
const Register kJSFunctionRegister = {kRegister_r4_Code};
const Register kContextRegister = {kRegister_r30_Code};
// Registers dedicated to the bytecode interpreter: accumulator, register
// file, current bytecode offset, bytecode array, and dispatch table.
const Register kInterpreterAccumulatorRegister = {kRegister_r3_Code};
const Register kInterpreterRegisterFileRegister = {kRegister_r14_Code};
const Register kInterpreterBytecodeOffsetRegister = {kRegister_r15_Code};
const Register kInterpreterBytecodeArrayRegister = {kRegister_r16_Code};
const Register kInterpreterDispatchTableRegister = {kRegister_r17_Code};
// Registers used when calling into the runtime.
const Register kRuntimeCallFunctionRegister = {kRegister_r4_Code};
const Register kRuntimeCallArgCountRegister = {kRegister_r3_Code};
29 // ----------------------------------------------------------------------------
30 // Static helper functions
32 // Generate a MemOperand for loading a field from an object.
33 inline MemOperand FieldMemOperand(Register object, int offset) {
34 return MemOperand(object, offset - kHeapObjectTag);
38 // Flags used for AllocateHeapNumber
// Configuration enums for the write-barrier helpers (RecordWriteField etc.).
// Whether the barrier must also update the remembered set.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether the stored value must first be tested for being a Smi (for which no
// barrier is needed).
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
// Whether pointers to the destination may be interesting to the incremental
// marker.
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
// Records whether lr has already been spilled, so barrier code knows whether
// it must preserve the link register itself.
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
// Returns a register that is distinct from every register passed in; unused
// slots default to no_reg. Declaration only — defined in the corresponding
// .cc file.
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);
// Returns whether any two of the supplied registers are the same register;
// no_reg placeholders are ignored. Declaration only — defined in the
// corresponding .cc file.
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
                Register reg4 = no_reg, Register reg5 = no_reg,
                Register reg6 = no_reg, Register reg7 = no_reg,
                Register reg8 = no_reg);
// These exist to provide portability between 32 and 64bit.
// Each alias expands to the PPC mnemonic of the pointer-sized operand width:
// doubleword forms (std*/sld*/srd*/clrld*) on PPC64, word forms
// (stw*/slw*/srw*/clrlw*) otherwise. The two definition sets must be mutually
// exclusive, so they are separated by #else / #endif.
#if V8_TARGET_ARCH_PPC64
#define StorePUX stdux
#define ShiftLeftImm sldi
#define ShiftRightImm srdi
#define ClearLeftImm clrldi
#define ClearRightImm clrrdi
#define ShiftRightArithImm sradi
#define ShiftLeft_ sld
#define ShiftRight_ srd
#define ShiftRightArith srad
#else
#define StorePUX stwux
#define ShiftLeftImm slwi
#define ShiftRightImm srwi
#define ClearLeftImm clrlwi
#define ClearRightImm clrrwi
#define ShiftRightArithImm srawi
#define ShiftLeft_ slw
#define ShiftRight_ srw
#define ShiftRightArith sraw
#endif
108 // MacroAssembler implements a collection of frequently used macros.
109 class MacroAssembler : public Assembler {
111 // The isolate parameter can be NULL if the macro assembler should
112 // not use isolate-dependent functionality. In this case, it's the
113 // responsibility of the caller to never invoke such function on the
115 MacroAssembler(Isolate* isolate, void* buffer, int size);
118 // Returns the size of a call in instructions. Note, the value returned is
119 // only valid as long as no entries are added to the constant pool between
120 // checking the call size and emitting the actual call.
121 static int CallSize(Register target);
122 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
123 static int CallSizeNotPredictableCodeSize(Address target,
124 RelocInfo::Mode rmode,
125 Condition cond = al);
127 // Jump, Call, and Ret pseudo instructions implementing inter-working.
128 void Jump(Register target);
129 void JumpToJSEntry(Register target);
130 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
132 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
133 void Call(Register target);
134 void CallJSEntry(Register target);
135 void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
136 int CallSize(Handle<Code> code,
137 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
138 TypeFeedbackId ast_id = TypeFeedbackId::None(),
139 Condition cond = al);
140 void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
141 TypeFeedbackId ast_id = TypeFeedbackId::None(),
142 Condition cond = al);
143 void Ret() { blr(); }
144 void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }
146 // Emit code to discard a non-negative number of pointer-sized elements
147 // from the stack, clobbering only the sp register.
148 void Drop(int count);
155 void Call(Label* target);
157 // Emit call to the code we are currently generating.
159 Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
160 Call(self, RelocInfo::CODE_TARGET);
163 // Register move. May do nothing if the registers are identical.
164 void Move(Register dst, Handle<Object> value);
165 void Move(Register dst, Register src, Condition cond = al);
166 void Move(DoubleRegister dst, DoubleRegister src);
168 void MultiPush(RegList regs, Register location = sp);
169 void MultiPop(RegList regs, Register location = sp);
171 void MultiPushDoubles(RegList dregs, Register location = sp);
172 void MultiPopDoubles(RegList dregs, Register location = sp);
174 // Load an object from the root table.
175 void LoadRoot(Register destination, Heap::RootListIndex index,
176 Condition cond = al);
177 // Store an object to the root table.
178 void StoreRoot(Register source, Heap::RootListIndex index,
179 Condition cond = al);
181 // ---------------------------------------------------------------------------
184 void IncrementalMarkingRecordWriteHelper(Register object, Register value,
187 enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
189 // Record in the remembered set the fact that we have a pointer to new space
190 // at the address pointed to by the addr register. Only works if addr is not
192 void RememberedSetHelper(Register object, // Used for debug code.
193 Register addr, Register scratch,
194 SaveFPRegsMode save_fp,
195 RememberedSetFinalAction and_then);
197 void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
198 Label* condition_met);
200 // Check if object is in new space. Jumps if the object is not in new space.
201 // The register scratch can be object itself, but scratch will be clobbered.
202 void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
203 InNewSpace(object, scratch, ne, branch);
206 // Check if object is in new space. Jumps if the object is in new space.
207 // The register scratch can be object itself, but it will be clobbered.
208 void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
209 InNewSpace(object, scratch, eq, branch);
212 // Check if an object has a given incremental marking color.
213 void HasColor(Register object, Register scratch0, Register scratch1,
214 Label* has_color, int first_bit, int second_bit);
216 void JumpIfBlack(Register object, Register scratch0, Register scratch1,
219 // Checks the color of an object. If the object is already grey or black
220 // then we just fall through, since it is already live. If it is white and
221 // we can determine that it doesn't need to be scanned, then we just mark it
222 // black and fall through. For the rest we jump to the label so the
223 // incremental marker can fix its assumptions.
224 void EnsureNotWhite(Register object, Register scratch1, Register scratch2,
225 Register scratch3, Label* object_is_white_and_not_data);
227 // Detects conservatively whether an object is data-only, i.e. it does need to
228 // be scanned by the garbage collector.
229 void JumpIfDataObject(Register value, Register scratch,
230 Label* not_data_object);
232 // Notify the garbage collector that we wrote a pointer into an object.
233 // |object| is the object being stored into, |value| is the object being
234 // stored. value and scratch registers are clobbered by the operation.
235 // The offset is the offset from the start of the object, not the offset from
236 // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
237 void RecordWriteField(
238 Register object, int offset, Register value, Register scratch,
239 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
240 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
241 SmiCheck smi_check = INLINE_SMI_CHECK,
242 PointersToHereCheck pointers_to_here_check_for_value =
243 kPointersToHereMaybeInteresting);
245 // As above, but the offset has the tag presubtracted. For use with
246 // MemOperand(reg, off).
247 inline void RecordWriteContextSlot(
248 Register context, int offset, Register value, Register scratch,
249 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
250 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
251 SmiCheck smi_check = INLINE_SMI_CHECK,
252 PointersToHereCheck pointers_to_here_check_for_value =
253 kPointersToHereMaybeInteresting) {
254 RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
255 lr_status, save_fp, remembered_set_action, smi_check,
256 pointers_to_here_check_for_value);
259 void RecordWriteForMap(Register object, Register map, Register dst,
260 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
262 // For a given |object| notify the garbage collector that the slot |address|
263 // has been written. |value| is the object being stored. The value and
264 // address registers are clobbered by the operation.
266 Register object, Register address, Register value,
267 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
268 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
269 SmiCheck smi_check = INLINE_SMI_CHECK,
270 PointersToHereCheck pointers_to_here_check_for_value =
271 kPointersToHereMaybeInteresting);
273 void Push(Register src) { push(src); }
276 void Push(Handle<Object> handle);
277 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
279 // Push two registers. Pushes leftmost register first (to highest address).
280 void Push(Register src1, Register src2) {
281 StorePU(src2, MemOperand(sp, -2 * kPointerSize));
282 StoreP(src1, MemOperand(sp, kPointerSize));
285 // Push three registers. Pushes leftmost register first (to highest address).
286 void Push(Register src1, Register src2, Register src3) {
287 StorePU(src3, MemOperand(sp, -3 * kPointerSize));
288 StoreP(src2, MemOperand(sp, kPointerSize));
289 StoreP(src1, MemOperand(sp, 2 * kPointerSize));
292 // Push four registers. Pushes leftmost register first (to highest address).
293 void Push(Register src1, Register src2, Register src3, Register src4) {
294 StorePU(src4, MemOperand(sp, -4 * kPointerSize));
295 StoreP(src3, MemOperand(sp, kPointerSize));
296 StoreP(src2, MemOperand(sp, 2 * kPointerSize));
297 StoreP(src1, MemOperand(sp, 3 * kPointerSize));
300 // Push five registers. Pushes leftmost register first (to highest address).
301 void Push(Register src1, Register src2, Register src3, Register src4,
303 StorePU(src5, MemOperand(sp, -5 * kPointerSize));
304 StoreP(src4, MemOperand(sp, kPointerSize));
305 StoreP(src3, MemOperand(sp, 2 * kPointerSize));
306 StoreP(src2, MemOperand(sp, 3 * kPointerSize));
307 StoreP(src1, MemOperand(sp, 4 * kPointerSize));
310 void Pop(Register dst) { pop(dst); }
312 // Pop two registers. Pops rightmost register first (from lower address).
313 void Pop(Register src1, Register src2) {
314 LoadP(src2, MemOperand(sp, 0));
315 LoadP(src1, MemOperand(sp, kPointerSize));
316 addi(sp, sp, Operand(2 * kPointerSize));
319 // Pop three registers. Pops rightmost register first (from lower address).
320 void Pop(Register src1, Register src2, Register src3) {
321 LoadP(src3, MemOperand(sp, 0));
322 LoadP(src2, MemOperand(sp, kPointerSize));
323 LoadP(src1, MemOperand(sp, 2 * kPointerSize));
324 addi(sp, sp, Operand(3 * kPointerSize));
327 // Pop four registers. Pops rightmost register first (from lower address).
328 void Pop(Register src1, Register src2, Register src3, Register src4) {
329 LoadP(src4, MemOperand(sp, 0));
330 LoadP(src3, MemOperand(sp, kPointerSize));
331 LoadP(src2, MemOperand(sp, 2 * kPointerSize));
332 LoadP(src1, MemOperand(sp, 3 * kPointerSize));
333 addi(sp, sp, Operand(4 * kPointerSize));
336 // Pop five registers. Pops rightmost register first (from lower address).
337 void Pop(Register src1, Register src2, Register src3, Register src4,
339 LoadP(src5, MemOperand(sp, 0));
340 LoadP(src4, MemOperand(sp, kPointerSize));
341 LoadP(src3, MemOperand(sp, 2 * kPointerSize));
342 LoadP(src2, MemOperand(sp, 3 * kPointerSize));
343 LoadP(src1, MemOperand(sp, 4 * kPointerSize));
344 addi(sp, sp, Operand(5 * kPointerSize));
347 // Push a fixed frame, consisting of lr, fp, context and
348 // JS function / marker id if marker_reg is a valid register.
349 void PushFixedFrame(Register marker_reg = no_reg);
350 void PopFixedFrame(Register marker_reg = no_reg);
352 // Push and pop the registers that can hold pointers, as defined by the
353 // RegList constant kSafepointSavedRegisters.
354 void PushSafepointRegisters();
355 void PopSafepointRegisters();
356 // Store value in register src in the safepoint stack slot for
358 void StoreToSafepointRegisterSlot(Register src, Register dst);
359 // Load the value of the src register from its safepoint stack slot
360 // into register dst.
361 void LoadFromSafepointRegisterSlot(Register dst, Register src);
363 // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
365 // Does not handle errors.
366 void FlushICache(Register address, size_t size, Register scratch);
368 // If the value is a NaN, canonicalize the value else, do nothing.
369 void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
370 void CanonicalizeNaN(const DoubleRegister value) {
371 CanonicalizeNaN(value, value);
374 // Converts the integer (untagged smi) in |src| to a double, storing
375 // the result to |double_dst|
376 void ConvertIntToDouble(Register src, DoubleRegister double_dst);
378 // Converts the unsigned integer (untagged smi) in |src| to
379 // a double, storing the result to |double_dst|
380 void ConvertUnsignedIntToDouble(Register src, DoubleRegister double_dst);
382 // Converts the integer (untagged smi) in |src| to
383 // a float, storing the result in |dst|
384 // Warning: The value in |int_scrach| will be changed in the process!
385 void ConvertIntToFloat(const DoubleRegister dst, const Register src,
386 const Register int_scratch);
388 // Converts the double_input to an integer. Note that, upon return,
389 // the contents of double_dst will also hold the fixed point representation.
390 void ConvertDoubleToInt64(const DoubleRegister double_input,
391 #if !V8_TARGET_ARCH_PPC64
392 const Register dst_hi,
394 const Register dst, const DoubleRegister double_dst,
395 FPRoundingMode rounding_mode = kRoundToZero);
397 // Generates function and stub prologue code.
398 void StubPrologue(int prologue_offset = 0);
399 void Prologue(bool code_pre_aging, int prologue_offset = 0);
402 // stack_space - extra stack space, used for parameters before call to C.
403 // At least one slot (for the return address) should be provided.
404 void EnterExitFrame(bool save_doubles, int stack_space = 1);
406 // Leave the current exit frame. Expects the return value in r0.
407 // Expect the number of values, pushed prior to the exit frame, to
408 // remove in a register (or no_reg, if there is nothing to remove).
409 void LeaveExitFrame(bool save_doubles, Register argument_count,
410 bool restore_context,
411 bool argument_count_is_length = false);
413 // Get the actual activation frame alignment for target environment.
414 static int ActivationFrameAlignment();
416 void LoadContext(Register dst, int context_chain_length);
418 // Load the global proxy from the current context.
419 void LoadGlobalProxy(Register dst);
421 // Conditionally load the cached Array transitioned map of type
422 // transitioned_kind from the native context if the map in register
423 // map_in_out is the cached Array map in the native context of
425 void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
426 ElementsKind transitioned_kind,
429 Label* no_map_match);
431 void LoadGlobalFunction(int index, Register function);
433 // Load the initial map from the global function. The registers
434 // function and map can be the same, function is then overwritten.
435 void LoadGlobalFunctionInitialMap(Register function, Register map,
438 void InitializeRootRegister() {
439 ExternalReference roots_array_start =
440 ExternalReference::roots_array_start(isolate());
441 mov(kRootRegister, Operand(roots_array_start));
444 // ----------------------------------------------------------------
445 // new PPC macro-assembler interfaces that are slightly higher level
446 // than assembler-ppc and may generate variable length sequences
448 // load a literal signed int value <value> to GPR <dst>
449 void LoadIntLiteral(Register dst, int value);
451 // load an SMI value <value> to GPR <dst>
452 void LoadSmiLiteral(Register dst, Smi* smi);
454 // load a literal double value <value> to FPR <result>
455 void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
457 void LoadWord(Register dst, const MemOperand& mem, Register scratch);
458 void LoadWordArith(Register dst, const MemOperand& mem,
459 Register scratch = no_reg);
460 void StoreWord(Register src, const MemOperand& mem, Register scratch);
462 void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
463 void LoadHalfWordArith(Register dst, const MemOperand& mem,
464 Register scratch = no_reg);
465 void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);
467 void LoadByte(Register dst, const MemOperand& mem, Register scratch);
468 void StoreByte(Register src, const MemOperand& mem, Register scratch);
470 void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
471 Register scratch = no_reg);
472 void StoreRepresentation(Register src, const MemOperand& mem,
473 Representation r, Register scratch = no_reg);
475 void LoadDouble(DoubleRegister dst, const MemOperand& mem, Register scratch);
476 void StoreDouble(DoubleRegister src, const MemOperand& mem, Register scratch);
478 // Move values between integer and floating point registers.
479 void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
480 void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
482 void MovInt64ToDouble(DoubleRegister dst,
483 #if !V8_TARGET_ARCH_PPC64
487 #if V8_TARGET_ARCH_PPC64
488 void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
489 Register src_lo, Register scratch);
491 void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
492 void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
493 void MovDoubleLowToInt(Register dst, DoubleRegister src);
494 void MovDoubleHighToInt(Register dst, DoubleRegister src);
495 void MovDoubleToInt64(
496 #if !V8_TARGET_ARCH_PPC64
499 Register dst, DoubleRegister src);
500 void MovIntToFloat(DoubleRegister dst, Register src);
501 void MovFloatToInt(Register dst, DoubleRegister src);
503 void Add(Register dst, Register src, intptr_t value, Register scratch);
504 void Cmpi(Register src1, const Operand& src2, Register scratch,
506 void Cmpli(Register src1, const Operand& src2, Register scratch,
508 void Cmpwi(Register src1, const Operand& src2, Register scratch,
510 void Cmplwi(Register src1, const Operand& src2, Register scratch,
512 void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
513 void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
514 void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
516 void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
517 void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
518 void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
520 void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
522 void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
525 // Set new rounding mode RN to FPSCR
526 void SetRoundingMode(FPRoundingMode RN);
528 // reset rounding mode to default (kRoundToNearest)
529 void ResetRoundingMode();
531 // These exist to provide portability between 32 and 64bit
532 void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
533 void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
535 // ---------------------------------------------------------------------------
536 // JavaScript invokes
538 // Invoke the JavaScript function code by either calling or jumping.
539 void InvokeCode(Register code, const ParameterCount& expected,
540 const ParameterCount& actual, InvokeFlag flag,
541 const CallWrapper& call_wrapper);
543 // Invoke the JavaScript function in the given register. Changes the
544 // current context to the context in the function before invoking.
545 void InvokeFunction(Register function, const ParameterCount& actual,
546 InvokeFlag flag, const CallWrapper& call_wrapper);
548 void InvokeFunction(Register function, const ParameterCount& expected,
549 const ParameterCount& actual, InvokeFlag flag,
550 const CallWrapper& call_wrapper);
552 void InvokeFunction(Handle<JSFunction> function,
553 const ParameterCount& expected,
554 const ParameterCount& actual, InvokeFlag flag,
555 const CallWrapper& call_wrapper);
557 void IsObjectJSStringType(Register object, Register scratch, Label* fail);
559 void IsObjectNameType(Register object, Register scratch, Label* fail);
561 // ---------------------------------------------------------------------------
566 // ---------------------------------------------------------------------------
567 // Exception handling
569 // Push a new stack handler and link into stack handler chain.
570 void PushStackHandler();
572 // Unlink the stack handler on top of the stack from the stack handler chain.
573 // Must preserve the result register.
574 void PopStackHandler();
576 // ---------------------------------------------------------------------------
577 // Inline caching support
579 // Generate code for checking access rights - used for security checks
580 // on access to global objects across environments. The holder register
581 // is left untouched, whereas both scratch registers are clobbered.
582 void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
585 void GetNumberHash(Register t0, Register scratch);
587 void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
588 Register result, Register t0, Register t1,
592 inline void MarkCode(NopMarkerTypes type) { nop(type); }
594 // Check if the given instruction is a 'type' marker.
595 // i.e. check if is is a mov r<type>, r<type> (referenced as nop(type))
596 // These instructions are generated to mark special location in the code,
597 // like some special IC code.
598 static inline bool IsMarkedCode(Instr instr, int type) {
599 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
600 return IsNop(instr, type);
604 static inline int GetCodeMarker(Instr instr) {
605 int dst_reg_offset = 12;
606 int dst_mask = 0xf << dst_reg_offset;
608 int dst_reg = (instr & dst_mask) >> dst_reg_offset;
609 int src_reg = instr & src_mask;
610 uint32_t non_register_mask = ~(dst_mask | src_mask);
611 uint32_t mov_mask = al | 13 << 21;
613 // Return <n> if we have a mov rn rn, else return -1.
614 int type = ((instr & non_register_mask) == mov_mask) &&
615 (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
616 (dst_reg < LAST_CODE_MARKER)
619 DCHECK((type == -1) ||
620 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
625 // ---------------------------------------------------------------------------
626 // Allocation support
628 // Allocate an object in new space or old space. The object_size is
629 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
630 // is passed. If the space is exhausted control continues at the gc_required
631 // label. The allocated object is returned in result. If the flag
632 // tag_allocated_object is true the result is tagged as as a heap object.
633 // All registers are clobbered also when control continues at the gc_required
635 void Allocate(int object_size, Register result, Register scratch1,
636 Register scratch2, Label* gc_required, AllocationFlags flags);
638 void Allocate(Register object_size, Register result, Register scratch1,
639 Register scratch2, Label* gc_required, AllocationFlags flags);
641 void AllocateTwoByteString(Register result, Register length,
642 Register scratch1, Register scratch2,
643 Register scratch3, Label* gc_required);
644 void AllocateOneByteString(Register result, Register length,
645 Register scratch1, Register scratch2,
646 Register scratch3, Label* gc_required);
647 void AllocateTwoByteConsString(Register result, Register length,
648 Register scratch1, Register scratch2,
650 void AllocateOneByteConsString(Register result, Register length,
651 Register scratch1, Register scratch2,
653 void AllocateTwoByteSlicedString(Register result, Register length,
654 Register scratch1, Register scratch2,
656 void AllocateOneByteSlicedString(Register result, Register length,
657 Register scratch1, Register scratch2,
660 // Allocates a heap number or jumps to the gc_required label if the young
661 // space is full and a scavenge is needed. All registers are clobbered also
662 // when control continues at the gc_required label.
663 void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
664 Register heap_number_map, Label* gc_required,
665 TaggingMode tagging_mode = TAG_RESULT,
666 MutableMode mode = IMMUTABLE);
667 void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
668 Register scratch1, Register scratch2,
669 Register heap_number_map,
672 // Copies a fixed number of fields of heap objects from src to dst.
673 void CopyFields(Register dst, Register src, RegList temps, int field_count);
675 // Copies a number of bytes from src to dst. All registers are clobbered. On
676 // exit src and dst will point to the place just after where the last byte was
677 // read or written and length will be zero.
678 void CopyBytes(Register src, Register dst, Register length, Register scratch);
680 // Initialize fields with filler values. |count| fields starting at
681 // |start_offset| are overwritten with the value in |filler|. At the end the
682 // loop, |start_offset| points at the next uninitialized field. |count| is
683 // assumed to be non-zero.
684 void InitializeNFieldsWithFiller(Register start_offset, Register count,
687 // Initialize fields with filler values. Fields starting at |start_offset|
688 // not including end_offset are overwritten with the value in |filler|. At
689 // the end the loop, |start_offset| takes the value of |end_offset|.
690 void InitializeFieldsWithFiller(Register start_offset, Register end_offset,
693 // ---------------------------------------------------------------------------
694 // Support functions.
696 // Machine code version of Map::GetConstructor().
697 // |temp| holds |result|'s map when done, and |temp2| its instance type.
698 void GetMapConstructor(Register result, Register map, Register temp,
701 // Try to get function prototype of a function and puts the value in
702 // the result register. Checks that the function really is a
703 // function and jumps to the miss label if the fast checks fail. The
704 // function register will be untouched; the other registers may be
706 void TryGetFunctionPrototype(Register function, Register result,
707 Register scratch, Label* miss);
709 // Compare object type for heap object. heap_object contains a non-Smi
710 // whose object type should be compared with the given type. This both
711 // sets the flags and leaves the object type in the type_reg register.
712 // It leaves the map in the map register (unless the type_reg and map register
713 // are the same register). It leaves the heap object in the heap_object
714 // register unless the heap_object register is the same register as one of the
716 // Type_reg can be no_reg. In that case ip is used.
717 void CompareObjectType(Register heap_object, Register map, Register type_reg,
720 // Compare instance type in a map. map contains a valid map object whose
721 // object type should be compared with the given type. This both
722 // sets the flags and leaves the object type in the type_reg register.
723 void CompareInstanceType(Register map, Register type_reg, InstanceType type);
726 // Check if a map for a JSObject indicates that the object has fast elements.
727 // Jump to the specified label if it does not.
728 void CheckFastElements(Register map, Register scratch, Label* fail);
730 // Check if a map for a JSObject indicates that the object can have both smi
731 // and HeapObject elements. Jump to the specified label if it does not.
732 void CheckFastObjectElements(Register map, Register scratch, Label* fail);
734 // Check if a map for a JSObject indicates that the object has fast smi only
735 // elements. Jump to the specified label if it does not.
736 void CheckFastSmiElements(Register map, Register scratch, Label* fail);
738 // Check to see if maybe_number can be stored as a double in
739 // FastDoubleElements. If it can, store it at the index specified by key in
740 // the FastDoubleElements array elements. Otherwise jump to fail.
741 void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
742 Register elements_reg, Register scratch1,
743 DoubleRegister double_scratch, Label* fail,
744 int elements_offset = 0);
746 // Compare an object's map with the specified map and its transitioned
747 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
748 // set with result of map compare. If multiple map compares are required, the
749 // compare sequences branches to early_success.
750 void CompareMap(Register obj, Register scratch, Handle<Map> map,
751 Label* early_success);
753 // As above, but the map of the object is already loaded into the register
754 // which is preserved by the code generated.
755 void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
757 // Check if the map of an object is equal to a specified map and branch to
758 // label if not. Skip the smi check if not required (object is known to be a
759 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
760 // against maps that are ElementsKind transition maps of the specified map.
761 void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
762 SmiCheckType smi_check_type);
765 void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
766 Label* fail, SmiCheckType smi_check_type);
769 // Check if the map of an object is equal to a specified weak map and branch
770 // to a specified target if equal. Skip the smi check if not required
771 // (object is known to be a heap object)
772 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
773 Handle<WeakCell> cell, Handle<Code> success,
774 SmiCheckType smi_check_type);
776 // Compare the given value and the value of weak cell.
777 void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
780 void GetWeakValue(Register value, Handle<WeakCell> cell);
782 // Load the value of the weak cell in the value register. Branch to the given
783 // miss label if the weak cell was cleared.
784 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
786 // Compare the object in a register to a value from the root list.
787 // Uses the ip register as scratch.
788 void CompareRoot(Register obj, Heap::RootListIndex index);
789 void PushRoot(Heap::RootListIndex index) {
794 // Compare the object in a register to a value and jump if they are equal.
795 void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
796 CompareRoot(with, index);
800 // Compare the object in a register to a value and jump if they are not equal.
801 void JumpIfNotRoot(Register with, Heap::RootListIndex index,
802 Label* if_not_equal) {
803 CompareRoot(with, index);
807 // Load and check the instance type of an object for being a string.
808 // Loads the type into the second argument register.
809 // Returns a condition that will be enabled if the object was a string.
Condition IsObjectStringType(Register obj, Register type) {
  // Load the object's map, then the instance type byte from the map.
  LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
  lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
  // kStringTag is 0 (checked below), so string instance types have the
  // kIsNotStringMask bits clear; andi (record form) sets cr0 accordingly.
  andi(r0, type, Operand(kIsNotStringMask));
  DCHECK_EQ(0u, kStringTag);
819 // Picks out an array index from the hash field.
821 // hash - holds the index's hash. Clobbered.
822 // index - holds the overwritten index on exit.
823 void IndexFromHash(Register hash, Register index);
825 // Get the number of least significant bits from a register
826 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
827 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
829 // Load the value of a smi object into a double register.
830 void SmiToDouble(DoubleRegister value, Register smi);
832 // Check if a double can be exactly represented as a signed 32-bit integer.
833 // CR_EQ in cr7 is set if true.
834 void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
835 Register scratch2, DoubleRegister double_scratch);
837 // Try to convert a double to a signed 32-bit integer.
838 // CR_EQ in cr7 is set and result assigned if the conversion is exact.
839 void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
840 Register scratch, DoubleRegister double_scratch);
842 // Floor a double and writes the value to the result register.
843 // Go to exact if the conversion is exact (to be able to test -0),
844 // fall through calling code if an overflow occurred, else go to done.
845 // In return, input_high is loaded with high bits of input.
846 void TryInt32Floor(Register result, DoubleRegister double_input,
847 Register input_high, Register scratch,
848 DoubleRegister double_scratch, Label* done, Label* exact);
850 // Performs a truncating conversion of a floating point number as used by
851 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
852 // succeeds, otherwise falls through if result is saturated. On return
853 // 'result' either holds answer, or is clobbered on fall through.
// Only public for the test code in test-code-stubs-ppc.cc.
856 void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
859 // Performs a truncating conversion of a floating point number as used by
860 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
861 // Exits with 'result' holding the answer.
862 void TruncateDoubleToI(Register result, DoubleRegister double_input);
864 // Performs a truncating conversion of a heap number as used by
865 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
866 // must be different registers. Exits with 'result' holding the answer.
867 void TruncateHeapNumberToI(Register result, Register object);
869 // Converts the smi or heap number in object to an int32 using the rules
870 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
871 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
872 // different registers.
873 void TruncateNumberToI(Register object, Register result,
874 Register heap_number_map, Register scratch1,
877 // Overflow handling functions.
878 // Usage: call the appropriate arithmetic function and then call one of the
879 // flow control functions with the corresponding label.
881 // Compute dst = left + right, setting condition codes. dst may be same as
882 // either left or right (or a unique register). left and right must not be
883 // the same register.
884 void AddAndCheckForOverflow(Register dst, Register left, Register right,
885 Register overflow_dst, Register scratch = r0);
886 void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
887 Register overflow_dst, Register scratch = r0);
889 // Compute dst = left - right, setting condition codes. dst may be same as
890 // either left or right (or a unique register). left and right must not be
891 // the same register.
892 void SubAndCheckForOverflow(Register dst, Register left, Register right,
893 Register overflow_dst, Register scratch = r0);
895 void BranchOnOverflow(Label* label) { blt(label, cr0); }
897 void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
899 void RetOnOverflow(void) { Ret(lt, cr0); }
901 void RetOnNoOverflow(void) { Ret(ge, cr0); }
903 // ---------------------------------------------------------------------------
907 void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
908 Condition cond = al);
911 void TailCallStub(CodeStub* stub, Condition cond = al);
913 // Call a runtime routine.
914 void CallRuntime(const Runtime::Function* f, int num_arguments,
915 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
916 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
917 const Runtime::Function* function = Runtime::FunctionForId(id);
918 CallRuntime(function, function->nargs, kSaveFPRegs);
921 // Convenience function: Same as above, but takes the fid instead.
922 void CallRuntime(Runtime::FunctionId id, int num_arguments,
923 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
924 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
927 // Convenience function: call an external reference.
928 void CallExternalReference(const ExternalReference& ext, int num_arguments);
930 // Tail call of a runtime routine (jump).
931 // Like JumpToExternalReference, but also takes care of passing the number
933 void TailCallExternalReference(const ExternalReference& ext,
934 int num_arguments, int result_size);
936 // Convenience function: tail call a runtime routine (jump).
937 void TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
940 int CalculateStackPassedWords(int num_reg_arguments,
941 int num_double_arguments);
943 // Before calling a C-function from generated code, align arguments on stack.
944 // After aligning the frame, non-register arguments must be stored in
945 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
946 // are word sized. If double arguments are used, this function assumes that
947 // all double arguments are stored before core registers; otherwise the
948 // correct alignment of the double values is not guaranteed.
949 // Some compilers/platforms require the stack to be aligned when calling
951 // Needs a scratch register to do some arithmetic. This register will be
953 void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
955 void PrepareCallCFunction(int num_reg_arguments, Register scratch);
957 // There are two ways of passing double arguments on ARM, depending on
958 // whether soft or hard floating point ABI is used. These functions
959 // abstract parameter passing for the three different ways we call
960 // C functions from generated code.
961 void MovToFloatParameter(DoubleRegister src);
962 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
963 void MovToFloatResult(DoubleRegister src);
965 // Calls a C function and cleans up the space for arguments allocated
966 // by PrepareCallCFunction. The called function is not allowed to trigger a
967 // garbage collection, since that might move the code and invalidate the
968 // return address (unless this is somehow accounted for by the called
970 void CallCFunction(ExternalReference function, int num_arguments);
971 void CallCFunction(Register function, int num_arguments);
972 void CallCFunction(ExternalReference function, int num_reg_arguments,
973 int num_double_arguments);
974 void CallCFunction(Register function, int num_reg_arguments,
975 int num_double_arguments);
977 void MovFromFloatParameter(DoubleRegister dst);
978 void MovFromFloatResult(DoubleRegister dst);
980 // Jump to a runtime routine.
981 void JumpToExternalReference(const ExternalReference& builtin);
983 // Invoke specified builtin JavaScript function.
984 void InvokeBuiltin(int native_context_index, InvokeFlag flag,
985 const CallWrapper& call_wrapper = NullCallWrapper());
987 // Store the code object for the given builtin in the target register and
988 // setup the function in r1.
989 void GetBuiltinEntry(Register target, int native_context_index);
991 // Store the function for the given builtin in the target register.
992 void GetBuiltinFunction(Register target, int native_context_index);
994 Handle<Object> CodeObject() {
995 DCHECK(!code_object_.is_null());
1000 // Emit code for a truncating division by a constant. The dividend register is
1001 // unchanged and ip gets clobbered. Dividend and result must be different.
1002 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1004 // ---------------------------------------------------------------------------
1005 // StatsCounter support
1007 void SetCounter(StatsCounter* counter, int value, Register scratch1,
1009 void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1011 void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1015 // ---------------------------------------------------------------------------
1018 // Calls Abort(msg) if the condition cond is not satisfied.
1019 // Use --debug_code to enable.
1020 void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
1021 void AssertFastElements(Register elements);
1023 // Like Assert(), but always enabled.
1024 void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
1026 // Print a message to stdout and abort execution.
1027 void Abort(BailoutReason reason);
1029 // Verify restrictions about code generated in stubs.
1030 void set_generating_stub(bool value) { generating_stub_ = value; }
1031 bool generating_stub() { return generating_stub_; }
1032 void set_has_frame(bool value) { has_frame_ = value; }
1033 bool has_frame() { return has_frame_; }
1034 inline bool AllowThisStubCall(CodeStub* stub);
1036 // ---------------------------------------------------------------------------
1039 // Check whether the value of reg is a power of two and not zero. If not
1040 // control continues at the label not_power_of_two. If reg is a power of two
1041 // the register scratch contains the value of (reg - 1) when control falls
1043 void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
1044 Label* not_power_of_two_or_zero);
1045 // Check whether the value of reg is a power of two and not zero.
1046 // Control falls through if it is, with scratch containing the mask
1048 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1049 // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1050 // strictly positive but not a power of two.
1051 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
1052 Label* zero_and_neg,
1053 Label* not_power_of_two);
1055 // ---------------------------------------------------------------------------
1056 // Bit testing/extraction
1058 // Bit numbering is such that the least significant bit is bit 0
1059 // (for consistency between 32/64-bit).
1061 // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
1062 // and place them into the least significant bits of dst.
1063 inline void ExtractBitRange(Register dst, Register src, int rangeStart,
1064 int rangeEnd, RCBit rc = LeaveRC) {
1065 DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
1066 int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
1067 int width = rangeStart - rangeEnd + 1;
1068 if (rc == SetRC && rangeEnd == 0 && width <= 16) {
1069 andi(dst, src, Operand((1 << width) - 1));
1071 #if V8_TARGET_ARCH_PPC64
1072 rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
1074 rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
1080 inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
1081 RCBit rc = LeaveRC) {
1082 ExtractBitRange(dst, src, bitNumber, bitNumber, rc);
1085 // Extract consecutive bits (defined by mask) from src and place them
1086 // into the least significant bits of dst.
1087 inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
1088 RCBit rc = LeaveRC) {
1089 int start = kBitsPerPointer - 1;
1091 uintptr_t bit = (1L << start);
1093 while (bit && (mask & bit) == 0) {
1100 while (bit && (mask & bit)) {
1105 // 1-bits in mask must be contiguous
1106 DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
1108 ExtractBitRange(dst, src, start, end, rc);
1111 // Test single bit in value.
1112 inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
1113 ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC);
1116 // Test consecutive bit range in value. Range is defined by
1117 // rangeStart - rangeEnd.
1118 inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
1119 Register scratch = r0) {
1120 ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC);
1123 // Test consecutive bit range in value. Range is defined by mask.
1124 inline void TestBitMask(Register value, uintptr_t mask,
1125 Register scratch = r0) {
1126 ExtractBitMask(scratch, value, mask, SetRC);
1130 // ---------------------------------------------------------------------------
1133 // Shift left by kSmiShift
1134 void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
1135 void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
1136 ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
1139 #if !V8_TARGET_ARCH_PPC64
1140 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
1141 void SmiTagCheckOverflow(Register reg, Register overflow);
1142 void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1144 inline void JumpIfNotSmiCandidate(Register value, Register scratch,
1145 Label* not_smi_label) {
1146 // High bits must be identical to fit into an Smi
1147 STATIC_ASSERT(kSmiShift == 1);
1148 addis(scratch, value, Operand(0x40000000u >> 16));
1149 cmpi(scratch, Operand::Zero());
1153 inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
1154 // The test is different for unsigned int values. Since we need
1155 // the value to be in the range of a positive smi, we can't
1156 // handle any of the high bits being set in the value.
1157 TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
1160 inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
1161 Label* not_smi_label) {
1162 TestUnsignedSmiCandidate(value, scratch);
1163 bne(not_smi_label, cr0);
1166 void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
1168 void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
1169 ShiftRightArithImm(dst, src, kSmiShift, rc);
1172 void SmiToPtrArrayOffset(Register dst, Register src) {
1173 #if V8_TARGET_ARCH_PPC64
1174 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
1175 ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
1177 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
1178 ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
1182 void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }
1184 void SmiToShortArrayOffset(Register dst, Register src) {
1185 #if V8_TARGET_ARCH_PPC64
1186 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
1187 ShiftRightArithImm(dst, src, kSmiShift - 1);
1189 STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
1196 void SmiToIntArrayOffset(Register dst, Register src) {
1197 #if V8_TARGET_ARCH_PPC64
1198 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
1199 ShiftRightArithImm(dst, src, kSmiShift - 2);
1201 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
1202 ShiftLeftImm(dst, src, Operand(2 - kSmiShift));
1206 #define SmiToFloatArrayOffset SmiToIntArrayOffset
1208 void SmiToDoubleArrayOffset(Register dst, Register src) {
1209 #if V8_TARGET_ARCH_PPC64
1210 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
1211 ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2);
1213 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
1214 ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
1218 void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
1219 if (kSmiShift < elementSizeLog2) {
1220 ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift));
1221 } else if (kSmiShift > elementSizeLog2) {
1222 ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2);
1223 } else if (!dst.is(src)) {
1228 void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
1231 SmiToArrayOffset(dst, src, elementSizeLog2);
1233 ShiftLeftImm(dst, src, Operand(elementSizeLog2));
1237 // Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
1239 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1241 // Untag the source value into destination and jump if source is not a smi.
// Source and destination can be the same register.
1243 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1245 inline void TestIfSmi(Register value, Register scratch) {
1246 TestBitRange(value, kSmiTagSize - 1, 0, scratch);
1249 inline void TestIfPositiveSmi(Register value, Register scratch) {
1250 #if V8_TARGET_ARCH_PPC64
1251 rldicl(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize), SetRC);
1253 rlwinm(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize),
1254 kBitsPerPointer - 1, SetRC);
// Jump if the register contains a smi.
1259 inline void JumpIfSmi(Register value, Label* smi_label) {
1260 TestIfSmi(value, r0);
1261 beq(smi_label, cr0); // branch if SMI
// Jump if the register contains a non-smi.
1264 inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1265 TestIfSmi(value, r0);
1266 bne(not_smi_label, cr0);
1268 // Jump if either of the registers contain a non-smi.
1269 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1270 // Jump if either of the registers contain a smi.
1271 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1273 // Abort execution if argument is a smi, enabled via --debug-code.
1274 void AssertNotSmi(Register object);
1275 void AssertSmi(Register object);
1278 #if V8_TARGET_ARCH_PPC64
// Sets CR_EQ in |cr| if |value| fits into a signed 32-bit integer.
inline void TestIfInt32(Register value, Register scratch,
                        CRegister cr = cr7) {
  // The upper bits must equal the sign extension of the lower 32 bits
  // for the value to fit into a 32-bit integer.
  extsw(scratch, value);
  cmp(scratch, value, cr);
// Sets CR_EQ in |cr| if the value formed by |hi_word|:|lo_word| fits
// into a signed 32-bit integer.
inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
                        CRegister cr = cr7) {
  // The high word must equal the sign extension of the low word for the
  // value to fit into a 32-bit integer.
  srawi(scratch, lo_word, 31);
  cmp(scratch, hi_word, cr);
1294 #if V8_TARGET_ARCH_PPC64
// Ensure it is permissible to read/write int value directly from
1296 // upper half of the smi.
1297 STATIC_ASSERT(kSmiTag == 0);
1298 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
1300 #if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
1301 #define SmiWordOffset(offset) (offset + kPointerSize / 2)
1303 #define SmiWordOffset(offset) offset
1306 // Abort execution if argument is not a string, enabled via --debug-code.
1307 void AssertString(Register object);
1309 // Abort execution if argument is not a name, enabled via --debug-code.
1310 void AssertName(Register object);
1312 void AssertFunction(Register object);
1314 // Abort execution if argument is not undefined or an AllocationSite, enabled
1315 // via --debug-code.
1316 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1318 // Abort execution if reg is not the root value with the given index,
1319 // enabled via --debug-code.
1320 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1322 // ---------------------------------------------------------------------------
1323 // HeapNumber utilities
1325 void JumpIfNotHeapNumber(Register object, Register heap_number_map,
1326 Register scratch, Label* on_not_heap_number);
1328 // ---------------------------------------------------------------------------
1331 // Checks if both objects are sequential one-byte strings and jumps to label
1332 // if either is not. Assumes that neither object is a smi.
1333 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
1339 // Checks if both objects are sequential one-byte strings and jumps to label
1340 // if either is not.
1341 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1344 Label* not_flat_one_byte_strings);
1346 // Checks if both instance types are sequential one-byte strings and jumps to
1347 // label if either is not.
1348 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1349 Register first_object_instance_type, Register second_object_instance_type,
1350 Register scratch1, Register scratch2, Label* failure);
1352 // Check if instance type is sequential one-byte string and jump to label if
1354 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1357 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1359 void EmitSeqStringSetCharCheck(Register string, Register index,
1360 Register value, uint32_t encoding_mask);
1362 // ---------------------------------------------------------------------------
1363 // Patching helpers.
1365 // Decode offset from constant pool load instruction(s).
1366 // Caller must place the instruction word at <location> in <result>.
1367 void DecodeConstantPoolOffset(Register result, Register location);
1369 void ClampUint8(Register output_reg, Register input_reg);
1371 // Saturate a value into 8-bit unsigned integer
1372 // if input_value < 0, output_value is 0
1373 // if input_value > 255, output_value is 255
1374 // otherwise output_value is the (int)input_value (round to nearest)
1375 void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
1376 DoubleRegister temp_double_reg);
1379 void LoadInstanceDescriptors(Register map, Register descriptors);
1380 void EnumLength(Register dst, Register map);
1381 void NumberOfOwnDescriptors(Register dst, Register map);
1382 void LoadAccessor(Register dst, Register holder, int accessor_index,
1383 AccessorComponent accessor);
1385 template <typename Field>
1386 void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
1387 ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
1391 template <typename Field>
1392 void DecodeField(Register reg, RCBit rc = LeaveRC) {
1393 DecodeField<Field>(reg, reg, rc);
1396 template <typename Field>
1397 void DecodeFieldToSmi(Register dst, Register src) {
1398 #if V8_TARGET_ARCH_PPC64
1399 DecodeField<Field>(dst, src);
1402 // 32-bit can do this in one instruction:
1403 int start = Field::kSize + kSmiShift - 1;
1404 int end = kSmiShift;
1405 int rotate = kSmiShift - Field::kShift;
1407 rotate += kBitsPerPointer;
1409 rlwinm(dst, src, rotate, kBitsPerPointer - start - 1,
1410 kBitsPerPointer - end - 1);
1414 template <typename Field>
1415 void DecodeFieldToSmi(Register reg) {
1416 DecodeFieldToSmi<Field>(reg, reg);
1419 // Activation support.
1420 void EnterFrame(StackFrame::Type type,
1421 bool load_constant_pool_pointer_reg = false);
1422 // Returns the pc offset at which the frame ends.
1423 int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
1425 // Expects object in r0 and returns map with validated enum cache
1426 // in r0. Assumes that any other register can be used as a scratch.
1427 void CheckEnumCache(Register null_value, Label* call_runtime);
1429 // AllocationMemento support. Arrays may have an associated
1430 // AllocationMemento object that can be checked for in order to pretransition
1432 // On entry, receiver_reg should point to the array object.
1433 // scratch_reg gets clobbered.
1434 // If allocation info is present, condition flags are set to eq.
1435 void TestJSArrayForAllocationMemento(Register receiver_reg,
1436 Register scratch_reg,
1437 Label* no_memento_found);
1439 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1440 Register scratch_reg,
1441 Label* memento_found) {
1442 Label no_memento_found;
1443 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1446 bind(&no_memento_found);
1449 // Jumps to found label if a prototype map has dictionary elements.
1450 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1451 Register scratch1, Label* found);
1453 // Loads the constant pool pointer (kConstantPoolRegister).
1454 void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
1455 Register code_target_address);
1456 void LoadConstantPoolPointerRegister();
1457 void LoadConstantPoolPointerRegister(Register base, int code_entry_delta = 0);
1459 void AbortConstantPoolBuilding() {
1461 // Avoid DCHECK(!is_linked()) failure in ~Label()
1462 bind(ConstantPoolPosition());
1467 static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1469 void CallCFunctionHelper(Register function, int num_reg_arguments,
1470 int num_double_arguments);
1472 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
1473 CRegister cr = cr7);
1475 // Helper functions for generating invokes.
1476 void InvokePrologue(const ParameterCount& expected,
1477 const ParameterCount& actual, Handle<Code> code_constant,
1478 Register code_reg, Label* done,
1479 bool* definitely_mismatches, InvokeFlag flag,
1480 const CallWrapper& call_wrapper);
1482 void InitializeNewString(Register string, Register length,
1483 Heap::RootListIndex map_index, Register scratch1,
1486 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1487 void InNewSpace(Register object, Register scratch,
1488 Condition cond, // eq for new space, ne otherwise.
1491 // Helper for finding the mark bits for an address. Afterwards, the
1492 // bitmap register points at the word with the mark bits and the mask
1493 // the position of the first bit. Leaves addr_reg unchanged.
1494 inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
1497 static const RegList kSafepointSavedRegisters;
1498 static const int kNumSafepointSavedRegisters;
1500 // Compute memory operands for safepoint stack slots.
1501 static int SafepointRegisterStackIndex(int reg_code);
1502 MemOperand SafepointRegisterSlot(Register reg);
1503 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1505 bool generating_stub_;
1507 // This handle will be patched with the code object on installation.
1508 Handle<Object> code_object_;
1510 // Needs access to SafepointRegisterStackIndex for compiled frame
1512 friend class StandardFrame;
1516 // The code patcher is used to patch (typically) small parts of code e.g. for
1517 // debugging and other types of instrumentation. When using the code patcher
1518 // the exact number of bytes specified must be emitted. It is not legal to emit
1519 // relocation information. If any of these constraints are violated it causes
1520 // an assertion to fail.
1523 enum FlushICache { FLUSH, DONT_FLUSH };
1525 CodePatcher(byte* address, int instructions, FlushICache flush_cache = FLUSH);
1528 // Macro assembler to emit code.
1529 MacroAssembler* masm() { return &masm_; }
1531 // Emit an instruction directly.
1532 void Emit(Instr instr);
1534 // Emit the condition part of an instruction leaving the rest of the current
1535 // instruction unchanged.
1536 void EmitCondition(Condition cond);
1539 byte* address_; // The address of the code being patched.
1540 int size_; // Number of bytes of the expected patch size.
1541 MacroAssembler masm_; // Macro assembler used to generate the code.
1542 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1546 // -----------------------------------------------------------------------------
1547 // Static helper functions.
1549 inline MemOperand ContextOperand(Register context, int index = 0) {
1550 return MemOperand(context, Context::SlotOffset(index));
1554 inline MemOperand GlobalObjectOperand() {
1555 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
1559 #ifdef GENERATED_CODE_COVERAGE
1560 #define CODE_COVERAGE_STRINGIFY(x) #x
1561 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1562 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1563 #define ACCESS_MASM(masm) \
1564 masm->stop(__FILE_LINE__); \
1567 #define ACCESS_MASM(masm) masm->
1570 } // namespace v8::internal
1572 #endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_