1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
6 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
8 #include "src/assembler.h"
9 #include "src/bailout-reason.h"
10 #include "src/frames.h"
11 #include "src/globals.h"
16 // Give alias names to registers for calling conventions.
// r0/r1 hold the (up to two machine words of the) return value of a call.
const Register kReturnRegister0 = {kRegister_r0_Code};
const Register kReturnRegister1 = {kRegister_r1_Code};
// r1 carries the JSFunction being invoked; r7 the current context.
const Register kJSFunctionRegister = {kRegister_r1_Code};
const Register kContextRegister = {kRegister_r7_Code};
// Fixed register assignments used by the interpreter.
// NOTE(review): r8 is also aliased as the constant pool pointer `pp` below --
// presumably the two uses are never live simultaneously; verify.
const Register kInterpreterAccumulatorRegister = {kRegister_r0_Code};
const Register kInterpreterRegisterFileRegister = {kRegister_r4_Code};
const Register kInterpreterBytecodeOffsetRegister = {kRegister_r5_Code};
const Register kInterpreterBytecodeArrayRegister = {kRegister_r6_Code};
const Register kInterpreterDispatchTableRegister = {kRegister_r8_Code};
// Registers used when calling into the runtime: the function to call and
// its argument count.
const Register kRuntimeCallFunctionRegister = {kRegister_r1_Code};
const Register kRuntimeCallArgCountRegister = {kRegister_r0_Code};
29 // ----------------------------------------------------------------------------
30 // Static helper functions
// Generate a MemOperand for loading a field from a tagged heap object:
// kHeapObjectTag is subtracted from |offset| so the operand addresses the
// field's real (untagged) location.  Use with HeapObject field offsets.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
// Give alias names to registers.
// NOTE: cp (r7) is the same register as kContextRegister above.
const Register cp = { kRegister_r7_Code };  // JavaScript context pointer.
const Register pp = { kRegister_r8_Code };  // Constant pool pointer.
const Register kRootRegister = { kRegister_r10_Code };  // Roots array pointer.
// Flags used for AllocateHeapNumber.
// NOTE(review): the enum this comment originally described is not visible in
// this chunk -- verify against the upstream header.
// Whether RecordWrite-style helpers should update the remembered set.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether a Smi check should be emitted before the write barrier.
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
// Records whether lr has already been spilled, so helpers know whether they
// may clobber it.
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
// Returns a register that is guaranteed to be distinct from every register
// passed in; unused slots default to no_reg.
// NOTE(review): presumably the result is drawn from the allocatable set --
// confirm against the definition.
Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);
70 bool AreAliased(Register reg1,
72 Register reg3 = no_reg,
73 Register reg4 = no_reg,
74 Register reg5 = no_reg,
75 Register reg6 = no_reg,
76 Register reg7 = no_reg,
77 Register reg8 = no_reg);
// Controls whether Call() may place the target address inline in the
// instruction stream, or must always materialize it out of line.
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
86 // MacroAssembler implements a collection of frequently used macros.
87 class MacroAssembler: public Assembler {
89 // The isolate parameter can be NULL if the macro assembler should
90 // not use isolate-dependent functionality. In this case, it's the
91 // responsibility of the caller to never invoke such function on the
93 MacroAssembler(Isolate* isolate, void* buffer, int size);
96 // Returns the size of a call in instructions. Note, the value returned is
97 // only valid as long as no entries are added to the constant pool between
98 // checking the call size and emitting the actual call.
99 static int CallSize(Register target, Condition cond = al);
100 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
101 int CallStubSize(CodeStub* stub,
102 TypeFeedbackId ast_id = TypeFeedbackId::None(),
103 Condition cond = al);
104 static int CallSizeNotPredictableCodeSize(Isolate* isolate,
106 RelocInfo::Mode rmode,
107 Condition cond = al);
109 // Jump, Call, and Ret pseudo instructions implementing inter-working.
110 void Jump(Register target, Condition cond = al);
111 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
112 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
113 void Call(Register target, Condition cond = al);
114 void Call(Address target, RelocInfo::Mode rmode,
116 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
117 int CallSize(Handle<Code> code,
118 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
119 TypeFeedbackId ast_id = TypeFeedbackId::None(),
120 Condition cond = al);
121 void Call(Handle<Code> code,
122 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
123 TypeFeedbackId ast_id = TypeFeedbackId::None(),
125 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
126 void Ret(Condition cond = al);
128 // Emit code to discard a non-negative number of pointer-sized elements
129 // from the stack, clobbering only the sp register.
130 void Drop(int count, Condition cond = al);
132 void Ret(int drop, Condition cond = al);
134 // Swap two registers. If the scratch register is omitted then a slightly
135 // less efficient form using xor instead of mov is emitted.
136 void Swap(Register reg1,
138 Register scratch = no_reg,
139 Condition cond = al);
141 void Mls(Register dst, Register src1, Register src2, Register srcA,
142 Condition cond = al);
143 void And(Register dst, Register src1, const Operand& src2,
144 Condition cond = al);
145 void Ubfx(Register dst, Register src, int lsb, int width,
146 Condition cond = al);
147 void Sbfx(Register dst, Register src, int lsb, int width,
148 Condition cond = al);
149 // The scratch register is not used for ARMv7.
150 // scratch can be the same register as src (in which case it is trashed), but
151 // not the same as dst.
152 void Bfi(Register dst,
157 Condition cond = al);
158 void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
159 void Usat(Register dst, int satpos, const Operand& src,
160 Condition cond = al);
162 void Call(Label* target);
163 void Push(Register src) { push(src); }
164 void Pop(Register dst) { pop(dst); }
166 // Register move. May do nothing if the registers are identical.
167 void Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }
168 void Move(Register dst, Handle<Object> value);
169 void Move(Register dst, Register src, Condition cond = al);
170 void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
171 Condition cond = al) {
172 if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
173 mov(dst, src, sbit, cond);
176 void Move(DwVfpRegister dst, DwVfpRegister src);
178 void Load(Register dst, const MemOperand& src, Representation r);
179 void Store(Register src, const MemOperand& dst, Representation r);
181 // Load an object from the root table.
182 void LoadRoot(Register destination,
183 Heap::RootListIndex index,
184 Condition cond = al);
185 // Store an object to the root table.
186 void StoreRoot(Register source,
187 Heap::RootListIndex index,
188 Condition cond = al);
190 // ---------------------------------------------------------------------------
193 void IncrementalMarkingRecordWriteHelper(Register object,
197 enum RememberedSetFinalAction {
202 // Record in the remembered set the fact that we have a pointer to new space
203 // at the address pointed to by the addr register. Only works if addr is not
205 void RememberedSetHelper(Register object, // Used for debug code.
208 SaveFPRegsMode save_fp,
209 RememberedSetFinalAction and_then);
211 void CheckPageFlag(Register object,
215 Label* condition_met);
217 // Check if object is in new space. Jumps if the object is not in new space.
218 // The register scratch can be object itself, but scratch will be clobbered.
219 void JumpIfNotInNewSpace(Register object,
222 InNewSpace(object, scratch, ne, branch);
225 // Check if object is in new space. Jumps if the object is in new space.
226 // The register scratch can be object itself, but it will be clobbered.
227 void JumpIfInNewSpace(Register object,
230 InNewSpace(object, scratch, eq, branch);
233 // Check if an object has a given incremental marking color.
234 void HasColor(Register object,
241 void JumpIfBlack(Register object,
246 // Checks the color of an object. If the object is already grey or black
247 // then we just fall through, since it is already live. If it is white and
248 // we can determine that it doesn't need to be scanned, then we just mark it
249 // black and fall through. For the rest we jump to the label so the
250 // incremental marker can fix its assumptions.
251 void EnsureNotWhite(Register object,
255 Label* object_is_white_and_not_data);
257 // Detects conservatively whether an object is data-only, i.e. it does need to
258 // be scanned by the garbage collector.
259 void JumpIfDataObject(Register value,
261 Label* not_data_object);
263 // Notify the garbage collector that we wrote a pointer into an object.
264 // |object| is the object being stored into, |value| is the object being
265 // stored. value and scratch registers are clobbered by the operation.
266 // The offset is the offset from the start of the object, not the offset from
267 // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
268 void RecordWriteField(
273 LinkRegisterStatus lr_status,
274 SaveFPRegsMode save_fp,
275 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
276 SmiCheck smi_check = INLINE_SMI_CHECK,
277 PointersToHereCheck pointers_to_here_check_for_value =
278 kPointersToHereMaybeInteresting);
280 // As above, but the offset has the tag presubtracted. For use with
281 // MemOperand(reg, off).
282 inline void RecordWriteContextSlot(
287 LinkRegisterStatus lr_status,
288 SaveFPRegsMode save_fp,
289 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
290 SmiCheck smi_check = INLINE_SMI_CHECK,
291 PointersToHereCheck pointers_to_here_check_for_value =
292 kPointersToHereMaybeInteresting) {
293 RecordWriteField(context,
294 offset + kHeapObjectTag,
299 remembered_set_action,
301 pointers_to_here_check_for_value);
304 void RecordWriteForMap(
308 LinkRegisterStatus lr_status,
309 SaveFPRegsMode save_fp);
311 // For a given |object| notify the garbage collector that the slot |address|
312 // has been written. |value| is the object being stored. The value and
313 // address registers are clobbered by the operation.
318 LinkRegisterStatus lr_status,
319 SaveFPRegsMode save_fp,
320 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
321 SmiCheck smi_check = INLINE_SMI_CHECK,
322 PointersToHereCheck pointers_to_here_check_for_value =
323 kPointersToHereMaybeInteresting);
326 void Push(Handle<Object> handle);
327 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
  // Push two registers. Pushes leftmost register first (to highest address).
  // When src1's code is greater than src2's, a single stm (descending,
  // writeback) stores both in one instruction while preserving the required
  // stack order; otherwise the registers are stored one at a time.
  void Push(Register src1, Register src2, Condition cond = al) {
    DCHECK(!src1.is(src2));
    if (src1.code() > src2.code()) {
      stm(db_w, sp, src1.bit() | src2.bit(), cond);
      // NOTE(review): the `} else {` wrapper for this slow path is not
      // visible in this chunk -- verify against the upstream header.
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
340 // Push three registers. Pushes leftmost register first (to highest address).
341 void Push(Register src1, Register src2, Register src3, Condition cond = al) {
342 DCHECK(!AreAliased(src1, src2, src3));
343 if (src1.code() > src2.code()) {
344 if (src2.code() > src3.code()) {
345 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
347 stm(db_w, sp, src1.bit() | src2.bit(), cond);
348 str(src3, MemOperand(sp, 4, NegPreIndex), cond);
351 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
352 Push(src2, src3, cond);
356 // Push four registers. Pushes leftmost register first (to highest address).
357 void Push(Register src1,
361 Condition cond = al) {
362 DCHECK(!AreAliased(src1, src2, src3, src4));
363 if (src1.code() > src2.code()) {
364 if (src2.code() > src3.code()) {
365 if (src3.code() > src4.code()) {
368 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
371 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
372 str(src4, MemOperand(sp, 4, NegPreIndex), cond);
375 stm(db_w, sp, src1.bit() | src2.bit(), cond);
376 Push(src3, src4, cond);
379 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
380 Push(src2, src3, src4, cond);
384 // Push five registers. Pushes leftmost register first (to highest address).
385 void Push(Register src1, Register src2, Register src3, Register src4,
386 Register src5, Condition cond = al) {
387 DCHECK(!AreAliased(src1, src2, src3, src4, src5));
388 if (src1.code() > src2.code()) {
389 if (src2.code() > src3.code()) {
390 if (src3.code() > src4.code()) {
391 if (src4.code() > src5.code()) {
393 src1.bit() | src2.bit() | src3.bit() | src4.bit() | src5.bit(),
396 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
398 str(src5, MemOperand(sp, 4, NegPreIndex), cond);
401 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
402 Push(src4, src5, cond);
405 stm(db_w, sp, src1.bit() | src2.bit(), cond);
406 Push(src3, src4, src5, cond);
409 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
410 Push(src2, src3, src4, src5, cond);
  // Pop two registers. Pops rightmost register first (from lower address).
  // A single ldm (ascending, writeback) is used when the register codes
  // already match the required stack order.
  void Pop(Register src1, Register src2, Condition cond = al) {
    DCHECK(!src1.is(src2));
    if (src1.code() > src2.code()) {
      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      // NOTE(review): the `} else {` wrapper for this slow path is not
      // visible in this chunk -- verify against the upstream header.
      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
425 // Pop three registers. Pops rightmost register first (from lower address).
426 void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
427 DCHECK(!AreAliased(src1, src2, src3));
428 if (src1.code() > src2.code()) {
429 if (src2.code() > src3.code()) {
430 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
432 ldr(src3, MemOperand(sp, 4, PostIndex), cond);
433 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
436 Pop(src2, src3, cond);
437 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
441 // Pop four registers. Pops rightmost register first (from lower address).
442 void Pop(Register src1,
446 Condition cond = al) {
447 DCHECK(!AreAliased(src1, src2, src3, src4));
448 if (src1.code() > src2.code()) {
449 if (src2.code() > src3.code()) {
450 if (src3.code() > src4.code()) {
453 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
456 ldr(src4, MemOperand(sp, 4, PostIndex), cond);
457 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
460 Pop(src3, src4, cond);
461 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
464 Pop(src2, src3, src4, cond);
465 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
469 // Push a fixed frame, consisting of lr, fp, constant pool (if
470 // FLAG_enable_embedded_constant_pool), context and JS function / marker id if
471 // marker_reg is a valid register.
472 void PushFixedFrame(Register marker_reg = no_reg);
473 void PopFixedFrame(Register marker_reg = no_reg);
475 // Push and pop the registers that can hold pointers, as defined by the
476 // RegList constant kSafepointSavedRegisters.
477 void PushSafepointRegisters();
478 void PopSafepointRegisters();
479 // Store value in register src in the safepoint stack slot for
481 void StoreToSafepointRegisterSlot(Register src, Register dst);
482 // Load the value of the src register from its safepoint stack slot
483 // into register dst.
484 void LoadFromSafepointRegisterSlot(Register dst, Register src);
486 // Load two consecutive registers with two consecutive memory locations.
487 void Ldrd(Register dst1,
489 const MemOperand& src,
490 Condition cond = al);
492 // Store two consecutive registers to two consecutive memory locations.
493 void Strd(Register src1,
495 const MemOperand& dst,
496 Condition cond = al);
498 // Ensure that FPSCR contains values needed by JavaScript.
499 // We need the NaNModeControlBit to be sure that operations like
500 // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
501 // In VFP3 it will be always the Canonical NaN.
502 // In VFP2 it will be either the Canonical NaN or the negative version
503 // of the Canonical NaN. It doesn't matter if we have two values. The aim
504 // is to be sure to never generate the hole NaN.
505 void VFPEnsureFPSCRState(Register scratch);
507 // If the value is a NaN, canonicalize the value else, do nothing.
508 void VFPCanonicalizeNaN(const DwVfpRegister dst,
509 const DwVfpRegister src,
510 const Condition cond = al);
511 void VFPCanonicalizeNaN(const DwVfpRegister value,
512 const Condition cond = al) {
513 VFPCanonicalizeNaN(value, value, cond);
516 // Compare single values and move the result to the normal condition flags.
517 void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2,
518 const Condition cond = al);
519 void VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2,
520 const Condition cond = al);
522 // Compare double values and move the result to the normal condition flags.
523 void VFPCompareAndSetFlags(const DwVfpRegister src1,
524 const DwVfpRegister src2,
525 const Condition cond = al);
526 void VFPCompareAndSetFlags(const DwVfpRegister src1,
528 const Condition cond = al);
530 // Compare single values and then load the fpscr flags to a register.
531 void VFPCompareAndLoadFlags(const SwVfpRegister src1,
532 const SwVfpRegister src2,
533 const Register fpscr_flags,
534 const Condition cond = al);
535 void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
536 const Register fpscr_flags,
537 const Condition cond = al);
539 // Compare double values and then load the fpscr flags to a register.
540 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
541 const DwVfpRegister src2,
542 const Register fpscr_flags,
543 const Condition cond = al);
544 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
546 const Register fpscr_flags,
547 const Condition cond = al);
549 void Vmov(const DwVfpRegister dst,
551 const Register scratch = no_reg);
553 void VmovHigh(Register dst, DwVfpRegister src);
554 void VmovHigh(DwVfpRegister dst, Register src);
555 void VmovLow(Register dst, DwVfpRegister src);
556 void VmovLow(DwVfpRegister dst, Register src);
558 // Loads the number from object into dst register.
559 // If |object| is neither smi nor heap number, |not_number| is jumped to
560 // with |object| still intact.
561 void LoadNumber(Register object,
562 LowDwVfpRegister dst,
563 Register heap_number_map,
567 // Loads the number from object into double_dst in the double format.
568 // Control will jump to not_int32 if the value cannot be exactly represented
569 // by a 32-bit integer.
570 // Floating point value in the 32-bit integer range that are not exact integer
572 void LoadNumberAsInt32Double(Register object,
573 DwVfpRegister double_dst,
574 Register heap_number_map,
576 LowDwVfpRegister double_scratch,
579 // Loads the number from object into dst as a 32-bit integer.
580 // Control will jump to not_int32 if the object cannot be exactly represented
581 // by a 32-bit integer.
582 // Floating point value in the 32-bit integer range that are not exact integer
583 // won't be converted.
584 void LoadNumberAsInt32(Register object,
586 Register heap_number_map,
588 DwVfpRegister double_scratch0,
589 LowDwVfpRegister double_scratch1,
592 // Generates function and stub prologue code.
594 void Prologue(bool code_pre_aging);
597 // stack_space - extra stack space, used for alignment before call to C.
598 void EnterExitFrame(bool save_doubles, int stack_space = 0);
600 // Leave the current exit frame. Expects the return value in r0.
601 // Expect the number of values, pushed prior to the exit frame, to
602 // remove in a register (or no_reg, if there is nothing to remove).
603 void LeaveExitFrame(bool save_doubles, Register argument_count,
604 bool restore_context,
605 bool argument_count_is_length = false);
607 // Get the actual activation frame alignment for target environment.
608 static int ActivationFrameAlignment();
610 void LoadContext(Register dst, int context_chain_length);
612 // Load the global proxy from the current context.
613 void LoadGlobalProxy(Register dst);
615 // Conditionally load the cached Array transitioned map of type
616 // transitioned_kind from the native context if the map in register
617 // map_in_out is the cached Array map in the native context of
619 void LoadTransitionedArrayMapConditional(
620 ElementsKind expected_kind,
621 ElementsKind transitioned_kind,
624 Label* no_map_match);
626 void LoadGlobalFunction(int index, Register function);
628 // Load the initial map from the global function. The registers
629 // function and map can be the same, function is then overwritten.
630 void LoadGlobalFunctionInitialMap(Register function,
  // Loads the address of the isolate's roots array into kRootRegister (r10),
  // presumably so LoadRoot/StoreRoot can address roots at fixed offsets from
  // it -- see the declarations above.
  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(kRootRegister, Operand(roots_array_start));
640 // ---------------------------------------------------------------------------
641 // JavaScript invokes
643 // Invoke the JavaScript function code by either calling or jumping.
644 void InvokeCode(Register code,
645 const ParameterCount& expected,
646 const ParameterCount& actual,
648 const CallWrapper& call_wrapper);
650 // Invoke the JavaScript function in the given register. Changes the
651 // current context to the context in the function before invoking.
652 void InvokeFunction(Register function,
653 const ParameterCount& actual,
655 const CallWrapper& call_wrapper);
657 void InvokeFunction(Register function,
658 const ParameterCount& expected,
659 const ParameterCount& actual,
661 const CallWrapper& call_wrapper);
663 void InvokeFunction(Handle<JSFunction> function,
664 const ParameterCount& expected,
665 const ParameterCount& actual,
667 const CallWrapper& call_wrapper);
669 void IsObjectJSStringType(Register object,
673 void IsObjectNameType(Register object,
677 // ---------------------------------------------------------------------------
682 // ---------------------------------------------------------------------------
683 // Exception handling
685 // Push a new stack handler and link into stack handler chain.
686 void PushStackHandler();
688 // Unlink the stack handler on top of the stack from the stack handler chain.
689 // Must preserve the result register.
690 void PopStackHandler();
692 // ---------------------------------------------------------------------------
693 // Inline caching support
695 // Generate code for checking access rights - used for security checks
696 // on access to global objects across environments. The holder register
697 // is left untouched, whereas both scratch registers are clobbered.
698 void CheckAccessGlobalProxy(Register holder_reg,
702 void GetNumberHash(Register t0, Register scratch);
704 void LoadFromNumberDictionary(Label* miss,
713 inline void MarkCode(NopMarkerTypes type) {
717 // Check if the given instruction is a 'type' marker.
718 // i.e. check if is is a mov r<type>, r<type> (referenced as nop(type))
719 // These instructions are generated to mark special location in the code,
720 // like some special IC code.
721 static inline bool IsMarkedCode(Instr instr, int type) {
722 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
723 return IsNop(instr, type);
  // Decodes a code-marker nop (a `mov rn, rn` with FIRST_IC_MARKER <= n <
  // LAST_CODE_MARKER) back into its marker value n; yields -1 otherwise.
  // NOTE(review): the definition of src_mask and part of the expression
  // assigning `type` are not visible in this chunk -- verify upstream.
  static inline int GetCodeMarker(Instr instr) {
    // The destination register field (Rd) sits at bits 15:12.
    int dst_reg_offset = 12;
    int dst_mask = 0xf << dst_reg_offset;
    int dst_reg = (instr & dst_mask) >> dst_reg_offset;
    int src_reg = instr & src_mask;
    uint32_t non_register_mask = ~(dst_mask | src_mask);
    // Bits of an always-condition mov instruction outside the register fields.
    uint32_t mov_mask = al | 13 << 21;
    // Return <n> if we have a mov rn rn, else return -1.
    int type = ((instr & non_register_mask) == mov_mask) &&
               (dst_reg == src_reg) &&
               (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
    DCHECK((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
748 // ---------------------------------------------------------------------------
749 // Allocation support
751 // Allocate an object in new space or old space. The object_size is
752 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
753 // is passed. If the space is exhausted control continues at the gc_required
754 // label. The allocated object is returned in result. If the flag
755 // tag_allocated_object is true the result is tagged as as a heap object.
756 // All registers are clobbered also when control continues at the gc_required
758 void Allocate(int object_size,
763 AllocationFlags flags);
765 void Allocate(Register object_size,
770 AllocationFlags flags);
772 void AllocateTwoByteString(Register result,
778 void AllocateOneByteString(Register result, Register length,
779 Register scratch1, Register scratch2,
780 Register scratch3, Label* gc_required);
781 void AllocateTwoByteConsString(Register result,
786 void AllocateOneByteConsString(Register result, Register length,
787 Register scratch1, Register scratch2,
789 void AllocateTwoByteSlicedString(Register result,
794 void AllocateOneByteSlicedString(Register result, Register length,
795 Register scratch1, Register scratch2,
798 // Allocates a heap number or jumps to the gc_required label if the young
799 // space is full and a scavenge is needed. All registers are clobbered also
800 // when control continues at the gc_required label.
801 void AllocateHeapNumber(Register result,
804 Register heap_number_map,
806 TaggingMode tagging_mode = TAG_RESULT,
807 MutableMode mode = IMMUTABLE);
808 void AllocateHeapNumberWithValue(Register result,
812 Register heap_number_map,
815 // Copies a fixed number of fields of heap objects from src to dst.
816 void CopyFields(Register dst,
818 LowDwVfpRegister double_scratch,
821 // Copies a number of bytes from src to dst. All registers are clobbered. On
822 // exit src and dst will point to the place just after where the last byte was
823 // read or written and length will be zero.
824 void CopyBytes(Register src,
829 // Initialize fields with filler values. Fields starting at |start_offset|
830 // not including end_offset are overwritten with the value in |filler|. At
831 // the end the loop, |start_offset| takes the value of |end_offset|.
832 void InitializeFieldsWithFiller(Register start_offset,
836 // ---------------------------------------------------------------------------
837 // Support functions.
839 // Machine code version of Map::GetConstructor().
840 // |temp| holds |result|'s map when done, and |temp2| its instance type.
841 void GetMapConstructor(Register result, Register map, Register temp,
844 // Try to get function prototype of a function and puts the value in
845 // the result register. Checks that the function really is a
846 // function and jumps to the miss label if the fast checks fail. The
847 // function register will be untouched; the other registers may be
849 void TryGetFunctionPrototype(Register function, Register result,
850 Register scratch, Label* miss);
852 // Compare object type for heap object. heap_object contains a non-Smi
853 // whose object type should be compared with the given type. This both
854 // sets the flags and leaves the object type in the type_reg register.
855 // It leaves the map in the map register (unless the type_reg and map register
856 // are the same register). It leaves the heap object in the heap_object
857 // register unless the heap_object register is the same register as one of the
859 // Type_reg can be no_reg. In that case ip is used.
860 void CompareObjectType(Register heap_object,
865 // Compare instance type in a map. map contains a valid map object whose
866 // object type should be compared with the given type. This both
867 // sets the flags and leaves the object type in the type_reg register.
868 void CompareInstanceType(Register map,
873 // Check if a map for a JSObject indicates that the object has fast elements.
874 // Jump to the specified label if it does not.
875 void CheckFastElements(Register map,
879 // Check if a map for a JSObject indicates that the object can have both smi
880 // and HeapObject elements. Jump to the specified label if it does not.
881 void CheckFastObjectElements(Register map,
885 // Check if a map for a JSObject indicates that the object has fast smi only
886 // elements. Jump to the specified label if it does not.
887 void CheckFastSmiElements(Register map,
891 // Check to see if maybe_number can be stored as a double in
892 // FastDoubleElements. If it can, store it at the index specified by key in
893 // the FastDoubleElements array elements. Otherwise jump to fail.
894 void StoreNumberToDoubleElements(Register value_reg,
896 Register elements_reg,
898 LowDwVfpRegister double_scratch,
900 int elements_offset = 0);
902 // Compare an object's map with the specified map and its transitioned
903 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
904 // set with result of map compare. If multiple map compares are required, the
905 // compare sequences branches to early_success.
906 void CompareMap(Register obj,
909 Label* early_success);
911 // As above, but the map of the object is already loaded into the register
912 // which is preserved by the code generated.
913 void CompareMap(Register obj_map,
915 Label* early_success);
917 // Check if the map of an object is equal to a specified map and branch to
918 // label if not. Skip the smi check if not required (object is known to be a
919 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
920 // against maps that are ElementsKind transition maps of the specified map.
921 void CheckMap(Register obj,
925 SmiCheckType smi_check_type);
928 void CheckMap(Register obj,
930 Heap::RootListIndex index,
932 SmiCheckType smi_check_type);
935 // Check if the map of an object is equal to a specified weak map and branch
936 // to a specified target if equal. Skip the smi check if not required
937 // (object is known to be a heap object)
938 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
939 Handle<WeakCell> cell, Handle<Code> success,
940 SmiCheckType smi_check_type);
942 // Compare the given value and the value of weak cell.
943 void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
945 void GetWeakValue(Register value, Handle<WeakCell> cell);
947 // Load the value of the weak cell in the value register. Branch to the given
948 // miss label if the weak cell was cleared.
949 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
951 // Compare the object in a register to a value from the root list.
952 // Uses the ip register as scratch.
953 void CompareRoot(Register obj, Heap::RootListIndex index);
954 void PushRoot(Heap::RootListIndex index) {
959 // Compare the object in a register to a value and jump if they are equal.
960 void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
961 CompareRoot(with, index);
965 // Compare the object in a register to a value and jump if they are not equal.
966 void JumpIfNotRoot(Register with, Heap::RootListIndex index,
967 Label* if_not_equal) {
968 CompareRoot(with, index);
972 // Load and check the instance type of an object for being a string.
973 // Loads the type into the second argument register.
974 // Returns a condition that will be enabled if the object was a string
975 // and the passed-in condition passed. If the passed-in condition failed
976 // then flags remain unchanged.
  // Loads the object's map, then its instance type into `type`, and tests it
  // against kIsNotStringMask -- all predicated on `cond`, so flags are left
  // unchanged when `cond` does not hold.
  // NOTE(review): the `Register type,` parameter line is missing from this
  // chunk (the body clearly uses `type`) -- verify against upstream.
  Condition IsObjectStringType(Register obj,
                               Condition cond = al) {
    ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
    ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
    tst(type, Operand(kIsNotStringMask), cond);
    // A string has the string tag (0) set, so eq means "is a string".
    DCHECK_EQ(0u, kStringTag);
988 // Picks out an array index from the hash field.
990 // hash - holds the index's hash. Clobbered.
991 // index - holds the overwritten index on exit.
992 void IndexFromHash(Register hash, Register index);
994 // Get the number of least significant bits from a register
995 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
996 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
998 // Load the value of a smi object into a double register.
999 // The register value must be between d0 and d15.
1000 void SmiToDouble(LowDwVfpRegister value, Register smi);
1002 // Check if a double can be exactly represented as a signed 32-bit integer.
1003 // Z flag set to one if true.
1004 void TestDoubleIsInt32(DwVfpRegister double_input,
1005 LowDwVfpRegister double_scratch);
1007 // Try to convert a double to a signed 32-bit integer.
1008 // Z flag set to one and result assigned if the conversion is exact.
1009 void TryDoubleToInt32Exact(Register result,
1010 DwVfpRegister double_input,
1011 LowDwVfpRegister double_scratch);
1013 // Floor a double and writes the value to the result register.
1014 // Go to exact if the conversion is exact (to be able to test -0),
1015 // fall through calling code if an overflow occurred, else go to done.
1016 // In return, input_high is loaded with high bits of input.
1017 void TryInt32Floor(Register result,
1018 DwVfpRegister double_input,
1019 Register input_high,
1020 LowDwVfpRegister double_scratch,
1024 // Performs a truncating conversion of a floating point number as used by
1025 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
1026 // succeeds, otherwise falls through if result is saturated. On return
1027 // 'result' either holds answer, or is clobbered on fall through.
1029 // Only public for the test code in test-code-stubs-arm.cc.
1030 void TryInlineTruncateDoubleToI(Register result,
1031 DwVfpRegister input,
1034 // Performs a truncating conversion of a floating point number as used by
1035 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
1036 // Exits with 'result' holding the answer.
1037 void TruncateDoubleToI(Register result, DwVfpRegister double_input);
1039 // Performs a truncating conversion of a heap number as used by
1040 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
1041 // must be different registers. Exits with 'result' holding the answer.
1042 void TruncateHeapNumberToI(Register result, Register object);
1044 // Converts the smi or heap number in object to an int32 using the rules
1045 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
1046 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
1047 // different registers.
1048 void TruncateNumberToI(Register object,
1050 Register heap_number_map,
1054 // Check whether d16-d31 are available on the CPU. The result is given by the
1055 // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
1056 void CheckFor32DRegs(Register scratch);
1058 // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
1059 // values to location, saving [d0..(d15|d31)].
1060 void SaveFPRegs(Register location, Register scratch);
1062 // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
1063 // values to location, restoring [d0..(d15|d31)].
1064 void RestoreFPRegs(Register location, Register scratch);
1066 // ---------------------------------------------------------------------------
1069 // Call a code stub.
1070 void CallStub(CodeStub* stub,
1071 TypeFeedbackId ast_id = TypeFeedbackId::None(),
1072 Condition cond = al);
1074 // Call a code stub.
1075 void TailCallStub(CodeStub* stub, Condition cond = al);
1077 // Call a runtime routine.
1078 void CallRuntime(const Runtime::Function* f,
1080 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1081 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1082 const Runtime::Function* function = Runtime::FunctionForId(id);
1083 CallRuntime(function, function->nargs, kSaveFPRegs);
1086 // Convenience function: Same as above, but takes the fid instead.
1087 void CallRuntime(Runtime::FunctionId id,
1089 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1090 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1093 // Convenience function: call an external reference.
1094 void CallExternalReference(const ExternalReference& ext,
1097 // Tail call of a runtime routine (jump).
1098 // Like JumpToExternalReference, but also takes care of passing the number
1100 void TailCallExternalReference(const ExternalReference& ext,
1104 // Convenience function: tail call a runtime routine (jump).
1105 void TailCallRuntime(Runtime::FunctionId fid,
1109 int CalculateStackPassedWords(int num_reg_arguments,
1110 int num_double_arguments);
1112 // Before calling a C-function from generated code, align arguments on stack.
1113 // After aligning the frame, non-register arguments must be stored in
1114 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
1115 // are word sized. If double arguments are used, this function assumes that
1116 // all double arguments are stored before core registers; otherwise the
1117 // correct alignment of the double values is not guaranteed.
1118 // Some compilers/platforms require the stack to be aligned when calling
1120 // Needs a scratch register to do some arithmetic. This register will be
1122 void PrepareCallCFunction(int num_reg_arguments,
1123 int num_double_registers,
1125 void PrepareCallCFunction(int num_reg_arguments,
1128 // There are two ways of passing double arguments on ARM, depending on
1129 // whether soft or hard floating point ABI is used. These functions
1130 // abstract parameter passing for the three different ways we call
1131 // C functions from generated code.
1132 void MovToFloatParameter(DwVfpRegister src);
1133 void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
1134 void MovToFloatResult(DwVfpRegister src);
1136 // Calls a C function and cleans up the space for arguments allocated
1137 // by PrepareCallCFunction. The called function is not allowed to trigger a
1138 // garbage collection, since that might move the code and invalidate the
1139 // return address (unless this is somehow accounted for by the called
1141 void CallCFunction(ExternalReference function, int num_arguments);
1142 void CallCFunction(Register function, int num_arguments);
1143 void CallCFunction(ExternalReference function,
1144 int num_reg_arguments,
1145 int num_double_arguments);
1146 void CallCFunction(Register function,
1147 int num_reg_arguments,
1148 int num_double_arguments);
1150 void MovFromFloatParameter(DwVfpRegister dst);
1151 void MovFromFloatResult(DwVfpRegister dst);
1153 // Jump to a runtime routine.
1154 void JumpToExternalReference(const ExternalReference& builtin);
1156 // Invoke specified builtin JavaScript function.
1157 void InvokeBuiltin(int native_context_index, InvokeFlag flag,
1158 const CallWrapper& call_wrapper = NullCallWrapper());
1160 // Store the code object for the given builtin in the target register and
1161 // setup the function in r1.
1162 void GetBuiltinEntry(Register target, int native_context_index);
1164 // Store the function for the given builtin in the target register.
1165 void GetBuiltinFunction(Register target, int native_context_index);
1167 Handle<Object> CodeObject() {
1168 DCHECK(!code_object_.is_null());
1169 return code_object_;
1173 // Emit code for a truncating division by a constant. The dividend register is
1174 // unchanged and ip gets clobbered. Dividend and result must be different.
1175 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1177 // ---------------------------------------------------------------------------
1178 // StatsCounter support
1180 void SetCounter(StatsCounter* counter, int value,
1181 Register scratch1, Register scratch2);
1182 void IncrementCounter(StatsCounter* counter, int value,
1183 Register scratch1, Register scratch2);
1184 void DecrementCounter(StatsCounter* counter, int value,
1185 Register scratch1, Register scratch2);
1188 // ---------------------------------------------------------------------------
1191 // Calls Abort(msg) if the condition cond is not satisfied.
1192 // Use --debug_code to enable.
1193 void Assert(Condition cond, BailoutReason reason);
1194 void AssertFastElements(Register elements);
1196 // Like Assert(), but always enabled.
1197 void Check(Condition cond, BailoutReason reason);
1199 // Print a message to stdout and abort execution.
1200 void Abort(BailoutReason msg);
1202 // Verify restrictions about code generated in stubs.
1203 void set_generating_stub(bool value) { generating_stub_ = value; }
1204 bool generating_stub() { return generating_stub_; }
1205 void set_has_frame(bool value) { has_frame_ = value; }
1206 bool has_frame() { return has_frame_; }
1207 inline bool AllowThisStubCall(CodeStub* stub);
1209 // EABI variant for double arguments in use.
1210 bool use_eabi_hardfloat() {
1212 return base::OS::ArmUsingHardFloat();
1213 #elif USE_EABI_HARDFLOAT
1220 // ---------------------------------------------------------------------------
1223 // Check whether the value of reg is a power of two and not zero. If not
1224 // control continues at the label not_power_of_two. If reg is a power of two
1225 // the register scratch contains the value of (reg - 1) when control falls
1227 void JumpIfNotPowerOfTwoOrZero(Register reg,
1229 Label* not_power_of_two_or_zero);
1230 // Check whether the value of reg is a power of two and not zero.
1231 // Control falls through if it is, with scratch containing the mask
1233 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1234 // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1235 // strictly positive but not a power of two.
1236 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
1238 Label* zero_and_neg,
1239 Label* not_power_of_two);
1241 // ---------------------------------------------------------------------------
1244 void SmiTag(Register reg, SBit s = LeaveCC) {
1245 add(reg, reg, Operand(reg), s);
1247 void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
1248 add(dst, src, Operand(src), s);
1251 // Try to convert int32 to smi. If the value is too large, preserve
1252 // the original value and jump to not_a_smi. Destroys scratch and
1254 void TrySmiTag(Register reg, Label* not_a_smi) {
1255 TrySmiTag(reg, reg, not_a_smi);
1257 void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
1258 SmiTag(ip, src, SetCC);
1264 void SmiUntag(Register reg, SBit s = LeaveCC) {
1265 mov(reg, Operand::SmiUntag(reg), s);
1267 void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
1268 mov(dst, Operand::SmiUntag(src), s);
1271 // Untag the source value into destination and jump if source is a smi.
1272 // Source and destination can be the same register.
1273 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1275 // Untag the source value into destination and jump if source is not a smi.
1276 // Source and destination can be the same register.
1277 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1279 // Test if the register contains a smi (Z == 0 (eq) if true).
1280 inline void SmiTst(Register value) {
1281 tst(value, Operand(kSmiTagMask));
1283 inline void NonNegativeSmiTst(Register value) {
1284 tst(value, Operand(kSmiTagMask | kSmiSignMask));
1286 // Jump if the register contains a smi.
1287 inline void JumpIfSmi(Register value, Label* smi_label) {
1288 tst(value, Operand(kSmiTagMask));
1291 // Jump if the register contains a non-smi.
1292 inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1293 tst(value, Operand(kSmiTagMask));
1294 b(ne, not_smi_label);
1296 // Jump if either of the registers contain a non-smi.
1297 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1298 // Jump if either of the registers contain a smi.
1299 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1301 // Abort execution if argument is a smi, enabled via --debug-code.
1302 void AssertNotSmi(Register object);
1303 void AssertSmi(Register object);
1305 // Abort execution if argument is not a string, enabled via --debug-code.
1306 void AssertString(Register object);
1308 // Abort execution if argument is not a name, enabled via --debug-code.
1309 void AssertName(Register object);
1311 // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1312 void AssertFunction(Register object);
1314 // Abort execution if argument is not undefined or an AllocationSite, enabled
1315 // via --debug-code.
1316 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1318 // Abort execution if reg is not the root value with the given index,
1319 // enabled via --debug-code.
1320 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1322 // ---------------------------------------------------------------------------
1323 // HeapNumber utilities
1325 void JumpIfNotHeapNumber(Register object,
1326 Register heap_number_map,
1328 Label* on_not_heap_number);
1330 // ---------------------------------------------------------------------------
1333 // Checks if both objects are sequential one-byte strings and jumps to label
1334 // if either is not. Assumes that neither object is a smi.
1335 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
1341 // Checks if both objects are sequential one-byte strings and jumps to label
1342 // if either is not.
1343 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1346 Label* not_flat_one_byte_strings);
1348 // Checks if both instance types are sequential one-byte strings and jumps to
1349 // label if either is not.
1350 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1351 Register first_object_instance_type, Register second_object_instance_type,
1352 Register scratch1, Register scratch2, Label* failure);
1354 // Check if instance type is sequential one-byte string and jump to label if
1356 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1359 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1361 void EmitSeqStringSetCharCheck(Register string,
1364 uint32_t encoding_mask);
1367 void ClampUint8(Register output_reg, Register input_reg);
1369 void ClampDoubleToUint8(Register result_reg,
1370 DwVfpRegister input_reg,
1371 LowDwVfpRegister double_scratch);
1374 void LoadInstanceDescriptors(Register map, Register descriptors);
1375 void EnumLength(Register dst, Register map);
1376 void NumberOfOwnDescriptors(Register dst, Register map);
1377 void LoadAccessor(Register dst, Register holder, int accessor_index,
1378 AccessorComponent accessor);
1380 template<typename Field>
1381 void DecodeField(Register dst, Register src) {
1382 Ubfx(dst, src, Field::kShift, Field::kSize);
1385 template<typename Field>
1386 void DecodeField(Register reg) {
1387 DecodeField<Field>(reg, reg);
1390 template<typename Field>
1391 void DecodeFieldToSmi(Register dst, Register src) {
1392 static const int shift = Field::kShift;
1393 static const int mask = Field::kMask >> shift << kSmiTagSize;
1394 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
1395 STATIC_ASSERT(kSmiTag == 0);
1396 if (shift < kSmiTagSize) {
1397 mov(dst, Operand(src, LSL, kSmiTagSize - shift));
1398 and_(dst, dst, Operand(mask));
1399 } else if (shift > kSmiTagSize) {
1400 mov(dst, Operand(src, LSR, shift - kSmiTagSize));
1401 and_(dst, dst, Operand(mask));
1403 and_(dst, src, Operand(mask));
1407 template<typename Field>
1408 void DecodeFieldToSmi(Register reg) {
1409 DecodeField<Field>(reg, reg);
1412 // Load the type feedback vector from a JavaScript frame.
1413 void EmitLoadTypeFeedbackVector(Register vector);
1415 // Activation support.
1416 void EnterFrame(StackFrame::Type type,
1417 bool load_constant_pool_pointer_reg = false);
1418 // Returns the pc offset at which the frame ends.
1419 int LeaveFrame(StackFrame::Type type);
1421 // Expects object in r0 and returns map with validated enum cache
1422 // in r0. Assumes that any other register can be used as a scratch.
1423 void CheckEnumCache(Register null_value, Label* call_runtime);
1425 // AllocationMemento support. Arrays may have an associated
1426 // AllocationMemento object that can be checked for in order to pretransition
1428 // On entry, receiver_reg should point to the array object.
1429 // scratch_reg gets clobbered.
1430 // If allocation info is present, condition flags are set to eq.
1431 void TestJSArrayForAllocationMemento(Register receiver_reg,
1432 Register scratch_reg,
1433 Label* no_memento_found);
1435 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1436 Register scratch_reg,
1437 Label* memento_found) {
1438 Label no_memento_found;
1439 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1441 b(eq, memento_found);
1442 bind(&no_memento_found);
1445 // Jumps to found label if a prototype map has dictionary elements.
1446 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1447 Register scratch1, Label* found);
1449 // Loads the constant pool pointer (pp) register.
1450 void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
1451 Register code_target_address);
1452 void LoadConstantPoolPointerRegister();
1455 void CallCFunctionHelper(Register function,
1456 int num_reg_arguments,
1457 int num_double_arguments);
1459 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
1461 // Helper functions for generating invokes.
1462 void InvokePrologue(const ParameterCount& expected,
1463 const ParameterCount& actual,
1464 Handle<Code> code_constant,
1467 bool* definitely_mismatches,
1469 const CallWrapper& call_wrapper);
1471 void InitializeNewString(Register string,
1473 Heap::RootListIndex map_index,
1477 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1478 void InNewSpace(Register object,
1480 Condition cond, // eq for new space, ne otherwise.
1483 // Helper for finding the mark bits for an address. Afterwards, the
1484 // bitmap register points at the word with the mark bits and the mask
1485 // the position of the first bit. Leaves addr_reg unchanged.
1486 inline void GetMarkBits(Register addr_reg,
1487 Register bitmap_reg,
1490 // Compute memory operands for safepoint stack slots.
1491 static int SafepointRegisterStackIndex(int reg_code);
1492 MemOperand SafepointRegisterSlot(Register reg);
1493 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1495 bool generating_stub_;
1497 // This handle will be patched with the code object on installation.
1498 Handle<Object> code_object_;
1500 // Needs access to SafepointRegisterStackIndex for compiled frame
1502 friend class StandardFrame;
1506 // The code patcher is used to patch (typically) small parts of code e.g. for
1507 // debugging and other types of instrumentation. When using the code patcher
1508 // the exact number of bytes specified must be emitted. It is not legal to emit
1509 // relocation information. If any of these constraints are violated it causes
1510 // an assertion to fail.
1518 CodePatcher(byte* address,
1520 FlushICache flush_cache = FLUSH);
1523 // Macro assembler to emit code.
1524 MacroAssembler* masm() { return &masm_; }
1526 // Emit an instruction directly.
1527 void Emit(Instr instr);
1529 // Emit an address directly.
1530 void Emit(Address addr);
1532 // Emit the condition part of an instruction leaving the rest of the current
1533 // instruction unchanged.
1534 void EmitCondition(Condition cond);
1537 byte* address_; // The address of the code being patched.
1538 int size_; // Number of bytes of the expected patch size.
1539 MacroAssembler masm_; // Macro assembler used to generate the code.
1540 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1544 // -----------------------------------------------------------------------------
1545 // Static helper functions.
1547 inline MemOperand ContextOperand(Register context, int index = 0) {
1548 return MemOperand(context, Context::SlotOffset(index));
1552 inline MemOperand GlobalObjectOperand() {
1553 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
1557 #ifdef GENERATED_CODE_COVERAGE
1558 #define CODE_COVERAGE_STRINGIFY(x) #x
1559 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1560 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1561 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
1563 #define ACCESS_MASM(masm) masm->
1567 } } // namespace v8::internal
1569 #endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_