// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {kRegister_r0_Code};
const Register kReturnRegister1 = {kRegister_r1_Code};
const Register kJSFunctionRegister = {kRegister_r1_Code};
const Register kContextRegister = {kRegister_r7_Code};
const Register kInterpreterAccumulatorRegister = {kRegister_r0_Code};
const Register kInterpreterRegisterFileRegister = {kRegister_r4_Code};
const Register kInterpreterBytecodeOffsetRegister = {kRegister_r5_Code};
const Register kInterpreterBytecodeArrayRegister = {kRegister_r6_Code};
const Register kInterpreterDispatchTableRegister = {kRegister_r8_Code};
const Register kRuntimeCallFunctionRegister = {kRegister_r1_Code};
const Register kRuntimeCallArgCountRegister = {kRegister_r0_Code};

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}
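
// Illustrative use (editorial note, not from the original header): loading an
// object's map field, as IsObjectStringType() does further below:
//   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));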

// Give alias names to registers
const Register cp = { kRegister_r7_Code };  // JavaScript context pointer.
const Register pp = { kRegister_r8_Code };  // Constant pool pointer.
const Register kRootRegister = { kRegister_r10_Code };  // Roots array pointer.

// Flags used for AllocateHeapNumber
enum TaggingMode { TAG_RESULT, DONT_TAG_RESULT };

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };

Register GetRegisterThatIsNotOneOf(Register reg1,
    Register reg2 = no_reg,
    Register reg3 = no_reg,
    Register reg4 = no_reg,
    Register reg5 = no_reg,
    Register reg6 = no_reg);

bool AreAliased(Register reg1,
    Register reg2,
    Register reg3 = no_reg,
    Register reg4 = no_reg,
    Register reg5 = no_reg,
    Register reg6 = no_reg,
    Register reg7 = no_reg,
    Register reg8 = no_reg);

enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such functions on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Returns the size of a call in instructions. Note, the value returned is
  // only valid as long as no entries are added to the constant pool between
  // checking the call size and emitting the actual call.
  static int CallSize(Register target, Condition cond = al);
  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
  int CallStubSize(CodeStub* stub,
      TypeFeedbackId ast_id = TypeFeedbackId::None(),
      Condition cond = al);
  static int CallSizeNotPredictableCodeSize(Isolate* isolate,
      Address target,
      RelocInfo::Mode rmode,
      Condition cond = al);

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Call(Register target, Condition cond = al);
  void Call(Address target, RelocInfo::Mode rmode,
      Condition cond = al,
      TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
  int CallSize(Handle<Code> code,
      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
      TypeFeedbackId ast_id = TypeFeedbackId::None(),
      Condition cond = al);
  void Call(Handle<Code> code,
      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
      TypeFeedbackId ast_id = TypeFeedbackId::None(),
      Condition cond = al,
      TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
  void Ret(Condition cond = al);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = al);

  void Ret(int drop, Condition cond = al);

  // Swap two registers. If the scratch register is omitted then a slightly
  // less efficient form using eor (exclusive-or) instead of mov is emitted.
  void Swap(Register reg1,
      Register reg2,
      Register scratch = no_reg,
      Condition cond = al);
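
  // Editorial note: Mls is a multiply-and-subtract, computing
  // dst = srcA - src1 * src2 (the mls instruction where the CPU provides it;
  // on older cores it is expected to be expanded into a mul/sub pair).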
  void Mls(Register dst, Register src1, Register src2, Register srcA,
      Condition cond = al);
  void And(Register dst, Register src1, const Operand& src2,
      Condition cond = al);
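  // Editorial note: Ubfx/Sbfx extract a |width|-bit field starting at bit
  // |lsb| of src into dst, zero-extended (Ubfx) or sign-extended (Sbfx).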
  void Ubfx(Register dst, Register src, int lsb, int width,
      Condition cond = al);
  void Sbfx(Register dst, Register src, int lsb, int width,
      Condition cond = al);
  // The scratch register is not used for ARMv7.
  // scratch can be the same register as src (in which case it is trashed), but
  // not the same as dst.
  void Bfi(Register dst,
      Register src,
      Register scratch,
      int lsb,
      int width,
      Condition cond = al);
  void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
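  // Editorial note: Usat writes into dst the value of src clamped (unsigned
  // saturated) to the range [0, 2^satpos - 1].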
  void Usat(Register dst, int satpos, const Operand& src,
      Condition cond = al);

  void Call(Label* target);
  void Push(Register src) { push(src); }
  void Pop(Register dst) { pop(dst); }

  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Handle<Object> value);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
      Condition cond = al) {
    if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
      mov(dst, src, sbit, cond);
    }
  }
  void Move(DwVfpRegister dst, DwVfpRegister src);

  void Load(Register dst, const MemOperand& src, Representation r);
  void Store(Register src, const MemOperand& dst, Representation r);

  // Load an object from the root table.
  void LoadRoot(Register destination,
      Heap::RootListIndex index,
      Condition cond = al);
  // Store an object to the root table.
  void StoreRoot(Register source,
      Heap::RootListIndex index,
      Condition cond = al);
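  // Illustrative use (editorial note): loading a well-known root value such
  // as undefined into a register:
  //   LoadRoot(ip, Heap::kUndefinedValueRootIndex);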

  // ---------------------------------------------------------------------------
  // GC Support

  void IncrementalMarkingRecordWriteHelper(Register object,

  enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
      SaveFPRegsMode save_fp,
      RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
      Label* condition_met);

  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object,
      Register scratch,
      Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object,
      Register scratch,
      Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object,

  void JumpIfBlack(Register object,

  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
      Label* object_is_white_and_not_data);

  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
      Label* not_data_object);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // As above, but the offset has the tag presubtracted. For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context,
        offset + kHeapObjectTag,
        remembered_set_action,
        pointers_to_here_check_for_value);
  }

  void RecordWriteForMap(
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
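
  // Editorial note on the multi-register Push/Pop helpers below: stm/ldm
  // transfer registers in register-number order, so the block forms can only
  // be used when the requested push order matches that order; otherwise the
  // helpers fall back to individual str/ldr accesses or a smaller Push/Pop.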

  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Condition cond = al) {
    DCHECK(!src1.is(src2));
    if (src1.code() > src2.code()) {
      stm(db_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
    }
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, cond);
    }
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1,
      Register src2,
      Register src3,
      Register src4,
      Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3, src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          stm(db_w, sp,
              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, cond);
    }
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
      Register src5, Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3, src4, src5));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          if (src4.code() > src5.code()) {
            stm(db_w, sp,
                src1.bit() | src2.bit() | src3.bit() | src4.bit() | src5.bit(),
                cond);
          } else {
            stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
                cond);
            str(src5, MemOperand(sp, 4, NegPreIndex), cond);
          }
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          Push(src4, src5, cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, src5, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, src5, cond);
    }
  }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Condition cond = al) {
    DCHECK(!src1.is(src2));
    if (src1.code() > src2.code()) {
      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        ldr(src3, MemOperand(sp, 4, PostIndex), cond);
        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      }
    } else {
      Pop(src2, src3, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop four registers. Pops rightmost register first (from lower address).
  void Pop(Register src1,
      Register src2,
      Register src3,
      Register src4,
      Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3, src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          ldm(ia_w, sp,
              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          ldr(src4, MemOperand(sp, 4, PostIndex), cond);
          ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
        }
      } else {
        Pop(src3, src4, cond);
        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      }
    } else {
      Pop(src2, src3, src4, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Push a fixed frame, consisting of lr, fp, constant pool (if
  // FLAG_enable_embedded_constant_pool), context and JS function / marker id if
  // marker_reg is a valid register.
  void PushFixedFrame(Register marker_reg = no_reg);
  void PopFixedFrame(Register marker_reg = no_reg);

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  // Store the value in register src in the safepoint stack slot for
  // register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // Load two consecutive registers with two consecutive memory locations.
  void Ldrd(Register dst1,
      Register dst2,
      const MemOperand& src,
      Condition cond = al);

  // Store two consecutive registers to two consecutive memory locations.
  void Strd(Register src1,
      Register src2,
      const MemOperand& dst,
      Condition cond = al);

  // Ensure that FPSCR contains values needed by JavaScript.
  // We need the NaNModeControlBit to be sure that operations like
  // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
  // In VFP3 it will be always the Canonical NaN.
  // In VFP2 it will be either the Canonical NaN or the negative version
  // of the Canonical NaN. It doesn't matter if we have two values. The aim
  // is to be sure to never generate the hole NaN.
  void VFPEnsureFPSCRState(Register scratch);

  // If the value is a NaN, canonicalize the value; else, do nothing.
  void VFPCanonicalizeNaN(const DwVfpRegister dst,
      const DwVfpRegister src,
      const Condition cond = al);
  void VFPCanonicalizeNaN(const DwVfpRegister value,
      const Condition cond = al) {
    VFPCanonicalizeNaN(value, value, cond);
  }

  // Compare single values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2,
      const Condition cond = al);
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2,
      const Condition cond = al);

  // Compare double values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
      const DwVfpRegister src2,
      const Condition cond = al);
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
      const double src2,
      const Condition cond = al);

  // Compare single values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const SwVfpRegister src1,
      const SwVfpRegister src2,
      const Register fpscr_flags,
      const Condition cond = al);
  void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
      const Register fpscr_flags,
      const Condition cond = al);

  // Compare double values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
      const DwVfpRegister src2,
      const Register fpscr_flags,
      const Condition cond = al);
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
      const double src2,
      const Register fpscr_flags,
      const Condition cond = al);

  void Vmov(const DwVfpRegister dst,
      const Register scratch = no_reg);
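
  // Editorial note: VmovHigh/VmovLow move the upper/lower 32-bit half of a
  // D (double) VFP register to or from an ARM core register.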
  void VmovHigh(Register dst, DwVfpRegister src);
  void VmovHigh(DwVfpRegister dst, Register src);
  void VmovLow(Register dst, DwVfpRegister src);
  void VmovLow(DwVfpRegister dst, Register src);

  // Loads the number from object into dst register.
  // If |object| is neither smi nor heap number, |not_number| is jumped to
  // with |object| still intact.
  void LoadNumber(Register object,
      LowDwVfpRegister dst,
      Register heap_number_map,

  // Loads the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  void LoadNumberAsInt32Double(Register object,
      DwVfpRegister double_dst,
      Register heap_number_map,
      LowDwVfpRegister double_scratch,

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  void LoadNumberAsInt32(Register object,
      Register heap_number_map,
      DwVfpRegister double_scratch0,
      LowDwVfpRegister double_scratch1,

  // Generates function and stub prologue code.
  void Prologue(bool code_pre_aging);

  // stack_space - extra stack space, used for alignment before call to C.
  void EnterExitFrame(bool save_doubles, int stack_space = 0);

  // Leave the current exit frame. Expects the return value in r0.
  // Expects the number of values to remove (pushed prior to the exit frame)
  // in a register, or no_reg if there is nothing to remove.
  void LeaveExitFrame(bool save_doubles, Register argument_count,
      bool restore_context,
      bool argument_count_is_length = false);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void LoadContext(Register dst, int context_chain_length);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Label* no_map_match);

  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,

  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(kRootRegister, Operand(roots_array_start));
  }

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
      const ParameterCount& expected,
      const ParameterCount& actual,
      InvokeFlag flag,
      const CallWrapper& call_wrapper);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
      const ParameterCount& actual,
      InvokeFlag flag,
      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function,
      const ParameterCount& expected,
      const ParameterCount& actual,
      InvokeFlag flag,
      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
      const ParameterCount& expected,
      const ParameterCount& actual,
      InvokeFlag flag,
      const CallWrapper& call_wrapper);

  void IsObjectJSStringType(Register object,

  void IsObjectNameType(Register object,

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,

  void GetNumberHash(Register t0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,

  inline void MarkCode(NopMarkerTypes type) {
    nop(type);
  }

  // Check if the given instruction is a 'type' marker,
  // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type)).
  // These instructions are generated to mark special locations in the code,
  // like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }

  static inline int GetCodeMarker(Instr instr) {
    int dst_reg_offset = 12;
    int dst_mask = 0xf << dst_reg_offset;
    int src_mask = 0xf;
    int dst_reg = (instr & dst_mask) >> dst_reg_offset;
    int src_reg = instr & src_mask;
    uint32_t non_register_mask = ~(dst_mask | src_mask);
    uint32_t mov_mask = al | 13 << 21;

    // Return <n> if we have a mov rn rn, else return -1.
    int type = ((instr & non_register_mask) == mov_mask) &&
                       (dst_reg == src_reg) &&
                       (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
                   ? src_reg
                   : -1;
    DCHECK((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }

  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the space is exhausted control continues at the gc_required
  // label. The allocated object is returned in result. If the flag
  // tag_allocated_object is true the result is tagged as a heap object.
  // All registers are clobbered also when control continues at the gc_required
  // label.
  void Allocate(int object_size,
      AllocationFlags flags);

  void Allocate(Register object_size,
      AllocationFlags flags);

  void AllocateTwoByteString(Register result,

  void AllocateOneByteString(Register result, Register length,
      Register scratch1, Register scratch2,
      Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result,

  void AllocateOneByteConsString(Register result, Register length,
      Register scratch1, Register scratch2,
      Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,

  void AllocateOneByteSlicedString(Register result, Register length,
      Register scratch1, Register scratch2,
      Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
      Register heap_number_map,
      TaggingMode tagging_mode = TAG_RESULT,
      MutableMode mode = IMMUTABLE);
  void AllocateHeapNumberWithValue(Register result,
      Register heap_number_map,

  // Copies a fixed number of fields of heap objects from src to dst.
  void CopyFields(Register dst,
      LowDwVfpRegister double_scratch,

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte was
  // read or written and length will be zero.
  void CopyBytes(Register src,

  // Initialize fields with filler values. Fields starting at |start_offset|
  // up to but not including |end_offset| are overwritten with the value in
  // |filler|. At the end of the loop, |start_offset| takes the value of
  // |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
      Register end_offset,
      Register filler);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
      Register temp2);

  // Try to get the function prototype of a function and put the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function, Register result,
      Register scratch, Label* miss);

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register). It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  // type_reg can be no_reg, in which case ip is used.
  void CompareObjectType(Register heap_object,

  // Compare instance type in a map. map contains a valid map object whose
  // object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map,

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
      Register elements_reg,
      LowDwVfpRegister double_scratch,
      int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
  // set with result of map compare. If multiple map compares are required, the
  // compare sequence branches to early_success.
  void CompareMap(Register obj,
      Label* early_success);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMap(Register obj_map,
      Handle<Map> map,
      Label* early_success);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
      SmiCheckType smi_check_type);

  void CheckMap(Register obj,
      Heap::RootListIndex index,
      SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object)
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
      Handle<WeakCell> cell, Handle<Code> success,
      SmiCheckType smi_check_type);

  // Compare the given value and the value of weak cell.
  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);

  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the given
  // miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // Compare the object in a register to a value from the root list.
  // Uses the ip register as scratch.
  void CompareRoot(Register obj, Heap::RootListIndex index);

  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string
  // and the passed-in condition passed. If the passed-in condition failed
  // then flags remain unchanged.
  Condition IsObjectStringType(Register obj,
      Register type,
      Condition cond = al) {
    ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
    ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
    tst(type, Operand(kIsNotStringMask), cond);
    DCHECK_EQ(0u, kStringTag);
    return eq;
  }

  // Picks out an array index from the hash field.
  // hash - holds the index's hash. Clobbered.
  // index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // Get the number of least significant bits from a register.
  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);

  // Load the value of a smi object into a double register.
  // The register value must be between d0 and d15.
  void SmiToDouble(LowDwVfpRegister value, Register smi);

  // Check if a double can be exactly represented as a signed 32-bit integer.
  // Z flag set to one if true.
  void TestDoubleIsInt32(DwVfpRegister double_input,
      LowDwVfpRegister double_scratch);

  // Try to convert a double to a signed 32-bit integer.
  // Z flag set to one and result assigned if the conversion is exact.
  void TryDoubleToInt32Exact(Register result,
      DwVfpRegister double_input,
      LowDwVfpRegister double_scratch);

  // Floors a double and writes the value to the result register.
  // Go to exact if the conversion is exact (to be able to test -0),
  // fall through calling code if an overflow occurred, else go to done.
  // In return, input_high is loaded with high bits of input.
  void TryInt32Floor(Register result,
      DwVfpRegister double_input,
      Register input_high,
      LowDwVfpRegister double_scratch,
      Label* done,
      Label* exact);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds answer, or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result,
      DwVfpRegister input,
      Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DwVfpRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
  // different registers.
  void TruncateNumberToI(Register object,
      Register result,
      Register heap_number_map,

  // Check whether d16-d31 are available on the CPU. The result is given by the
  // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
  void CheckFor32DRegs(Register scratch);

  // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
  // values to location, saving [d0..(d15|d31)].
  void SaveFPRegs(Register location, Register scratch);

  // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
  // values from location, restoring [d0..(d15|d31)].
  void RestoreFPRegs(Register location, Register scratch);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub,
      TypeFeedbackId ast_id = TypeFeedbackId::None(),
      Condition cond = al);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f,
      int num_arguments,
      SaveFPRegsMode save_doubles = kDontSaveFPRegs);
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id,
      int num_arguments,
      SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
      int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of arguments.
  void TailCallExternalReference(const ExternalReference& ext,
      int num_arguments,
      int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
      int num_arguments,
      int result_size);

  int CalculateStackPassedWords(int num_reg_arguments,
      int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments,
      int num_double_registers,
      Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
      Register scratch);

  // There are two ways of passing double arguments on ARM, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DwVfpRegister src);
  void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
  void MovToFloatResult(DwVfpRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function,
      int num_reg_arguments,
      int num_double_arguments);
  void CallCFunction(Register function,
      int num_reg_arguments,
      int num_double_arguments);

  void MovFromFloatParameter(DwVfpRegister dst);
  void MovFromFloatResult(DwVfpRegister dst);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);

  // Invoke specified builtin JavaScript function.
  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
      const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the code object for the given builtin in the target register and
  // setup the function in r1.
  void GetBuiltinEntry(Register target, int native_context_index);

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, int native_context_index);

  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }

  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged and ip gets clobbered. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);
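  // (Editorial note: a truncating division by a constant is typically lowered
  // to a magic-number multiply-high sequence, so no hardware divide
  // instruction is required.)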

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value,
      Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
      Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
      Register scratch1, Register scratch2);

  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug-code to enable.
  void Assert(Condition cond, BailoutReason reason);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cond, BailoutReason reason);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // EABI variant for double arguments in use.
  bool use_eabi_hardfloat() {
#ifdef __arm__
    return base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
    return true;
#else
    return false;
#endif
  }

  // ---------------------------------------------------------------------------
  // Number utilities

  // Check whether the value of reg is a power of two and not zero. If not
  // control continues at the label not_power_of_two. If reg is a power of two
  // the register scratch contains the value of (reg - 1) when control falls
  // through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
      Register scratch,
      Label* not_power_of_two_or_zero);
  // Check whether the value of reg is a power of two and not zero.
  // Control falls through if it is, with scratch containing the mask
  // value (reg - 1).
  // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
  // zero or negative, or jumps to the 'not_power_of_two' label if the value is
  // strictly positive but not a power of two.
  void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
      Register scratch,
      Label* zero_and_neg,
      Label* not_power_of_two);

  // ---------------------------------------------------------------------------
  // Smi utilities
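
  // Editorial note: with kSmiTag == 0 and a one-bit tag (kSmiTagSize == 1),
  // a smi is the integer value shifted left by one bit, so adding a register
  // to itself below is the same as tagging it.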

  void SmiTag(Register reg, SBit s = LeaveCC) {
    add(reg, reg, Operand(reg), s);
  }
  void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
    add(dst, src, Operand(src), s);
  }

  // Try to convert int32 to smi. If the value is too large, preserve
  // the original value and jump to not_a_smi. Destroys scratch and
  // sets flags.
  void TrySmiTag(Register reg, Label* not_a_smi) {
    TrySmiTag(reg, reg, not_a_smi);
  }
  void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
    SmiTag(ip, src, SetCC);
    b(vs, not_a_smi);
    mov(reg, ip);
  }

  void SmiUntag(Register reg, SBit s = LeaveCC) {
    mov(reg, Operand::SmiUntag(reg), s);
  }
  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
    mov(dst, Operand::SmiUntag(src), s);
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Test if the register contains a smi (Z == 1, i.e. eq, if true).
  inline void SmiTst(Register value) {
    tst(value, Operand(kSmiTagMask));
  }
  inline void NonNegativeSmiTst(Register value) {
    tst(value, Operand(kSmiTagMask | kSmiSignMask));
  }
  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(eq, smi_label);
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(ne, not_smi_label);
  }
  // Jump if either of the registers contains a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contains a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities

  void JumpIfNotHeapNumber(Register object,
      Register heap_number_map,
      Label* on_not_heap_number);

  // ---------------------------------------------------------------------------
  // String utilities

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls through
  // with the result in the result register. The object and the result register
  // can be the same. If the number is not found in the cache the code jumps to
  // the label not_found with only the content of register object unchanged.
  void LookupNumberStringCache(Register object,

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
      Label* not_flat_one_byte_strings);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
      Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);

  void EmitSeqStringSetCharCheck(Register string,
      uint32_t encoding_mask);

  void ClampUint8(Register output_reg, Register input_reg);

  void ClampDoubleToUint8(Register result_reg,
      DwVfpRegister input_reg,
      LowDwVfpRegister double_scratch);

  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
      AccessorComponent accessor);

  template<typename Field>
  void DecodeField(Register dst, Register src) {
    Ubfx(dst, src, Field::kShift, Field::kSize);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }

  template<typename Field>
  void DecodeFieldToSmi(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> shift << kSmiTagSize;
    STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
    STATIC_ASSERT(kSmiTag == 0);
    if (shift < kSmiTagSize) {
      mov(dst, Operand(src, LSL, kSmiTagSize - shift));
      and_(dst, dst, Operand(mask));
    } else if (shift > kSmiTagSize) {
      mov(dst, Operand(src, LSR, shift - kSmiTagSize));
      and_(dst, dst, Operand(mask));
    } else {
      and_(dst, src, Operand(mask));
    }
  }

  template<typename Field>
  void DecodeFieldToSmi(Register reg) {
    DecodeFieldToSmi<Field>(reg, reg);
  }

  // Activation support.
  void EnterFrame(StackFrame::Type type,
      bool load_constant_pool_pointer_reg = false);
  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type);

  // Expects object in r0 and returns map with validated enum cache
  // in r0. Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Register null_value, Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to a different elements kind.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, condition flags are set to eq.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
      Register scratch_reg,
      Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
      Register scratch_reg,
      Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
        &no_memento_found);
    b(eq, memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
      Register scratch1, Label* found);

  // Loads the constant pool pointer (pp) register.
  void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
      Register code_target_address);
  void LoadConstantPoolPointerRegister();

 private:
  void CallCFunctionHelper(Register function,
      int num_reg_arguments,
      int num_double_arguments);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
      const ParameterCount& actual,
      Handle<Code> code_constant,
      bool* definitely_mismatches,
      const CallWrapper& call_wrapper);

  void InitializeNewString(Register string,
      Heap::RootListIndex map_index,

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
      Register scratch,
      Condition cond,  // eq for new space, ne otherwise.
      Label* branch);

  // Helper for finding the mark bits for an address. Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit. Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
      Register bitmap_reg,
      Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;
  bool has_frame_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};


// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  enum FlushICache { FLUSH, DONT_FLUSH };

  CodePatcher(byte* address,
      FlushICache flush_cache = FLUSH);

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Emit the condition part of an instruction leaving the rest of the current
  // instruction unchanged.
  void EmitCondition(Condition cond);

 private:
  byte* address_;            // The address of the code being patched.
  int size_;                 // Number of bytes of the expected patch size.
  MacroAssembler masm_;      // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};


// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}


inline MemOperand GlobalObjectOperand() {
  return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}


#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif


} }  // namespace v8::internal

#endif  // V8_ARM_MACRO_ASSEMBLER_ARM_H_