1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
6 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
8 #include "src/assembler.h"
9 #include "src/bailout-reason.h"
10 #include "src/frames.h"
11 #include "src/globals.h"
16 // Give alias names to registers for calling conventions.
// NOTE(review): several aliases deliberately share one physical register —
// r0 is kReturnRegister0, kInterpreterAccumulatorRegister and
// kRuntimeCallArgCountRegister; r1 is kReturnRegister1, kJSFunctionRegister
// and kRuntimeCallFunctionRegister. Callers must not assume they are distinct.
17 const Register kReturnRegister0 = {kRegister_r0_Code};
18 const Register kReturnRegister1 = {kRegister_r1_Code};
19 const Register kJSFunctionRegister = {kRegister_r1_Code};
20 const Register kContextRegister = {kRegister_r7_Code};
21 const Register kInterpreterAccumulatorRegister = {kRegister_r0_Code};
22 const Register kInterpreterRegisterFileRegister = {kRegister_r4_Code};
23 const Register kInterpreterBytecodeOffsetRegister = {kRegister_r5_Code};
24 const Register kInterpreterBytecodeArrayRegister = {kRegister_r6_Code};
25 const Register kInterpreterDispatchTableRegister = {kRegister_r8_Code};
26 const Register kRuntimeCallFunctionRegister = {kRegister_r1_Code};
27 const Register kRuntimeCallArgCountRegister = {kRegister_r0_Code};
29 // ----------------------------------------------------------------------------
30 // Static helper functions
32 // Generate a MemOperand for loading a field from an object.
// Subtracting kHeapObjectTag converts a tagged HeapObject pointer plus a
// field offset into the untagged machine address of the field.
33 inline MemOperand FieldMemOperand(Register object, int offset) {
34 return MemOperand(object, offset - kHeapObjectTag);
38 // Give alias names to registers
// NOTE(review): cp aliases r7 (same as kContextRegister above) and pp aliases
// r8 (same as kInterpreterDispatchTableRegister) — verify no simultaneous use.
39 const Register cp = { kRegister_r7_Code }; // JavaScript context pointer.
40 const Register pp = { kRegister_r8_Code }; // Constant pool pointer.
41 const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
43 // Flags used for AllocateHeapNumber
// Write-barrier configuration enums: whether to emit the remembered-set
// update, whether to inline the Smi check, and whether the stored value can
// point into an interesting (scannable) region.
52 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
53 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
54 enum PointersToHereCheck {
55 kPointersToHereMaybeInteresting,
56 kPointersToHereAreAlwaysInteresting
// Whether the link register (lr) has already been spilled by the caller.
58 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
// Returns some register that is distinct from all the given registers; the
// unused trailing parameters default to no_reg.
61 Register GetRegisterThatIsNotOneOf(Register reg1,
62 Register reg2 = no_reg,
63 Register reg3 = no_reg,
64 Register reg4 = no_reg,
65 Register reg5 = no_reg,
66 Register reg6 = no_reg);
// Returns true if any two of the given (non-no_reg) registers are the same.
70 bool AreAliased(Register reg1,
72 Register reg3 = no_reg,
73 Register reg4 = no_reg,
74 Register reg5 = no_reg,
75 Register reg6 = no_reg,
76 Register reg7 = no_reg,
77 Register reg8 = no_reg);
// Whether a call may embed its target address inline in the instruction
// stream or must always load it from the constant pool.
81 enum TargetAddressStorageMode {
82 CAN_INLINE_TARGET_ADDRESS,
83 NEVER_INLINE_TARGET_ADDRESS
86 // MacroAssembler implements a collection of frequently used macros.
// NOTE(review): this excerpt elides many original lines (gaps in the embedded
// line numbers) — some comments and parameter lists below are truncated.
87 class MacroAssembler: public Assembler {
89 // The isolate parameter can be NULL if the macro assembler should
90 // not use isolate-dependent functionality. In this case, it's the
91 // responsibility of the caller to never invoke such function on the
93 MacroAssembler(Isolate* isolate, void* buffer, int size);
96 // Returns the size of a call in instructions. Note, the value returned is
97 // only valid as long as no entries are added to the constant pool between
98 // checking the call size and emitting the actual call.
99 static int CallSize(Register target, Condition cond = al);
100 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
101 int CallStubSize(CodeStub* stub,
102 TypeFeedbackId ast_id = TypeFeedbackId::None(),
103 Condition cond = al);
104 static int CallSizeNotPredictableCodeSize(Isolate* isolate,
106 RelocInfo::Mode rmode,
107 Condition cond = al);
109 // Jump, Call, and Ret pseudo instructions implementing inter-working.
110 void Jump(Register target, Condition cond = al);
111 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
112 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
113 void Call(Register target, Condition cond = al);
114 void Call(Address target, RelocInfo::Mode rmode,
116 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
117 int CallSize(Handle<Code> code,
118 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
119 TypeFeedbackId ast_id = TypeFeedbackId::None(),
120 Condition cond = al);
121 void Call(Handle<Code> code,
122 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
123 TypeFeedbackId ast_id = TypeFeedbackId::None(),
125 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
126 void Ret(Condition cond = al);
128 // Emit code to discard a non-negative number of pointer-sized elements
129 // from the stack, clobbering only the sp register.
130 void Drop(int count, Condition cond = al);
// Return, additionally dropping |drop| elements from the stack.
132 void Ret(int drop, Condition cond = al);
134 // Swap two registers. If the scratch register is omitted then a slightly
135 // less efficient form using xor instead of mov is emitted.
136 void Swap(Register reg1,
138 Register scratch = no_reg,
139 Condition cond = al);
// Multiply-and-subtract: presumably dst = srcA - src1 * src2 (ARM MLS) —
// TODO confirm against the implementation.
141 void Mls(Register dst, Register src1, Register src2, Register srcA,
142 Condition cond = al);
143 void And(Register dst, Register src1, const Operand& src2,
144 Condition cond = al);
// Unsigned/signed bit-field extract of |width| bits starting at |lsb|.
145 void Ubfx(Register dst, Register src, int lsb, int width,
146 Condition cond = al);
147 void Sbfx(Register dst, Register src, int lsb, int width,
148 Condition cond = al);
149 // The scratch register is not used for ARMv7.
150 // scratch can be the same register as src (in which case it is trashed), but
151 // not the same as dst.
152 void Bfi(Register dst,
157 Condition cond = al);
158 void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
159 void Usat(Register dst, int satpos, const Operand& src,
160 Condition cond = al);
162 void Call(Label* target);
163 void Push(Register src) { push(src); }
164 void Pop(Register dst) { pop(dst); }
166 // Register move. May do nothing if the registers are identical.
167 void Move(Register dst, Handle<Object> value);
168 void Move(Register dst, Register src, Condition cond = al);
// Elides the mov entirely when src is already dst and no condition flags
// were requested (sbit == LeaveCC).
169 void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
170 Condition cond = al) {
171 if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
172 mov(dst, src, sbit, cond);
175 void Move(DwVfpRegister dst, DwVfpRegister src);
// Load/store honoring the given value Representation (size/signedness).
177 void Load(Register dst, const MemOperand& src, Representation r);
178 void Store(Register src, const MemOperand& dst, Representation r);
180 // Load an object from the root table.
181 void LoadRoot(Register destination,
182 Heap::RootListIndex index,
183 Condition cond = al);
184 // Store an object to the root table.
185 void StoreRoot(Register source,
186 Heap::RootListIndex index,
187 Condition cond = al);
189 // ---------------------------------------------------------------------------
// GC support: incremental-marking write barriers and remembered-set updates.
192 void IncrementalMarkingRecordWriteHelper(Register object,
196 enum RememberedSetFinalAction {
201 // Record in the remembered set the fact that we have a pointer to new space
202 // at the address pointed to by the addr register. Only works if addr is not
204 void RememberedSetHelper(Register object, // Used for debug code.
207 SaveFPRegsMode save_fp,
208 RememberedSetFinalAction and_then);
210 void CheckPageFlag(Register object,
214 Label* condition_met);
216 // Check if object is in new space. Jumps if the object is not in new space.
217 // The register scratch can be object itself, but scratch will be clobbered.
218 void JumpIfNotInNewSpace(Register object,
221 InNewSpace(object, scratch, ne, branch);
224 // Check if object is in new space. Jumps if the object is in new space.
225 // The register scratch can be object itself, but it will be clobbered.
226 void JumpIfInNewSpace(Register object,
229 InNewSpace(object, scratch, eq, branch);
232 // Check if an object has a given incremental marking color.
233 void HasColor(Register object,
240 void JumpIfBlack(Register object,
245 // Checks the color of an object. If the object is already grey or black
246 // then we just fall through, since it is already live. If it is white and
247 // we can determine that it doesn't need to be scanned, then we just mark it
248 // black and fall through. For the rest we jump to the label so the
249 // incremental marker can fix its assumptions.
250 void EnsureNotWhite(Register object,
254 Label* object_is_white_and_not_data);
256 // Detects conservatively whether an object is data-only, i.e. it does not
257 // need to be scanned by the garbage collector.
258 void JumpIfDataObject(Register value,
260 Label* not_data_object);
262 // Notify the garbage collector that we wrote a pointer into an object.
263 // |object| is the object being stored into, |value| is the object being
264 // stored. value and scratch registers are clobbered by the operation.
265 // The offset is the offset from the start of the object, not the offset from
266 // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
267 void RecordWriteField(
272 LinkRegisterStatus lr_status,
273 SaveFPRegsMode save_fp,
274 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
275 SmiCheck smi_check = INLINE_SMI_CHECK,
276 PointersToHereCheck pointers_to_here_check_for_value =
277 kPointersToHereMaybeInteresting);
279 // As above, but the offset has the tag presubtracted. For use with
280 // MemOperand(reg, off).
// Adds kHeapObjectTag back so it can delegate to RecordWriteField.
281 inline void RecordWriteContextSlot(
286 LinkRegisterStatus lr_status,
287 SaveFPRegsMode save_fp,
288 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
289 SmiCheck smi_check = INLINE_SMI_CHECK,
290 PointersToHereCheck pointers_to_here_check_for_value =
291 kPointersToHereMaybeInteresting) {
292 RecordWriteField(context,
293 offset + kHeapObjectTag,
298 remembered_set_action,
300 pointers_to_here_check_for_value);
// Write barrier specialized for map slot updates.
303 void RecordWriteForMap(
307 LinkRegisterStatus lr_status,
308 SaveFPRegsMode save_fp);
310 // For a given |object| notify the garbage collector that the slot |address|
311 // has been written. |value| is the object being stored. The value and
312 // address registers are clobbered by the operation.
317 LinkRegisterStatus lr_status,
318 SaveFPRegsMode save_fp,
319 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
320 SmiCheck smi_check = INLINE_SMI_CHECK,
321 PointersToHereCheck pointers_to_here_check_for_value =
322 kPointersToHereMaybeInteresting);
325 void Push(Handle<Object> handle);
326 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
328 // Push two registers. Pushes leftmost register first (to highest address).
// stm(db_w) stores higher-numbered registers at higher addresses, so the
// single-instruction form is only valid when register codes are strictly
// decreasing in argument order; otherwise fall back to individual str's
// with pre-decrement addressing.
329 void Push(Register src1, Register src2, Condition cond = al) {
330 DCHECK(!src1.is(src2));
331 if (src1.code() > src2.code()) {
332 stm(db_w, sp, src1.bit() | src2.bit(), cond);
334 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
335 str(src2, MemOperand(sp, 4, NegPreIndex), cond);
339 // Push three registers. Pushes leftmost register first (to highest address).
// Uses stm for the longest correctly-ordered prefix, then recurses for the
// remainder.
340 void Push(Register src1, Register src2, Register src3, Condition cond = al) {
341 DCHECK(!AreAliased(src1, src2, src3));
342 if (src1.code() > src2.code()) {
343 if (src2.code() > src3.code()) {
344 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
346 stm(db_w, sp, src1.bit() | src2.bit(), cond);
347 str(src3, MemOperand(sp, 4, NegPreIndex), cond);
350 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
351 Push(src2, src3, cond);
355 // Push four registers. Pushes leftmost register first (to highest address).
356 void Push(Register src1,
360 Condition cond = al) {
361 DCHECK(!AreAliased(src1, src2, src3, src4));
362 if (src1.code() > src2.code()) {
363 if (src2.code() > src3.code()) {
364 if (src3.code() > src4.code()) {
367 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
370 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
371 str(src4, MemOperand(sp, 4, NegPreIndex), cond);
374 stm(db_w, sp, src1.bit() | src2.bit(), cond);
375 Push(src3, src4, cond);
378 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
379 Push(src2, src3, src4, cond);
383 // Push five registers. Pushes leftmost register first (to highest address).
384 void Push(Register src1, Register src2, Register src3, Register src4,
385 Register src5, Condition cond = al) {
386 DCHECK(!AreAliased(src1, src2, src3, src4, src5));
387 if (src1.code() > src2.code()) {
388 if (src2.code() > src3.code()) {
389 if (src3.code() > src4.code()) {
390 if (src4.code() > src5.code()) {
392 src1.bit() | src2.bit() | src3.bit() | src4.bit() | src5.bit(),
395 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
397 str(src5, MemOperand(sp, 4, NegPreIndex), cond);
400 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
401 Push(src4, src5, cond);
404 stm(db_w, sp, src1.bit() | src2.bit(), cond);
405 Push(src3, src4, src5, cond);
408 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
409 Push(src2, src3, src4, src5, cond);
413 // Pop two registers. Pops rightmost register first (from lower address).
// Mirror of Push: ldm(ia_w) loads lower-numbered registers from lower
// addresses; otherwise fall back to individual post-indexed ldr's.
414 void Pop(Register src1, Register src2, Condition cond = al) {
415 DCHECK(!src1.is(src2));
416 if (src1.code() > src2.code()) {
417 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
419 ldr(src2, MemOperand(sp, 4, PostIndex), cond);
420 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
424 // Pop three registers. Pops rightmost register first (from lower address).
425 void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
426 DCHECK(!AreAliased(src1, src2, src3));
427 if (src1.code() > src2.code()) {
428 if (src2.code() > src3.code()) {
429 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
431 ldr(src3, MemOperand(sp, 4, PostIndex), cond);
432 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
435 Pop(src2, src3, cond);
436 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
440 // Pop four registers. Pops rightmost register first (from lower address).
441 void Pop(Register src1,
445 Condition cond = al) {
446 DCHECK(!AreAliased(src1, src2, src3, src4));
447 if (src1.code() > src2.code()) {
448 if (src2.code() > src3.code()) {
449 if (src3.code() > src4.code()) {
452 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
455 ldr(src4, MemOperand(sp, 4, PostIndex), cond);
456 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
459 Pop(src3, src4, cond);
460 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
463 Pop(src2, src3, src4, cond);
464 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
468 // Push a fixed frame, consisting of lr, fp, constant pool (if
469 // FLAG_enable_embedded_constant_pool), context and JS function / marker id if
470 // marker_reg is a valid register.
471 void PushFixedFrame(Register marker_reg = no_reg);
472 void PopFixedFrame(Register marker_reg = no_reg);
474 // Push and pop the registers that can hold pointers, as defined by the
475 // RegList constant kSafepointSavedRegisters.
476 void PushSafepointRegisters();
477 void PopSafepointRegisters();
478 // Store value in register src in the safepoint stack slot for
480 void StoreToSafepointRegisterSlot(Register src, Register dst);
481 // Load the value of the src register from its safepoint stack slot
482 // into register dst.
483 void LoadFromSafepointRegisterSlot(Register dst, Register src);
485 // Load two consecutive registers with two consecutive memory locations.
486 void Ldrd(Register dst1,
488 const MemOperand& src,
489 Condition cond = al);
491 // Store two consecutive registers to two consecutive memory locations.
492 void Strd(Register src1,
494 const MemOperand& dst,
495 Condition cond = al);
497 // Ensure that FPSCR contains values needed by JavaScript.
498 // We need the NaNModeControlBit to be sure that operations like
499 // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
500 // In VFP3 it will be always the Canonical NaN.
501 // In VFP2 it will be either the Canonical NaN or the negative version
502 // of the Canonical NaN. It doesn't matter if we have two values. The aim
503 // is to be sure to never generate the hole NaN.
504 void VFPEnsureFPSCRState(Register scratch);
506 // If the value is a NaN, canonicalize it; otherwise do nothing.
507 void VFPCanonicalizeNaN(const DwVfpRegister dst,
508 const DwVfpRegister src,
509 const Condition cond = al);
// In-place variant: delegates to the two-register overload.
510 void VFPCanonicalizeNaN(const DwVfpRegister value,
511 const Condition cond = al) {
512 VFPCanonicalizeNaN(value, value, cond);
515 // Compare single values and move the result to the normal condition flags.
516 void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2,
517 const Condition cond = al);
518 void VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2,
519 const Condition cond = al);
521 // Compare double values and move the result to the normal condition flags.
522 void VFPCompareAndSetFlags(const DwVfpRegister src1,
523 const DwVfpRegister src2,
524 const Condition cond = al);
525 void VFPCompareAndSetFlags(const DwVfpRegister src1,
527 const Condition cond = al);
529 // Compare single values and then load the fpscr flags to a register.
530 void VFPCompareAndLoadFlags(const SwVfpRegister src1,
531 const SwVfpRegister src2,
532 const Register fpscr_flags,
533 const Condition cond = al);
534 void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
535 const Register fpscr_flags,
536 const Condition cond = al);
538 // Compare double values and then load the fpscr flags to a register.
539 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
540 const DwVfpRegister src2,
541 const Register fpscr_flags,
542 const Condition cond = al);
543 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
545 const Register fpscr_flags,
546 const Condition cond = al);
548 void Vmov(const DwVfpRegister dst,
550 const Register scratch = no_reg);
// Move between a core register and the high/low 32-bit half of a VFP
// double register.
552 void VmovHigh(Register dst, DwVfpRegister src);
553 void VmovHigh(DwVfpRegister dst, Register src);
554 void VmovLow(Register dst, DwVfpRegister src);
555 void VmovLow(DwVfpRegister dst, Register src);
557 // Loads the number from object into dst register.
558 // If |object| is neither smi nor heap number, |not_number| is jumped to
559 // with |object| still intact.
560 void LoadNumber(Register object,
561 LowDwVfpRegister dst,
562 Register heap_number_map,
566 // Loads the number from object into double_dst in the double format.
567 // Control will jump to not_int32 if the value cannot be exactly represented
568 // by a 32-bit integer.
569 // Floating point values in the 32-bit integer range that are not exact
// integers won't be converted.
571 void LoadNumberAsInt32Double(Register object,
572 DwVfpRegister double_dst,
573 Register heap_number_map,
575 LowDwVfpRegister double_scratch,
578 // Loads the number from object into dst as a 32-bit integer.
579 // Control will jump to not_int32 if the object cannot be exactly represented
580 // by a 32-bit integer.
581 // Floating point values in the 32-bit integer range that are not exact
582 // integers won't be converted.
583 void LoadNumberAsInt32(Register object,
585 Register heap_number_map,
587 DwVfpRegister double_scratch0,
588 LowDwVfpRegister double_scratch1,
591 // Generates function and stub prologue code.
593 void Prologue(bool code_pre_aging);
596 // stack_space - extra stack space, used for alignment before call to C.
597 void EnterExitFrame(bool save_doubles, int stack_space = 0);
599 // Leave the current exit frame. Expects the return value in r0.
600 // Expect the number of values, pushed prior to the exit frame, to
601 // remove in a register (or no_reg, if there is nothing to remove).
602 void LeaveExitFrame(bool save_doubles, Register argument_count,
603 bool restore_context,
604 bool argument_count_is_length = false);
606 // Get the actual activation frame alignment for target environment.
607 static int ActivationFrameAlignment();
// Walks |context_chain_length| links up the context chain into dst.
609 void LoadContext(Register dst, int context_chain_length);
611 // Load the global proxy from the current context.
612 void LoadGlobalProxy(Register dst);
614 // Conditionally load the cached Array transitioned map of type
615 // transitioned_kind from the native context if the map in register
616 // map_in_out is the cached Array map in the native context of
618 void LoadTransitionedArrayMapConditional(
619 ElementsKind expected_kind,
620 ElementsKind transitioned_kind,
623 Label* no_map_match);
625 void LoadGlobalFunction(int index, Register function);
627 // Load the initial map from the global function. The registers
628 // function and map can be the same, function is then overwritten.
629 void LoadGlobalFunctionInitialMap(Register function,
// Materializes the roots-array start address into kRootRegister (r10).
633 void InitializeRootRegister() {
634 ExternalReference roots_array_start =
635 ExternalReference::roots_array_start(isolate());
636 mov(kRootRegister, Operand(roots_array_start));
639 // ---------------------------------------------------------------------------
640 // JavaScript invokes
642 // Invoke the JavaScript function code by either calling or jumping.
643 void InvokeCode(Register code,
644 const ParameterCount& expected,
645 const ParameterCount& actual,
647 const CallWrapper& call_wrapper);
649 // Invoke the JavaScript function in the given register. Changes the
650 // current context to the context in the function before invoking.
651 void InvokeFunction(Register function,
652 const ParameterCount& actual,
654 const CallWrapper& call_wrapper);
656 void InvokeFunction(Register function,
657 const ParameterCount& expected,
658 const ParameterCount& actual,
660 const CallWrapper& call_wrapper);
662 void InvokeFunction(Handle<JSFunction> function,
663 const ParameterCount& expected,
664 const ParameterCount& actual,
666 const CallWrapper& call_wrapper);
668 void IsObjectJSStringType(Register object,
672 void IsObjectNameType(Register object,
676 // ---------------------------------------------------------------------------
681 // ---------------------------------------------------------------------------
682 // Exception handling
684 // Push a new stack handler and link into stack handler chain.
685 void PushStackHandler();
687 // Unlink the stack handler on top of the stack from the stack handler chain.
688 // Must preserve the result register.
689 void PopStackHandler();
691 // ---------------------------------------------------------------------------
692 // Inline caching support
694 // Generate code for checking access rights - used for security checks
695 // on access to global objects across environments. The holder register
696 // is left untouched, whereas both scratch registers are clobbered.
697 void CheckAccessGlobalProxy(Register holder_reg,
701 void GetNumberHash(Register t0, Register scratch);
703 void LoadFromNumberDictionary(Label* miss,
712 inline void MarkCode(NopMarkerTypes type) {
716 // Check if the given instruction is a 'type' marker.
717 // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
718 // These instructions are generated to mark special location in the code,
719 // like some special IC code.
720 static inline bool IsMarkedCode(Instr instr, int type) {
721 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
722 return IsNop(instr, type);
726 static inline int GetCodeMarker(Instr instr) {
// The destination register field lives in bits [15:12] of the instruction.
727 int dst_reg_offset = 12;
728 int dst_mask = 0xf << dst_reg_offset;
730 int dst_reg = (instr & dst_mask) >> dst_reg_offset;
731 int src_reg = instr & src_mask;
732 uint32_t non_register_mask = ~(dst_mask | src_mask);
// al condition + opcode bits for a register mov (0xd << 21).
733 uint32_t mov_mask = al | 13 << 21;
735 // Return <n> if we have a mov rn rn, else return -1.
736 int type = ((instr & non_register_mask) == mov_mask) &&
737 (dst_reg == src_reg) &&
738 (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
741 DCHECK((type == -1) ||
742 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
747 // ---------------------------------------------------------------------------
748 // Allocation support
750 // Allocate an object in new space or old space. The object_size is
751 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
752 // is passed. If the space is exhausted control continues at the gc_required
753 // label. The allocated object is returned in result. If the flag
754 // tag_allocated_object is true the result is tagged as a heap object.
755 // All registers are clobbered also when control continues at the gc_required
757 void Allocate(int object_size,
762 AllocationFlags flags);
// As above, but with the size held in a register.
764 void Allocate(Register object_size,
769 AllocationFlags flags);
// String allocation helpers; all branch to gc_required on failure.
771 void AllocateTwoByteString(Register result,
777 void AllocateOneByteString(Register result, Register length,
778 Register scratch1, Register scratch2,
779 Register scratch3, Label* gc_required);
780 void AllocateTwoByteConsString(Register result,
785 void AllocateOneByteConsString(Register result, Register length,
786 Register scratch1, Register scratch2,
788 void AllocateTwoByteSlicedString(Register result,
793 void AllocateOneByteSlicedString(Register result, Register length,
794 Register scratch1, Register scratch2,
797 // Allocates a heap number or jumps to the gc_required label if the young
798 // space is full and a scavenge is needed. All registers are clobbered also
799 // when control continues at the gc_required label.
800 void AllocateHeapNumber(Register result,
803 Register heap_number_map,
805 TaggingMode tagging_mode = TAG_RESULT,
806 MutableMode mode = IMMUTABLE);
807 void AllocateHeapNumberWithValue(Register result,
811 Register heap_number_map,
814 // Copies a fixed number of fields of heap objects from src to dst.
815 void CopyFields(Register dst,
817 LowDwVfpRegister double_scratch,
820 // Copies a number of bytes from src to dst. All registers are clobbered. On
821 // exit src and dst will point to the place just after where the last byte was
822 // read or written and length will be zero.
823 void CopyBytes(Register src,
828 // Initialize fields with filler values. Fields starting at |start_offset|
829 // not including end_offset are overwritten with the value in |filler|. At
830 // the end the loop, |start_offset| takes the value of |end_offset|.
831 void InitializeFieldsWithFiller(Register start_offset,
835 // ---------------------------------------------------------------------------
836 // Support functions.
838 // Machine code version of Map::GetConstructor().
839 // |temp| holds |result|'s map when done, and |temp2| its instance type.
840 void GetMapConstructor(Register result, Register map, Register temp,
843 // Try to get function prototype of a function and puts the value in
844 // the result register. Checks that the function really is a
845 // function and jumps to the miss label if the fast checks fail. The
846 // function register will be untouched; the other registers may be
848 void TryGetFunctionPrototype(Register function, Register result,
849 Register scratch, Label* miss);
851 // Compare object type for heap object. heap_object contains a non-Smi
852 // whose object type should be compared with the given type. This both
853 // sets the flags and leaves the object type in the type_reg register.
854 // It leaves the map in the map register (unless the type_reg and map register
855 // are the same register). It leaves the heap object in the heap_object
856 // register unless the heap_object register is the same register as one of the
858 // Type_reg can be no_reg. In that case ip is used.
859 void CompareObjectType(Register heap_object,
864 // Compare instance type in a map. map contains a valid map object whose
865 // object type should be compared with the given type. This both
866 // sets the flags and leaves the object type in the type_reg register.
867 void CompareInstanceType(Register map,
872 // Check if a map for a JSObject indicates that the object has fast elements.
873 // Jump to the specified label if it does not.
874 void CheckFastElements(Register map,
878 // Check if a map for a JSObject indicates that the object can have both smi
879 // and HeapObject elements. Jump to the specified label if it does not.
880 void CheckFastObjectElements(Register map,
884 // Check if a map for a JSObject indicates that the object has fast smi only
885 // elements. Jump to the specified label if it does not.
886 void CheckFastSmiElements(Register map,
890 // Check to see if maybe_number can be stored as a double in
891 // FastDoubleElements. If it can, store it at the index specified by key in
892 // the FastDoubleElements array elements. Otherwise jump to fail.
893 void StoreNumberToDoubleElements(Register value_reg,
895 Register elements_reg,
897 LowDwVfpRegister double_scratch,
899 int elements_offset = 0);
901 // Compare an object's map with the specified map and its transitioned
902 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
903 // set with result of map compare. If multiple map compares are required, the
904 // compare sequences branches to early_success.
905 void CompareMap(Register obj,
908 Label* early_success);
910 // As above, but the map of the object is already loaded into the register
911 // which is preserved by the code generated.
912 void CompareMap(Register obj_map,
914 Label* early_success);
916 // Check if the map of an object is equal to a specified map and branch to
917 // label if not. Skip the smi check if not required (object is known to be a
918 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
919 // against maps that are ElementsKind transition maps of the specified map.
920 void CheckMap(Register obj,
924 SmiCheckType smi_check_type);
// As above, but compares against a root-list map instead of a Handle<Map>.
927 void CheckMap(Register obj,
929 Heap::RootListIndex index,
931 SmiCheckType smi_check_type);
934 // Check if the map of an object is equal to a specified weak map and branch
935 // to a specified target if equal. Skip the smi check if not required
936 // (object is known to be a heap object)
937 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
938 Handle<WeakCell> cell, Handle<Code> success,
939 SmiCheckType smi_check_type);
941 // Compare the given value and the value of weak cell.
942 void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
944 void GetWeakValue(Register value, Handle<WeakCell> cell);
946 // Load the value of the weak cell in the value register. Branch to the given
947 // miss label if the weak cell was cleared.
948 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
950 // Compare the object in a register to a value from the root list.
951 // Uses the ip register as scratch.
952 void CompareRoot(Register obj, Heap::RootListIndex index);
953 void PushRoot(Heap::RootListIndex index) {
958 // Compare the object in a register to a value and jump if they are equal.
959 void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
960 CompareRoot(with, index);
964 // Compare the object in a register to a value and jump if they are not equal.
965 void JumpIfNotRoot(Register with, Heap::RootListIndex index,
966 Label* if_not_equal) {
967 CompareRoot(with, index);
971 // Load and check the instance type of an object for being a string.
972 // Loads the type into the second argument register.
973 // Returns a condition that will be enabled if the object was a string
974 // and the passed-in condition passed. If the passed-in condition failed
975 // then flags remain unchanged.
976 Condition IsObjectStringType(Register obj,
978 Condition cond = al) {
// Loads the map, then its instance type, then tests the string-tag bit.
979 ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
980 ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
981 tst(type, Operand(kIsNotStringMask), cond);
982 DCHECK_EQ(0u, kStringTag);
987 // Picks out an array index from the hash field.
988 // Register use:
989 // hash - holds the index's hash. Clobbered.
990 // index - holds the overwritten index on exit.
991 void IndexFromHash(Register hash, Register index);
993 // Get the number of least significant bits from a register.
// Extracts the low |num_least_bits| bits of |src| (a Smi or a raw int32,
// respectively) into |dst|.
994 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
// Fixed parameter-name typo: "mun_least_bits" -> "num_least_bits"
// (declaration-only rename; no effect on callers or the definition).
995 void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
997 // Load the value of a smi object into a double register.
998 // The register value must be between d0 and d15.
999 void SmiToDouble(LowDwVfpRegister value, Register smi);
1001 // Check if a double can be exactly represented as a signed 32-bit integer.
1002 // Z flag set to one if true.
1003 void TestDoubleIsInt32(DwVfpRegister double_input,
1004 LowDwVfpRegister double_scratch);
1006 // Try to convert a double to a signed 32-bit integer.
1007 // Z flag set to one and result assigned if the conversion is exact.
1008 void TryDoubleToInt32Exact(Register result,
1009 DwVfpRegister double_input,
1010 LowDwVfpRegister double_scratch);
1012 // Floor a double and writes the value to the result register.
1013 // Go to exact if the conversion is exact (to be able to test -0),
1014 // fall through calling code if an overflow occurred, else go to done.
1015 // In return, input_high is loaded with high bits of input.
1016 void TryInt32Floor(Register result,
1017 DwVfpRegister double_input,
1018 Register input_high,
1019 LowDwVfpRegister double_scratch,
1023 // Performs a truncating conversion of a floating point number as used by
1024 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
1025 // succeeds, otherwise falls through if result is saturated. On return
1026 // 'result' either holds answer, or is clobbered on fall through.
1028 // Only public for the test code in test-code-stubs-arm.cc.
1029 void TryInlineTruncateDoubleToI(Register result,
1030 DwVfpRegister input,
1033 // Performs a truncating conversion of a floating point number as used by
1034 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
1035 // Exits with 'result' holding the answer.
1036 void TruncateDoubleToI(Register result, DwVfpRegister double_input);
1038 // Performs a truncating conversion of a heap number as used by
1039 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
1040 // must be different registers. Exits with 'result' holding the answer.
1041 void TruncateHeapNumberToI(Register result, Register object);
1043 // Converts the smi or heap number in object to an int32 using the rules
1044 // for ToInt32 as described in ECMAScript 9.5: the value is truncated
1045 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
1046 // different registers.
1047 void TruncateNumberToI(Register object,
1049 Register heap_number_map,
1053 // Check whether d16-d31 are available on the CPU. The result is given by the
1054 // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
1055 void CheckFor32DRegs(Register scratch);
1057 // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
1058 // values to location, saving [d0..(d15|d31)].
1059 void SaveFPRegs(Register location, Register scratch);
1061 // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
1062 // values from location, restoring [d0..(d15|d31)].
1063 void RestoreFPRegs(Register location, Register scratch);
1065 // ---------------------------------------------------------------------------
1068 // Call a code stub.
1069 void CallStub(CodeStub* stub,
1070 TypeFeedbackId ast_id = TypeFeedbackId::None(),
1071 Condition cond = al);
1073 // Call a code stub.
1074 void TailCallStub(CodeStub* stub, Condition cond = al);
1076 // Call a runtime routine.
1077 void CallRuntime(const Runtime::Function* f,
1079 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1080 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1081 const Runtime::Function* function = Runtime::FunctionForId(id);
1082 CallRuntime(function, function->nargs, kSaveFPRegs);
1085 // Convenience function: Same as above, but takes the fid instead.
1086 void CallRuntime(Runtime::FunctionId id,
1088 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1089 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1092 // Convenience function: call an external reference.
1093 void CallExternalReference(const ExternalReference& ext,
1096 // Tail call of a runtime routine (jump).
1097 // Like JumpToExternalReference, but also takes care of passing the number
1099 void TailCallExternalReference(const ExternalReference& ext,
1103 // Convenience function: tail call a runtime routine (jump).
1104 void TailCallRuntime(Runtime::FunctionId fid,
1108 int CalculateStackPassedWords(int num_reg_arguments,
1109 int num_double_arguments);
1111 // Before calling a C-function from generated code, align arguments on stack.
1112 // After aligning the frame, non-register arguments must be stored in
1113 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
1114 // are word sized. If double arguments are used, this function assumes that
1115 // all double arguments are stored before core registers; otherwise the
1116 // correct alignment of the double values is not guaranteed.
1117 // Some compilers/platforms require the stack to be aligned when calling
1119 // Needs a scratch register to do some arithmetic. This register will be
1121 void PrepareCallCFunction(int num_reg_arguments,
1122 int num_double_registers,
1124 void PrepareCallCFunction(int num_reg_arguments,
1127 // There are two ways of passing double arguments on ARM, depending on
1128 // whether soft or hard floating point ABI is used. These functions
1129 // abstract parameter passing for the three different ways we call
1130 // C functions from generated code.
1131 void MovToFloatParameter(DwVfpRegister src);
1132 void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
1133 void MovToFloatResult(DwVfpRegister src);
1135 // Calls a C function and cleans up the space for arguments allocated
1136 // by PrepareCallCFunction. The called function is not allowed to trigger a
1137 // garbage collection, since that might move the code and invalidate the
1138 // return address (unless this is somehow accounted for by the called
1140 void CallCFunction(ExternalReference function, int num_arguments);
1141 void CallCFunction(Register function, int num_arguments);
1142 void CallCFunction(ExternalReference function,
1143 int num_reg_arguments,
1144 int num_double_arguments);
1145 void CallCFunction(Register function,
1146 int num_reg_arguments,
1147 int num_double_arguments);
1149 void MovFromFloatParameter(DwVfpRegister dst);
1150 void MovFromFloatResult(DwVfpRegister dst);
1152 // Jump to a runtime routine.
1153 void JumpToExternalReference(const ExternalReference& builtin);
1155 // Invoke specified builtin JavaScript function.
1156 void InvokeBuiltin(int native_context_index, InvokeFlag flag,
1157 const CallWrapper& call_wrapper = NullCallWrapper());
1159 // Store the code object for the given builtin in the target register and
1160 // setup the function in r1.
1161 void GetBuiltinEntry(Register target, int native_context_index);
1163 // Store the function for the given builtin in the target register.
1164 void GetBuiltinFunction(Register target, int native_context_index);
1166 Handle<Object> CodeObject() {
1167 DCHECK(!code_object_.is_null());
1168 return code_object_;
1172 // Emit code for a truncating division by a constant. The dividend register is
1173 // unchanged and ip gets clobbered. Dividend and result must be different.
1174 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1176 // ---------------------------------------------------------------------------
1177 // StatsCounter support
1179 void SetCounter(StatsCounter* counter, int value,
1180 Register scratch1, Register scratch2);
1181 void IncrementCounter(StatsCounter* counter, int value,
1182 Register scratch1, Register scratch2);
1183 void DecrementCounter(StatsCounter* counter, int value,
1184 Register scratch1, Register scratch2);
1187 // ---------------------------------------------------------------------------
1190 // Calls Abort(msg) if the condition cond is not satisfied.
1191 // Use --debug_code to enable.
1192 void Assert(Condition cond, BailoutReason reason);
1193 void AssertFastElements(Register elements);
1195 // Like Assert(), but always enabled.
1196 void Check(Condition cond, BailoutReason reason);
1198 // Print a message to stdout and abort execution.
1199 void Abort(BailoutReason msg);
1201 // Verify restrictions about code generated in stubs.
1202 void set_generating_stub(bool value) { generating_stub_ = value; }
1203 bool generating_stub() { return generating_stub_; }
1204 void set_has_frame(bool value) { has_frame_ = value; }
1205 bool has_frame() { return has_frame_; }
1206 inline bool AllowThisStubCall(CodeStub* stub);
1208 // EABI variant for double arguments in use.
1209 bool use_eabi_hardfloat() {
1211 return base::OS::ArmUsingHardFloat();
1212 #elif USE_EABI_HARDFLOAT
1219 // ---------------------------------------------------------------------------
1222 // Check whether the value of reg is a power of two and not zero. If not
1223 // control continues at the label not_power_of_two. If reg is a power of two
1224 // the register scratch contains the value of (reg - 1) when control falls
1226 void JumpIfNotPowerOfTwoOrZero(Register reg,
1228 Label* not_power_of_two_or_zero);
1229 // Check whether the value of reg is a power of two and not zero.
1230 // Control falls through if it is, with scratch containing the mask
1232 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1233 // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1234 // strictly positive but not a power of two.
1235 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
1237 Label* zero_and_neg,
1238 Label* not_power_of_two);
1240 // ---------------------------------------------------------------------------
1243 void SmiTag(Register reg, SBit s = LeaveCC) {
1244 add(reg, reg, Operand(reg), s);
1246 void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
1247 add(dst, src, Operand(src), s);
1250 // Try to convert int32 to smi. If the value is too large, preserve
1251 // the original value and jump to not_a_smi. Destroys scratch and
1253 void TrySmiTag(Register reg, Label* not_a_smi) {
1254 TrySmiTag(reg, reg, not_a_smi);
1256 void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
1257 SmiTag(ip, src, SetCC);
1263 void SmiUntag(Register reg, SBit s = LeaveCC) {
1264 mov(reg, Operand::SmiUntag(reg), s);
1266 void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
1267 mov(dst, Operand::SmiUntag(src), s);
1270 // Untag the source value into destination and jump if source is a smi.
1271 // Source and destination can be the same register.
1272 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1274 // Untag the source value into destination and jump if source is not a smi.
1275 // Source and destination can be the same register.
1276 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1278 // Test if the register contains a smi (Z == 0 (eq) if true).
1279 inline void SmiTst(Register value) {
1280 tst(value, Operand(kSmiTagMask));
1282 inline void NonNegativeSmiTst(Register value) {
1283 tst(value, Operand(kSmiTagMask | kSmiSignMask));
1285 // Jump if the register contains a smi.
1286 inline void JumpIfSmi(Register value, Label* smi_label) {
1287 tst(value, Operand(kSmiTagMask));
1290 // Jump if the register contains a non-smi.
1291 inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1292 tst(value, Operand(kSmiTagMask));
1293 b(ne, not_smi_label);
1295 // Jump if either of the registers contain a non-smi.
1296 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1297 // Jump if either of the registers contain a smi.
1298 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1300 // Abort execution if argument is a smi, enabled via --debug-code.
1301 void AssertNotSmi(Register object);
1302 void AssertSmi(Register object);
1304 // Abort execution if argument is not a string, enabled via --debug-code.
1305 void AssertString(Register object);
1307 // Abort execution if argument is not a name, enabled via --debug-code.
1308 void AssertName(Register object);
1310 // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1311 void AssertFunction(Register object);
1313 // Abort execution if argument is not undefined or an AllocationSite, enabled
1314 // via --debug-code.
1315 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1317 // Abort execution if reg is not the root value with the given index,
1318 // enabled via --debug-code.
1319 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1321 // ---------------------------------------------------------------------------
1322 // HeapNumber utilities
1324 void JumpIfNotHeapNumber(Register object,
1325 Register heap_number_map,
1327 Label* on_not_heap_number);
1329 // ---------------------------------------------------------------------------
1332 // Generate code to do a lookup in the number string cache. If the number in
1333 // the register object is found in the cache the generated code falls through
1334 // with the result in the result register. The object and the result register
1335 // can be the same. If the number is not found in the cache the code jumps to
1336 // the label not_found with only the content of register object unchanged.
1337 void LookupNumberStringCache(Register object,
1344 // Checks if both objects are sequential one-byte strings and jumps to label
1345 // if either is not. Assumes that neither object is a smi.
1346 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
1352 // Checks if both objects are sequential one-byte strings and jumps to label
1353 // if either is not.
1354 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1357 Label* not_flat_one_byte_strings);
1359 // Checks if both instance types are sequential one-byte strings and jumps to
1360 // label if either is not.
1361 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1362 Register first_object_instance_type, Register second_object_instance_type,
1363 Register scratch1, Register scratch2, Label* failure);
1365 // Check if instance type is sequential one-byte string and jump to label if
1367 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1370 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1372 void EmitSeqStringSetCharCheck(Register string,
1375 uint32_t encoding_mask);
1378 void ClampUint8(Register output_reg, Register input_reg);
1380 void ClampDoubleToUint8(Register result_reg,
1381 DwVfpRegister input_reg,
1382 LowDwVfpRegister double_scratch);
1385 void LoadInstanceDescriptors(Register map, Register descriptors);
1386 void EnumLength(Register dst, Register map);
1387 void NumberOfOwnDescriptors(Register dst, Register map);
1388 void LoadAccessor(Register dst, Register holder, int accessor_index,
1389 AccessorComponent accessor);
1391 template<typename Field>
1392 void DecodeField(Register dst, Register src) {
1393 Ubfx(dst, src, Field::kShift, Field::kSize);
1396 template<typename Field>
1397 void DecodeField(Register reg) {
1398 DecodeField<Field>(reg, reg);
1401 template<typename Field>
1402 void DecodeFieldToSmi(Register dst, Register src) {
1403 static const int shift = Field::kShift;
1404 static const int mask = Field::kMask >> shift << kSmiTagSize;
1405 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
1406 STATIC_ASSERT(kSmiTag == 0);
1407 if (shift < kSmiTagSize) {
1408 mov(dst, Operand(src, LSL, kSmiTagSize - shift));
1409 and_(dst, dst, Operand(mask));
1410 } else if (shift > kSmiTagSize) {
1411 mov(dst, Operand(src, LSR, shift - kSmiTagSize));
1412 and_(dst, dst, Operand(mask));
1414 and_(dst, src, Operand(mask));
1418 template<typename Field>
1419 void DecodeFieldToSmi(Register reg) {
1420 DecodeField<Field>(reg, reg);
1423 // Activation support.
1424 void EnterFrame(StackFrame::Type type,
1425 bool load_constant_pool_pointer_reg = false);
1426 // Returns the pc offset at which the frame ends.
1427 int LeaveFrame(StackFrame::Type type);
1429 // Expects object in r0 and returns map with validated enum cache
1430 // in r0. Assumes that any other register can be used as a scratch.
1431 void CheckEnumCache(Register null_value, Label* call_runtime);
1433 // AllocationMemento support. Arrays may have an associated
1434 // AllocationMemento object that can be checked for in order to pretransition
1436 // On entry, receiver_reg should point to the array object.
1437 // scratch_reg gets clobbered.
1438 // If allocation info is present, condition flags are set to eq.
1439 void TestJSArrayForAllocationMemento(Register receiver_reg,
1440 Register scratch_reg,
1441 Label* no_memento_found);
1443 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1444 Register scratch_reg,
1445 Label* memento_found) {
1446 Label no_memento_found;
1447 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1449 b(eq, memento_found);
1450 bind(&no_memento_found);
1453 // Jumps to found label if a prototype map has dictionary elements.
1454 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1455 Register scratch1, Label* found);
1457 // Loads the constant pool pointer (pp) register.
1458 void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
1459 Register code_target_address);
1460 void LoadConstantPoolPointerRegister();
1463 void CallCFunctionHelper(Register function,
1464 int num_reg_arguments,
1465 int num_double_arguments);
1467 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
1469 // Helper functions for generating invokes.
1470 void InvokePrologue(const ParameterCount& expected,
1471 const ParameterCount& actual,
1472 Handle<Code> code_constant,
1475 bool* definitely_mismatches,
1477 const CallWrapper& call_wrapper);
1479 void InitializeNewString(Register string,
1481 Heap::RootListIndex map_index,
1485 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1486 void InNewSpace(Register object,
1488 Condition cond, // eq for new space, ne otherwise.
1491 // Helper for finding the mark bits for an address. Afterwards, the
1492 // bitmap register points at the word with the mark bits and the mask
1493 // the position of the first bit. Leaves addr_reg unchanged.
1494 inline void GetMarkBits(Register addr_reg,
1495 Register bitmap_reg,
1498 // Compute memory operands for safepoint stack slots.
1499 static int SafepointRegisterStackIndex(int reg_code);
1500 MemOperand SafepointRegisterSlot(Register reg);
1501 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1503 bool generating_stub_;
1505 // This handle will be patched with the code object on installation.
1506 Handle<Object> code_object_;
1508 // Needs access to SafepointRegisterStackIndex for compiled frame
1510 friend class StandardFrame;
1514 // The code patcher is used to patch (typically) small parts of code e.g. for
1515 // debugging and other types of instrumentation. When using the code patcher
1516 // the exact number of bytes specified must be emitted. It is not legal to emit
1517 // relocation information. If any of these constraints are violated it causes
1518 // an assertion to fail.
1526 CodePatcher(byte* address,
1528 FlushICache flush_cache = FLUSH);
1531 // Macro assembler to emit code.
1532 MacroAssembler* masm() { return &masm_; }
1534 // Emit an instruction directly.
1535 void Emit(Instr instr);
1537 // Emit an address directly.
1538 void Emit(Address addr);
1540 // Emit the condition part of an instruction leaving the rest of the current
1541 // instruction unchanged.
1542 void EmitCondition(Condition cond);
1545 byte* address_; // The address of the code being patched.
1546 int size_; // Number of bytes of the expected patch size.
1547 MacroAssembler masm_; // Macro assembler used to generate the code.
1548 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1552 // -----------------------------------------------------------------------------
1553 // Static helper functions.
1555 inline MemOperand ContextOperand(Register context, int index = 0) {
1556 return MemOperand(context, Context::SlotOffset(index));
1560 inline MemOperand GlobalObjectOperand() {
1561 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
1565 #ifdef GENERATED_CODE_COVERAGE
1566 #define CODE_COVERAGE_STRINGIFY(x) #x
1567 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1568 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1569 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
1571 #define ACCESS_MASM(masm) masm->
1575 } } // namespace v8::internal
1577 #endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_