1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
6 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
8 #include "src/assembler.h"
9 #include "src/bailout-reason.h"
10 #include "src/frames.h"
11 #include "src/globals.h"
16 // ----------------------------------------------------------------------------
17 // Static helper functions
19 // Generate a MemOperand for loading a field from an object.
20 inline MemOperand FieldMemOperand(Register object, int offset) {
21 return MemOperand(object, offset - kHeapObjectTag);
// Give alias names to registers used by V8's ARM embedding; these fixed
// assignments are part of the generated-code calling convention.
const Register cp = { kRegister_r7_Code };  // JavaScript context pointer.
const Register pp = { kRegister_r8_Code };  // Constant pool pointer.
const Register kRootRegister = { kRegister_r10_Code };  // Roots array pointer.
// Flags used for AllocateHeapNumber
// NOTE(review): the enum this comment originally described (TaggingMode)
// appears to be missing from this copy — confirm against upstream.

// Whether a write barrier should also update the store buffer / remembered
// set, or only perform the incremental-marking part.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether the written value must first be tested for being a Smi (Smis never
// require a write barrier).
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
// Whether the incremental marker might care about the value being written.
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};  // Closing brace restored; it was truncated in this copy.
// Whether the caller of a write-barrier macro has already spilled lr.
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
// Returns a register distinct from all of the given registers (per its name);
// unused slots default to no_reg.
// NOTE(review): the candidate pool is defined in the .cc file — confirm there.
Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);
57 bool AreAliased(Register reg1,
59 Register reg3 = no_reg,
60 Register reg4 = no_reg,
61 Register reg5 = no_reg,
62 Register reg6 = no_reg,
63 Register reg7 = no_reg,
64 Register reg8 = no_reg);
// Controls whether a call sequence is allowed to keep its target address
// inline (in the constant pool) or must always materialize it.
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};  // Closing brace restored; it was truncated in this copy.
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
  // NOTE(review): upstream has a "public:" access specifier near here that
  // appears to be missing from this copy — confirm against the original.

  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such function on the
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Returns the size of a call in instructions. Note, the value returned is
  // only valid as long as no entries are added to the constant pool between
  // checking the call size and emitting the actual call.
  static int CallSize(Register target, Condition cond = al);
  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
  // NOTE(review): the two declarations below look truncated in this copy
  // (missing trailing parameters / closing parenthesis) — confirm upstream.
  int CallStubSize(CodeStub* stub,
                   TypeFeedbackId ast_id = TypeFeedbackId::None(),
  static int CallSizeNotPredictableCodeSize(Isolate* isolate,
                                            RelocInfo::Mode rmode,

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Call(Register target, Condition cond = al);
  // NOTE(review): Call(Address) appears to have lost intermediate parameter
  // lines in this copy.
  void Call(Address target, RelocInfo::Mode rmode,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
  int CallSize(Handle<Code> code,
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
               TypeFeedbackId ast_id = TypeFeedbackId::None(),
               Condition cond = al);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None(),
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
  void Ret(Condition cond = al);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = al);

  // Return, additionally dropping `drop` stack slots.
  void Ret(int drop, Condition cond = al);

  // Swap two registers. If the scratch register is omitted then a slightly
  // less efficient form using xor instead of mov is emitted.
  // NOTE(review): the second Register parameter appears truncated away here.
  void Swap(Register reg1,
            Register scratch = no_reg,
            Condition cond = al);

  // Multiply-and-subtract (ARM MLS): dst = srcA - src1 * src2.
  void Mls(Register dst, Register src1, Register src2, Register srcA,
           Condition cond = al);
  void And(Register dst, Register src1, const Operand& src2,
           Condition cond = al);
  // Unsigned / signed bitfield extract of `width` bits starting at `lsb`.
  void Ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  void Sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  // The scratch register is not used for ARMv7.
  // scratch can be the same register as src (in which case it is trashed), but
  // not the same as dst.
  // NOTE(review): Bfi's src/lsb/width/scratch parameter lines appear
  // truncated in this copy.
  void Bfi(Register dst,
           Condition cond = al);
  void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
  // Unsigned saturate src to satpos bits.
  void Usat(Register dst, int satpos, const Operand& src,
            Condition cond = al);
  // Emit a call to code at the given label within this code object.
  void Call(Label* target);
  // Push / pop a single register on the JS stack (lowercase emit one instr).
  void Push(Register src) { push(src); }
  void Pop(Register dst) { pop(dst); }
  // Register move. May do nothing if the registers are identical.
  // The Handle overload materializes the object constant into dst.
  void Move(Register dst, Handle<Object> value);
  void Move(Register dst, Register src, Condition cond = al);
156 void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
157 Condition cond = al) {
158 if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
159 mov(dst, src, sbit, cond);
  void Move(DwVfpRegister dst, DwVfpRegister src);

  // Load/store using the width implied by the value's Representation.
  void Load(Register dst, const MemOperand& src, Representation r);
  void Store(Register src, const MemOperand& dst, Representation r);

  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond = al);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index,
                 Condition cond = al);

  // ---------------------------------------------------------------------------
  // GC Support

  // NOTE(review): several declarations in this section look truncated in this
  // copy (missing parameter lines / enumerators) — confirm against upstream.
  void IncrementalMarkingRecordWriteHelper(Register object,

  enum RememberedSetFinalAction {

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  void RememberedSetHelper(Register object,  // Used for debug code.
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Label* condition_met);

  void CheckMapDeprecated(Handle<Map> map,
                          Label* if_deprecated);
207 // Check if object is in new space. Jumps if the object is not in new space.
208 // The register scratch can be object itself, but scratch will be clobbered.
209 void JumpIfNotInNewSpace(Register object,
212 InNewSpace(object, scratch, ne, branch);
215 // Check if object is in new space. Jumps if the object is in new space.
216 // The register scratch can be object itself, but it will be clobbered.
217 void JumpIfInNewSpace(Register object,
220 InNewSpace(object, scratch, eq, branch);
  // Check if an object has a given incremental marking color.
  // NOTE(review): parameter lists below look truncated in this copy —
  // confirm against upstream macro-assembler-arm.h.
  void HasColor(Register object,

  void JumpIfBlack(Register object,

  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Label* object_is_white_and_not_data);

  // Detects conservatively whether an object is data-only, i.e. it does need to
  // be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Label* not_data_object);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
  void RecordWriteField(
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);
270 // As above, but the offset has the tag presubtracted. For use with
271 // MemOperand(reg, off).
272 inline void RecordWriteContextSlot(
277 LinkRegisterStatus lr_status,
278 SaveFPRegsMode save_fp,
279 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
280 SmiCheck smi_check = INLINE_SMI_CHECK,
281 PointersToHereCheck pointers_to_here_check_for_value =
282 kPointersToHereMaybeInteresting) {
283 RecordWriteField(context,
284 offset + kHeapObjectTag,
289 remembered_set_action,
291 pointers_to_here_check_for_value);
  // NOTE(review): parameter lists in this span look truncated in this copy
  // (RecordWriteForMap, and the RecordWrite declaration whose head is
  // missing entirely) — confirm against upstream.
  void RecordWriteForMap(
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // Push a handle constant.
  void Push(Handle<Object> handle);
  // Smis are immediates, so pushing one goes through a handle to it.
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
319 // Push two registers. Pushes leftmost register first (to highest address).
320 void Push(Register src1, Register src2, Condition cond = al) {
321 DCHECK(!src1.is(src2));
322 if (src1.code() > src2.code()) {
323 stm(db_w, sp, src1.bit() | src2.bit(), cond);
325 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
326 str(src2, MemOperand(sp, 4, NegPreIndex), cond);
330 // Push three registers. Pushes leftmost register first (to highest address).
331 void Push(Register src1, Register src2, Register src3, Condition cond = al) {
332 DCHECK(!src1.is(src2));
333 DCHECK(!src2.is(src3));
334 DCHECK(!src1.is(src3));
335 if (src1.code() > src2.code()) {
336 if (src2.code() > src3.code()) {
337 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
339 stm(db_w, sp, src1.bit() | src2.bit(), cond);
340 str(src3, MemOperand(sp, 4, NegPreIndex), cond);
343 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
344 Push(src2, src3, cond);
348 // Push four registers. Pushes leftmost register first (to highest address).
349 void Push(Register src1,
353 Condition cond = al) {
354 DCHECK(!src1.is(src2));
355 DCHECK(!src2.is(src3));
356 DCHECK(!src1.is(src3));
357 DCHECK(!src1.is(src4));
358 DCHECK(!src2.is(src4));
359 DCHECK(!src3.is(src4));
360 if (src1.code() > src2.code()) {
361 if (src2.code() > src3.code()) {
362 if (src3.code() > src4.code()) {
365 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
368 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
369 str(src4, MemOperand(sp, 4, NegPreIndex), cond);
372 stm(db_w, sp, src1.bit() | src2.bit(), cond);
373 Push(src3, src4, cond);
376 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
377 Push(src2, src3, src4, cond);
381 // Pop two registers. Pops rightmost register first (from lower address).
382 void Pop(Register src1, Register src2, Condition cond = al) {
383 DCHECK(!src1.is(src2));
384 if (src1.code() > src2.code()) {
385 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
387 ldr(src2, MemOperand(sp, 4, PostIndex), cond);
388 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
392 // Pop three registers. Pops rightmost register first (from lower address).
393 void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
394 DCHECK(!src1.is(src2));
395 DCHECK(!src2.is(src3));
396 DCHECK(!src1.is(src3));
397 if (src1.code() > src2.code()) {
398 if (src2.code() > src3.code()) {
399 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
401 ldr(src3, MemOperand(sp, 4, PostIndex), cond);
402 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
405 Pop(src2, src3, cond);
406 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
410 // Pop four registers. Pops rightmost register first (from lower address).
411 void Pop(Register src1,
415 Condition cond = al) {
416 DCHECK(!src1.is(src2));
417 DCHECK(!src2.is(src3));
418 DCHECK(!src1.is(src3));
419 DCHECK(!src1.is(src4));
420 DCHECK(!src2.is(src4));
421 DCHECK(!src3.is(src4));
422 if (src1.code() > src2.code()) {
423 if (src2.code() > src3.code()) {
424 if (src3.code() > src4.code()) {
427 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
430 ldr(src4, MemOperand(sp, 4, PostIndex), cond);
431 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
434 Pop(src3, src4, cond);
435 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
438 Pop(src2, src3, src4, cond);
439 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
  // Push a fixed frame, consisting of lr, fp, constant pool (if
  // FLAG_enable_ool_constant_pool), context and JS function / marker id if
  // marker_reg is a valid register.
  void PushFixedFrame(Register marker_reg = no_reg);
  void PopFixedFrame(Register marker_reg = no_reg);

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  // Store value in register src in the safepoint stack slot for
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // Load two consecutive registers with two consecutive memory locations.
  // NOTE(review): Ldrd/Strd appear to have lost their second register
  // parameter line in this copy — confirm upstream.
  void Ldrd(Register dst1,
            const MemOperand& src,
            Condition cond = al);

  // Store two consecutive registers to two consecutive memory locations.
  void Strd(Register src1,
            const MemOperand& dst,
            Condition cond = al);

  // Ensure that FPSCR contains values needed by JavaScript.
  // We need the NaNModeControlBit to be sure that operations like
  // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
  // In VFP3 it will be always the Canonical NaN.
  // In VFP2 it will be either the Canonical NaN or the negative version
  // of the Canonical NaN. It doesn't matter if we have two values. The aim
  // is to be sure to never generate the hole NaN.
  void VFPEnsureFPSCRState(Register scratch);

  // If the value is a NaN, canonicalize the value else, do nothing.
  void VFPCanonicalizeNaN(const DwVfpRegister dst,
                          const DwVfpRegister src,
                          const Condition cond = al);
485 void VFPCanonicalizeNaN(const DwVfpRegister value,
486 const Condition cond = al) {
487 VFPCanonicalizeNaN(value, value, cond);
  // Compare double values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
                             const DwVfpRegister src2,
                             const Condition cond = al);
  // NOTE(review): several declarations in this span look truncated in this
  // copy (missing parameter lines) — confirm against upstream.
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
                             const Condition cond = al);

  // Compare double values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const DwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const Register fpscr_flags,
                              const Condition cond = al);

  void Vmov(const DwVfpRegister dst,
            const Register scratch = no_reg);

  // Move between a core register and the high/low word of a double register.
  void VmovHigh(Register dst, DwVfpRegister src);
  void VmovHigh(DwVfpRegister dst, Register src);
  void VmovLow(Register dst, DwVfpRegister src);
  void VmovLow(DwVfpRegister dst, Register src);

  // Loads the number from object into dst register.
  // If |object| is neither smi nor heap number, |not_number| is jumped to
  // with |object| still intact.
  void LoadNumber(Register object,
                  LowDwVfpRegister dst,
                  Register heap_number_map,

  // Loads the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point value in the 32-bit integer range that are not exact integer
  void LoadNumberAsInt32Double(Register object,
                               DwVfpRegister double_dst,
                               Register heap_number_map,
                               LowDwVfpRegister double_scratch,

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point value in the 32-bit integer range that are not exact integer
  // won't be converted.
  void LoadNumberAsInt32(Register object,
                         Register heap_number_map,
                         DwVfpRegister double_scratch0,
                         LowDwVfpRegister double_scratch1,

  // Generates function and stub prologue code.
  void Prologue(bool code_pre_aging);

  // stack_space - extra stack space, used for alignment before call to C.
  void EnterExitFrame(bool save_doubles, int stack_space = 0);

  // Leave the current exit frame. Expects the return value in r0.
  // Expect the number of values, pushed prior to the exit frame, to
  // remove in a register (or no_reg, if there is nothing to remove).
  void LeaveExitFrame(bool save_doubles,
                      Register argument_count,
                      bool restore_context);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void LoadContext(Register dst, int context_chain_length);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Label* no_map_match);

  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
590 void InitializeRootRegister() {
591 ExternalReference roots_array_start =
592 ExternalReference::roots_array_start(isolate());
593 mov(kRootRegister, Operand(roots_array_start));
  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  // NOTE(review): several declarations in this section look truncated in this
  // copy (missing parameter lines) — confirm against upstream.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  const CallWrapper& call_wrapper);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      const CallWrapper& call_wrapper);

  // Type checks that set flags / branch; parameter lists truncated in copy.
  void IsObjectJSObjectType(Register heap_object,

  void IsInstanceJSObjectType(Register map,

  void IsObjectJSStringType(Register object,

  void IsObjectNameType(Register object,

  // ---------------------------------------------------------------------------

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link into try handler chain.
  void PushTryHandler(StackHandler::Kind kind, int handler_index);

  // Unlink the stack handler on top of the stack from the try handler chain.
  // Must preserve the result register.
  void PopTryHandler();

  // Passes thrown value to the handler of top of the try handler chain.
  void Throw(Register value);

  // Propagates an uncatchable exception to the top of the current JS stack's
  void ThrowUncatchable(Register value);

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,

  void GetNumberHash(Register t0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
685 inline void MarkCode(NopMarkerTypes type) {
689 // Check if the given instruction is a 'type' marker.
690 // i.e. check if is is a mov r<type>, r<type> (referenced as nop(type))
691 // These instructions are generated to mark special location in the code,
692 // like some special IC code.
693 static inline bool IsMarkedCode(Instr instr, int type) {
694 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
695 return IsNop(instr, type);
699 static inline int GetCodeMarker(Instr instr) {
700 int dst_reg_offset = 12;
701 int dst_mask = 0xf << dst_reg_offset;
703 int dst_reg = (instr & dst_mask) >> dst_reg_offset;
704 int src_reg = instr & src_mask;
705 uint32_t non_register_mask = ~(dst_mask | src_mask);
706 uint32_t mov_mask = al | 13 << 21;
708 // Return <n> if we have a mov rn rn, else return -1.
709 int type = ((instr & non_register_mask) == mov_mask) &&
710 (dst_reg == src_reg) &&
711 (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
714 DCHECK((type == -1) ||
715 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
  // ---------------------------------------------------------------------------
  // Allocation support

  // NOTE(review): most declarations in this section look truncated in this
  // copy (missing parameter lines) — confirm against upstream.

  // Allocate an object in new space or old pointer space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the space is exhausted control continues at the gc_required
  // label. The allocated object is returned in result. If the flag
  // tag_allocated_object is true the result is tagged as as a heap object.
  // All registers are clobbered also when control continues at the gc_required
  void Allocate(int object_size,
                AllocationFlags flags);

  void Allocate(Register object_size,
                AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. The caller must make sure that no pointers
  // are left to the object(s) no longer allocated as they would be invalid when
  // allocation is undone.
  void UndoAllocationInNewSpace(Register object, Register scratch);

  // String allocation helpers; each jumps to gc_required on failure.
  void AllocateTwoByteString(Register result,
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result,
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
  void AllocateTwoByteSlicedString(Register result,
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
                          Register heap_number_map,
                          TaggingMode tagging_mode = TAG_RESULT,
                          MutableMode mode = IMMUTABLE);
  void AllocateHeapNumberWithValue(Register result,
                                   Register heap_number_map,
  void AllocateSIMDHeapObject(int size,
                              TaggingMode tagging_mode = TAG_RESULT);

  // Copies a fixed number of fields of heap objects from src to dst.
  void CopyFields(Register dst,
                  LowDwVfpRegister double_scratch,

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte was
  // read or written and length will be zero.
  void CopyBytes(Register src,

  // Initialize fields with filler values. Fields starting at |start_offset|
  // not including end_offset are overwritten with the value in |filler|. At
  // the end the loop, |start_offset| takes the value of |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,

  // ---------------------------------------------------------------------------
  // Support functions.

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  void TryGetFunctionPrototype(Register function,
                               bool miss_on_bound_function = false);

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register). It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // Type_reg can be no_reg. In that case ip is used.
  void CompareObjectType(Register heap_object,

  // Compare object type for heap object. Branch to false_label if type
  // is lower than min_type or greater than max_type.
  // Load map into the register map.
  void CheckObjectTypeRange(Register heap_object,
                            InstanceType min_type,
                            InstanceType max_type,

  // Compare instance type in a map. map contains a valid map object whose
  // object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map,

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register elements_reg,
                                   LowDwVfpRegister double_scratch,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
  // set with result of map compare. If multiple map compares are required, the
  // compare sequences branches to early_success.
  void CompareMap(Register obj,
                  Label* early_success);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMap(Register obj_map,
                  Label* early_success);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                SmiCheckType smi_check_type);

  void CheckMap(Register obj,
                Heap::RootListIndex index,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified map and branch to a
  // specified target if equal. Skip the smi check if not required (object is
  // known to be a heap object)
  void DispatchMap(Register obj,
                   Handle<Code> success,
                   SmiCheckType smi_check_type);

  // Compare the object in a register to a value from the root list.
  // Uses the ip register as scratch.
  void CompareRoot(Register obj, Heap::RootListIndex index);
943 // Load and check the instance type of an object for being a string.
944 // Loads the type into the second argument register.
945 // Returns a condition that will be enabled if the object was a string
946 // and the passed-in condition passed. If the passed-in condition failed
947 // then flags remain unchanged.
948 Condition IsObjectStringType(Register obj,
950 Condition cond = al) {
951 ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
952 ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
953 tst(type, Operand(kIsNotStringMask), cond);
954 DCHECK_EQ(0, kStringTag);
959 // Picks out an array index from the hash field.
961 // hash - holds the index's hash. Clobbered.
962 // index - holds the overwritten index on exit.
963 void IndexFromHash(Register hash, Register index);
965 // Get the number of least significant bits from a register
966 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
967 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
969 // Load the value of a smi object into a double register.
970 // The register value must be between d0 and d15.
971 void SmiToDouble(LowDwVfpRegister value, Register smi);
973 // Check if a double can be exactly represented as a signed 32-bit integer.
974 // Z flag set to one if true.
975 void TestDoubleIsInt32(DwVfpRegister double_input,
976 LowDwVfpRegister double_scratch);
978 // Try to convert a double to a signed 32-bit integer.
979 // Z flag set to one and result assigned if the conversion is exact.
// Tries to convert double_input to a signed 32-bit integer exactly.
// NOTE(review): the flag contract (presumably Z set on exact conversion) is
// established in macro-assembler-arm.cc — confirm against the implementation.
980 void TryDoubleToInt32Exact(Register result,
981 DwVfpRegister double_input,
982 LowDwVfpRegister double_scratch);
984 // Floors a double and writes the value to the result register.
985 // Go to exact if the conversion is exact (to be able to test -0),
986 // fall through calling code if an overflow occurred, else go to done.
987 // In return, input_high is loaded with high bits of input.
988 void TryInt32Floor(Register result,
989 DwVfpRegister double_input,
991 LowDwVfpRegister double_scratch,
995 // Performs a truncating conversion of a floating point number as used by
996 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
997 // succeeds, otherwise falls through if result is saturated. On return
998 // 'result' either holds answer, or is clobbered on fall through.
1000 // Only public for the test code in test-code-stubs-arm.cc.
1001 void TryInlineTruncateDoubleToI(Register result,
1002 DwVfpRegister input,
1005 // Performs a truncating conversion of a floating point number as used by
1006 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
1007 // Exits with 'result' holding the answer.
1008 void TruncateDoubleToI(Register result, DwVfpRegister double_input);
1010 // Performs a truncating conversion of a heap number as used by
1011 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
1012 // must be different registers. Exits with 'result' holding the answer.
1013 void TruncateHeapNumberToI(Register result, Register object);
1015 // Converts the smi or heap number in object to an int32 using the rules
1016 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
1017 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
1018 // different registers.
1019 void TruncateNumberToI(Register object,
1021 Register heap_number_map,
1025 // Check whether d16-d31 are available on the CPU. The result is given by the
1026 // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
1027 void CheckFor32DRegs(Register scratch);
1029 // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
1030 // values to location, saving [d0..(d15|d31)].
1031 void SaveFPRegs(Register location, Register scratch);
1033 // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
1034 // values to location, restoring [d0..(d15|d31)].
1035 void RestoreFPRegs(Register location, Register scratch);
1037 // ---------------------------------------------------------------------------
1040 // Call a code stub.
1041 void CallStub(CodeStub* stub,
1042 TypeFeedbackId ast_id = TypeFeedbackId::None(),
1043 Condition cond = al);
1045 // Tail call a code stub (jump; does not return to the call site).
1046 void TailCallStub(CodeStub* stub, Condition cond = al);
1048 // Call a runtime routine.
1049 void CallRuntime(const Runtime::Function* f,
1051 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Convenience wrapper: calls the runtime function identified by id with its
// declared argument count, always saving the FP (double) registers.
1052 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1053 const Runtime::Function* function = Runtime::FunctionForId(id);
1054 CallRuntime(function, function->nargs, kSaveFPRegs);
1057 // Convenience function: Same as above, but takes the fid instead.
1058 void CallRuntime(Runtime::FunctionId id,
1060 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1061 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1064 // Convenience function: call an external reference.
1065 void CallExternalReference(const ExternalReference& ext,
1068 // Tail call of a runtime routine (jump).
1069 // Like JumpToExternalReference, but also takes care of passing the number
// of arguments.
1071 void TailCallExternalReference(const ExternalReference& ext,
1075 // Convenience function: tail call a runtime routine (jump).
1076 void TailCallRuntime(Runtime::FunctionId fid,
// Computes how many argument words spill to the stack for a C call,
// given the register-passed integer and double argument counts.
1080 int CalculateStackPassedWords(int num_reg_arguments,
1081 int num_double_arguments);
1083 // Before calling a C-function from generated code, align arguments on stack.
1084 // After aligning the frame, non-register arguments must be stored in
1085 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
1086 // are word sized. If double arguments are used, this function assumes that
1087 // all double arguments are stored before core registers; otherwise the
1088 // correct alignment of the double values is not guaranteed.
1089 // Some compilers/platforms require the stack to be aligned when calling
// C code.
1091 // Needs a scratch register to do some arithmetic. This register will be
// trashed.
1093 void PrepareCallCFunction(int num_reg_arguments,
1094 int num_double_registers,
1096 void PrepareCallCFunction(int num_reg_arguments,
1099 // There are two ways of passing double arguments on ARM, depending on
1100 // whether soft or hard floating point ABI is used. These functions
1101 // abstract parameter passing for the three different ways we call
1102 // C functions from generated code.
1103 void MovToFloatParameter(DwVfpRegister src);
1104 void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
1105 void MovToFloatResult(DwVfpRegister src);
1107 // Calls a C function and cleans up the space for arguments allocated
1108 // by PrepareCallCFunction. The called function is not allowed to trigger a
1109 // garbage collection, since that might move the code and invalidate the
1110 // return address (unless this is somehow accounted for by the called
// function).
1112 void CallCFunction(ExternalReference function, int num_arguments);
1113 void CallCFunction(Register function, int num_arguments);
1114 void CallCFunction(ExternalReference function,
1115 int num_reg_arguments,
1116 int num_double_arguments);
1117 void CallCFunction(Register function,
1118 int num_reg_arguments,
1119 int num_double_arguments);
1121 void MovFromFloatParameter(DwVfpRegister dst);
1122 void MovFromFloatResult(DwVfpRegister dst);
1124 // Calls an API function. Allocates HandleScope, extracts returned value
1125 // from handle and propagates exceptions. Restores context. stack_space
1126 // - space to be unwound on exit (includes the call JS arguments space and
1127 // the additional space allocated for the fast call).
1128 void CallApiFunctionAndReturn(Register function_address,
1129 ExternalReference thunk_ref,
1131 MemOperand return_value_operand,
1132 MemOperand* context_restore_operand);
1134 // Jump to a runtime routine.
1135 void JumpToExternalReference(const ExternalReference& builtin);
1137 // Invoke specified builtin JavaScript function. Adds an entry to
1138 // the unresolved list if the name does not resolve.
1139 void InvokeBuiltin(Builtins::JavaScript id,
1141 const CallWrapper& call_wrapper = NullCallWrapper());
1143 // Store the code object for the given builtin in the target register and
1144 // setup the function in r1.
1145 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
1147 // Store the function for the given builtin in the target register.
1148 void GetBuiltinFunction(Register target, Builtins::JavaScript id);
// Returns the handle that is patched with the generated code object on
// installation (see the code_object_ member). Must not be called before the
// handle has been initialized.
1150 Handle<Object> CodeObject() {
1151 DCHECK(!code_object_.is_null());
1152 return code_object_;
1156 // Emit code for a truncating division by a constant. The dividend register is
1157 // unchanged and ip gets clobbered. Dividend and result must be different.
1158 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1160 // ---------------------------------------------------------------------------
1161 // StatsCounter support
1163 void SetCounter(StatsCounter* counter, int value,
1164 Register scratch1, Register scratch2);
1165 void IncrementCounter(StatsCounter* counter, int value,
1166 Register scratch1, Register scratch2);
1167 void DecrementCounter(StatsCounter* counter, int value,
1168 Register scratch1, Register scratch2);
1171 // ---------------------------------------------------------------------------
1174 // Calls Abort(msg) if the condition cond is not satisfied.
1175 // Use --debug-code to enable.
1176 void Assert(Condition cond, BailoutReason reason);
1177 void AssertFastElements(Register elements);
1179 // Like Assert(), but always enabled.
1180 void Check(Condition cond, BailoutReason reason);
1182 // Print a message to stdout and abort execution.
1183 void Abort(BailoutReason msg);
1185 // Verify restrictions about code generated in stubs.
1186 void set_generating_stub(bool value) { generating_stub_ = value; }
1187 bool generating_stub() { return generating_stub_; }
1188 void set_has_frame(bool value) { has_frame_ = value; }
1189 bool has_frame() { return has_frame_; }
1190 inline bool AllowThisStubCall(CodeStub* stub);
1192 // EABI variant for double arguments in use.
1193 bool use_eabi_hardfloat() {
// When running on the simulator/host, ask the OS whether the hard-float
// calling convention is in effect; otherwise this is decided at compile time
// by the #if chain below.
1195 return base::OS::ArmUsingHardFloat();
1196 #elif USE_EABI_HARDFLOAT
1203 // ---------------------------------------------------------------------------
1206 // Check whether the value of reg is a power of two and not zero. If not
1207 // control continues at the label not_power_of_two. If reg is a power of two
1208 // the register scratch contains the value of (reg - 1) when control falls
// through.
1210 void JumpIfNotPowerOfTwoOrZero(Register reg,
1212 Label* not_power_of_two_or_zero);
1213 // Check whether the value of reg is a power of two and not zero.
1214 // Control falls through if it is, with scratch containing the mask
// value (reg - 1).
1216 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1217 // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1218 // strictly positive but not a power of two.
1219 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
1221 Label* zero_and_neg,
1222 Label* not_power_of_two);
1224 // ---------------------------------------------------------------------------
// Smi utilities. Tagging a smi is reg + reg (a left shift by the 1-bit tag),
// so it can be done with a flag-setting add to detect overflow.
1227 void SmiTag(Register reg, SBit s = LeaveCC) {
1228 add(reg, reg, Operand(reg), s);
1230 void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
1231 add(dst, src, Operand(src), s);
1234 // Try to convert int32 to smi. If the value is too large, preserve
1235 // the original value and jump to not_a_smi. Destroys scratch and
1237 void TrySmiTag(Register reg, Label* not_a_smi) {
1238 TrySmiTag(reg, reg, not_a_smi);
1240 void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
1241 SmiTag(ip, src, SetCC);
1247 void SmiUntag(Register reg, SBit s = LeaveCC) {
1248 mov(reg, Operand::SmiUntag(reg), s);
1250 void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
1251 mov(dst, Operand::SmiUntag(src), s);
1254 // Untag the source value into destination and jump if source is a smi.
1255 // Source and destination can be the same register.
1256 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1258 // Untag the source value into destination and jump if source is not a smi.
1259 // Source and destination can be the same register.
1260 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1262 // Test if the register contains a smi (Z set, i.e. the eq condition, if
1262 // true: a smi has tag bits zero, so value AND kSmiTagMask is zero).
1263 inline void SmiTst(Register value) {
1264 tst(value, Operand(kSmiTagMask));
1266 inline void NonNegativeSmiTst(Register value) {
1267 tst(value, Operand(kSmiTagMask | kSmiSignMask));
1269 // Jump if the register contains a smi.
1270 inline void JumpIfSmi(Register value, Label* smi_label) {
1271 tst(value, Operand(kSmiTagMask));
1274 // Jump if the register contains a non-smi.
1275 inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1276 tst(value, Operand(kSmiTagMask))
1277 b(ne, not_smi_label);
1279 // Jump if either of the registers contain a non-smi.
1280 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1281 // Jump if either of the registers contain a smi.
1282 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1284 // Abort execution if argument is a smi, enabled via --debug-code.
1285 void AssertNotSmi(Register object);
1286 void AssertSmi(Register object);
1288 // Abort execution if argument is not a string, enabled via --debug-code.
1289 void AssertString(Register object);
1291 // Abort execution if argument is not a name, enabled via --debug-code.
1292 void AssertName(Register object);
1294 // Abort execution if argument is not undefined or an AllocationSite, enabled
1295 // via --debug-code.
1296 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1298 // Abort execution if reg is not the root value with the given index,
1299 // enabled via --debug-code.
1300 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1302 // ---------------------------------------------------------------------------
1303 // HeapNumber utilities
1305 void JumpIfNotHeapNumber(Register object,
1306 Register heap_number_map,
1308 Label* on_not_heap_number);
1310 // ---------------------------------------------------------------------------
// String utilities
1313 // Generate code to do a lookup in the number string cache. If the number in
1314 // the register object is found in the cache the generated code falls through
1315 // with the result in the result register. The object and the result register
1316 // can be the same. If the number is not found in the cache the code jumps to
1317 // the label not_found with only the content of register object unchanged.
1318 void LookupNumberStringCache(Register object,
1325 // Checks if both objects are sequential one-byte strings and jumps to label
1326 // if either is not. Assumes that neither object is a smi.
1327 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
1333 // Checks if both objects are sequential one-byte strings and jumps to label
1334 // if either is not.
1335 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1338 Label* not_flat_one_byte_strings);
1340 // Checks if both instance types are sequential one-byte strings and jumps to
1341 // label if either is not.
1342 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1343 Register first_object_instance_type, Register second_object_instance_type,
1344 Register scratch1, Register scratch2, Label* failure);
1346 // Check if instance type is sequential one-byte string and jump to label if
// it is not.
1348 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1351 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1353 void EmitSeqStringSetCharCheck(Register string,
1356 uint32_t encoding_mask);
1358 // ---------------------------------------------------------------------------
1359 // Patching helpers.
1361 // Get the location of a relocated constant (its address in the constant pool)
1362 // from its load site.
1363 void GetRelocatedValueLocation(Register ldr_location, Register result,
// Clamping helpers — presumably clamp to the [0, 255] uint8 range as the
// names suggest; confirm against macro-assembler-arm.cc.
1367 void ClampUint8(Register output_reg, Register input_reg);
1369 void ClampDoubleToUint8(Register result_reg,
1370 DwVfpRegister input_reg,
1371 LowDwVfpRegister double_scratch);
// Map/descriptor access helpers.
1374 void LoadInstanceDescriptors(Register map, Register descriptors);
1375 void EnumLength(Register dst, Register map);
1376 void NumberOfOwnDescriptors(Register dst, Register map);
// Extracts the raw (untagged) bit-field Field from src into dst.
1378 template<typename Field>
1379 void DecodeField(Register dst, Register src) {
1380 Ubfx(dst, src, Field::kShift, Field::kSize);
// In-place variant of the above.
1383 template<typename Field>
1384 void DecodeField(Register reg) {
1385 DecodeField<Field>(reg, reg);
// Extracts Field from src and leaves the result smi-tagged in dst: the field
// is shifted so that it ends up one bit (kSmiTagSize) above bit zero, then
// masked. The STATIC_ASSERT guards against the mask touching the sign bit.
1388 template<typename Field>
1389 void DecodeFieldToSmi(Register dst, Register src) {
1390 static const int shift = Field::kShift;
1391 static const int mask = Field::kMask >> shift << kSmiTagSize;
1392 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
1393 STATIC_ASSERT(kSmiTag == 0);
1394 if (shift < kSmiTagSize) {
1395 mov(dst, Operand(src, LSL, kSmiTagSize - shift));
1396 and_(dst, dst, Operand(mask));
1397 } else if (shift > kSmiTagSize) {
1398 mov(dst, Operand(src, LSR, shift - kSmiTagSize));
1399 and_(dst, dst, Operand(mask));
// (shift == kSmiTagSize: no shift needed, just mask.)
1401 and_(dst, src, Operand(mask));
// In-place variant: extract Field from reg and leave the result smi-tagged
// in reg.
// BUG FIX: this previously delegated to DecodeField<Field>(reg, reg), which
// produces the raw untagged field value — callers of DecodeFieldToSmi expect
// a smi-tagged result. Delegate to the two-register DecodeFieldToSmi instead
// (mirrors the fix applied to the other architecture ports).
1405 template<typename Field>
1406 void DecodeFieldToSmi(Register reg) {
1407 DecodeFieldToSmi<Field>(reg, reg);
1410 // Activation support.
1411 void EnterFrame(StackFrame::Type type,
1412 bool load_constant_pool_pointer_reg = false);
1413 // Returns the pc offset at which the frame ends.
1414 int LeaveFrame(StackFrame::Type type);
1416 // Expects object in r0 and returns map with validated enum cache
1417 // in r0. Assumes that any other register can be used as a scratch.
1418 void CheckEnumCache(Register null_value, Label* call_runtime);
1420 // AllocationMemento support. Arrays may have an associated
1421 // AllocationMemento object that can be checked for in order to pretransition
// to another type.
1423 // On entry, receiver_reg should point to the array object.
1424 // scratch_reg gets clobbered.
1425 // If allocation info is present, condition flags are set to eq.
1426 void TestJSArrayForAllocationMemento(Register receiver_reg,
1427 Register scratch_reg,
1428 Label* no_memento_found);
// Convenience wrapper around the above: jumps to memento_found when the
// memento test sets eq, otherwise falls through.
1430 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1431 Register scratch_reg,
1432 Label* memento_found) {
1433 Label no_memento_found;
1434 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1436 b(eq, memento_found);
1437 bind(&no_memento_found);
1440 // Jumps to found label if a prototype map has dictionary elements.
1441 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1442 Register scratch1, Label* found);
// NOTE(review): the members below appear to be the private implementation
// section of MacroAssembler (the 'private:' access specifier is on an elided
// line) — confirm against the full header.
1445 void CallCFunctionHelper(Register function,
1446 int num_reg_arguments,
1447 int num_double_arguments);
1449 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
1451 // Helper functions for generating invokes.
1452 void InvokePrologue(const ParameterCount& expected,
1453 const ParameterCount& actual,
1454 Handle<Code> code_constant,
1457 bool* definitely_mismatches,
1459 const CallWrapper& call_wrapper);
1461 void InitializeNewString(Register string,
1463 Heap::RootListIndex map_index,
1467 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1468 void InNewSpace(Register object,
1470 Condition cond, // eq for new space, ne otherwise.
1473 // Helper for finding the mark bits for an address. Afterwards, the
1474 // bitmap register points at the word with the mark bits and the mask
1475 // the position of the first bit. Leaves addr_reg unchanged.
1476 inline void GetMarkBits(Register addr_reg,
1477 Register bitmap_reg,
1480 // Helper for throwing exceptions. Compute a handler address and jump to
1481 // it. See the implementation for register usage.
1482 void JumpToHandlerEntry();
1484 // Compute memory operands for safepoint stack slots.
1485 static int SafepointRegisterStackIndex(int reg_code);
1486 MemOperand SafepointRegisterSlot(Register reg);
1487 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1489 // Loads the constant pool pointer (pp) register.
1490 void LoadConstantPoolPointerRegister();
1492 bool generating_stub_;
1494 // This handle will be patched with the code object on installation.
1495 Handle<Object> code_object_;
1497 // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
1499 friend class StandardFrame;
1503 // The code patcher is used to patch (typically) small parts of code e.g. for
1504 // debugging and other types of instrumentation. When using the code patcher
1505 // the exact number of bytes specified must be emitted. It is not legal to emit
1506 // relocation information. If any of these constraints are violated it causes
1507 // an assertion to fail.
// NOTE(review): the 'class CodePatcher' declaration header and the
// instruction-count constructor parameter sit on elided lines — what follows
// is the class interior.
1515 CodePatcher(byte* address,
1517 FlushICache flush_cache = FLUSH);
1518 virtual ~CodePatcher();
1520 // Macro assembler to emit code.
1521 MacroAssembler* masm() { return &masm_; }
1523 // Emit an instruction directly.
1524 void Emit(Instr instr);
1526 // Emit an address directly.
1527 void Emit(Address addr);
1529 // Emit the condition part of an instruction leaving the rest of the current
1530 // instruction unchanged.
1531 void EmitCondition(Condition cond);
1534 byte* address_; // The address of the code being patched.
1535 int size_; // Number of bytes of the expected patch size.
1536 MacroAssembler masm_; // Macro assembler used to generate the code.
1537 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1541 // -----------------------------------------------------------------------------
1542 // Static helper functions.
// Builds a MemOperand addressing slot 'index' of the given context register.
1544 inline MemOperand ContextOperand(Register context, int index) {
1545 return MemOperand(context, Context::SlotOffset(index));
// Builds a MemOperand for the global object slot of the current context (cp).
1549 inline MemOperand GlobalObjectOperand() {
1550 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
1554 #ifdef GENERATED_CODE_COVERAGE
1555 #define CODE_COVERAGE_STRINGIFY(x) #x
1556 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1557 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1558 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
// NOTE(review): the #else separating the two ACCESS_MASM definitions is on an
// elided line; the plain definition below is the non-coverage variant.
1560 #define ACCESS_MASM(masm) masm->
1564 } } // namespace v8::internal
1566 #endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_