1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
6 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
8 #include "src/assembler.h"
9 #include "src/bailout-reason.h"
10 #include "src/frames.h"
11 #include "src/globals.h"
16 // ----------------------------------------------------------------------------
17 // Static helper functions
19 // Generate a MemOperand for loading a field from an object.
20 inline MemOperand FieldMemOperand(Register object, int offset) {
21 return MemOperand(object, offset - kHeapObjectTag);
25 // Give alias names to registers
// Aliases mapping VM-level roles onto fixed ARM registers. NOTE(review):
// the specific register codes (r7/r8/r10) must stay in sync with the rest
// of the ARM port (frames, stubs) — confirm before changing any of them.
26 const Register cp = { kRegister_r7_Code };  // JavaScript context pointer.
27 const Register pp = { kRegister_r8_Code };  // Constant pool pointer.
28 const Register kRootRegister = { kRegister_r10_Code };  // Roots array pointer.
30 // Flags used for AllocateHeapNumber
// Whether a write-barrier sequence should also update the remembered set;
// used as a default argument by the RecordWrite* helpers below.
39 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether a helper emits its own smi check (INLINE_SMI_CHECK) or the caller
// has already established the value is not a smi (OMIT_SMI_CHECK).
40 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
41 enum PointersToHereCheck {
42 kPointersToHereMaybeInteresting,
43 kPointersToHereAreAlwaysInteresting
45 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
// Returns a register distinct from every register passed in; unused slots
// default to no_reg. (The pool it draws candidates from is defined in the
// implementation, not visible here.)
48 Register GetRegisterThatIsNotOneOf(Register reg1,
49 Register reg2 = no_reg,
50 Register reg3 = no_reg,
51 Register reg4 = no_reg,
52 Register reg5 = no_reg,
53 Register reg6 = no_reg);
57 bool AreAliased(Register reg1,
59 Register reg3 = no_reg,
60 Register reg4 = no_reg,
61 Register reg5 = no_reg,
62 Register reg6 = no_reg,
63 Register reg7 = no_reg,
64 Register reg8 = no_reg);
68 enum TargetAddressStorageMode {
69 CAN_INLINE_TARGET_ADDRESS,
70 NEVER_INLINE_TARGET_ADDRESS
73 // MacroAssembler implements a collection of frequently used macros.
74 class MacroAssembler: public Assembler {
76 // The isolate parameter can be NULL if the macro assembler should
77 // not use isolate-dependent functionality. In this case, it's the
78 // responsibility of the caller to never invoke such a function on the
// macro assembler.
80 MacroAssembler(Isolate* isolate, void* buffer, int size);
83 // Returns the size of a call in instructions. Note, the value returned is
84 // only valid as long as no entries are added to the constant pool between
85 // checking the call size and emitting the actual call.
86 static int CallSize(Register target, Condition cond = al);
87 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
88 int CallStubSize(CodeStub* stub,
89 TypeFeedbackId ast_id = TypeFeedbackId::None(),
91 static int CallSizeNotPredictableCodeSize(Isolate* isolate,
93 RelocInfo::Mode rmode,
96 // Jump, Call, and Ret pseudo instructions implementing inter-working.
97 void Jump(Register target, Condition cond = al);
98 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
99 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
100 void Call(Register target, Condition cond = al);
101 void Call(Address target, RelocInfo::Mode rmode,
103 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
104 int CallSize(Handle<Code> code,
105 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
106 TypeFeedbackId ast_id = TypeFeedbackId::None(),
107 Condition cond = al);
108 void Call(Handle<Code> code,
109 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
110 TypeFeedbackId ast_id = TypeFeedbackId::None(),
112 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
113 void Ret(Condition cond = al);
115 // Emit code to discard a non-negative number of pointer-sized elements
116 // from the stack, clobbering only the sp register.
117 void Drop(int count, Condition cond = al);
119 void Ret(int drop, Condition cond = al);
121 // Swap two registers. If the scratch register is omitted then a slightly
122 // less efficient form using xor instead of mov is emitted.
123 void Swap(Register reg1,
125 Register scratch = no_reg,
126 Condition cond = al);
128 void Mls(Register dst, Register src1, Register src2, Register srcA,
129 Condition cond = al);
130 void And(Register dst, Register src1, const Operand& src2,
131 Condition cond = al);
132 void Ubfx(Register dst, Register src, int lsb, int width,
133 Condition cond = al);
134 void Sbfx(Register dst, Register src, int lsb, int width,
135 Condition cond = al);
136 // The scratch register is not used for ARMv7.
137 // scratch can be the same register as src (in which case it is trashed), but
138 // not the same as dst.
139 void Bfi(Register dst,
144 Condition cond = al);
145 void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
146 void Usat(Register dst, int satpos, const Operand& src,
147 Condition cond = al);
149 void Call(Label* target);
150 void Push(Register src) { push(src); }
151 void Pop(Register dst) { pop(dst); }
153 // Register move. May do nothing if the registers are identical.
154 void Move(Register dst, Handle<Object> value);
155 void Move(Register dst, Register src, Condition cond = al);
156 void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
157 Condition cond = al) {
158 if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
159 mov(dst, src, sbit, cond);
162 void Move(DwVfpRegister dst, DwVfpRegister src);
164 void Load(Register dst, const MemOperand& src, Representation r);
165 void Store(Register src, const MemOperand& dst, Representation r);
167 // Load an object from the root table.
168 void LoadRoot(Register destination,
169 Heap::RootListIndex index,
170 Condition cond = al);
171 // Store an object to the root table.
172 void StoreRoot(Register source,
173 Heap::RootListIndex index,
174 Condition cond = al);
176 // ---------------------------------------------------------------------------
179 void IncrementalMarkingRecordWriteHelper(Register object,
183 enum RememberedSetFinalAction {
188 // Record in the remembered set the fact that we have a pointer to new space
189 // at the address pointed to by the addr register. Only works if addr is not
// in new space.
191 void RememberedSetHelper(Register object, // Used for debug code.
194 SaveFPRegsMode save_fp,
195 RememberedSetFinalAction and_then);
197 void CheckPageFlag(Register object,
201 Label* condition_met);
203 void CheckMapDeprecated(Handle<Map> map,
205 Label* if_deprecated);
207 // Check if object is in new space. Jumps if the object is not in new space.
208 // The register scratch can be object itself, but scratch will be clobbered.
209 void JumpIfNotInNewSpace(Register object,
212 InNewSpace(object, scratch, ne, branch);
215 // Check if object is in new space. Jumps if the object is in new space.
216 // The register scratch can be object itself, but it will be clobbered.
217 void JumpIfInNewSpace(Register object,
220 InNewSpace(object, scratch, eq, branch);
223 // Check if an object has a given incremental marking color.
224 void HasColor(Register object,
231 void JumpIfBlack(Register object,
236 // Checks the color of an object. If the object is already grey or black
237 // then we just fall through, since it is already live. If it is white and
238 // we can determine that it doesn't need to be scanned, then we just mark it
239 // black and fall through. For the rest we jump to the label so the
240 // incremental marker can fix its assumptions.
241 void EnsureNotWhite(Register object,
245 Label* object_is_white_and_not_data);
247 // Detects conservatively whether an object is data-only, i.e. it does not
248 // be scanned by the garbage collector.
249 void JumpIfDataObject(Register value,
251 Label* not_data_object);
253 // Notify the garbage collector that we wrote a pointer into an object.
254 // |object| is the object being stored into, |value| is the object being
255 // stored. value and scratch registers are clobbered by the operation.
256 // The offset is the offset from the start of the object, not the offset from
257 // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
258 void RecordWriteField(
263 LinkRegisterStatus lr_status,
264 SaveFPRegsMode save_fp,
265 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
266 SmiCheck smi_check = INLINE_SMI_CHECK,
267 PointersToHereCheck pointers_to_here_check_for_value =
268 kPointersToHereMaybeInteresting);
270 // As above, but the offset has the tag presubtracted. For use with
271 // MemOperand(reg, off).
272 inline void RecordWriteContextSlot(
277 LinkRegisterStatus lr_status,
278 SaveFPRegsMode save_fp,
279 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
280 SmiCheck smi_check = INLINE_SMI_CHECK,
281 PointersToHereCheck pointers_to_here_check_for_value =
282 kPointersToHereMaybeInteresting) {
283 RecordWriteField(context,
284 offset + kHeapObjectTag,
289 remembered_set_action,
291 pointers_to_here_check_for_value);
294 void RecordWriteForMap(
298 LinkRegisterStatus lr_status,
299 SaveFPRegsMode save_fp);
301 // For a given |object| notify the garbage collector that the slot |address|
302 // has been written. |value| is the object being stored. The value and
303 // address registers are clobbered by the operation.
308 LinkRegisterStatus lr_status,
309 SaveFPRegsMode save_fp,
310 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
311 SmiCheck smi_check = INLINE_SMI_CHECK,
312 PointersToHereCheck pointers_to_here_check_for_value =
313 kPointersToHereMaybeInteresting);
316 void Push(Handle<Object> handle);
317 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
319 // Push two registers. Pushes leftmost register first (to highest address).
320 void Push(Register src1, Register src2, Condition cond = al) {
321 DCHECK(!src1.is(src2));
322 if (src1.code() > src2.code()) {
323 stm(db_w, sp, src1.bit() | src2.bit(), cond);
325 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
326 str(src2, MemOperand(sp, 4, NegPreIndex), cond);
330 // Push three registers. Pushes leftmost register first (to highest address).
331 void Push(Register src1, Register src2, Register src3, Condition cond = al) {
332 DCHECK(!src1.is(src2));
333 DCHECK(!src2.is(src3));
334 DCHECK(!src1.is(src3));
335 if (src1.code() > src2.code()) {
336 if (src2.code() > src3.code()) {
337 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
339 stm(db_w, sp, src1.bit() | src2.bit(), cond);
340 str(src3, MemOperand(sp, 4, NegPreIndex), cond);
343 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
344 Push(src2, src3, cond);
348 // Push four registers. Pushes leftmost register first (to highest address).
349 void Push(Register src1,
353 Condition cond = al) {
354 DCHECK(!src1.is(src2));
355 DCHECK(!src2.is(src3));
356 DCHECK(!src1.is(src3));
357 DCHECK(!src1.is(src4));
358 DCHECK(!src2.is(src4));
359 DCHECK(!src3.is(src4));
360 if (src1.code() > src2.code()) {
361 if (src2.code() > src3.code()) {
362 if (src3.code() > src4.code()) {
365 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
368 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
369 str(src4, MemOperand(sp, 4, NegPreIndex), cond);
372 stm(db_w, sp, src1.bit() | src2.bit(), cond);
373 Push(src3, src4, cond);
376 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
377 Push(src2, src3, src4, cond);
381 // Pop two registers. Pops rightmost register first (from lower address).
382 void Pop(Register src1, Register src2, Condition cond = al) {
383 DCHECK(!src1.is(src2));
384 if (src1.code() > src2.code()) {
385 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
387 ldr(src2, MemOperand(sp, 4, PostIndex), cond);
388 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
392 // Pop three registers. Pops rightmost register first (from lower address).
393 void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
394 DCHECK(!src1.is(src2));
395 DCHECK(!src2.is(src3));
396 DCHECK(!src1.is(src3));
397 if (src1.code() > src2.code()) {
398 if (src2.code() > src3.code()) {
399 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
401 ldr(src3, MemOperand(sp, 4, PostIndex), cond);
402 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
405 Pop(src2, src3, cond);
406 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
410 // Pop four registers. Pops rightmost register first (from lower address).
411 void Pop(Register src1,
415 Condition cond = al) {
416 DCHECK(!src1.is(src2));
417 DCHECK(!src2.is(src3));
418 DCHECK(!src1.is(src3));
419 DCHECK(!src1.is(src4));
420 DCHECK(!src2.is(src4));
421 DCHECK(!src3.is(src4));
422 if (src1.code() > src2.code()) {
423 if (src2.code() > src3.code()) {
424 if (src3.code() > src4.code()) {
427 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
430 ldr(src4, MemOperand(sp, 4, PostIndex), cond);
431 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
434 Pop(src3, src4, cond);
435 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
438 Pop(src2, src3, src4, cond);
439 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
443 // Push a fixed frame, consisting of lr, fp, constant pool (if
444 // FLAG_enable_ool_constant_pool), context and JS function / marker id if
445 // marker_reg is a valid register.
446 void PushFixedFrame(Register marker_reg = no_reg);
447 void PopFixedFrame(Register marker_reg = no_reg);
449 // Push and pop the registers that can hold pointers, as defined by the
450 // RegList constant kSafepointSavedRegisters.
451 void PushSafepointRegisters();
452 void PopSafepointRegisters();
453 // Store value in register src in the safepoint stack slot for
455 void StoreToSafepointRegisterSlot(Register src, Register dst);
456 // Load the value of the src register from its safepoint stack slot
457 // into register dst.
458 void LoadFromSafepointRegisterSlot(Register dst, Register src);
460 // Load two consecutive registers with two consecutive memory locations.
461 void Ldrd(Register dst1,
463 const MemOperand& src,
464 Condition cond = al);
466 // Store two consecutive registers to two consecutive memory locations.
467 void Strd(Register src1,
469 const MemOperand& dst,
470 Condition cond = al);
472 // Ensure that FPSCR contains values needed by JavaScript.
473 // We need the NaNModeControlBit to be sure that operations like
474 // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
475 // In VFP3 it will be always the Canonical NaN.
476 // In VFP2 it will be either the Canonical NaN or the negative version
477 // of the Canonical NaN. It doesn't matter if we have two values. The aim
478 // is to be sure to never generate the hole NaN.
479 void VFPEnsureFPSCRState(Register scratch);
481 // If the value is a NaN, canonicalize the value else, do nothing.
482 void VFPCanonicalizeNaN(const DwVfpRegister dst,
483 const DwVfpRegister src,
484 const Condition cond = al);
485 void VFPCanonicalizeNaN(const DwVfpRegister value,
486 const Condition cond = al) {
487 VFPCanonicalizeNaN(value, value, cond);
490 // Compare double values and move the result to the normal condition flags.
491 void VFPCompareAndSetFlags(const DwVfpRegister src1,
492 const DwVfpRegister src2,
493 const Condition cond = al);
494 void VFPCompareAndSetFlags(const DwVfpRegister src1,
496 const Condition cond = al);
498 // Compare double values and then load the fpscr flags to a register.
499 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
500 const DwVfpRegister src2,
501 const Register fpscr_flags,
502 const Condition cond = al);
503 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
505 const Register fpscr_flags,
506 const Condition cond = al);
508 void Vmov(const DwVfpRegister dst,
510 const Register scratch = no_reg);
512 void VmovHigh(Register dst, DwVfpRegister src);
513 void VmovHigh(DwVfpRegister dst, Register src);
514 void VmovLow(Register dst, DwVfpRegister src);
515 void VmovLow(DwVfpRegister dst, Register src);
517 // Loads the number from object into dst register.
518 // If |object| is neither smi nor heap number, |not_number| is jumped to
519 // with |object| still intact.
520 void LoadNumber(Register object,
521 LowDwVfpRegister dst,
522 Register heap_number_map,
526 // Loads the number from object into double_dst in the double format.
527 // Control will jump to not_int32 if the value cannot be exactly represented
528 // by a 32-bit integer.
529 // Floating point values in the 32-bit integer range that are not exact
// integers won't be converted.
531 void LoadNumberAsInt32Double(Register object,
532 DwVfpRegister double_dst,
533 Register heap_number_map,
535 LowDwVfpRegister double_scratch,
538 // Loads the number from object into dst as a 32-bit integer.
539 // Control will jump to not_int32 if the object cannot be exactly represented
540 // by a 32-bit integer.
541 // Floating point values in the 32-bit integer range that are not exact integers
542 // won't be converted.
543 void LoadNumberAsInt32(Register object,
545 Register heap_number_map,
547 DwVfpRegister double_scratch0,
548 LowDwVfpRegister double_scratch1,
551 // Generates function and stub prologue code.
553 void Prologue(bool code_pre_aging);
556 // stack_space - extra stack space, used for alignment before call to C.
557 void EnterExitFrame(bool save_doubles, int stack_space = 0);
559 // Leave the current exit frame. Expects the return value in r0.
560 // Expect the number of values, pushed prior to the exit frame, to
561 // remove in a register (or no_reg, if there is nothing to remove).
562 void LeaveExitFrame(bool save_doubles,
563 Register argument_count,
564 bool restore_context);
566 // Get the actual activation frame alignment for target environment.
567 static int ActivationFrameAlignment();
569 void LoadContext(Register dst, int context_chain_length);
571 // Conditionally load the cached Array transitioned map of type
572 // transitioned_kind from the native context if the map in register
573 // map_in_out is the cached Array map in the native context of
575 void LoadTransitionedArrayMapConditional(
576 ElementsKind expected_kind,
577 ElementsKind transitioned_kind,
580 Label* no_map_match);
582 void LoadGlobalFunction(int index, Register function);
584 // Load the initial map from the global function. The registers
585 // function and map can be the same, function is then overwritten.
586 void LoadGlobalFunctionInitialMap(Register function,
590 void InitializeRootRegister() {
591 ExternalReference roots_array_start =
592 ExternalReference::roots_array_start(isolate());
593 mov(kRootRegister, Operand(roots_array_start));
596 // ---------------------------------------------------------------------------
597 // JavaScript invokes
599 // Invoke the JavaScript function code by either calling or jumping.
600 void InvokeCode(Register code,
601 const ParameterCount& expected,
602 const ParameterCount& actual,
604 const CallWrapper& call_wrapper);
606 // Invoke the JavaScript function in the given register. Changes the
607 // current context to the context in the function before invoking.
608 void InvokeFunction(Register function,
609 const ParameterCount& actual,
611 const CallWrapper& call_wrapper);
613 void InvokeFunction(Register function,
614 const ParameterCount& expected,
615 const ParameterCount& actual,
617 const CallWrapper& call_wrapper);
619 void InvokeFunction(Handle<JSFunction> function,
620 const ParameterCount& expected,
621 const ParameterCount& actual,
623 const CallWrapper& call_wrapper);
625 void IsObjectJSObjectType(Register heap_object,
630 void IsInstanceJSObjectType(Register map,
634 void IsObjectJSStringType(Register object,
638 void IsObjectNameType(Register object,
642 // ---------------------------------------------------------------------------
647 // ---------------------------------------------------------------------------
648 // Exception handling
650 // Push a new try handler and link into try handler chain.
651 void PushTryHandler(StackHandler::Kind kind, int handler_index);
653 // Unlink the stack handler on top of the stack from the try handler chain.
654 // Must preserve the result register.
655 void PopTryHandler();
657 // Passes thrown value to the handler of top of the try handler chain.
658 void Throw(Register value);
660 // Propagates an uncatchable exception to the top of the current JS stack's
662 void ThrowUncatchable(Register value);
664 // ---------------------------------------------------------------------------
665 // Inline caching support
667 // Generate code for checking access rights - used for security checks
668 // on access to global objects across environments. The holder register
669 // is left untouched, whereas both scratch registers are clobbered.
670 void CheckAccessGlobalProxy(Register holder_reg,
674 void GetNumberHash(Register t0, Register scratch);
676 void LoadFromNumberDictionary(Label* miss,
685 inline void MarkCode(NopMarkerTypes type) {
689 // Check if the given instruction is a 'type' marker.
690 // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
691 // These instructions are generated to mark special location in the code,
692 // like some special IC code.
693 static inline bool IsMarkedCode(Instr instr, int type) {
694 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
695 return IsNop(instr, type);
699 static inline int GetCodeMarker(Instr instr) {
700 int dst_reg_offset = 12;
701 int dst_mask = 0xf << dst_reg_offset;
703 int dst_reg = (instr & dst_mask) >> dst_reg_offset;
704 int src_reg = instr & src_mask;
705 uint32_t non_register_mask = ~(dst_mask | src_mask);
706 uint32_t mov_mask = al | 13 << 21;
708 // Return <n> if we have a mov rn rn, else return -1.
709 int type = ((instr & non_register_mask) == mov_mask) &&
710 (dst_reg == src_reg) &&
711 (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
714 DCHECK((type == -1) ||
715 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
720 // ---------------------------------------------------------------------------
721 // Allocation support
723 // Allocate an object in new space or old pointer space. The object_size is
724 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
725 // is passed. If the space is exhausted control continues at the gc_required
726 // label. The allocated object is returned in result. If the flag
727 // tag_allocated_object is true the result is tagged as a heap object.
728 // All registers are clobbered also when control continues at the gc_required
730 void Allocate(int object_size,
735 AllocationFlags flags);
737 void Allocate(Register object_size,
742 AllocationFlags flags);
744 // Undo allocation in new space. The object passed and objects allocated after
745 // it will no longer be allocated. The caller must make sure that no pointers
746 // are left to the object(s) no longer allocated as they would be invalid when
747 // allocation is undone.
748 void UndoAllocationInNewSpace(Register object, Register scratch);
751 void AllocateTwoByteString(Register result,
757 void AllocateOneByteString(Register result, Register length,
758 Register scratch1, Register scratch2,
759 Register scratch3, Label* gc_required);
760 void AllocateTwoByteConsString(Register result,
765 void AllocateOneByteConsString(Register result, Register length,
766 Register scratch1, Register scratch2,
768 void AllocateTwoByteSlicedString(Register result,
773 void AllocateOneByteSlicedString(Register result, Register length,
774 Register scratch1, Register scratch2,
777 // Allocates a heap number or jumps to the gc_required label if the young
778 // space is full and a scavenge is needed. All registers are clobbered also
779 // when control continues at the gc_required label.
780 void AllocateHeapNumber(Register result,
783 Register heap_number_map,
785 TaggingMode tagging_mode = TAG_RESULT,
786 MutableMode mode = IMMUTABLE);
787 void AllocateHeapNumberWithValue(Register result,
791 Register heap_number_map,
794 // Copies a fixed number of fields of heap objects from src to dst.
795 void CopyFields(Register dst,
797 LowDwVfpRegister double_scratch,
800 // Copies a number of bytes from src to dst. All registers are clobbered. On
801 // exit src and dst will point to the place just after where the last byte was
802 // read or written and length will be zero.
803 void CopyBytes(Register src,
808 // Initialize fields with filler values. Fields starting at |start_offset|
809 // not including end_offset are overwritten with the value in |filler|. At
810 // the end the loop, |start_offset| takes the value of |end_offset|.
811 void InitializeFieldsWithFiller(Register start_offset,
815 // ---------------------------------------------------------------------------
816 // Support functions.
818 // Try to get function prototype of a function and puts the value in
819 // the result register. Checks that the function really is a
820 // function and jumps to the miss label if the fast checks fail. The
821 // function register will be untouched; the other registers may be
823 void TryGetFunctionPrototype(Register function,
827 bool miss_on_bound_function = false);
829 // Compare object type for heap object. heap_object contains a non-Smi
830 // whose object type should be compared with the given type. This both
831 // sets the flags and leaves the object type in the type_reg register.
832 // It leaves the map in the map register (unless the type_reg and map register
833 // are the same register). It leaves the heap object in the heap_object
834 // register unless the heap_object register is the same register as one of the
// scratch registers.
836 // Type_reg can be no_reg. In that case ip is used.
837 void CompareObjectType(Register heap_object,
842 // Compare object type for heap object. Branch to false_label if type
843 // is lower than min_type or greater than max_type.
844 // Load map into the register map.
845 void CheckObjectTypeRange(Register heap_object,
847 InstanceType min_type,
848 InstanceType max_type,
851 // Compare instance type in a map. map contains a valid map object whose
852 // object type should be compared with the given type. This both
853 // sets the flags and leaves the object type in the type_reg register.
854 void CompareInstanceType(Register map,
859 // Check if a map for a JSObject indicates that the object has fast elements.
860 // Jump to the specified label if it does not.
861 void CheckFastElements(Register map,
865 // Check if a map for a JSObject indicates that the object can have both smi
866 // and HeapObject elements. Jump to the specified label if it does not.
867 void CheckFastObjectElements(Register map,
871 // Check if a map for a JSObject indicates that the object has fast smi only
872 // elements. Jump to the specified label if it does not.
873 void CheckFastSmiElements(Register map,
877 // Check to see if maybe_number can be stored as a double in
878 // FastDoubleElements. If it can, store it at the index specified by key in
879 // the FastDoubleElements array elements. Otherwise jump to fail.
880 void StoreNumberToDoubleElements(Register value_reg,
882 Register elements_reg,
884 LowDwVfpRegister double_scratch,
886 int elements_offset = 0);
888 // Compare an object's map with the specified map and its transitioned
889 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
890 // set with result of map compare. If multiple map compares are required, the
891 // compare sequences branches to early_success.
892 void CompareMap(Register obj,
895 Label* early_success);
897 // As above, but the map of the object is already loaded into the register
898 // which is preserved by the code generated.
899 void CompareMap(Register obj_map,
901 Label* early_success);
903 // Check if the map of an object is equal to a specified map and branch to
904 // label if not. Skip the smi check if not required (object is known to be a
905 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
906 // against maps that are ElementsKind transition maps of the specified map.
907 void CheckMap(Register obj,
911 SmiCheckType smi_check_type);
914 void CheckMap(Register obj,
916 Heap::RootListIndex index,
918 SmiCheckType smi_check_type);
921 // Check if the map of an object is equal to a specified map and branch to a
922 // specified target if equal. Skip the smi check if not required (object is
923 // known to be a heap object)
924 void DispatchMap(Register obj,
927 Handle<Code> success,
928 SmiCheckType smi_check_type);
931 // Compare the object in a register to a value from the root list.
932 // Uses the ip register as scratch.
933 void CompareRoot(Register obj, Heap::RootListIndex index);
936 // Load and check the instance type of an object for being a string.
937 // Loads the type into the second argument register.
938 // Returns a condition that will be enabled if the object was a string
939 // and the passed-in condition passed. If the passed-in condition failed
940 // then flags remain unchanged.
941 Condition IsObjectStringType(Register obj,
943 Condition cond = al) {
944 ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
945 ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
946 tst(type, Operand(kIsNotStringMask), cond);
947 DCHECK_EQ(0, kStringTag);
952 // Picks out an array index from the hash field.
954 // hash - holds the index's hash. Clobbered.
955 // index - holds the overwritten index on exit.
956 void IndexFromHash(Register hash, Register index);
958 // Get the number of least significant bits from a register
// |dst| receives the low |num_least_bits| bits of the value in |src|.
// The Smi variant presumably accounts for the smi tag — TODO confirm.
959 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
960 void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
962 // Load the value of a smi object into a double register.
963 // The register value must be between d0 and d15.
964 void SmiToDouble(LowDwVfpRegister value, Register smi);
966 // Check if a double can be exactly represented as a signed 32-bit integer.
967 // Z flag set to one if true.
968 void TestDoubleIsInt32(DwVfpRegister double_input,
969 LowDwVfpRegister double_scratch);
971 // Try to convert a double to a signed 32-bit integer.
972 // Z flag set to one and result assigned if the conversion is exact.
973 void TryDoubleToInt32Exact(Register result,
974 DwVfpRegister double_input,
975 LowDwVfpRegister double_scratch);
977 // Floor a double and writes the value to the result register.
978 // Go to exact if the conversion is exact (to be able to test -0),
979 // fall through calling code if an overflow occurred, else go to done.
980 // In return, input_high is loaded with high bits of input.
981 void TryInt32Floor(Register result,
982 DwVfpRegister double_input,
984 LowDwVfpRegister double_scratch,
988 // Performs a truncating conversion of a floating point number as used by
989 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
990 // succeeds, otherwise falls through if result is saturated. On return
991 // 'result' either holds answer, or is clobbered on fall through.
993 // Only public for the test code in test-code-stubs-arm.cc.
994 void TryInlineTruncateDoubleToI(Register result,
998 // Performs a truncating conversion of a floating point number as used by
999 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
1000 // Exits with 'result' holding the answer.
1001 void TruncateDoubleToI(Register result, DwVfpRegister double_input);
1003 // Performs a truncating conversion of a heap number as used by
1004 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
1005 // must be different registers. Exits with 'result' holding the answer.
1006 void TruncateHeapNumberToI(Register result, Register object);
1008 // Converts the smi or heap number in object to an int32 using the rules
1009 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
1010 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
1011 // different registers.
1012 void TruncateNumberToI(Register object,
1014 Register heap_number_map,
1018 // Check whether d16-d31 are available on the CPU. The result is given by the
1019 // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
1020 void CheckFor32DRegs(Register scratch);
1022 // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
1023 // values to location, saving [d0..(d15|d31)].
1024 void SaveFPRegs(Register location, Register scratch);
1026 // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
1027 // values to location, restoring [d0..(d15|d31)].
1028 void RestoreFPRegs(Register location, Register scratch);
1030 // ---------------------------------------------------------------------------
1033 // Call a code stub.
1034 void CallStub(CodeStub* stub,
1035 TypeFeedbackId ast_id = TypeFeedbackId::None(),
1036 Condition cond = al);
1038 // Call a code stub.
1039 void TailCallStub(CodeStub* stub, Condition cond = al);
1041 // Call a runtime routine.
1042 void CallRuntime(const Runtime::Function* f,
1044 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1045 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1046 const Runtime::Function* function = Runtime::FunctionForId(id);
1047 CallRuntime(function, function->nargs, kSaveFPRegs);
1050 // Convenience function: Same as above, but takes the fid instead.
1051 void CallRuntime(Runtime::FunctionId id,
1053 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1054 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1057 // Convenience function: call an external reference.
1058 void CallExternalReference(const ExternalReference& ext,
1061 // Tail call of a runtime routine (jump).
1062 // Like JumpToExternalReference, but also takes care of passing the number
1064 void TailCallExternalReference(const ExternalReference& ext,
1068 // Convenience function: tail call a runtime routine (jump).
1069 void TailCallRuntime(Runtime::FunctionId fid,
1073 int CalculateStackPassedWords(int num_reg_arguments,
1074 int num_double_arguments);
1076 // Before calling a C-function from generated code, align arguments on stack.
1077 // After aligning the frame, non-register arguments must be stored in
1078 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
1079 // are word sized. If double arguments are used, this function assumes that
1080 // all double arguments are stored before core registers; otherwise the
1081 // correct alignment of the double values is not guaranteed.
1082 // Some compilers/platforms require the stack to be aligned when calling
1084 // Needs a scratch register to do some arithmetic. This register will be
1086 void PrepareCallCFunction(int num_reg_arguments,
1087 int num_double_registers,
1089 void PrepareCallCFunction(int num_reg_arguments,
1092 // There are two ways of passing double arguments on ARM, depending on
1093 // whether soft or hard floating point ABI is used. These functions
1094 // abstract parameter passing for the three different ways we call
1095 // C functions from generated code.
1096 void MovToFloatParameter(DwVfpRegister src);
1097 void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
1098 void MovToFloatResult(DwVfpRegister src);
1100 // Calls a C function and cleans up the space for arguments allocated
1101 // by PrepareCallCFunction. The called function is not allowed to trigger a
1102 // garbage collection, since that might move the code and invalidate the
1103 // return address (unless this is somehow accounted for by the called
1105 void CallCFunction(ExternalReference function, int num_arguments);
1106 void CallCFunction(Register function, int num_arguments);
1107 void CallCFunction(ExternalReference function,
1108 int num_reg_arguments,
1109 int num_double_arguments);
1110 void CallCFunction(Register function,
1111 int num_reg_arguments,
1112 int num_double_arguments);
1114 void MovFromFloatParameter(DwVfpRegister dst);
1115 void MovFromFloatResult(DwVfpRegister dst);
1117 // Calls an API function. Allocates HandleScope, extracts returned value
1118 // from handle and propagates exceptions. Restores context. stack_space
1119 // - space to be unwound on exit (includes the call JS arguments space and
1120 // the additional space allocated for the fast call).
1121 void CallApiFunctionAndReturn(Register function_address,
1122 ExternalReference thunk_ref,
1124 MemOperand return_value_operand,
1125 MemOperand* context_restore_operand);
1127 // Jump to a runtime routine.
1128 void JumpToExternalReference(const ExternalReference& builtin);
1130 // Invoke specified builtin JavaScript function. Adds an entry to
1131 // the unresolved list if the name does not resolve.
1132 void InvokeBuiltin(Builtins::JavaScript id,
1134 const CallWrapper& call_wrapper = NullCallWrapper());
1136 // Store the code object for the given builtin in the target register and
1137 // setup the function in r1.
1138 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
1140 // Store the function for the given builtin in the target register.
1141 void GetBuiltinFunction(Register target, Builtins::JavaScript id);
1143 Handle<Object> CodeObject() {
1144 DCHECK(!code_object_.is_null());
1145 return code_object_;
1149 // Emit code for a truncating division by a constant. The dividend register is
1150 // unchanged and ip gets clobbered. Dividend and result must be different.
1151 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1153 // ---------------------------------------------------------------------------
1154 // StatsCounter support
1156 void SetCounter(StatsCounter* counter, int value,
1157 Register scratch1, Register scratch2);
1158 void IncrementCounter(StatsCounter* counter, int value,
1159 Register scratch1, Register scratch2);
1160 void DecrementCounter(StatsCounter* counter, int value,
1161 Register scratch1, Register scratch2);
1164 // ---------------------------------------------------------------------------
1167 // Calls Abort(msg) if the condition cond is not satisfied.
1168 // Use --debug_code to enable.
1169 void Assert(Condition cond, BailoutReason reason);
1170 void AssertFastElements(Register elements);
1172 // Like Assert(), but always enabled.
1173 void Check(Condition cond, BailoutReason reason);
1175 // Print a message to stdout and abort execution.
1176 void Abort(BailoutReason msg);
1178 // Verify restrictions about code generated in stubs.
1179 void set_generating_stub(bool value) { generating_stub_ = value; }
1180 bool generating_stub() { return generating_stub_; }
1181 void set_has_frame(bool value) { has_frame_ = value; }
1182 bool has_frame() { return has_frame_; }
1183 inline bool AllowThisStubCall(CodeStub* stub);
1185 // EABI variant for double arguments in use.
1186 bool use_eabi_hardfloat() {
1188 return base::OS::ArmUsingHardFloat();
1189 #elif USE_EABI_HARDFLOAT
1196 // ---------------------------------------------------------------------------
1199 // Check whether the value of reg is a power of two and not zero. If not
1200 // control continues at the label not_power_of_two. If reg is a power of two
1201 // the register scratch contains the value of (reg - 1) when control falls
1203 void JumpIfNotPowerOfTwoOrZero(Register reg,
1205 Label* not_power_of_two_or_zero);
1206 // Check whether the value of reg is a power of two and not zero.
1207 // Control falls through if it is, with scratch containing the mask
1209 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1210 // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1211 // strictly positive but not a power of two.
1212 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
1214 Label* zero_and_neg,
1215 Label* not_power_of_two);
1217 // ---------------------------------------------------------------------------
1220 void SmiTag(Register reg, SBit s = LeaveCC) {
1221 add(reg, reg, Operand(reg), s);
1223 void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
1224 add(dst, src, Operand(src), s);
1227 // Try to convert int32 to smi. If the value is to large, preserve
1228 // the original value and jump to not_a_smi. Destroys scratch and
1230 void TrySmiTag(Register reg, Label* not_a_smi) {
1231 TrySmiTag(reg, reg, not_a_smi);
1233 void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
1234 SmiTag(ip, src, SetCC);
1240 void SmiUntag(Register reg, SBit s = LeaveCC) {
1241 mov(reg, Operand::SmiUntag(reg), s);
1243 void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
1244 mov(dst, Operand::SmiUntag(src), s);
1247 // Untag the source value into destination and jump if source is a smi.
1248 // Souce and destination can be the same register.
1249 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1251 // Untag the source value into destination and jump if source is not a smi.
1252 // Souce and destination can be the same register.
1253 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1255 // Test if the register contains a smi (Z == 0 (eq) if true).
1256 inline void SmiTst(Register value) {
1257 tst(value, Operand(kSmiTagMask));
1259 inline void NonNegativeSmiTst(Register value) {
1260 tst(value, Operand(kSmiTagMask | kSmiSignMask));
1262 // Jump if the register contains a smi.
1263 inline void JumpIfSmi(Register value, Label* smi_label) {
1264 tst(value, Operand(kSmiTagMask));
1267 // Jump if either of the registers contain a non-smi.
1268 inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1269 tst(value, Operand(kSmiTagMask));
1270 b(ne, not_smi_label);
1272 // Jump if either of the registers contain a non-smi.
1273 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1274 // Jump if either of the registers contain a smi.
1275 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1277 // Abort execution if argument is a smi, enabled via --debug-code.
1278 void AssertNotSmi(Register object);
1279 void AssertSmi(Register object);
1281 // Abort execution if argument is not a string, enabled via --debug-code.
1282 void AssertString(Register object);
1284 // Abort execution if argument is not a name, enabled via --debug-code.
1285 void AssertName(Register object);
1287 // Abort execution if argument is not undefined or an AllocationSite, enabled
1288 // via --debug-code.
1289 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1291 // Abort execution if reg is not the root value with the given index,
1292 // enabled via --debug-code.
1293 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1295 // ---------------------------------------------------------------------------
1296 // HeapNumber utilities
1298 void JumpIfNotHeapNumber(Register object,
1299 Register heap_number_map,
1301 Label* on_not_heap_number);
1303 // ---------------------------------------------------------------------------
1306 // Generate code to do a lookup in the number string cache. If the number in
1307 // the register object is found in the cache the generated code falls through
1308 // with the result in the result register. The object and the result register
1309 // can be the same. If the number is not found in the cache the code jumps to
1310 // the label not_found with only the content of register object unchanged.
1311 void LookupNumberStringCache(Register object,
1318 // Checks if both objects are sequential one-byte strings and jumps to label
1319 // if either is not. Assumes that neither object is a smi.
1320 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
1326 // Checks if both objects are sequential one-byte strings and jumps to label
1327 // if either is not.
1328 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1331 Label* not_flat_one_byte_strings);
1333 // Checks if both instance types are sequential one-byte strings and jumps to
1334 // label if either is not.
1335 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1336 Register first_object_instance_type, Register second_object_instance_type,
1337 Register scratch1, Register scratch2, Label* failure);
1339 // Check if instance type is sequential one-byte string and jump to label if
1341 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1344 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1346 void EmitSeqStringSetCharCheck(Register string,
1349 uint32_t encoding_mask);
1351 // ---------------------------------------------------------------------------
1352 // Patching helpers.
1354 // Get the location of a relocated constant (its address in the constant pool)
1355 // from its load site.
1356 void GetRelocatedValueLocation(Register ldr_location, Register result,
1360 void ClampUint8(Register output_reg, Register input_reg);
1362 void ClampDoubleToUint8(Register result_reg,
1363 DwVfpRegister input_reg,
1364 LowDwVfpRegister double_scratch);
1367 void LoadInstanceDescriptors(Register map, Register descriptors);
1368 void EnumLength(Register dst, Register map);
1369 void NumberOfOwnDescriptors(Register dst, Register map);
1371 template<typename Field>
1372 void DecodeField(Register dst, Register src) {
1373 Ubfx(dst, src, Field::kShift, Field::kSize);
1376 template<typename Field>
1377 void DecodeField(Register reg) {
1378 DecodeField<Field>(reg, reg);
1381 template<typename Field>
1382 void DecodeFieldToSmi(Register dst, Register src) {
1383 static const int shift = Field::kShift;
1384 static const int mask = Field::kMask >> shift << kSmiTagSize;
1385 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
1386 STATIC_ASSERT(kSmiTag == 0);
1387 if (shift < kSmiTagSize) {
1388 mov(dst, Operand(src, LSL, kSmiTagSize - shift));
1389 and_(dst, dst, Operand(mask));
1390 } else if (shift > kSmiTagSize) {
1391 mov(dst, Operand(src, LSR, shift - kSmiTagSize));
1392 and_(dst, dst, Operand(mask));
1394 and_(dst, src, Operand(mask));
1398 template<typename Field>
1399 void DecodeFieldToSmi(Register reg) {
1400 DecodeField<Field>(reg, reg);
1403 // Activation support.
1404 void EnterFrame(StackFrame::Type type,
1405 bool load_constant_pool_pointer_reg = false);
1406 // Returns the pc offset at which the frame ends.
1407 int LeaveFrame(StackFrame::Type type);
1409 // Expects object in r0 and returns map with validated enum cache
1410 // in r0. Assumes that any other register can be used as a scratch.
1411 void CheckEnumCache(Register null_value, Label* call_runtime);
1413 // AllocationMemento support. Arrays may have an associated
1414 // AllocationMemento object that can be checked for in order to pretransition
1416 // On entry, receiver_reg should point to the array object.
1417 // scratch_reg gets clobbered.
1418 // If allocation info is present, condition flags are set to eq.
1419 void TestJSArrayForAllocationMemento(Register receiver_reg,
1420 Register scratch_reg,
1421 Label* no_memento_found);
1423 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1424 Register scratch_reg,
1425 Label* memento_found) {
1426 Label no_memento_found;
1427 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1429 b(eq, memento_found);
1430 bind(&no_memento_found);
1433 // Jumps to found label if a prototype map has dictionary elements.
1434 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1435 Register scratch1, Label* found);
1438 void CallCFunctionHelper(Register function,
1439 int num_reg_arguments,
1440 int num_double_arguments);
1442 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
1444 // Helper functions for generating invokes.
1445 void InvokePrologue(const ParameterCount& expected,
1446 const ParameterCount& actual,
1447 Handle<Code> code_constant,
1450 bool* definitely_mismatches,
1452 const CallWrapper& call_wrapper);
1454 void InitializeNewString(Register string,
1456 Heap::RootListIndex map_index,
1460 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1461 void InNewSpace(Register object,
1463 Condition cond, // eq for new space, ne otherwise.
1466 // Helper for finding the mark bits for an address. Afterwards, the
1467 // bitmap register points at the word with the mark bits and the mask
1468 // the position of the first bit. Leaves addr_reg unchanged.
1469 inline void GetMarkBits(Register addr_reg,
1470 Register bitmap_reg,
1473 // Helper for throwing exceptions. Compute a handler address and jump to
1474 // it. See the implementation for register usage.
1475 void JumpToHandlerEntry();
1477 // Compute memory operands for safepoint stack slots.
1478 static int SafepointRegisterStackIndex(int reg_code);
1479 MemOperand SafepointRegisterSlot(Register reg);
1480 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1482 // Loads the constant pool pointer (pp) register.
1483 void LoadConstantPoolPointerRegister();
1485 bool generating_stub_;
1487 // This handle will be patched with the code object on installation.
1488 Handle<Object> code_object_;
1490 // Needs access to SafepointRegisterStackIndex for compiled frame
1492 friend class StandardFrame;
1496 // The code patcher is used to patch (typically) small parts of code e.g. for
1497 // debugging and other types of instrumentation. When using the code patcher
1498 // the exact number of bytes specified must be emitted. It is not legal to emit
1499 // relocation information. If any of these constraints are violated it causes
1500 // an assertion to fail.
1508 CodePatcher(byte* address,
1510 FlushICache flush_cache = FLUSH);
1511 virtual ~CodePatcher();
1513 // Macro assembler to emit code.
1514 MacroAssembler* masm() { return &masm_; }
1516 // Emit an instruction directly.
1517 void Emit(Instr instr);
1519 // Emit an address directly.
1520 void Emit(Address addr);
1522 // Emit the condition part of an instruction leaving the rest of the current
1523 // instruction unchanged.
1524 void EmitCondition(Condition cond);
1527 byte* address_; // The address of the code being patched.
1528 int size_; // Number of bytes of the expected patch size.
1529 MacroAssembler masm_; // Macro assembler used to generate the code.
1530 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1534 // -----------------------------------------------------------------------------
1535 // Static helper functions.
1537 inline MemOperand ContextOperand(Register context, int index) {
1538 return MemOperand(context, Context::SlotOffset(index));
1542 inline MemOperand GlobalObjectOperand() {
1543 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
1547 #ifdef GENERATED_CODE_COVERAGE
1548 #define CODE_COVERAGE_STRINGIFY(x) #x
1549 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1550 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1551 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
1553 #define ACCESS_MASM(masm) masm->
1557 } } // namespace v8::internal
1559 #endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_