// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
5 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
6 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"
16 // ----------------------------------------------------------------------------
17 // Static helper functions
19 // Generate a MemOperand for loading a field from an object.
20 inline MemOperand FieldMemOperand(Register object, int offset) {
21 return MemOperand(object, offset - kHeapObjectTag);
25 // Give alias names to registers
26 const Register cp = { kRegister_r7_Code }; // JavaScript context pointer.
27 const Register pp = { kRegister_r8_Code }; // Constant pool pointer.
28 const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
// Flags used for AllocateHeapNumber
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag the result.
  DONT_TAG_RESULT
};

// Whether the write barrier should update the remembered set.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether a smi check may be skipped (value known to be a heap object).
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
// Whether the stored value could point into an area the incremental
// marker is interested in.
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
// Whether lr has already been preserved around the record-write code.
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
48 Register GetRegisterThatIsNotOneOf(Register reg1,
49 Register reg2 = no_reg,
50 Register reg3 = no_reg,
51 Register reg4 = no_reg,
52 Register reg5 = no_reg,
53 Register reg6 = no_reg);
57 bool AreAliased(Register reg1,
59 Register reg3 = no_reg,
60 Register reg4 = no_reg,
61 Register reg5 = no_reg,
62 Register reg6 = no_reg,
63 Register reg7 = no_reg,
64 Register reg8 = no_reg);
// Whether a call target address may be emitted inline in the instruction
// stream or must always go through the constant pool.
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
73 // MacroAssembler implements a collection of frequently used macros.
74 class MacroAssembler: public Assembler {
76 // The isolate parameter can be NULL if the macro assembler should
77 // not use isolate-dependent functionality. In this case, it's the
78 // responsibility of the caller to never invoke such function on the
80 MacroAssembler(Isolate* isolate, void* buffer, int size);
83 // Returns the size of a call in instructions. Note, the value returned is
84 // only valid as long as no entries are added to the constant pool between
85 // checking the call size and emitting the actual call.
86 static int CallSize(Register target, Condition cond = al);
87 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
88 int CallStubSize(CodeStub* stub,
89 TypeFeedbackId ast_id = TypeFeedbackId::None(),
91 static int CallSizeNotPredictableCodeSize(Isolate* isolate,
93 RelocInfo::Mode rmode,
96 // Jump, Call, and Ret pseudo instructions implementing inter-working.
97 void Jump(Register target, Condition cond = al);
98 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
99 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
100 void Call(Register target, Condition cond = al);
101 void Call(Address target, RelocInfo::Mode rmode,
103 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
104 int CallSize(Handle<Code> code,
105 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
106 TypeFeedbackId ast_id = TypeFeedbackId::None(),
107 Condition cond = al);
108 void Call(Handle<Code> code,
109 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
110 TypeFeedbackId ast_id = TypeFeedbackId::None(),
112 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
113 void Ret(Condition cond = al);
115 // Emit code to discard a non-negative number of pointer-sized elements
116 // from the stack, clobbering only the sp register.
117 void Drop(int count, Condition cond = al);
119 void Ret(int drop, Condition cond = al);
121 // Swap two registers. If the scratch register is omitted then a slightly
122 // less efficient form using xor instead of mov is emitted.
123 void Swap(Register reg1,
125 Register scratch = no_reg,
126 Condition cond = al);
128 void Mls(Register dst, Register src1, Register src2, Register srcA,
129 Condition cond = al);
130 void And(Register dst, Register src1, const Operand& src2,
131 Condition cond = al);
132 void Ubfx(Register dst, Register src, int lsb, int width,
133 Condition cond = al);
134 void Sbfx(Register dst, Register src, int lsb, int width,
135 Condition cond = al);
136 // The scratch register is not used for ARMv7.
137 // scratch can be the same register as src (in which case it is trashed), but
138 // not the same as dst.
139 void Bfi(Register dst,
144 Condition cond = al);
145 void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
146 void Usat(Register dst, int satpos, const Operand& src,
147 Condition cond = al);
149 void Call(Label* target);
150 void Push(Register src) { push(src); }
151 void Pop(Register dst) { pop(dst); }
153 // Register move. May do nothing if the registers are identical.
154 void Move(Register dst, Handle<Object> value);
155 void Move(Register dst, Register src, Condition cond = al);
156 void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
157 Condition cond = al) {
158 if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
159 mov(dst, src, sbit, cond);
162 void Move(DwVfpRegister dst, DwVfpRegister src);
164 void Load(Register dst, const MemOperand& src, Representation r);
165 void Store(Register src, const MemOperand& dst, Representation r);
167 // Load an object from the root table.
168 void LoadRoot(Register destination,
169 Heap::RootListIndex index,
170 Condition cond = al);
171 // Store an object to the root table.
172 void StoreRoot(Register source,
173 Heap::RootListIndex index,
174 Condition cond = al);
176 // ---------------------------------------------------------------------------
179 void IncrementalMarkingRecordWriteHelper(Register object,
183 enum RememberedSetFinalAction {
188 // Record in the remembered set the fact that we have a pointer to new space
189 // at the address pointed to by the addr register. Only works if addr is not
191 void RememberedSetHelper(Register object, // Used for debug code.
194 SaveFPRegsMode save_fp,
195 RememberedSetFinalAction and_then);
197 void CheckPageFlag(Register object,
201 Label* condition_met);
203 // Check if object is in new space. Jumps if the object is not in new space.
204 // The register scratch can be object itself, but scratch will be clobbered.
205 void JumpIfNotInNewSpace(Register object,
208 InNewSpace(object, scratch, ne, branch);
211 // Check if object is in new space. Jumps if the object is in new space.
212 // The register scratch can be object itself, but it will be clobbered.
213 void JumpIfInNewSpace(Register object,
216 InNewSpace(object, scratch, eq, branch);
219 // Check if an object has a given incremental marking color.
220 void HasColor(Register object,
227 void JumpIfBlack(Register object,
232 // Checks the color of an object. If the object is already grey or black
233 // then we just fall through, since it is already live. If it is white and
234 // we can determine that it doesn't need to be scanned, then we just mark it
235 // black and fall through. For the rest we jump to the label so the
236 // incremental marker can fix its assumptions.
237 void EnsureNotWhite(Register object,
241 Label* object_is_white_and_not_data);
243 // Detects conservatively whether an object is data-only, i.e. it does need to
244 // be scanned by the garbage collector.
245 void JumpIfDataObject(Register value,
247 Label* not_data_object);
249 // Notify the garbage collector that we wrote a pointer into an object.
250 // |object| is the object being stored into, |value| is the object being
251 // stored. value and scratch registers are clobbered by the operation.
252 // The offset is the offset from the start of the object, not the offset from
253 // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
254 void RecordWriteField(
259 LinkRegisterStatus lr_status,
260 SaveFPRegsMode save_fp,
261 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
262 SmiCheck smi_check = INLINE_SMI_CHECK,
263 PointersToHereCheck pointers_to_here_check_for_value =
264 kPointersToHereMaybeInteresting);
266 // As above, but the offset has the tag presubtracted. For use with
267 // MemOperand(reg, off).
268 inline void RecordWriteContextSlot(
273 LinkRegisterStatus lr_status,
274 SaveFPRegsMode save_fp,
275 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
276 SmiCheck smi_check = INLINE_SMI_CHECK,
277 PointersToHereCheck pointers_to_here_check_for_value =
278 kPointersToHereMaybeInteresting) {
279 RecordWriteField(context,
280 offset + kHeapObjectTag,
285 remembered_set_action,
287 pointers_to_here_check_for_value);
290 void RecordWriteForMap(
294 LinkRegisterStatus lr_status,
295 SaveFPRegsMode save_fp);
297 // For a given |object| notify the garbage collector that the slot |address|
298 // has been written. |value| is the object being stored. The value and
299 // address registers are clobbered by the operation.
304 LinkRegisterStatus lr_status,
305 SaveFPRegsMode save_fp,
306 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
307 SmiCheck smi_check = INLINE_SMI_CHECK,
308 PointersToHereCheck pointers_to_here_check_for_value =
309 kPointersToHereMaybeInteresting);
312 void Push(Handle<Object> handle);
313 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
315 // Push two registers. Pushes leftmost register first (to highest address).
316 void Push(Register src1, Register src2, Condition cond = al) {
317 DCHECK(!src1.is(src2));
318 if (src1.code() > src2.code()) {
319 stm(db_w, sp, src1.bit() | src2.bit(), cond);
321 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
322 str(src2, MemOperand(sp, 4, NegPreIndex), cond);
326 // Push three registers. Pushes leftmost register first (to highest address).
327 void Push(Register src1, Register src2, Register src3, Condition cond = al) {
328 DCHECK(!src1.is(src2));
329 DCHECK(!src2.is(src3));
330 DCHECK(!src1.is(src3));
331 if (src1.code() > src2.code()) {
332 if (src2.code() > src3.code()) {
333 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
335 stm(db_w, sp, src1.bit() | src2.bit(), cond);
336 str(src3, MemOperand(sp, 4, NegPreIndex), cond);
339 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
340 Push(src2, src3, cond);
344 // Push four registers. Pushes leftmost register first (to highest address).
345 void Push(Register src1,
349 Condition cond = al) {
350 DCHECK(!src1.is(src2));
351 DCHECK(!src2.is(src3));
352 DCHECK(!src1.is(src3));
353 DCHECK(!src1.is(src4));
354 DCHECK(!src2.is(src4));
355 DCHECK(!src3.is(src4));
356 if (src1.code() > src2.code()) {
357 if (src2.code() > src3.code()) {
358 if (src3.code() > src4.code()) {
361 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
364 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
365 str(src4, MemOperand(sp, 4, NegPreIndex), cond);
368 stm(db_w, sp, src1.bit() | src2.bit(), cond);
369 Push(src3, src4, cond);
372 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
373 Push(src2, src3, src4, cond);
377 // Pop two registers. Pops rightmost register first (from lower address).
378 void Pop(Register src1, Register src2, Condition cond = al) {
379 DCHECK(!src1.is(src2));
380 if (src1.code() > src2.code()) {
381 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
383 ldr(src2, MemOperand(sp, 4, PostIndex), cond);
384 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
388 // Pop three registers. Pops rightmost register first (from lower address).
389 void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
390 DCHECK(!src1.is(src2));
391 DCHECK(!src2.is(src3));
392 DCHECK(!src1.is(src3));
393 if (src1.code() > src2.code()) {
394 if (src2.code() > src3.code()) {
395 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
397 ldr(src3, MemOperand(sp, 4, PostIndex), cond);
398 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
401 Pop(src2, src3, cond);
402 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
406 // Pop four registers. Pops rightmost register first (from lower address).
407 void Pop(Register src1,
411 Condition cond = al) {
412 DCHECK(!src1.is(src2));
413 DCHECK(!src2.is(src3));
414 DCHECK(!src1.is(src3));
415 DCHECK(!src1.is(src4));
416 DCHECK(!src2.is(src4));
417 DCHECK(!src3.is(src4));
418 if (src1.code() > src2.code()) {
419 if (src2.code() > src3.code()) {
420 if (src3.code() > src4.code()) {
423 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
426 ldr(src4, MemOperand(sp, 4, PostIndex), cond);
427 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
430 Pop(src3, src4, cond);
431 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
434 Pop(src2, src3, src4, cond);
435 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
439 // Push a fixed frame, consisting of lr, fp, constant pool (if
440 // FLAG_enable_ool_constant_pool), context and JS function / marker id if
441 // marker_reg is a valid register.
442 void PushFixedFrame(Register marker_reg = no_reg);
443 void PopFixedFrame(Register marker_reg = no_reg);
445 // Push and pop the registers that can hold pointers, as defined by the
446 // RegList constant kSafepointSavedRegisters.
447 void PushSafepointRegisters();
448 void PopSafepointRegisters();
449 // Store value in register src in the safepoint stack slot for
451 void StoreToSafepointRegisterSlot(Register src, Register dst);
452 // Load the value of the src register from its safepoint stack slot
453 // into register dst.
454 void LoadFromSafepointRegisterSlot(Register dst, Register src);
456 // Load two consecutive registers with two consecutive memory locations.
457 void Ldrd(Register dst1,
459 const MemOperand& src,
460 Condition cond = al);
462 // Store two consecutive registers to two consecutive memory locations.
463 void Strd(Register src1,
465 const MemOperand& dst,
466 Condition cond = al);
468 // Ensure that FPSCR contains values needed by JavaScript.
469 // We need the NaNModeControlBit to be sure that operations like
470 // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
471 // In VFP3 it will be always the Canonical NaN.
472 // In VFP2 it will be either the Canonical NaN or the negative version
473 // of the Canonical NaN. It doesn't matter if we have two values. The aim
474 // is to be sure to never generate the hole NaN.
475 void VFPEnsureFPSCRState(Register scratch);
477 // If the value is a NaN, canonicalize the value else, do nothing.
478 void VFPCanonicalizeNaN(const DwVfpRegister dst,
479 const DwVfpRegister src,
480 const Condition cond = al);
481 void VFPCanonicalizeNaN(const DwVfpRegister value,
482 const Condition cond = al) {
483 VFPCanonicalizeNaN(value, value, cond);
486 // Compare double values and move the result to the normal condition flags.
487 void VFPCompareAndSetFlags(const DwVfpRegister src1,
488 const DwVfpRegister src2,
489 const Condition cond = al);
490 void VFPCompareAndSetFlags(const DwVfpRegister src1,
492 const Condition cond = al);
494 // Compare double values and then load the fpscr flags to a register.
495 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
496 const DwVfpRegister src2,
497 const Register fpscr_flags,
498 const Condition cond = al);
499 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
501 const Register fpscr_flags,
502 const Condition cond = al);
504 void Vmov(const DwVfpRegister dst,
506 const Register scratch = no_reg);
508 void VmovHigh(Register dst, DwVfpRegister src);
509 void VmovHigh(DwVfpRegister dst, Register src);
510 void VmovLow(Register dst, DwVfpRegister src);
511 void VmovLow(DwVfpRegister dst, Register src);
513 // Loads the number from object into dst register.
514 // If |object| is neither smi nor heap number, |not_number| is jumped to
515 // with |object| still intact.
516 void LoadNumber(Register object,
517 LowDwVfpRegister dst,
518 Register heap_number_map,
522 // Loads the number from object into double_dst in the double format.
523 // Control will jump to not_int32 if the value cannot be exactly represented
524 // by a 32-bit integer.
525 // Floating point value in the 32-bit integer range that are not exact integer
527 void LoadNumberAsInt32Double(Register object,
528 DwVfpRegister double_dst,
529 Register heap_number_map,
531 LowDwVfpRegister double_scratch,
534 // Loads the number from object into dst as a 32-bit integer.
535 // Control will jump to not_int32 if the object cannot be exactly represented
536 // by a 32-bit integer.
537 // Floating point value in the 32-bit integer range that are not exact integer
538 // won't be converted.
539 void LoadNumberAsInt32(Register object,
541 Register heap_number_map,
543 DwVfpRegister double_scratch0,
544 LowDwVfpRegister double_scratch1,
547 // Generates function and stub prologue code.
549 void Prologue(bool code_pre_aging);
552 // stack_space - extra stack space, used for alignment before call to C.
553 void EnterExitFrame(bool save_doubles, int stack_space = 0);
555 // Leave the current exit frame. Expects the return value in r0.
556 // Expect the number of values, pushed prior to the exit frame, to
557 // remove in a register (or no_reg, if there is nothing to remove).
558 void LeaveExitFrame(bool save_doubles, Register argument_count,
559 bool restore_context,
560 bool argument_count_is_length = false);
562 // Get the actual activation frame alignment for target environment.
563 static int ActivationFrameAlignment();
565 void LoadContext(Register dst, int context_chain_length);
567 // Conditionally load the cached Array transitioned map of type
568 // transitioned_kind from the native context if the map in register
569 // map_in_out is the cached Array map in the native context of
571 void LoadTransitionedArrayMapConditional(
572 ElementsKind expected_kind,
573 ElementsKind transitioned_kind,
576 Label* no_map_match);
578 void LoadGlobalFunction(int index, Register function);
580 // Load the initial map from the global function. The registers
581 // function and map can be the same, function is then overwritten.
582 void LoadGlobalFunctionInitialMap(Register function,
586 void InitializeRootRegister() {
587 ExternalReference roots_array_start =
588 ExternalReference::roots_array_start(isolate());
589 mov(kRootRegister, Operand(roots_array_start));
592 // ---------------------------------------------------------------------------
593 // JavaScript invokes
595 // Invoke the JavaScript function code by either calling or jumping.
596 void InvokeCode(Register code,
597 const ParameterCount& expected,
598 const ParameterCount& actual,
600 const CallWrapper& call_wrapper);
602 // Invoke the JavaScript function in the given register. Changes the
603 // current context to the context in the function before invoking.
604 void InvokeFunction(Register function,
605 const ParameterCount& actual,
607 const CallWrapper& call_wrapper);
609 void InvokeFunction(Register function,
610 const ParameterCount& expected,
611 const ParameterCount& actual,
613 const CallWrapper& call_wrapper);
615 void InvokeFunction(Handle<JSFunction> function,
616 const ParameterCount& expected,
617 const ParameterCount& actual,
619 const CallWrapper& call_wrapper);
621 void IsObjectJSObjectType(Register heap_object,
626 void IsInstanceJSObjectType(Register map,
630 void IsObjectJSStringType(Register object,
634 void IsObjectNameType(Register object,
638 // ---------------------------------------------------------------------------
643 // ---------------------------------------------------------------------------
644 // Exception handling
646 // Push a new stack handler and link into stack handler chain.
647 void PushStackHandler();
649 // Unlink the stack handler on top of the stack from the stack handler chain.
650 // Must preserve the result register.
651 void PopStackHandler();
653 // ---------------------------------------------------------------------------
654 // Inline caching support
656 // Generate code for checking access rights - used for security checks
657 // on access to global objects across environments. The holder register
658 // is left untouched, whereas both scratch registers are clobbered.
659 void CheckAccessGlobalProxy(Register holder_reg,
663 void GetNumberHash(Register t0, Register scratch);
665 void LoadFromNumberDictionary(Label* miss,
674 inline void MarkCode(NopMarkerTypes type) {
678 // Check if the given instruction is a 'type' marker.
679 // i.e. check if is is a mov r<type>, r<type> (referenced as nop(type))
680 // These instructions are generated to mark special location in the code,
681 // like some special IC code.
682 static inline bool IsMarkedCode(Instr instr, int type) {
683 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
684 return IsNop(instr, type);
688 static inline int GetCodeMarker(Instr instr) {
689 int dst_reg_offset = 12;
690 int dst_mask = 0xf << dst_reg_offset;
692 int dst_reg = (instr & dst_mask) >> dst_reg_offset;
693 int src_reg = instr & src_mask;
694 uint32_t non_register_mask = ~(dst_mask | src_mask);
695 uint32_t mov_mask = al | 13 << 21;
697 // Return <n> if we have a mov rn rn, else return -1.
698 int type = ((instr & non_register_mask) == mov_mask) &&
699 (dst_reg == src_reg) &&
700 (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
703 DCHECK((type == -1) ||
704 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
709 // ---------------------------------------------------------------------------
710 // Allocation support
712 // Allocate an object in new space or old pointer space. The object_size is
713 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
714 // is passed. If the space is exhausted control continues at the gc_required
715 // label. The allocated object is returned in result. If the flag
716 // tag_allocated_object is true the result is tagged as as a heap object.
717 // All registers are clobbered also when control continues at the gc_required
719 void Allocate(int object_size,
724 AllocationFlags flags);
726 void Allocate(Register object_size,
731 AllocationFlags flags);
733 // Undo allocation in new space. The object passed and objects allocated after
734 // it will no longer be allocated. The caller must make sure that no pointers
735 // are left to the object(s) no longer allocated as they would be invalid when
736 // allocation is undone.
737 void UndoAllocationInNewSpace(Register object, Register scratch);
740 void AllocateTwoByteString(Register result,
746 void AllocateOneByteString(Register result, Register length,
747 Register scratch1, Register scratch2,
748 Register scratch3, Label* gc_required);
749 void AllocateTwoByteConsString(Register result,
754 void AllocateOneByteConsString(Register result, Register length,
755 Register scratch1, Register scratch2,
757 void AllocateTwoByteSlicedString(Register result,
762 void AllocateOneByteSlicedString(Register result, Register length,
763 Register scratch1, Register scratch2,
766 // Allocates a heap number or jumps to the gc_required label if the young
767 // space is full and a scavenge is needed. All registers are clobbered also
768 // when control continues at the gc_required label.
769 void AllocateHeapNumber(Register result,
772 Register heap_number_map,
774 TaggingMode tagging_mode = TAG_RESULT,
775 MutableMode mode = IMMUTABLE);
776 void AllocateHeapNumberWithValue(Register result,
780 Register heap_number_map,
783 // Copies a fixed number of fields of heap objects from src to dst.
784 void CopyFields(Register dst,
786 LowDwVfpRegister double_scratch,
789 // Copies a number of bytes from src to dst. All registers are clobbered. On
790 // exit src and dst will point to the place just after where the last byte was
791 // read or written and length will be zero.
792 void CopyBytes(Register src,
797 // Initialize fields with filler values. Fields starting at |start_offset|
798 // not including end_offset are overwritten with the value in |filler|. At
799 // the end the loop, |start_offset| takes the value of |end_offset|.
800 void InitializeFieldsWithFiller(Register start_offset,
804 // ---------------------------------------------------------------------------
805 // Support functions.
807 // Machine code version of Map::GetConstructor().
808 // |temp| holds |result|'s map when done, and |temp2| its instance type.
809 void GetMapConstructor(Register result, Register map, Register temp,
812 // Try to get function prototype of a function and puts the value in
813 // the result register. Checks that the function really is a
814 // function and jumps to the miss label if the fast checks fail. The
815 // function register will be untouched; the other registers may be
817 void TryGetFunctionPrototype(Register function,
821 bool miss_on_bound_function = false);
823 // Compare object type for heap object. heap_object contains a non-Smi
824 // whose object type should be compared with the given type. This both
825 // sets the flags and leaves the object type in the type_reg register.
826 // It leaves the map in the map register (unless the type_reg and map register
827 // are the same register). It leaves the heap object in the heap_object
828 // register unless the heap_object register is the same register as one of the
830 // Type_reg can be no_reg. In that case ip is used.
831 void CompareObjectType(Register heap_object,
836 // Compare object type for heap object. Branch to false_label if type
837 // is lower than min_type or greater than max_type.
838 // Load map into the register map.
839 void CheckObjectTypeRange(Register heap_object,
841 InstanceType min_type,
842 InstanceType max_type,
845 // Compare instance type in a map. map contains a valid map object whose
846 // object type should be compared with the given type. This both
847 // sets the flags and leaves the object type in the type_reg register.
848 void CompareInstanceType(Register map,
853 // Check if a map for a JSObject indicates that the object has fast elements.
854 // Jump to the specified label if it does not.
855 void CheckFastElements(Register map,
859 // Check if a map for a JSObject indicates that the object can have both smi
860 // and HeapObject elements. Jump to the specified label if it does not.
861 void CheckFastObjectElements(Register map,
865 // Check if a map for a JSObject indicates that the object has fast smi only
866 // elements. Jump to the specified label if it does not.
867 void CheckFastSmiElements(Register map,
871 // Check to see if maybe_number can be stored as a double in
872 // FastDoubleElements. If it can, store it at the index specified by key in
873 // the FastDoubleElements array elements. Otherwise jump to fail.
874 void StoreNumberToDoubleElements(Register value_reg,
876 Register elements_reg,
878 LowDwVfpRegister double_scratch,
880 int elements_offset = 0);
882 // Compare an object's map with the specified map and its transitioned
883 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
884 // set with result of map compare. If multiple map compares are required, the
885 // compare sequences branches to early_success.
886 void CompareMap(Register obj,
889 Label* early_success);
891 // As above, but the map of the object is already loaded into the register
892 // which is preserved by the code generated.
893 void CompareMap(Register obj_map,
895 Label* early_success);
897 // Check if the map of an object is equal to a specified map and branch to
898 // label if not. Skip the smi check if not required (object is known to be a
899 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
900 // against maps that are ElementsKind transition maps of the specified map.
901 void CheckMap(Register obj,
905 SmiCheckType smi_check_type);
908 void CheckMap(Register obj,
910 Heap::RootListIndex index,
912 SmiCheckType smi_check_type);
915 // Check if the map of an object is equal to a specified weak map and branch
916 // to a specified target if equal. Skip the smi check if not required
917 // (object is known to be a heap object)
918 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
919 Handle<WeakCell> cell, Handle<Code> success,
920 SmiCheckType smi_check_type);
922 // Compare the given value and the value of weak cell.
923 void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
925 void GetWeakValue(Register value, Handle<WeakCell> cell);
927 // Load the value of the weak cell in the value register. Branch to the given
928 // miss label if the weak cell was cleared.
929 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
931 // Compare the object in a register to a value from the root list.
932 // Uses the ip register as scratch.
933 void CompareRoot(Register obj, Heap::RootListIndex index);
936 // Load and check the instance type of an object for being a string.
937 // Loads the type into the second argument register.
938 // Returns a condition that will be enabled if the object was a string
939 // and the passed-in condition passed. If the passed-in condition failed
940 // then flags remain unchanged.
941 Condition IsObjectStringType(Register obj,
943 Condition cond = al) {
944 ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
945 ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
946 tst(type, Operand(kIsNotStringMask), cond);
947 DCHECK_EQ(0u, kStringTag);
952 // Picks out an array index from the hash field.
954 // hash - holds the index's hash. Clobbered.
955 // index - holds the overwritten index on exit.
956 void IndexFromHash(Register hash, Register index);
958 // Get the number of least significant bits from a register
959 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
960 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
962 // Load the value of a smi object into a double register.
963 // The register value must be between d0 and d15.
964 void SmiToDouble(LowDwVfpRegister value, Register smi);
966 // Check if a double can be exactly represented as a signed 32-bit integer.
967 // Z flag set to one if true.
968 void TestDoubleIsInt32(DwVfpRegister double_input,
969 LowDwVfpRegister double_scratch);
971 // Try to convert a double to a signed 32-bit integer.
972 // Z flag set to one and result assigned if the conversion is exact.
973 void TryDoubleToInt32Exact(Register result,
974 DwVfpRegister double_input,
975 LowDwVfpRegister double_scratch);
977 // Floor a double and writes the value to the result register.
978 // Go to exact if the conversion is exact (to be able to test -0),
979 // fall through calling code if an overflow occurred, else go to done.
980 // In return, input_high is loaded with high bits of input.
981 void TryInt32Floor(Register result,
982 DwVfpRegister double_input,
984 LowDwVfpRegister double_scratch,
988 // Performs a truncating conversion of a floating point number as used by
989 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
990 // succeeds, otherwise falls through if result is saturated. On return
991 // 'result' either holds answer, or is clobbered on fall through.
993 // Only public for the test code in test-code-stubs-arm.cc.
994 void TryInlineTruncateDoubleToI(Register result,
998 // Performs a truncating conversion of a floating point number as used by
999 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
1000 // Exits with 'result' holding the answer.
1001 void TruncateDoubleToI(Register result, DwVfpRegister double_input);
1003 // Performs a truncating conversion of a heap number as used by
1004 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
1005 // must be different registers. Exits with 'result' holding the answer.
1006 void TruncateHeapNumberToI(Register result, Register object);
1008 // Converts the smi or heap number in object to an int32 using the rules
1009 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
1010 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
1011 // different registers.
1012 void TruncateNumberToI(Register object,
1014 Register heap_number_map,
1018 // Check whether d16-d31 are available on the CPU. The result is given by the
1019 // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
1020 void CheckFor32DRegs(Register scratch);
1022 // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
1023 // values to location, saving [d0..(d15|d31)].
1024 void SaveFPRegs(Register location, Register scratch);
1026 // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
1027 // values to location, restoring [d0..(d15|d31)].
1028 void RestoreFPRegs(Register location, Register scratch);
1030 // ---------------------------------------------------------------------------
1033 // Call a code stub.
1034 void CallStub(CodeStub* stub,
1035 TypeFeedbackId ast_id = TypeFeedbackId::None(),
1036 Condition cond = al);
1038 // Call a code stub.
1039 void TailCallStub(CodeStub* stub, Condition cond = al);
1041 // Call a runtime routine.
1042 void CallRuntime(const Runtime::Function* f,
1044 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1045 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1046 const Runtime::Function* function = Runtime::FunctionForId(id);
1047 CallRuntime(function, function->nargs, kSaveFPRegs);
1050 // Convenience function: Same as above, but takes the fid instead.
1051 void CallRuntime(Runtime::FunctionId id,
1053 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1054 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1057 // Convenience function: call an external reference.
1058 void CallExternalReference(const ExternalReference& ext,
1061 // Tail call of a runtime routine (jump).
1062 // Like JumpToExternalReference, but also takes care of passing the number
1064 void TailCallExternalReference(const ExternalReference& ext,
1068 // Convenience function: tail call a runtime routine (jump).
1069 void TailCallRuntime(Runtime::FunctionId fid,
1073 int CalculateStackPassedWords(int num_reg_arguments,
1074 int num_double_arguments);
1076 // Before calling a C-function from generated code, align arguments on stack.
1077 // After aligning the frame, non-register arguments must be stored in
1078 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
1079 // are word sized. If double arguments are used, this function assumes that
1080 // all double arguments are stored before core registers; otherwise the
1081 // correct alignment of the double values is not guaranteed.
1082 // Some compilers/platforms require the stack to be aligned when calling
1084 // Needs a scratch register to do some arithmetic. This register will be
1086 void PrepareCallCFunction(int num_reg_arguments,
1087 int num_double_registers,
1089 void PrepareCallCFunction(int num_reg_arguments,
1092 // There are two ways of passing double arguments on ARM, depending on
1093 // whether soft or hard floating point ABI is used. These functions
1094 // abstract parameter passing for the three different ways we call
1095 // C functions from generated code.
1096 void MovToFloatParameter(DwVfpRegister src);
1097 void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
1098 void MovToFloatResult(DwVfpRegister src);
1100 // Calls a C function and cleans up the space for arguments allocated
1101 // by PrepareCallCFunction. The called function is not allowed to trigger a
1102 // garbage collection, since that might move the code and invalidate the
1103 // return address (unless this is somehow accounted for by the called
1105 void CallCFunction(ExternalReference function, int num_arguments);
1106 void CallCFunction(Register function, int num_arguments);
1107 void CallCFunction(ExternalReference function,
1108 int num_reg_arguments,
1109 int num_double_arguments);
1110 void CallCFunction(Register function,
1111 int num_reg_arguments,
1112 int num_double_arguments);
1114 void MovFromFloatParameter(DwVfpRegister dst);
1115 void MovFromFloatResult(DwVfpRegister dst);
1117 // Jump to a runtime routine.
1118 void JumpToExternalReference(const ExternalReference& builtin);
1120 // Invoke specified builtin JavaScript function. Adds an entry to
1121 // the unresolved list if the name does not resolve.
1122 void InvokeBuiltin(Builtins::JavaScript id,
1124 const CallWrapper& call_wrapper = NullCallWrapper());
1126 // Store the code object for the given builtin in the target register and
1127 // setup the function in r1.
1128 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
1130 // Store the function for the given builtin in the target register.
1131 void GetBuiltinFunction(Register target, Builtins::JavaScript id);
1133 Handle<Object> CodeObject() {
1134 DCHECK(!code_object_.is_null());
1135 return code_object_;
1139 // Emit code for a truncating division by a constant. The dividend register is
1140 // unchanged and ip gets clobbered. Dividend and result must be different.
1141 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1143 // ---------------------------------------------------------------------------
1144 // StatsCounter support
1146 void SetCounter(StatsCounter* counter, int value,
1147 Register scratch1, Register scratch2);
1148 void IncrementCounter(StatsCounter* counter, int value,
1149 Register scratch1, Register scratch2);
1150 void DecrementCounter(StatsCounter* counter, int value,
1151 Register scratch1, Register scratch2);
1154 // ---------------------------------------------------------------------------
1157 // Calls Abort(msg) if the condition cond is not satisfied.
1158 // Use --debug_code to enable.
1159 void Assert(Condition cond, BailoutReason reason);
1160 void AssertFastElements(Register elements);
1162 // Like Assert(), but always enabled.
1163 void Check(Condition cond, BailoutReason reason);
1165 // Print a message to stdout and abort execution.
1166 void Abort(BailoutReason msg);
1168 // Verify restrictions about code generated in stubs.
1169 void set_generating_stub(bool value) { generating_stub_ = value; }
1170 bool generating_stub() { return generating_stub_; }
1171 void set_has_frame(bool value) { has_frame_ = value; }
1172 bool has_frame() { return has_frame_; }
1173 inline bool AllowThisStubCall(CodeStub* stub);
1175 // EABI variant for double arguments in use.
1176 bool use_eabi_hardfloat() {
1178 return base::OS::ArmUsingHardFloat();
1179 #elif USE_EABI_HARDFLOAT
1186 // ---------------------------------------------------------------------------
1189 // Check whether the value of reg is a power of two and not zero. If not
1190 // control continues at the label not_power_of_two. If reg is a power of two
1191 // the register scratch contains the value of (reg - 1) when control falls
1193 void JumpIfNotPowerOfTwoOrZero(Register reg,
1195 Label* not_power_of_two_or_zero);
1196 // Check whether the value of reg is a power of two and not zero.
1197 // Control falls through if it is, with scratch containing the mask
1199 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1200 // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1201 // strictly positive but not a power of two.
1202 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
1204 Label* zero_and_neg,
1205 Label* not_power_of_two);
1207 // ---------------------------------------------------------------------------
1210 void SmiTag(Register reg, SBit s = LeaveCC) {
1211 add(reg, reg, Operand(reg), s);
1213 void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
1214 add(dst, src, Operand(src), s);
// Try to convert int32 to smi. If the value is too large, preserve
1218 // the original value and jump to not_a_smi. Destroys scratch and
1220 void TrySmiTag(Register reg, Label* not_a_smi) {
1221 TrySmiTag(reg, reg, not_a_smi);
1223 void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
1224 SmiTag(ip, src, SetCC);
1230 void SmiUntag(Register reg, SBit s = LeaveCC) {
1231 mov(reg, Operand::SmiUntag(reg), s);
1233 void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
1234 mov(dst, Operand::SmiUntag(src), s);
1237 // Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
1239 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1241 // Untag the source value into destination and jump if source is not a smi.
// Source and destination can be the same register.
1243 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1245 // Test if the register contains a smi (Z == 0 (eq) if true).
1246 inline void SmiTst(Register value) {
1247 tst(value, Operand(kSmiTagMask));
1249 inline void NonNegativeSmiTst(Register value) {
1250 tst(value, Operand(kSmiTagMask | kSmiSignMask));
1252 // Jump if the register contains a smi.
1253 inline void JumpIfSmi(Register value, Label* smi_label) {
1254 tst(value, Operand(kSmiTagMask));
// Jump if the register contains a non-smi.
1258 inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1259 tst(value, Operand(kSmiTagMask));
1260 b(ne, not_smi_label);
1262 // Jump if either of the registers contain a non-smi.
1263 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1264 // Jump if either of the registers contain a smi.
1265 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1267 // Abort execution if argument is a smi, enabled via --debug-code.
1268 void AssertNotSmi(Register object);
1269 void AssertSmi(Register object);
1271 // Abort execution if argument is not a string, enabled via --debug-code.
1272 void AssertString(Register object);
1274 // Abort execution if argument is not a name, enabled via --debug-code.
1275 void AssertName(Register object);
1277 // Abort execution if argument is not undefined or an AllocationSite, enabled
1278 // via --debug-code.
1279 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1281 // Abort execution if reg is not the root value with the given index,
1282 // enabled via --debug-code.
1283 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1285 // ---------------------------------------------------------------------------
1286 // HeapNumber utilities
1288 void JumpIfNotHeapNumber(Register object,
1289 Register heap_number_map,
1291 Label* on_not_heap_number);
1293 // ---------------------------------------------------------------------------
1296 // Generate code to do a lookup in the number string cache. If the number in
1297 // the register object is found in the cache the generated code falls through
1298 // with the result in the result register. The object and the result register
1299 // can be the same. If the number is not found in the cache the code jumps to
1300 // the label not_found with only the content of register object unchanged.
1301 void LookupNumberStringCache(Register object,
1308 // Checks if both objects are sequential one-byte strings and jumps to label
1309 // if either is not. Assumes that neither object is a smi.
1310 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
1316 // Checks if both objects are sequential one-byte strings and jumps to label
1317 // if either is not.
1318 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1321 Label* not_flat_one_byte_strings);
1323 // Checks if both instance types are sequential one-byte strings and jumps to
1324 // label if either is not.
1325 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1326 Register first_object_instance_type, Register second_object_instance_type,
1327 Register scratch1, Register scratch2, Label* failure);
1329 // Check if instance type is sequential one-byte string and jump to label if
1331 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1334 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1336 void EmitSeqStringSetCharCheck(Register string,
1339 uint32_t encoding_mask);
1341 // ---------------------------------------------------------------------------
1342 // Patching helpers.
1344 // Get the location of a relocated constant (its address in the constant pool)
1345 // from its load site.
1346 void GetRelocatedValueLocation(Register ldr_location, Register result,
1350 void ClampUint8(Register output_reg, Register input_reg);
1352 void ClampDoubleToUint8(Register result_reg,
1353 DwVfpRegister input_reg,
1354 LowDwVfpRegister double_scratch);
1357 void LoadInstanceDescriptors(Register map, Register descriptors);
1358 void EnumLength(Register dst, Register map);
1359 void NumberOfOwnDescriptors(Register dst, Register map);
1360 void LoadAccessor(Register dst, Register holder, int accessor_index,
1361 AccessorComponent accessor);
1363 template<typename Field>
1364 void DecodeField(Register dst, Register src) {
1365 Ubfx(dst, src, Field::kShift, Field::kSize);
1368 template<typename Field>
1369 void DecodeField(Register reg) {
1370 DecodeField<Field>(reg, reg);
1373 template<typename Field>
1374 void DecodeFieldToSmi(Register dst, Register src) {
1375 static const int shift = Field::kShift;
1376 static const int mask = Field::kMask >> shift << kSmiTagSize;
1377 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
1378 STATIC_ASSERT(kSmiTag == 0);
1379 if (shift < kSmiTagSize) {
1380 mov(dst, Operand(src, LSL, kSmiTagSize - shift));
1381 and_(dst, dst, Operand(mask));
1382 } else if (shift > kSmiTagSize) {
1383 mov(dst, Operand(src, LSR, shift - kSmiTagSize));
1384 and_(dst, dst, Operand(mask));
1386 and_(dst, src, Operand(mask));
1390 template<typename Field>
1391 void DecodeFieldToSmi(Register reg) {
1392 DecodeField<Field>(reg, reg);
1395 // Activation support.
1396 void EnterFrame(StackFrame::Type type,
1397 bool load_constant_pool_pointer_reg = false);
1398 // Returns the pc offset at which the frame ends.
1399 int LeaveFrame(StackFrame::Type type);
1401 // Expects object in r0 and returns map with validated enum cache
1402 // in r0. Assumes that any other register can be used as a scratch.
1403 void CheckEnumCache(Register null_value, Label* call_runtime);
1405 // AllocationMemento support. Arrays may have an associated
1406 // AllocationMemento object that can be checked for in order to pretransition
1408 // On entry, receiver_reg should point to the array object.
1409 // scratch_reg gets clobbered.
1410 // If allocation info is present, condition flags are set to eq.
1411 void TestJSArrayForAllocationMemento(Register receiver_reg,
1412 Register scratch_reg,
1413 Label* no_memento_found);
1415 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1416 Register scratch_reg,
1417 Label* memento_found) {
1418 Label no_memento_found;
1419 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1421 b(eq, memento_found);
1422 bind(&no_memento_found);
1425 // Jumps to found label if a prototype map has dictionary elements.
1426 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1427 Register scratch1, Label* found);
1430 void CallCFunctionHelper(Register function,
1431 int num_reg_arguments,
1432 int num_double_arguments);
1434 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
1436 // Helper functions for generating invokes.
1437 void InvokePrologue(const ParameterCount& expected,
1438 const ParameterCount& actual,
1439 Handle<Code> code_constant,
1442 bool* definitely_mismatches,
1444 const CallWrapper& call_wrapper);
1446 void InitializeNewString(Register string,
1448 Heap::RootListIndex map_index,
1452 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1453 void InNewSpace(Register object,
1455 Condition cond, // eq for new space, ne otherwise.
1458 // Helper for finding the mark bits for an address. Afterwards, the
1459 // bitmap register points at the word with the mark bits and the mask
1460 // the position of the first bit. Leaves addr_reg unchanged.
1461 inline void GetMarkBits(Register addr_reg,
1462 Register bitmap_reg,
1465 // Compute memory operands for safepoint stack slots.
1466 static int SafepointRegisterStackIndex(int reg_code);
1467 MemOperand SafepointRegisterSlot(Register reg);
1468 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1470 // Loads the constant pool pointer (pp) register.
1471 void LoadConstantPoolPointerRegister();
1473 bool generating_stub_;
1475 // This handle will be patched with the code object on installation.
1476 Handle<Object> code_object_;
1478 // Needs access to SafepointRegisterStackIndex for compiled frame
1480 friend class StandardFrame;
1484 // The code patcher is used to patch (typically) small parts of code e.g. for
1485 // debugging and other types of instrumentation. When using the code patcher
1486 // the exact number of bytes specified must be emitted. It is not legal to emit
1487 // relocation information. If any of these constraints are violated it causes
1488 // an assertion to fail.
1496 CodePatcher(byte* address,
1498 FlushICache flush_cache = FLUSH);
1499 virtual ~CodePatcher();
1501 // Macro assembler to emit code.
1502 MacroAssembler* masm() { return &masm_; }
1504 // Emit an instruction directly.
1505 void Emit(Instr instr);
1507 // Emit an address directly.
1508 void Emit(Address addr);
1510 // Emit the condition part of an instruction leaving the rest of the current
1511 // instruction unchanged.
1512 void EmitCondition(Condition cond);
1515 byte* address_; // The address of the code being patched.
1516 int size_; // Number of bytes of the expected patch size.
1517 MacroAssembler masm_; // Macro assembler used to generate the code.
1518 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1522 // -----------------------------------------------------------------------------
1523 // Static helper functions.
1525 inline MemOperand ContextOperand(Register context, int index) {
1526 return MemOperand(context, Context::SlotOffset(index));
1530 inline MemOperand GlobalObjectOperand() {
1531 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
#ifdef GENERATED_CODE_COVERAGE
// Coverage builds emit a stop with the source location before every
// macro-assembler call so generated-code coverage can be attributed.
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
1545 } } // namespace v8::internal
1547 #endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_